├── .gitattributes
├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ ├── feature_request.md
│ └── help-wanted.md
├── PULL_REQUEST_TEMPLATE.md
└── workflows
│ ├── deployer.yml
│ ├── master_cleaner.yml
│ ├── monthly-tagger.yml
│ ├── tester.yml
│ └── tutorial_exporter.yml
├── .gitignore
├── .pylintrc
├── CITATION.cff
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE.rst
├── README.md
├── SECURITY.md
├── code_formatter.sh
├── docs
├── Makefile
├── source
│ ├── _LICENSE.rst
│ ├── _cite.rst
│ ├── _contributing.rst
│ ├── _installation.rst
│ ├── _rst
│ │ ├── _code.rst
│ │ ├── adaptive_function
│ │ │ ├── AdaptiveActivationFunctionInterface.rst
│ │ │ ├── AdaptiveCELU.rst
│ │ │ ├── AdaptiveELU.rst
│ │ │ ├── AdaptiveExp.rst
│ │ │ ├── AdaptiveGELU.rst
│ │ │ ├── AdaptiveMish.rst
│ │ │ ├── AdaptiveReLU.rst
│ │ │ ├── AdaptiveSIREN.rst
│ │ │ ├── AdaptiveSiLU.rst
│ │ │ ├── AdaptiveSigmoid.rst
│ │ │ ├── AdaptiveSoftmax.rst
│ │ │ ├── AdaptiveSoftmin.rst
│ │ │ └── AdaptiveTanh.rst
│ │ ├── callback
│ │ │ ├── adaptive_refinment_callback.rst
│ │ │ ├── linear_weight_update_callback.rst
│ │ │ ├── optimizer_callback.rst
│ │ │ └── processing_callback.rst
│ │ ├── condition
│ │ │ ├── condition.rst
│ │ │ ├── condition_interface.rst
│ │ │ ├── data_condition.rst
│ │ │ ├── domain_equation_condition.rst
│ │ │ ├── input_equation_condition.rst
│ │ │ └── input_target_condition.rst
│ │ ├── data
│ │ │ ├── data_module.rst
│ │ │ └── dataset.rst
│ │ ├── domain
│ │ │ ├── cartesian.rst
│ │ │ ├── difference_domain.rst
│ │ │ ├── domain.rst
│ │ │ ├── ellipsoid.rst
│ │ │ ├── exclusion_domain.rst
│ │ │ ├── intersection_domain.rst
│ │ │ ├── operation_interface.rst
│ │ │ ├── simplex.rst
│ │ │ └── union_domain.rst
│ │ ├── equation
│ │ │ ├── equation.rst
│ │ │ ├── equation_factory.rst
│ │ │ ├── equation_interface.rst
│ │ │ └── system_equation.rst
│ │ ├── graph
│ │ │ ├── graph.rst
│ │ │ ├── graph_builder.rst
│ │ │ ├── knn_graph.rst
│ │ │ ├── label_batch.rst
│ │ │ └── radius_graph.rst
│ │ ├── label_tensor.rst
│ │ ├── loss
│ │ │ ├── loss_interface.rst
│ │ │ ├── lploss.rst
│ │ │ ├── ntk_weighting.rst
│ │ │ ├── powerloss.rst
│ │ │ ├── scalar_weighting.rst
│ │ │ └── weighting_interface.rst
│ │ ├── model
│ │ │ ├── average_neural_operator.rst
│ │ │ ├── block
│ │ │ │ ├── average_neural_operator_block.rst
│ │ │ │ ├── convolution.rst
│ │ │ │ ├── convolution_interface.rst
│ │ │ │ ├── enhanced_linear.rst
│ │ │ │ ├── fourier_block.rst
│ │ │ │ ├── fourier_embedding.rst
│ │ │ │ ├── gno_block.rst
│ │ │ │ ├── low_rank_block.rst
│ │ │ │ ├── orthogonal.rst
│ │ │ │ ├── pbc_embedding.rst
│ │ │ │ ├── pod_block.rst
│ │ │ │ ├── rbf_block.rst
│ │ │ │ ├── residual.rst
│ │ │ │ └── spectral.rst
│ │ │ ├── deeponet.rst
│ │ │ ├── feed_forward.rst
│ │ │ ├── fourier_integral_kernel.rst
│ │ │ ├── fourier_neural_operator.rst
│ │ │ ├── graph_neural_operator.rst
│ │ │ ├── graph_neural_operator_integral_kernel.rst
│ │ │ ├── kernel_neural_operator.rst
│ │ │ ├── low_rank_neural_operator.rst
│ │ │ ├── mionet.rst
│ │ │ ├── multi_feed_forward.rst
│ │ │ ├── residual_feed_forward.rst
│ │ │ └── spline.rst
│ │ ├── operator.rst
│ │ ├── optim
│ │ │ ├── optimizer_interface.rst
│ │ │ ├── scheduler_interface.rst
│ │ │ ├── torch_optimizer.rst
│ │ │ └── torch_scheduler.rst
│ │ ├── problem
│ │ │ ├── abstract_problem.rst
│ │ │ ├── inverse_problem.rst
│ │ │ ├── parametric_problem.rst
│ │ │ ├── spatial_problem.rst
│ │ │ ├── time_dependent_problem.rst
│ │ │ └── zoo
│ │ │ │ ├── advection.rst
│ │ │ │ ├── allen_cahn.rst
│ │ │ │ ├── diffusion_reaction.rst
│ │ │ │ ├── helmholtz.rst
│ │ │ │ ├── inverse_poisson_2d_square.rst
│ │ │ │ ├── poisson_2d_square.rst
│ │ │ │ └── supervised_problem.rst
│ │ ├── solver
│ │ │ ├── ensemble_solver
│ │ │ │ ├── ensemble_pinn.rst
│ │ │ │ ├── ensemble_solver_interface.rst
│ │ │ │ └── ensemble_supervised.rst
│ │ │ ├── garom.rst
│ │ │ ├── multi_solver_interface.rst
│ │ │ ├── physics_informed_solver
│ │ │ │ ├── causal_pinn.rst
│ │ │ │ ├── competitive_pinn.rst
│ │ │ │ ├── gradient_pinn.rst
│ │ │ │ ├── pinn.rst
│ │ │ │ ├── pinn_interface.rst
│ │ │ │ ├── rba_pinn.rst
│ │ │ │ └── self_adaptive_pinn.rst
│ │ │ ├── single_solver_interface.rst
│ │ │ ├── solver_interface.rst
│ │ │ └── supervised_solver
│ │ │ │ ├── reduced_order_model.rst
│ │ │ │ ├── supervised.rst
│ │ │ │ └── supervised_solver_interface.rst
│ │ └── trainer.rst
│ ├── _team.rst
│ ├── _templates
│ │ └── layout.html
│ ├── _tutorial.rst
│ ├── conf.py
│ ├── index.rst
│ ├── index_files
│ │ ├── PINA_API.png
│ │ ├── PINA_logo.png
│ │ ├── fast_mathlab.png
│ │ ├── foudings.png
│ │ ├── output_21_0.png
│ │ ├── output_8_0.png
│ │ ├── tutorial_13_01.png
│ │ ├── tutorial_13_3.png
│ │ ├── tutorial_15_0.png
│ │ ├── tutorial_32_0.png
│ │ ├── tutorial_36_0.png
│ │ ├── tutorial_5_0.png
│ │ └── university_dev_pina.png
│ └── tutorials
│ │ ├── tutorial1
│ │ └── tutorial.html
│ │ ├── tutorial10
│ │ └── tutorial.html
│ │ ├── tutorial11
│ │ └── tutorial.html
│ │ ├── tutorial12
│ │ └── tutorial.html
│ │ ├── tutorial13
│ │ └── tutorial.html
│ │ ├── tutorial14
│ │ └── tutorial.html
│ │ ├── tutorial15
│ │ └── tutorial.html
│ │ ├── tutorial16
│ │ └── tutorial.html
│ │ ├── tutorial17
│ │ └── tutorial.html
│ │ ├── tutorial18
│ │ └── tutorial.html
│ │ ├── tutorial19
│ │ └── tutorial.html
│ │ ├── tutorial2
│ │ └── tutorial.html
│ │ ├── tutorial20
│ │ └── tutorial.html
│ │ ├── tutorial21
│ │ └── tutorial.html
│ │ ├── tutorial3
│ │ └── tutorial.html
│ │ ├── tutorial4
│ │ └── tutorial.html
│ │ ├── tutorial5
│ │ └── tutorial.html
│ │ ├── tutorial6
│ │ └── tutorial.html
│ │ ├── tutorial7
│ │ └── tutorial.html
│ │ ├── tutorial8
│ │ └── tutorial.html
│ │ └── tutorial9
│ │ └── tutorial.html
└── sphinx_extensions
│ └── paramref_extension.py
├── joss
├── paper.bib
├── paper.md
├── pina_logo.png
├── pinn_base.pdf
├── pinn_feat.pdf
└── pinn_learn.pdf
├── pina
├── __init__.py
├── adaptive_function
│ ├── __init__.py
│ ├── adaptive_function.py
│ └── adaptive_function_interface.py
├── adaptive_functions
│ └── __init__.py
├── callback
│ ├── __init__.py
│ ├── adaptive_refinement_callback.py
│ ├── linear_weight_update_callback.py
│ ├── optimizer_callback.py
│ └── processing_callback.py
├── callbacks
│ └── __init__.py
├── collector.py
├── condition
│ ├── __init__.py
│ ├── condition.py
│ ├── condition_interface.py
│ ├── data_condition.py
│ ├── domain_equation_condition.py
│ ├── input_equation_condition.py
│ └── input_target_condition.py
├── data
│ ├── __init__.py
│ ├── data_module.py
│ └── dataset.py
├── domain
│ ├── __init__.py
│ ├── cartesian.py
│ ├── difference_domain.py
│ ├── domain_interface.py
│ ├── ellipsoid.py
│ ├── exclusion_domain.py
│ ├── intersection_domain.py
│ ├── operation_interface.py
│ ├── simplex.py
│ └── union_domain.py
├── equation
│ ├── __init__.py
│ ├── equation.py
│ ├── equation_factory.py
│ ├── equation_interface.py
│ └── system_equation.py
├── geometry
│ └── __init__.py
├── graph.py
├── label_tensor.py
├── loss
│ ├── __init__.py
│ ├── loss_interface.py
│ ├── lp_loss.py
│ ├── ntk_weighting.py
│ ├── power_loss.py
│ ├── scalar_weighting.py
│ └── weighting_interface.py
├── model
│ ├── __init__.py
│ ├── average_neural_operator.py
│ ├── block
│ │ ├── __init__.py
│ │ ├── average_neural_operator_block.py
│ │ ├── convolution.py
│ │ ├── convolution_2d.py
│ │ ├── embedding.py
│ │ ├── fourier_block.py
│ │ ├── gno_block.py
│ │ ├── integral.py
│ │ ├── low_rank_block.py
│ │ ├── orthogonal.py
│ │ ├── pod_block.py
│ │ ├── rbf_block.py
│ │ ├── residual.py
│ │ ├── spectral.py
│ │ ├── stride.py
│ │ └── utils_convolution.py
│ ├── deeponet.py
│ ├── feed_forward.py
│ ├── fourier_neural_operator.py
│ ├── graph_neural_operator.py
│ ├── kernel_neural_operator.py
│ ├── layers
│ │ └── __init__.py
│ ├── low_rank_neural_operator.py
│ ├── multi_feed_forward.py
│ └── spline.py
├── operator.py
├── operators.py
├── optim
│ ├── __init__.py
│ ├── optimizer_interface.py
│ ├── scheduler_interface.py
│ ├── torch_optimizer.py
│ └── torch_scheduler.py
├── plotter.py
├── problem
│ ├── __init__.py
│ ├── abstract_problem.py
│ ├── inverse_problem.py
│ ├── parametric_problem.py
│ ├── spatial_problem.py
│ ├── time_dependent_problem.py
│ └── zoo
│ │ ├── __init__.py
│ │ ├── advection.py
│ │ ├── allen_cahn.py
│ │ ├── diffusion_reaction.py
│ │ ├── helmholtz.py
│ │ ├── inverse_poisson_2d_square.py
│ │ ├── poisson_2d_square.py
│ │ └── supervised_problem.py
├── solver
│ ├── __init__.py
│ ├── ensemble_solver
│ │ ├── __init__.py
│ │ ├── ensemble_pinn.py
│ │ ├── ensemble_solver_interface.py
│ │ └── ensemble_supervised.py
│ ├── garom.py
│ ├── physics_informed_solver
│ │ ├── __init__.py
│ │ ├── causal_pinn.py
│ │ ├── competitive_pinn.py
│ │ ├── gradient_pinn.py
│ │ ├── pinn.py
│ │ ├── pinn_interface.py
│ │ ├── rba_pinn.py
│ │ └── self_adaptive_pinn.py
│ ├── solver.py
│ └── supervised_solver
│ │ ├── __init__.py
│ │ ├── reduced_order_model.py
│ │ ├── supervised.py
│ │ └── supervised_solver_interface.py
├── solvers
│ ├── __init__.py
│ └── pinns
│ │ └── __init__.py
├── trainer.py
└── utils.py
├── pyproject.toml
├── readme
├── PINA_API.png
└── pina_logo.png
├── tests
├── test_adaptive_function.py
├── test_blocks
│ ├── test_convolution.py
│ ├── test_embedding.py
│ ├── test_fourier.py
│ ├── test_low_rank_block.py
│ ├── test_orthogonal.py
│ ├── test_pod.py
│ ├── test_rbf.py
│ ├── test_residual.py
│ └── test_spectral_convolution.py
├── test_callback
│ ├── test_adaptive_refinement_callback.py
│ ├── test_linear_weight_update_callback.py
│ ├── test_metric_tracker.py
│ ├── test_optimizer_callback.py
│ └── test_progress_bar.py
├── test_collector.py
├── test_condition.py
├── test_data
│ ├── test_data_module.py
│ ├── test_graph_dataset.py
│ └── test_tensor_dataset.py
├── test_equations
│ ├── test_equation.py
│ └── test_system_equation.py
├── test_geometry
│ ├── test_cartesian.py
│ ├── test_difference.py
│ ├── test_ellipsoid.py
│ ├── test_exclusion.py
│ ├── test_intersection.py
│ ├── test_simplex.py
│ └── test_union.py
├── test_graph.py
├── test_label_tensor
│ ├── test_label_tensor.py
│ └── test_label_tensor_01.py
├── test_loss
│ ├── test_lp_loss.py
│ └── test_power_loss.py
├── test_model
│ ├── test_average_neural_operator.py
│ ├── test_deeponet.py
│ ├── test_feed_forward.py
│ ├── test_fourier_neural_operator.py
│ ├── test_graph_neural_operator.py
│ ├── test_kernel_neural_operator.py
│ ├── test_low_rank_neural_operator.py
│ ├── test_mionet.py
│ ├── test_residual_feed_forward.py
│ └── test_spline.py
├── test_operator.py
├── test_optimizer.py
├── test_package.py
├── test_problem.py
├── test_problem_zoo
│ ├── test_advection.py
│ ├── test_allen_cahn.py
│ ├── test_diffusion_reaction.py
│ ├── test_helmholtz.py
│ ├── test_inverse_poisson_2d_square.py
│ ├── test_poisson_2d_square.py
│ └── test_supervised_problem.py
├── test_scheduler.py
├── test_solver
│ ├── test_causal_pinn.py
│ ├── test_competitive_pinn.py
│ ├── test_ensemble_pinn.py
│ ├── test_ensemble_supervised_solver.py
│ ├── test_garom.py
│ ├── test_gradient_pinn.py
│ ├── test_pinn.py
│ ├── test_rba_pinn.py
│ ├── test_reduced_order_model_solver.py
│ ├── test_self_adaptive_pinn.py
│ └── test_supervised_solver.py
├── test_utils.py
└── test_weighting
│ ├── test_ntk_weighting.py
│ └── test_standard_weighting.py
├── tutorials
├── README.md
├── TUTORIAL_GUIDELINES.md
├── static
│ ├── API_color.png
│ ├── deep_ensemble.png
│ ├── logging.png
│ ├── neural_operator.png
│ ├── pina_logo.png
│ └── pina_wokflow.png
├── tutorial1
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial10
│ ├── data
│ │ ├── Data_KS.mat
│ │ └── Data_KS2.mat
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial11
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial12
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial13
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial14
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial15
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial16
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial17
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial18
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial19
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial2
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial20
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial21
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial3
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial4
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial5
│ ├── Data_Darcy.mat
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial6
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial7
│ ├── data
│ │ ├── pinn_solution_0.5_0.5
│ │ └── pts_0.5_0.5
│ ├── tutorial.ipynb
│ └── tutorial.py
├── tutorial8
│ ├── tutorial.ipynb
│ └── tutorial.py
└── tutorial9
│ ├── tutorial.ipynb
│ └── tutorial.py
└── utils
└── mathlab_versioning.py
/.gitattributes:
--------------------------------------------------------------------------------
1 | tutorials/** linguist-vendored=true
2 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | The piece of code that reproduces the bug.
15 |
16 | **Expected behavior**
17 | A clear and concise description of what you expected to happen.
18 |
19 | **Output**
20 | The obtained output. Please include the entire error trace.
21 |
22 | **Additional context**
23 | Add any other context about the problem here.
24 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: ''
5 | labels: enhancement
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/help-wanted.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Help wanted
3 | about: Ask for help using the package
4 | title: ''
5 | labels: help wanted
6 | assignees: ''
7 |
8 | ---
9 |
10 | **The objective**
11 | A clear description of the purpose of your application.
12 |
13 | **What you have already tried**
14 | The snippet of code you have already tried in order to obtain the desired outcome.
15 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | ## Description
2 |
3 |
4 |
5 | This PR fixes #ISSUE_NUMBER.
6 |
7 | ## Checklist
8 |
9 | - [ ] Code follows the project’s [Code Style Guidelines](https://github.com/mathLab/PINA/blob/master/CONTRIBUTING.md#code-style--guidelines)
10 | - [ ] Tests have been added or updated
11 | - [ ] Documentation has been updated if necessary
12 | - [ ] Pull request is linked to an open issue
13 |
--------------------------------------------------------------------------------
/.github/workflows/deployer.yml:
--------------------------------------------------------------------------------
1 | name: "Deployer"
2 |
3 | on:
4 | push:
5 | tags:
6 | - "*"
7 |
8 | jobs:
9 |
10 | docs: #######################################################################
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v4
14 |
15 | - name: Install Python dependencies
16 | run: python3 -m pip install .[doc]
17 |
18 | - name: Build Documentation
19 | run: |
20 | make html
21 | working-directory: docs/
22 |
23 | - name: Deploy
24 | uses: peaceiris/actions-gh-pages@v3
25 | with:
26 | github_token: ${{ secrets.GITHUB_TOKEN }}
27 | #deploy_key: ${{ secrets.DEPLOY_PRIVATE_KEY }}
28 | publish_dir: ./docs/build/html
29 | allow_empty_commit: true
30 |
31 | release_github: #############################################################
32 | runs-on: ubuntu-latest
33 | permissions:
34 | contents: write
35 | steps:
36 | - uses: actions/checkout@v4
37 | - uses: ncipollo/release-action@v1
38 | with:
39 | token: ${{ secrets.GITHUB_TOKEN }}
40 |
41 | pypi: #######################################################################
42 | runs-on: ubuntu-latest
43 | steps:
44 | - uses: actions/checkout@v4
45 |
46 | - name: Install build
47 | run: >-
48 | python -m pip install build --user
49 |
50 | - name: Build a binary wheel and a source tarball
51 | run: >-
52 | python -m build --sdist --wheel --outdir dist/ .
53 |
54 | - name: Publish distribution to PyPI
55 | if: startsWith(github.ref, 'refs/tags')
56 | uses: pypa/gh-action-pypi-publish@release/v1
57 | with:
58 | password: ${{ secrets.PYPI_API_TOKEN }}
--------------------------------------------------------------------------------
/.github/workflows/master_cleaner.yml:
--------------------------------------------------------------------------------
1 | name: Master Cleaner
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 |
8 | jobs:
9 | formatter:
10 | name: runner / black
11 | runs-on: ubuntu-latest
12 | steps:
13 | - uses: actions/checkout@v4
14 |
15 | - uses: psf/black@stable
16 | with:
17 | src: "./pina"
18 |
19 | - name: Create Pull Request
20 | uses: peter-evans/create-pull-request@v3
21 | with:
22 | token: ${{ secrets.GITHUB_TOKEN }}
23 | title: "Format Python code with psf/black push"
24 | commit-message: ":art: Format Python code with psf/black"
25 | body: |
26 | There appear to be some python formatting errors in ${{ github.sha }}. This pull request
27 | uses the [psf/black](https://github.com/psf/black) formatter to fix these issues.
28 | base: ${{ github.head_ref }} # Creates pull request onto pull request or commit branch
29 | branch: actions/black
--------------------------------------------------------------------------------
/.github/workflows/monthly-tagger.yml:
--------------------------------------------------------------------------------
1 | name: "Monthly Tagger"
2 |
3 | on:
4 | schedule:
5 | - cron: '20 2 1 * *'
6 |
7 | jobs:
8 |
9 | test:
10 | runs-on: ${{ matrix.os }}
11 | strategy:
12 | matrix:
13 | os: [windows-latest, macos-latest, ubuntu-latest]
14 | python-version: [3.9, '3.10', '3.11', '3.12', '3.13']
15 | steps:
16 | - uses: actions/checkout@v2
17 | - name: Set up Python
18 | uses: actions/setup-python@v2
19 | with:
20 | python-version: ${{ matrix.python-version }}
21 | - name: Install Python dependencies
22 | run: |
23 | python3 -m pip install --upgrade pip
24 | python3 -m pip install .[test]
25 | - name: Test with pytest
26 | run: |
27 | python3 -m pytest
28 |
29 | monthly_tag:
30 | runs-on: ubuntu-latest
31 | needs: test
32 | steps:
33 | - uses: actions/checkout@v4
34 | with:
35 | token: ${{ secrets.NDEMO_PAT_TOKEN }}
36 |
37 | - name: Create and push the tag
38 | run: |
39 | python utils/mathlab_versioning.py set --only-date "post$(date +%y%m)"
40 | VERS=$(python utils/mathlab_versioning.py get)
41 | git config --global user.name 'Monthly Tag bot'
42 | git config --global user.email 'mtbot@noreply.github.com'
43 | git add pyproject.toml
44 | git commit -m "monthly version $VERS"
45 | git tag -a "v$VERS" -m "Monthly version $VERS"
46 | git push origin "v$VERS"
47 |
--------------------------------------------------------------------------------
/.github/workflows/tester.yml:
--------------------------------------------------------------------------------
1 | name: "Testing Pull Request"
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - "master"
7 | - "dev"
8 |
9 | jobs:
10 | unittests: #################################################################
11 | runs-on: ${{ matrix.os }}
12 | strategy:
13 | fail-fast: false
14 | matrix:
15 | os: [windows-latest, macos-latest, ubuntu-latest]
16 | python-version: [3.9, '3.10', '3.11', '3.12', '3.13']
17 |
18 | steps:
19 | - uses: actions/checkout@v4
20 | - uses: actions/setup-python@v5
21 | with:
22 | python-version: ${{ matrix.python-version }}
23 |
24 | - name: Install Python dependencies
25 | run: |
26 | python3 -m pip install --upgrade pip
27 | python3 -m pip install .[test]
28 |
29 | - name: Test with pytest
30 | run: |
31 | python3 -m pytest
32 |
33 | linter: ####################################################################
34 | runs-on: ubuntu-latest
35 | steps:
36 | - uses: actions/checkout@v4
37 |
38 | - name: Run Black formatter (check mode)
39 | uses: psf/black@stable
40 | with:
41 | src: "./pina"
42 |
43 | testdocs: ##################################################################
44 | runs-on: ubuntu-latest
45 | steps:
46 | - uses: actions/checkout@v4
47 |
48 | - name: Install Python dependencies
49 | run: python3 -m pip install .[doc]
50 |
51 | - name: Build Documentation
52 | run: |
53 | make html SPHINXOPTS+='-W'
54 | working-directory: docs/
55 |
56 | coverage: ##################################################################
57 | runs-on: ubuntu-latest
58 |
59 | steps:
60 | - uses: actions/checkout@v4
61 |
62 | - name: Install Python dependencies
63 | run: |
64 | python3 -m pip install --upgrade pip
65 | python3 -m pip install .[test]
66 |
67 | - name: Generate coverage report
68 | run: |
69 | python3 -m pytest --cov-report term --cov-report xml:cobertura.xml --cov=pina
70 |
71 | - name: Produce the coverage report
72 | uses: insightsengineering/coverage-action@v2
73 | with:
74 | path: ./cobertura.xml
75 | threshold: 80.123
76 | fail: true
77 | publish: true
78 | coverage-summary-title: "Code Coverage Summary"
79 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | **__pycache__/
3 | **.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
98 | __pypackages__/
99 |
100 | # Celery stuff
101 | celerybeat-schedule
102 | celerybeat.pid
103 |
104 | # SageMath parsed files
105 | *.sage.py
106 |
107 | # Environments
108 | .env
109 | .venv
110 | env/
111 | venv/
112 | ENV/
113 | env.bak/
114 | venv.bak/
115 |
116 | # Spyder project settings
117 | .spyderproject
118 | .spyproject
119 |
120 | # Rope project settings
121 | .ropeproject
122 |
123 | # mkdocs documentation
124 | /site
125 |
126 | # mypy
127 | .mypy_cache/
128 | .dmypy.json
129 | dmypy.json
130 |
131 | # Pyre type checker
132 | .pyre/
133 |
134 | # pytype static type analyzer
135 | .pytype/
136 |
137 | # Cython debug symbols
138 | cython_debug/
139 |
140 | # Lightning logs dir
141 | **lightning_logs
142 |
143 | # Tutorial logs dir
144 | **tutorial_logs
145 |
146 | # tmp dir
147 | **tmp*
148 |
149 | # Avoid add of DS_Store files
150 | **.DS_Store
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: "1.2.0"
2 | authors:
3 | - family-names: Coscia
4 | given-names: Dario
5 | orcid: "https://orcid.org/0000-0001-8833-6833"
6 | - family-names: Ivagnes
7 | given-names: Anna
8 | orcid: "https://orcid.org/0000-0002-2369-4493"
9 | - family-names: Demo
10 | given-names: Nicola
11 | orcid: "https://orcid.org/0000-0003-3107-9738"
12 | - family-names: Rozza
13 | given-names: Gianluigi
14 | orcid: "https://orcid.org/0000-0002-0810-8812"
15 | doi: 10.5281/zenodo.8163732
16 | message: If you use this software, please cite our article in the
17 | Journal of Open Source Software.
18 | preferred-citation:
19 | authors:
20 | - family-names: Coscia
21 | given-names: Dario
22 | orcid: "https://orcid.org/0000-0001-8833-6833"
23 | - family-names: Ivagnes
24 | given-names: Anna
25 | orcid: "https://orcid.org/0000-0002-2369-4493"
26 | - family-names: Demo
27 | given-names: Nicola
28 | orcid: "https://orcid.org/0000-0003-3107-9738"
29 | - family-names: Rozza
30 | given-names: Gianluigi
31 | orcid: "https://orcid.org/0000-0002-0810-8812"
32 | date-published: 2023-07-19
33 | doi: 10.21105/joss.05352
34 | issn: 2475-9066
35 | issue: 87
36 | journal: Journal of Open Source Software
37 | publisher:
38 | name: Open Journals
39 | start: 5352
40 | title: Physics-Informed Neural networks for Advanced modeling
41 | type: article
42 | url: "https://joss.theoj.org/papers/10.21105/joss.05352"
43 | volume: 8
44 | title: Physics-Informed Neural networks for Advanced modeling
45 |
--------------------------------------------------------------------------------
/LICENSE.rst:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2021-current PINA contributors
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | Security and bug fixes are generally provided only for the last minor version. Fixes are released either as part of the next minor version or as an on-demand patch version.
4 |
5 | Security fixes are given priority and may, on their own, be enough to warrant a new release.
6 |
7 |
8 | ## Supported Versions
9 |
10 |
11 | | Version | Supported |
12 | | ------- | ------------------ |
13 | | 0.2 | ✅ |
14 | | 0.1 | ✅ |
15 |
16 | ## Reporting a Vulnerability
17 |
18 | To ensure vulnerability reports reach the maintainers as quickly as possible, the preferred way is to use the ["Report a vulnerability"](https://github.com/mathLab/PINA/security/advisories/new) button under the "Security" tab of the associated GitHub project. This creates a private communication channel between the reporter and the maintainers.
19 |
--------------------------------------------------------------------------------
/code_formatter.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | #######################################
4 |
5 | required_command="black"
6 | code_directories=("pina" "tests")
7 |
8 | #######################################
9 |
10 | # Test for required program
11 | if ! command -v $required_command >/dev/null 2>&1; then
12 | echo "I require $required_command but it's not installed. Install dev dependencies."
13 | echo "Aborting." >&2
14 | exit 1
15 | fi
16 |
17 | # Run black formatter
18 | for dir in "${code_directories[@]}"; do
19 | python -m black --line-length 80 "$dir"
20 | done
--------------------------------------------------------------------------------
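A minimal usage sketch for the formatter script above, assuming `black` is provided by the `dev` extra (as the script's error message suggests):

    $ pip install -e ".[dev]"   # from the repository root; installs the dev dependencies, including black
    $ bash code_formatter.sh    # runs black (line length 80) over the pina/ and tests/ directories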
/docs/source/_LICENSE.rst:
--------------------------------------------------------------------------------
1 | License
2 | ==============
3 |
4 | .. include:: ../../LICENSE.rst
5 |
6 |
--------------------------------------------------------------------------------
/docs/source/_cite.rst:
--------------------------------------------------------------------------------
1 | Cite PINA
2 | ==============
3 |
4 | If **PINA** has been significant in your research, and you would like to acknowledge the project in your academic publication,
5 | we suggest citing the following paper:
6 |
7 | *Coscia, D., Ivagnes, A., Demo, N., & Rozza, G. (2023). Physics-Informed Neural networks for Advanced modeling. Journal of Open Source Software, 8(87), 5352.*
8 |
9 | Or in BibTex format
10 |
11 | .. code:: bibtex
12 |
13 | @article{coscia2023physics,
14 | title={Physics-Informed Neural networks for Advanced modeling},
15 | author={Coscia, Dario and Ivagnes, Anna and Demo, Nicola and Rozza, Gianluigi},
16 | journal={Journal of Open Source Software},
17 | volume={8},
18 | number={87},
19 | pages={5352},
20 | year={2023}
21 | }
--------------------------------------------------------------------------------
/docs/source/_installation.rst:
--------------------------------------------------------------------------------
1 | Installation
2 | ============
3 |
4 | **PINA** requires `torch`, `lightning`, `torch_geometric`, and `matplotlib`.
5 |
6 | Installing via PIP
7 | __________________
8 |
9 | Mac and Linux users can install pre-built binary packages using pip.
10 | To install the package just type:
11 |
12 | .. code-block:: bash
13 |
14 | $ pip install pina-mathlab
15 |
16 | To uninstall the package:
17 |
18 | .. code-block:: bash
19 |
20 | $ pip uninstall pina-mathlab
21 |
22 | Installing from source
23 | ______________________
24 | The official distribution is on GitHub, and you can clone the repository using
25 |
26 | .. code-block:: bash
27 |
28 | $ git clone https://github.com/mathLab/PINA
29 |
30 | To install the package just type:
31 |
32 | .. code-block:: bash
33 |
34 | $ pip install -e .
35 |
36 |
37 | Install with extra packages
38 | ____________________________
39 |
40 | To install the extra dependencies required to run the tests or the tutorials, use the following command:
41 |
42 | .. code-block:: bash
43 |
44 | $ pip install "pina-mathlab[extras]"
45 |
46 |
47 | Available extras include:
48 |
49 | * `dev` for development purposes; use this if you want to contribute.
50 | * `test` for running the tests locally.
51 | * `doc` for building the documentation locally.
52 | * `tutorial` for running the tutorials.
53 |
--------------------------------------------------------------------------------
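A note on the extras above: assuming `dev`, `test`, `doc`, and `tutorial` are declared as optional dependencies of `pina-mathlab`, a specific extra is selected with pip's bracket syntax, for example:

    $ pip install "pina-mathlab[dev]"       # development tools for contributors
    $ pip install "pina-mathlab[test,doc]"  # several extras can be combined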
/docs/source/_rst/adaptive_function/AdaptiveActivationFunctionInterface.rst:
--------------------------------------------------------------------------------
1 | AdaptiveActivationFunctionInterface
2 | =======================================
3 |
4 | .. currentmodule:: pina.adaptive_function.adaptive_function_interface
5 |
6 | .. automodule:: pina.adaptive_function.adaptive_function_interface
7 | :members:
8 | :show-inheritance:
9 |
--------------------------------------------------------------------------------
/docs/source/_rst/adaptive_function/AdaptiveCELU.rst:
--------------------------------------------------------------------------------
1 | AdaptiveCELU
2 | ============
3 |
4 | .. currentmodule:: pina.adaptive_function.adaptive_function
5 |
6 | .. autoclass:: AdaptiveCELU
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: AdaptiveActivationFunctionInterface
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/adaptive_function/AdaptiveELU.rst:
--------------------------------------------------------------------------------
1 | AdaptiveELU
2 | ===========
3 |
4 | .. currentmodule:: pina.adaptive_function.adaptive_function
5 |
6 | .. autoclass:: AdaptiveELU
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: AdaptiveActivationFunctionInterface
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/adaptive_function/AdaptiveExp.rst:
--------------------------------------------------------------------------------
1 | AdaptiveExp
2 | ===========
3 |
4 | .. currentmodule:: pina.adaptive_function.adaptive_function
5 |
6 | .. autoclass:: AdaptiveExp
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: AdaptiveActivationFunctionInterface
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/adaptive_function/AdaptiveGELU.rst:
--------------------------------------------------------------------------------
1 | AdaptiveGELU
2 | ============
3 |
4 | .. currentmodule:: pina.adaptive_function.adaptive_function
5 |
6 | .. autoclass:: AdaptiveGELU
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: AdaptiveActivationFunctionInterface
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/adaptive_function/AdaptiveMish.rst:
--------------------------------------------------------------------------------
1 | AdaptiveMish
2 | ============
3 |
4 | .. currentmodule:: pina.adaptive_function.adaptive_function
5 |
6 | .. autoclass:: AdaptiveMish
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: AdaptiveActivationFunctionInterface
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/adaptive_function/AdaptiveReLU.rst:
--------------------------------------------------------------------------------
1 | AdaptiveReLU
2 | ============
3 |
4 | .. currentmodule:: pina.adaptive_function.adaptive_function
5 |
6 | .. autoclass:: AdaptiveReLU
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: AdaptiveActivationFunctionInterface
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/adaptive_function/AdaptiveSIREN.rst:
--------------------------------------------------------------------------------
1 | AdaptiveSIREN
2 | =============
3 |
4 | .. currentmodule:: pina.adaptive_function.adaptive_function
5 |
6 | .. autoclass:: AdaptiveSIREN
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: AdaptiveActivationFunctionInterface
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/adaptive_function/AdaptiveSiLU.rst:
--------------------------------------------------------------------------------
1 | AdaptiveSiLU
2 | ============
3 |
4 | .. currentmodule:: pina.adaptive_function.adaptive_function
5 |
6 | .. autoclass:: AdaptiveSiLU
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: AdaptiveActivationFunctionInterface
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/adaptive_function/AdaptiveSigmoid.rst:
--------------------------------------------------------------------------------
1 | AdaptiveSigmoid
2 | ===============
3 |
4 | .. currentmodule:: pina.adaptive_function.adaptive_function
5 |
6 | .. autoclass:: AdaptiveSigmoid
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: AdaptiveActivationFunctionInterface
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/adaptive_function/AdaptiveSoftmax.rst:
--------------------------------------------------------------------------------
1 | AdaptiveSoftmax
2 | ===============
3 |
4 | .. currentmodule:: pina.adaptive_function.adaptive_function
5 |
6 | .. autoclass:: AdaptiveSoftmax
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: AdaptiveActivationFunctionInterface
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/adaptive_function/AdaptiveSoftmin.rst:
--------------------------------------------------------------------------------
1 | AdaptiveSoftmin
2 | ===============
3 |
4 | .. currentmodule:: pina.adaptive_function.adaptive_function
5 |
6 | .. autoclass:: AdaptiveSoftmin
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: AdaptiveActivationFunctionInterface
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/adaptive_function/AdaptiveTanh.rst:
--------------------------------------------------------------------------------
1 | AdaptiveTanh
2 | ============
3 |
4 | .. currentmodule:: pina.adaptive_function.adaptive_function
5 |
6 | .. autoclass:: AdaptiveTanh
7 | :members:
8 | :show-inheritance:
9 | :inherited-members: AdaptiveActivationFunctionInterface
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/callback/adaptive_refinment_callback.rst:
--------------------------------------------------------------------------------
1 | Refinement callbacks
2 | =======================
3 |
4 | .. currentmodule:: pina.callback.adaptive_refinement_callback
5 | .. autoclass:: R3Refinement
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/callback/linear_weight_update_callback.rst:
--------------------------------------------------------------------------------
1 | Weighting callbacks
2 | ========================
3 |
4 | .. currentmodule:: pina.callback.linear_weight_update_callback
5 | .. autoclass:: LinearWeightUpdate
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/callback/optimizer_callback.rst:
--------------------------------------------------------------------------------
1 | Optimizer callbacks
2 | =====================
3 |
4 | .. currentmodule:: pina.callback.optimizer_callback
5 | .. autoclass:: SwitchOptimizer
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/callback/processing_callback.rst:
--------------------------------------------------------------------------------
1 | Processing callbacks
2 | =======================
3 |
4 | .. currentmodule:: pina.callback.processing_callback
5 | .. autoclass:: MetricTracker
6 | :members:
7 | :show-inheritance:
8 |
9 | .. autoclass:: PINAProgressBar
10 | :members:
11 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/condition/condition.rst:
--------------------------------------------------------------------------------
1 | Conditions
2 | =============
3 | .. currentmodule:: pina.condition.condition
4 |
5 | .. autoclass:: Condition
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/condition/condition_interface.rst:
--------------------------------------------------------------------------------
1 | ConditionInterface
2 | ======================
3 | .. currentmodule:: pina.condition.condition_interface
4 |
5 | .. autoclass:: ConditionInterface
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/condition/data_condition.rst:
--------------------------------------------------------------------------------
1 | Data Conditions
2 | ==================
3 | .. currentmodule:: pina.condition.data_condition
4 |
5 | .. autoclass:: DataCondition
6 | :members:
7 | :show-inheritance:
8 |
9 | .. autoclass:: GraphDataCondition
10 | :members:
11 | :show-inheritance:
12 |
13 | .. autoclass:: TensorDataCondition
14 | :members:
15 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/condition/domain_equation_condition.rst:
--------------------------------------------------------------------------------
1 | Domain Equation Condition
2 | ===========================
3 | .. currentmodule:: pina.condition.domain_equation_condition
4 |
5 | .. autoclass:: DomainEquationCondition
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/condition/input_equation_condition.rst:
--------------------------------------------------------------------------------
1 | Input Equation Condition
2 | ===========================
3 | .. currentmodule:: pina.condition.input_equation_condition
4 |
5 | .. autoclass:: InputEquationCondition
6 | :members:
7 | :show-inheritance:
8 |
9 | .. autoclass:: InputTensorEquationCondition
10 | :members:
11 | :show-inheritance:
12 |
13 | .. autoclass:: InputGraphEquationCondition
14 | :members:
15 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/condition/input_target_condition.rst:
--------------------------------------------------------------------------------
1 | Input Target Condition
2 | ===========================
3 | .. currentmodule:: pina.condition.input_target_condition
4 |
5 | .. autoclass:: InputTargetCondition
6 | :members:
7 | :show-inheritance:
8 |
9 | .. autoclass:: TensorInputTensorTargetCondition
10 | :members:
11 | :show-inheritance:
12 |
13 | .. autoclass:: TensorInputGraphTargetCondition
14 | :members:
15 | :show-inheritance:
16 |
17 | .. autoclass:: GraphInputTensorTargetCondition
18 | :members:
19 | :show-inheritance:
20 |
21 | .. autoclass:: GraphInputGraphTargetCondition
22 | :members:
23 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/data/data_module.rst:
--------------------------------------------------------------------------------
1 | DataModule
2 | ======================
3 | .. currentmodule:: pina.data.data_module
4 |
5 | .. autoclass:: Collator
6 | :members:
7 | :show-inheritance:
8 |
9 | .. autoclass:: PinaDataModule
10 | :members:
11 | :show-inheritance:
12 |
13 | .. autoclass:: PinaSampler
14 | :members:
15 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/data/dataset.rst:
--------------------------------------------------------------------------------
1 | Dataset
2 | ======================
3 | .. currentmodule:: pina.data.dataset
4 |
5 | .. autoclass:: PinaDataset
6 | :members:
7 | :show-inheritance:
8 |
9 | .. autoclass:: PinaDatasetFactory
10 | :members:
11 | :show-inheritance:
12 |
13 | .. autoclass:: PinaGraphDataset
14 | :members:
15 | :show-inheritance:
16 |
17 | .. autoclass:: PinaTensorDataset
18 | :members:
19 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/domain/cartesian.rst:
--------------------------------------------------------------------------------
1 | CartesianDomain
2 | ======================
3 | .. currentmodule:: pina.domain.cartesian
4 |
5 | .. automodule:: pina.domain.cartesian
6 |
7 | .. autoclass:: CartesianDomain
8 | :members:
9 | :show-inheritance:
10 | :noindex:
11 |
--------------------------------------------------------------------------------
/docs/source/_rst/domain/difference_domain.rst:
--------------------------------------------------------------------------------
1 | Difference
2 | ======================
3 | .. currentmodule:: pina.domain.difference_domain
4 |
5 | .. automodule:: pina.domain.difference_domain
6 |
7 | .. autoclass:: Difference
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/domain/domain.rst:
--------------------------------------------------------------------------------
1 | Domain
2 | ===========
3 | .. currentmodule:: pina.domain.domain_interface
4 |
5 | .. automodule:: pina.domain.domain_interface
6 |
7 | .. autoclass:: DomainInterface
8 | :members:
9 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/domain/ellipsoid.rst:
--------------------------------------------------------------------------------
1 | EllipsoidDomain
2 | ======================
3 | .. currentmodule:: pina.domain.ellipsoid
4 |
5 | .. automodule:: pina.domain.ellipsoid
6 |
7 | .. autoclass:: EllipsoidDomain
8 | :members:
9 | :show-inheritance:
10 | :noindex:
11 |
--------------------------------------------------------------------------------
/docs/source/_rst/domain/exclusion_domain.rst:
--------------------------------------------------------------------------------
1 | Exclusion
2 | ======================
3 | .. currentmodule:: pina.domain.exclusion_domain
4 |
5 | .. automodule:: pina.domain.exclusion_domain
6 |
7 | .. autoclass:: Exclusion
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/domain/intersection_domain.rst:
--------------------------------------------------------------------------------
1 | Intersection
2 | ======================
3 | .. currentmodule:: pina.domain.intersection_domain
4 |
5 | .. automodule:: pina.domain.intersection_domain
6 |
7 | .. autoclass:: Intersection
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/domain/operation_interface.rst:
--------------------------------------------------------------------------------
1 | OperationInterface
2 | ======================
3 | .. currentmodule:: pina.domain.operation_interface
4 |
5 | .. automodule:: pina.domain.operation_interface
6 |
7 | .. autoclass:: OperationInterface
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/domain/simplex.rst:
--------------------------------------------------------------------------------
1 | SimplexDomain
2 | ======================
3 | .. currentmodule:: pina.domain.simplex
4 |
5 | .. automodule:: pina.domain.simplex
6 |
7 | .. autoclass:: SimplexDomain
8 | :members:
9 | :show-inheritance:
10 | :noindex:
11 |
--------------------------------------------------------------------------------
/docs/source/_rst/domain/union_domain.rst:
--------------------------------------------------------------------------------
1 | Union
2 | ======================
3 | .. currentmodule:: pina.domain.union_domain
4 |
5 | .. automodule:: pina.domain.union_domain
6 |
7 | .. autoclass:: Union
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/equation/equation.rst:
--------------------------------------------------------------------------------
1 | Equation
2 | ==========
3 |
4 | .. currentmodule:: pina.equation.equation
5 | .. autoclass:: Equation
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/equation/equation_factory.rst:
--------------------------------------------------------------------------------
1 | Equation Factory
2 | ==================
3 |
4 | .. currentmodule:: pina.equation.equation_factory
5 | .. autoclass:: FixedValue
6 | :members:
7 | :show-inheritance:
8 |
9 | .. autoclass:: FixedGradient
10 | :members:
11 | :show-inheritance:
12 |
13 | .. autoclass:: FixedFlux
14 | :members:
15 | :show-inheritance:
16 |
17 | .. autoclass:: Laplace
18 | :members:
19 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/equation/equation_interface.rst:
--------------------------------------------------------------------------------
1 | Equation Interface
2 | ====================
3 |
4 | .. currentmodule:: pina.equation.equation_interface
5 | .. autoclass:: EquationInterface
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/equation/system_equation.rst:
--------------------------------------------------------------------------------
1 | System Equation
2 | =================
3 |
4 | .. currentmodule:: pina.equation.system_equation
5 | .. autoclass:: SystemEquation
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/graph/graph.rst:
--------------------------------------------------------------------------------
1 | Graph
2 | ===========
3 | .. currentmodule:: pina.graph
4 |
5 |
6 | .. autoclass:: Graph
7 | :members:
8 | :private-members:
9 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/graph/graph_builder.rst:
--------------------------------------------------------------------------------
1 | GraphBuilder
2 | ==============
3 | .. currentmodule:: pina.graph
4 |
5 |
6 | .. autoclass:: GraphBuilder
7 | :members:
8 | :private-members:
9 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/graph/knn_graph.rst:
--------------------------------------------------------------------------------
1 | KNNGraph
2 | ===========
3 | .. currentmodule:: pina.graph
4 |
5 |
6 | .. autoclass:: KNNGraph
7 | :members:
8 | :private-members:
9 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/graph/label_batch.rst:
--------------------------------------------------------------------------------
1 | LabelBatch
2 | ===========
3 | .. currentmodule:: pina.graph
4 |
5 |
6 | .. autoclass:: LabelBatch
7 | :members:
8 | :private-members:
9 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/graph/radius_graph.rst:
--------------------------------------------------------------------------------
1 | RadiusGraph
2 | =============
3 | .. currentmodule:: pina.graph
4 |
5 |
6 | .. autoclass:: RadiusGraph
7 | :members:
8 | :private-members:
9 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/label_tensor.rst:
--------------------------------------------------------------------------------
1 | LabelTensor
2 | ===========
3 | .. currentmodule:: pina.label_tensor
4 |
5 |
6 | .. autoclass:: LabelTensor
7 | :members:
8 | :private-members:
9 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/loss/loss_interface.rst:
--------------------------------------------------------------------------------
1 | LossInterface
2 | ===============
3 | .. currentmodule:: pina.loss.loss_interface
4 |
5 | .. automodule:: pina.loss.loss_interface
6 |
7 | .. autoclass:: LossInterface
8 | :members:
9 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/loss/lploss.rst:
--------------------------------------------------------------------------------
1 | LpLoss
2 | ===============
3 | .. currentmodule:: pina.loss.lp_loss
4 |
5 | .. autoclass:: LpLoss
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/loss/ntk_weighting.rst:
--------------------------------------------------------------------------------
1 | NeuralTangentKernelWeighting
2 | =============================
3 | .. currentmodule:: pina.loss.ntk_weighting
4 |
5 | .. automodule:: pina.loss.ntk_weighting
6 |
7 | .. autoclass:: NeuralTangentKernelWeighting
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/loss/powerloss.rst:
--------------------------------------------------------------------------------
1 | PowerLoss
2 | ====================
3 | .. currentmodule:: pina.loss.power_loss
4 |
5 | .. autoclass:: PowerLoss
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/loss/scalar_weighting.rst:
--------------------------------------------------------------------------------
1 | ScalarWeighting
2 | ===================
3 | .. currentmodule:: pina.loss.scalar_weighting
4 |
5 | .. automodule:: pina.loss.scalar_weighting
6 |
7 | .. autoclass:: ScalarWeighting
8 | :members:
9 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/loss/weighting_interface.rst:
--------------------------------------------------------------------------------
1 | WeightingInterface
2 | ===================
3 | .. currentmodule:: pina.loss.weighting_interface
4 |
5 | .. automodule:: pina.loss.weighting_interface
6 |
7 | .. autoclass:: WeightingInterface
8 | :members:
9 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/model/average_neural_operator.rst:
--------------------------------------------------------------------------------
1 | Averaging Neural Operator
2 | ==============================
3 | .. currentmodule:: pina.model.average_neural_operator
4 |
5 | .. autoclass:: AveragingNeuralOperator
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/model/block/average_neural_operator_block.rst:
--------------------------------------------------------------------------------
1 | Averaging Neural Operator Block
2 | ==================================
3 | .. currentmodule:: pina.model.block.average_neural_operator_block
4 |
5 | .. autoclass:: AVNOBlock
6 | :members:
7 | :show-inheritance:
8 | :noindex:
9 |
--------------------------------------------------------------------------------
/docs/source/_rst/model/block/convolution.rst:
--------------------------------------------------------------------------------
1 | Continuous Convolution Block
2 | ===============================
3 | .. currentmodule:: pina.model.block.convolution_2d
4 |
5 | .. autoclass:: ContinuousConvBlock
6 | :members:
7 | :show-inheritance:
8 | :noindex:
9 |
--------------------------------------------------------------------------------
/docs/source/_rst/model/block/convolution_interface.rst:
--------------------------------------------------------------------------------
1 | Continuous Convolution Interface
2 | ==================================
3 | .. currentmodule:: pina.model.block.convolution
4 |
5 | .. autoclass:: BaseContinuousConv
6 | :members:
7 | :show-inheritance:
8 | :noindex:
9 |
--------------------------------------------------------------------------------
/docs/source/_rst/model/block/enhanced_linear.rst:
--------------------------------------------------------------------------------
1 | EnhancedLinear Block
2 | =====================
3 | .. currentmodule:: pina.model.block.residual
4 |
5 | .. autoclass:: EnhancedLinear
6 | :members:
7 | :show-inheritance:
8 | :noindex:
--------------------------------------------------------------------------------
/docs/source/_rst/model/block/fourier_block.rst:
--------------------------------------------------------------------------------
1 | Fourier Neural Operator Block
2 | ======================================
3 | .. currentmodule:: pina.model.block.fourier_block
4 |
5 |
6 | .. autoclass:: FourierBlock1D
7 | :members:
8 | :show-inheritance:
9 |
10 | .. autoclass:: FourierBlock2D
11 | :members:
12 | :show-inheritance:
13 |
14 | .. autoclass:: FourierBlock3D
15 | :members:
16 | :show-inheritance:
17 |
--------------------------------------------------------------------------------
/docs/source/_rst/model/block/fourier_embedding.rst:
--------------------------------------------------------------------------------
1 | Fourier Feature Embedding
2 | =======================================
3 | .. currentmodule:: pina.model.block.embedding
4 |
5 | .. autoclass:: FourierFeatureEmbedding
6 | :members:
7 | :show-inheritance:
8 |
9 |
--------------------------------------------------------------------------------
/docs/source/_rst/model/block/gno_block.rst:
--------------------------------------------------------------------------------
1 | Graph Neural Operator Block
2 | ===============================
3 | .. currentmodule:: pina.model.block.gno_block
4 |
5 | .. autoclass:: GNOBlock
6 | :members:
7 | :show-inheritance:
8 | :noindex:
9 |
--------------------------------------------------------------------------------
/docs/source/_rst/model/block/low_rank_block.rst:
--------------------------------------------------------------------------------
1 | Low Rank Neural Operator Block
2 | =================================
3 | .. currentmodule:: pina.model.block.low_rank_block
4 |
5 | .. autoclass:: LowRankBlock
6 | :members:
7 | :show-inheritance:
8 | :noindex:
9 |
--------------------------------------------------------------------------------
/docs/source/_rst/model/block/orthogonal.rst:
--------------------------------------------------------------------------------
1 | Orthogonal Block
2 | ======================
3 | .. currentmodule:: pina.model.block.orthogonal
4 |
5 | .. autoclass:: OrthogonalBlock
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/model/block/pbc_embedding.rst:
--------------------------------------------------------------------------------
1 | Periodic Boundary Condition Embedding
2 | =======================================
3 | .. currentmodule:: pina.model.block.embedding
4 |
5 | .. autoclass:: PeriodicBoundaryEmbedding
6 | :members:
7 | :show-inheritance:
8 |
9 |
--------------------------------------------------------------------------------
/docs/source/_rst/model/block/pod_block.rst:
--------------------------------------------------------------------------------
1 | Proper Orthogonal Decomposition Block
2 | ============================================
3 | .. currentmodule:: pina.model.block.pod_block
4 |
5 | .. autoclass:: PODBlock
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/model/block/rbf_block.rst:
--------------------------------------------------------------------------------
 1 | Radial Basis Function Block
2 | =============================
3 | .. currentmodule:: pina.model.block.rbf_block
4 |
5 | .. autoclass:: RBFBlock
6 | :members:
7 | :show-inheritance:
8 |
--------------------------------------------------------------------------------
/docs/source/_rst/model/block/residual.rst:
--------------------------------------------------------------------------------
1 | Residual Block
2 | ===================
3 | .. currentmodule:: pina.model.block.residual
4 |
5 | .. autoclass:: ResidualBlock
6 | :members:
7 | :show-inheritance:
8 |
--------------------------------------------------------------------------------
/docs/source/_rst/model/block/spectral.rst:
--------------------------------------------------------------------------------
1 | Spectral Convolution Block
2 | ============================
3 | .. currentmodule:: pina.model.block.spectral
4 |
5 | .. autoclass:: SpectralConvBlock1D
6 | :members:
7 | :show-inheritance:
8 |
9 | .. autoclass:: SpectralConvBlock2D
10 | :members:
11 | :show-inheritance:
12 |
13 | .. autoclass:: SpectralConvBlock3D
14 | :members:
15 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/model/deeponet.rst:
--------------------------------------------------------------------------------
1 | DeepONet
2 | ===========
3 | .. currentmodule:: pina.model.deeponet
4 |
5 | .. autoclass:: DeepONet
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/model/feed_forward.rst:
--------------------------------------------------------------------------------
1 | FeedForward
2 | ======================
3 | .. currentmodule:: pina.model.feed_forward
4 |
5 | .. autoclass:: FeedForward
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/model/fourier_integral_kernel.rst:
--------------------------------------------------------------------------------
1 | FourierIntegralKernel
2 | =========================
3 | .. currentmodule:: pina.model.fourier_neural_operator
4 |
5 | .. autoclass:: FourierIntegralKernel
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/model/fourier_neural_operator.rst:
--------------------------------------------------------------------------------
1 | FNO
2 | ===========
3 | .. currentmodule:: pina.model.fourier_neural_operator
4 |
5 | .. autoclass:: FNO
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/model/graph_neural_operator.rst:
--------------------------------------------------------------------------------
1 | GraphNeuralOperator
2 | =======================
3 | .. currentmodule:: pina.model.graph_neural_operator
4 |
5 | .. autoclass:: GraphNeuralOperator
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/model/graph_neural_operator_integral_kernel.rst:
--------------------------------------------------------------------------------
1 | GraphNeuralKernel
2 | =======================
3 | .. currentmodule:: pina.model.graph_neural_operator
4 |
5 | .. autoclass:: GraphNeuralKernel
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/model/kernel_neural_operator.rst:
--------------------------------------------------------------------------------
1 | KernelNeuralOperator
2 | =======================
3 | .. currentmodule:: pina.model.kernel_neural_operator
4 |
5 | .. autoclass:: KernelNeuralOperator
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/model/low_rank_neural_operator.rst:
--------------------------------------------------------------------------------
1 | Low Rank Neural Operator
2 | ==============================
3 | .. currentmodule:: pina.model.low_rank_neural_operator
4 |
5 | .. autoclass:: LowRankNeuralOperator
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/model/mionet.rst:
--------------------------------------------------------------------------------
1 | MIONet
2 | ===========
3 | .. currentmodule:: pina.model.deeponet
4 |
5 | .. autoclass:: MIONet
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/model/multi_feed_forward.rst:
--------------------------------------------------------------------------------
1 | MultiFeedForward
2 | ==================
3 | .. currentmodule:: pina.model.multi_feed_forward
4 |
5 | .. autoclass:: MultiFeedForward
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/model/residual_feed_forward.rst:
--------------------------------------------------------------------------------
1 | ResidualFeedForward
2 | ======================
3 | .. currentmodule:: pina.model.feed_forward
4 |
5 | .. autoclass:: ResidualFeedForward
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/model/spline.rst:
--------------------------------------------------------------------------------
1 | Spline
2 | ========
3 | .. currentmodule:: pina.model.spline
4 |
5 | .. autoclass:: Spline
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/operator.rst:
--------------------------------------------------------------------------------
1 | Operators
2 | ===========
3 |
4 | .. currentmodule:: pina.operator
5 |
6 | .. automodule:: pina.operator
7 | :members:
8 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/optim/optimizer_interface.rst:
--------------------------------------------------------------------------------
1 | Optimizer
2 | ============
3 | .. currentmodule:: pina.optim.optimizer_interface
4 |
5 | .. autoclass:: Optimizer
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/optim/scheduler_interface.rst:
--------------------------------------------------------------------------------
1 | Scheduler
2 | =============
3 | .. currentmodule:: pina.optim.scheduler_interface
4 |
5 | .. autoclass:: Scheduler
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/optim/torch_optimizer.rst:
--------------------------------------------------------------------------------
1 | TorchOptimizer
2 | ===============
3 | .. currentmodule:: pina.optim.torch_optimizer
4 |
5 | .. autoclass:: TorchOptimizer
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/optim/torch_scheduler.rst:
--------------------------------------------------------------------------------
1 | TorchScheduler
2 | ===============
3 | .. currentmodule:: pina.optim.torch_scheduler
4 |
5 | .. autoclass:: TorchScheduler
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/problem/abstract_problem.rst:
--------------------------------------------------------------------------------
1 | AbstractProblem
2 | ===============
3 | .. currentmodule:: pina.problem.abstract_problem
4 |
5 | .. automodule:: pina.problem.abstract_problem
6 |
7 | .. autoclass:: AbstractProblem
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/problem/inverse_problem.rst:
--------------------------------------------------------------------------------
1 | InverseProblem
2 | ==============
3 | .. currentmodule:: pina.problem.inverse_problem
4 |
5 | .. automodule:: pina.problem.inverse_problem
6 |
7 | .. autoclass:: InverseProblem
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/problem/parametric_problem.rst:
--------------------------------------------------------------------------------
1 | ParametricProblem
2 | ====================
3 | .. currentmodule:: pina.problem.parametric_problem
4 |
5 | .. automodule:: pina.problem.parametric_problem
6 |
7 | .. autoclass:: ParametricProblem
8 | :members:
9 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/problem/spatial_problem.rst:
--------------------------------------------------------------------------------
1 | SpatialProblem
2 | ==============
3 | .. currentmodule:: pina.problem.spatial_problem
4 |
5 | .. automodule:: pina.problem.spatial_problem
6 |
7 | .. autoclass:: SpatialProblem
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/problem/time_dependent_problem.rst:
--------------------------------------------------------------------------------
1 | TimeDependentProblem
2 | ====================
3 | .. currentmodule:: pina.problem.time_dependent_problem
4 |
5 | .. automodule:: pina.problem.time_dependent_problem
6 |
7 | .. autoclass:: TimeDependentProblem
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/problem/zoo/advection.rst:
--------------------------------------------------------------------------------
1 | AdvectionProblem
2 | ==================
3 | .. currentmodule:: pina.problem.zoo.advection
4 |
5 | .. automodule:: pina.problem.zoo.advection
6 |
7 | .. autoclass:: AdvectionProblem
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/problem/zoo/allen_cahn.rst:
--------------------------------------------------------------------------------
1 | AllenCahnProblem
2 | ==================
3 | .. currentmodule:: pina.problem.zoo.allen_cahn
4 |
5 | .. automodule:: pina.problem.zoo.allen_cahn
6 |
7 | .. autoclass:: AllenCahnProblem
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/problem/zoo/diffusion_reaction.rst:
--------------------------------------------------------------------------------
1 | DiffusionReactionProblem
2 | =========================
3 | .. currentmodule:: pina.problem.zoo.diffusion_reaction
4 |
5 | .. automodule:: pina.problem.zoo.diffusion_reaction
6 |
7 | .. autoclass:: DiffusionReactionProblem
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/problem/zoo/helmholtz.rst:
--------------------------------------------------------------------------------
1 | HelmholtzProblem
2 | ==================
3 | .. currentmodule:: pina.problem.zoo.helmholtz
4 |
5 | .. automodule:: pina.problem.zoo.helmholtz
6 |
7 | .. autoclass:: HelmholtzProblem
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/problem/zoo/inverse_poisson_2d_square.rst:
--------------------------------------------------------------------------------
1 | InversePoisson2DSquareProblem
2 | ==============================
3 | .. currentmodule:: pina.problem.zoo.inverse_poisson_2d_square
4 |
5 | .. automodule:: pina.problem.zoo.inverse_poisson_2d_square
6 |
7 | .. autoclass:: InversePoisson2DSquareProblem
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/problem/zoo/poisson_2d_square.rst:
--------------------------------------------------------------------------------
1 | Poisson2DSquareProblem
2 | ========================
3 | .. currentmodule:: pina.problem.zoo.poisson_2d_square
4 |
5 | .. automodule:: pina.problem.zoo.poisson_2d_square
6 |
7 | .. autoclass:: Poisson2DSquareProblem
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/problem/zoo/supervised_problem.rst:
--------------------------------------------------------------------------------
1 | SupervisedProblem
2 | ==================
3 | .. currentmodule:: pina.problem.zoo.supervised_problem
4 |
5 | .. automodule:: pina.problem.zoo.supervised_problem
6 |
7 | .. autoclass:: SupervisedProblem
8 | :members:
9 | :show-inheritance:
10 |
--------------------------------------------------------------------------------
/docs/source/_rst/solver/ensemble_solver/ensemble_pinn.rst:
--------------------------------------------------------------------------------
1 | DeepEnsemblePINN
2 | ==================
3 | .. currentmodule:: pina.solver.ensemble_solver.ensemble_pinn
4 |
5 | .. autoclass:: DeepEnsemblePINN
6 | :show-inheritance:
7 | :members:
8 |
9 |
--------------------------------------------------------------------------------
/docs/source/_rst/solver/ensemble_solver/ensemble_solver_interface.rst:
--------------------------------------------------------------------------------
1 | DeepEnsembleSolverInterface
2 | =============================
3 | .. currentmodule:: pina.solver.ensemble_solver.ensemble_solver_interface
4 |
5 | .. autoclass:: DeepEnsembleSolverInterface
6 | :show-inheritance:
7 | :members:
8 |
9 |
--------------------------------------------------------------------------------
/docs/source/_rst/solver/ensemble_solver/ensemble_supervised.rst:
--------------------------------------------------------------------------------
1 | DeepEnsembleSupervisedSolver
2 | =============================
3 | .. currentmodule:: pina.solver.ensemble_solver.ensemble_supervised
4 |
5 | .. autoclass:: DeepEnsembleSupervisedSolver
6 | :show-inheritance:
7 | :members:
8 |
9 |
--------------------------------------------------------------------------------
/docs/source/_rst/solver/garom.rst:
--------------------------------------------------------------------------------
1 | GAROM
2 | ======
3 | .. currentmodule:: pina.solver.garom
4 |
5 | .. autoclass:: GAROM
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/solver/multi_solver_interface.rst:
--------------------------------------------------------------------------------
1 | MultiSolverInterface
2 | ======================
3 | .. currentmodule:: pina.solver.solver
4 |
5 | .. autoclass:: MultiSolverInterface
6 | :show-inheritance:
7 | :members:
8 |
9 |
--------------------------------------------------------------------------------
/docs/source/_rst/solver/physics_informed_solver/causal_pinn.rst:
--------------------------------------------------------------------------------
1 | CausalPINN
2 | ==============
3 | .. currentmodule:: pina.solver.physics_informed_solver.causal_pinn
4 |
5 | .. autoclass:: CausalPINN
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/solver/physics_informed_solver/competitive_pinn.rst:
--------------------------------------------------------------------------------
1 | CompetitivePINN
2 | =================
3 | .. currentmodule:: pina.solver.physics_informed_solver.competitive_pinn
4 |
5 | .. autoclass:: CompetitivePINN
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/solver/physics_informed_solver/gradient_pinn.rst:
--------------------------------------------------------------------------------
1 | GradientPINN
2 | ==============
3 | .. currentmodule:: pina.solver.physics_informed_solver.gradient_pinn
4 |
5 | .. autoclass:: GradientPINN
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/solver/physics_informed_solver/pinn.rst:
--------------------------------------------------------------------------------
1 | PINN
2 | ======
3 | .. currentmodule:: pina.solver.physics_informed_solver.pinn
4 |
5 | .. autoclass:: PINN
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/solver/physics_informed_solver/pinn_interface.rst:
--------------------------------------------------------------------------------
1 | PINNInterface
2 | =================
3 | .. currentmodule:: pina.solver.physics_informed_solver.pinn_interface
4 |
5 | .. autoclass:: PINNInterface
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/solver/physics_informed_solver/rba_pinn.rst:
--------------------------------------------------------------------------------
1 | RBAPINN
2 | ========
3 | .. currentmodule:: pina.solver.physics_informed_solver.rba_pinn
4 |
5 | .. autoclass:: RBAPINN
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/solver/physics_informed_solver/self_adaptive_pinn.rst:
--------------------------------------------------------------------------------
1 | SelfAdaptivePINN
2 | ==================
3 | .. currentmodule:: pina.solver.physics_informed_solver.self_adaptive_pinn
4 |
5 | .. autoclass:: SelfAdaptivePINN
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/solver/single_solver_interface.rst:
--------------------------------------------------------------------------------
1 | SingleSolverInterface
2 | ======================
3 | .. currentmodule:: pina.solver.solver
4 |
5 | .. autoclass:: SingleSolverInterface
6 | :show-inheritance:
7 | :members:
8 |
9 |
--------------------------------------------------------------------------------
/docs/source/_rst/solver/solver_interface.rst:
--------------------------------------------------------------------------------
1 | SolverInterface
2 | =================
3 | .. currentmodule:: pina.solver.solver
4 |
5 | .. autoclass:: SolverInterface
6 | :show-inheritance:
7 | :members:
8 |
9 |
--------------------------------------------------------------------------------
/docs/source/_rst/solver/supervised_solver/reduced_order_model.rst:
--------------------------------------------------------------------------------
1 | ReducedOrderModelSolver
2 | ==========================
3 | .. currentmodule:: pina.solver.supervised_solver.reduced_order_model
4 |
5 | .. autoclass:: ReducedOrderModelSolver
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/solver/supervised_solver/supervised.rst:
--------------------------------------------------------------------------------
1 | SupervisedSolver
2 | ===================
3 | .. currentmodule:: pina.solver.supervised_solver.supervised
4 |
5 | .. autoclass:: SupervisedSolver
6 | :members:
7 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_rst/solver/supervised_solver/supervised_solver_interface.rst:
--------------------------------------------------------------------------------
1 | SupervisedSolverInterface
2 | ==========================
3 | .. currentmodule:: pina.solver.supervised_solver.supervised_solver_interface
4 |
5 | .. autoclass:: SupervisedSolverInterface
6 | :show-inheritance:
7 | :members:
8 |
9 |
--------------------------------------------------------------------------------
/docs/source/_rst/trainer.rst:
--------------------------------------------------------------------------------
1 | Trainer
2 | ===========
3 |
4 | .. automodule:: pina.trainer
5 |
6 | .. autoclass:: Trainer
7 | :members:
8 | :show-inheritance:
--------------------------------------------------------------------------------
/docs/source/_team.rst:
--------------------------------------------------------------------------------
1 | PINA Team
2 | ==============
3 |
4 | **PINA** is currently developed in the `SISSA MathLab `_, in collaboration with `Fast Computing `_.
5 |
6 | .. figure:: index_files/fast_mathlab.png
7 | :align: center
8 | :width: 500
9 |
10 | A significant part of **PINA** has been written either as a by-product of other funded projects, or by people in university-funded positions.
11 | There are probably many such projects that have contributed to the development of **PINA**, and we are very grateful for this support!
12 | In particular, we gratefully acknowledge the following sources of support:
13 |
14 | * `H2020 ERC CoG 2015 AROMA-CFD project 681447 `_, P.I. `Prof. Gianluigi Rozza `_ at `SISSA MathLab `_.
15 | * `Next Generation EU `_ for the environmental and digital transition of Italy.
16 |
17 | .. figure:: index_files/foudings.png
18 | :align: center
19 | :width: 500
20 |
21 | We also acknowledge the contribution of `Maria Strazzullo `_ in the early development of the package. A special
22 | thanks goes to all the students and researchers from different universities who contributed to the package.
23 | Finally, we warmly thank all the
24 | `contributors `_, who are the real heart of **PINA**!
25 |
26 | .. figure:: index_files/university_dev_pina.png
27 | :align: center
28 | :width: 500
29 |
--------------------------------------------------------------------------------
/docs/source/_templates/layout.html:
--------------------------------------------------------------------------------
1 | {% extends "!layout.html" %}
2 |
3 | {%- block footer %}
4 |
17 | {%- endblock %}
--------------------------------------------------------------------------------
/docs/source/_tutorial.rst:
--------------------------------------------------------------------------------
1 | 🚀 Welcome to the PINA Tutorials!
2 | ==================================
3 |
4 |
 5 | In this folder we collect useful tutorials to help you understand the principles and the potential of **PINA**.
6 | Whether you're just getting started or looking to deepen your understanding, these resources are here to guide you.
7 |
8 | Getting started with PINA
9 | -------------------------
10 |
11 | - `Introductory Tutorial: A Beginner's Guide to PINA `_
12 | - `How to build a Problem in PINA `_
13 | - `Introduction to Solver classes `_
14 | - `Introduction to Trainer class `_
15 | - `Data structure for SciML: Tensor, LabelTensor, Data and Graph `_
16 | - `Building geometries with DomainInterface class `_
17 | - `Introduction to PINA Equation class `_
18 |
19 | Physics Informed Neural Networks
20 | --------------------------------
21 |
22 | - `Introductory Tutorial: Physics Informed Neural Networks with PINA `_
23 | - `Enhancing PINNs with Extra Features to solve the Poisson Problem `_
24 | - `Applying Hard Constraints in PINNs to solve the Wave Problem `_
25 | - `Applying Periodic Boundary Conditions in PINNs to solve the Helmholtz Problem `_
26 | - `Inverse Problem Solving with Physics-Informed Neural Network `_
27 | - `Learning Multiscale PDEs Using Fourier Feature Networks `_
28 | - `Learning Bifurcating PDE Solutions with Physics-Informed Deep Ensembles `_
29 |
30 | Neural Operator Learning
31 | ------------------------
32 |
33 | - `Introductory Tutorial: Neural Operator Learning with PINA `_
34 | - `Modeling 2D Darcy Flow with the Fourier Neural Operator `_
35 | - `Solving the Kuramoto-Sivashinsky Equation with Averaging Neural Operator `_
36 |
37 | Supervised Learning
38 | -------------------
39 |
40 | - `Introductory Tutorial: Supervised Learning with PINA `_
41 | - `Chemical Properties Prediction with Graph Neural Networks `_
42 | - `Unstructured Convolutional Autoencoders with Continuous Convolution `_
43 | - `Reduced Order Modeling with POD-RBF and POD-NN Approaches for Fluid Dynamics `_
44 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | :html_theme.sidebar_secondary.remove:
2 |
3 | Welcome to PINA's documentation!
4 | =======================================
5 |
6 | .. grid:: 6
7 | :gutter: 1
8 |
9 | .. grid-item::
10 |
11 | .. image:: index_files/tutorial_13_3.png
12 | :target: tutorial2/tutorial.html
13 |
14 | .. grid-item::
15 |
16 | .. image:: index_files/tutorial_32_0.png
17 | :target: tutorial4/tutorial.html
18 |
19 | .. grid-item::
20 |
21 | .. image:: index_files/tutorial_13_01.png
22 | :target: tutorial9/tutorial.html
23 |
24 | .. grid-item::
25 |
26 | .. image:: index_files/tutorial_36_0.png
27 | :target: tutorial6/tutorial.html
28 |
29 | .. grid-item::
30 |
31 | .. image:: index_files/tutorial_15_0.png
32 | :target: tutorial13/tutorial.html
33 |
34 | .. grid-item::
35 |
36 | .. image:: index_files/tutorial_5_0.png
37 | :target: tutorial10/tutorial.html
38 |
39 | .. grid:: 1 1 3 3
40 |
41 | .. grid-item::
42 | :columns: 12 12 8 8
43 |
44 | **PINA** is an open-source Python library designed to simplify and accelerate
45 | the development of Scientific Machine Learning (SciML) solutions.
46 | Built on top of `PyTorch `_, `PyTorch Lightning `_,
47 | and `PyTorch Geometric `_,
48 | PINA provides an intuitive framework for defining, experimenting with,
49 | and solving complex problems using Neural Networks,
50 | Physics-Informed Neural Networks (PINNs), Neural Operators, and more.
51 |
52 | - **Modular Architecture**: Designed with modularity in mind and relying on powerful yet composable abstractions, PINA allows users to easily plug, replace, or extend components, making experimentation and customization straightforward.
53 |
54 | - **Scalable Performance**: With native support for multi-device training, PINA handles large datasets efficiently, offering performance close to hand-crafted implementations with minimal overhead.
55 |
56 | - **Highly Flexible**: Whether you're looking for full automation or granular control, PINA adapts to your workflow. High-level abstractions simplify model definition, while expert users can dive deep to fine-tune every aspect of the training and inference process.
57 |
58 | For further information or questions about **PINA**, contact us by email.
59 |
60 | .. grid-item-card:: Contents
61 | :class-title: sd-fs-5
62 | :class-body: sd-pl-4
63 |
64 | .. toctree::
65 | :maxdepth: 1
66 |
67 | Installing <_installation>
68 | API <_rst/_code>
69 | Tutorials <_tutorial>
70 | Cite PINA <_cite.rst>
71 | Contributing <_contributing>
72 | Team & Funding <_team.rst>
73 | License <_LICENSE.rst>
74 |
75 |
76 |
77 |
78 |
--------------------------------------------------------------------------------
/docs/source/index_files/PINA_API.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/docs/source/index_files/PINA_API.png
--------------------------------------------------------------------------------
/docs/source/index_files/PINA_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/docs/source/index_files/PINA_logo.png
--------------------------------------------------------------------------------
/docs/source/index_files/fast_mathlab.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/docs/source/index_files/fast_mathlab.png
--------------------------------------------------------------------------------
/docs/source/index_files/foudings.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/docs/source/index_files/foudings.png
--------------------------------------------------------------------------------
/docs/source/index_files/output_21_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/docs/source/index_files/output_21_0.png
--------------------------------------------------------------------------------
/docs/source/index_files/output_8_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/docs/source/index_files/output_8_0.png
--------------------------------------------------------------------------------
/docs/source/index_files/tutorial_13_01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/docs/source/index_files/tutorial_13_01.png
--------------------------------------------------------------------------------
/docs/source/index_files/tutorial_13_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/docs/source/index_files/tutorial_13_3.png
--------------------------------------------------------------------------------
/docs/source/index_files/tutorial_15_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/docs/source/index_files/tutorial_15_0.png
--------------------------------------------------------------------------------
/docs/source/index_files/tutorial_32_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/docs/source/index_files/tutorial_32_0.png
--------------------------------------------------------------------------------
/docs/source/index_files/tutorial_36_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/docs/source/index_files/tutorial_36_0.png
--------------------------------------------------------------------------------
/docs/source/index_files/tutorial_5_0.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/docs/source/index_files/tutorial_5_0.png
--------------------------------------------------------------------------------
/docs/source/index_files/university_dev_pina.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/docs/source/index_files/university_dev_pina.png
--------------------------------------------------------------------------------
/docs/sphinx_extensions/paramref_extension.py:
--------------------------------------------------------------------------------
1 | from docutils import nodes
2 | from docutils.parsers.rst.roles import register_local_role
3 |
4 |
5 | def paramref_role(name, rawtext, text, lineno, inliner, options={}, content=[]):
 6 |     # Render the :paramref: target as an inline literal, dropping its leading character
7 | new_role = nodes.literal(text=text[1:])
8 | return [new_role], []
9 |
10 |
11 | def setup(app):
12 | register_local_role("paramref", paramref_role)
13 |
--------------------------------------------------------------------------------
/joss/pina_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/joss/pina_logo.png
--------------------------------------------------------------------------------
/joss/pinn_base.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/joss/pinn_base.pdf
--------------------------------------------------------------------------------
/joss/pinn_feat.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/joss/pinn_feat.pdf
--------------------------------------------------------------------------------
/joss/pinn_learn.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/joss/pinn_learn.pdf
--------------------------------------------------------------------------------
/pina/__init__.py:
--------------------------------------------------------------------------------
1 | """Module for the Pina library."""
2 |
3 | __all__ = [
4 | "Trainer",
5 | "LabelTensor",
6 | "Condition",
7 | "PinaDataModule",
8 | "Graph",
9 | "SolverInterface",
10 | "MultiSolverInterface",
11 | ]
12 |
13 | from .label_tensor import LabelTensor
14 | from .graph import Graph
15 | from .solver import SolverInterface, MultiSolverInterface
16 | from .trainer import Trainer
17 | from .condition.condition import Condition
18 | from .data import PinaDataModule
19 |
--------------------------------------------------------------------------------
/pina/adaptive_function/__init__.py:
--------------------------------------------------------------------------------
1 | """Adaptive Activation Functions Module."""
2 |
3 | __all__ = [
4 | "AdaptiveActivationFunctionInterface",
5 | "AdaptiveReLU",
6 | "AdaptiveSigmoid",
7 | "AdaptiveTanh",
8 | "AdaptiveSiLU",
9 | "AdaptiveMish",
10 | "AdaptiveELU",
11 | "AdaptiveCELU",
12 | "AdaptiveGELU",
13 | "AdaptiveSoftmin",
14 | "AdaptiveSoftmax",
15 | "AdaptiveSIREN",
16 | "AdaptiveExp",
17 | ]
18 |
19 | from .adaptive_function import (
20 | AdaptiveReLU,
21 | AdaptiveSigmoid,
22 | AdaptiveTanh,
23 | AdaptiveSiLU,
24 | AdaptiveMish,
25 | AdaptiveELU,
26 | AdaptiveCELU,
27 | AdaptiveGELU,
28 | AdaptiveSoftmin,
29 | AdaptiveSoftmax,
30 | AdaptiveSIREN,
31 | AdaptiveExp,
32 | )
33 | from .adaptive_function_interface import AdaptiveActivationFunctionInterface
34 |
--------------------------------------------------------------------------------
/pina/adaptive_functions/__init__.py:
--------------------------------------------------------------------------------
1 | """Old module for adaptive functions. Deprecated in 0.2.0."""
2 |
3 | import warnings
4 |
5 | from ..adaptive_function import *
6 | from ..utils import custom_warning_format
7 |
8 | # back-compatibility 0.1
9 | # Set the custom format for warnings
10 | warnings.formatwarning = custom_warning_format
11 | warnings.filterwarnings("always", category=DeprecationWarning)
12 | warnings.warn(
13 | "'pina.adaptive_functions' is deprecated and will be removed "
14 | "in future versions. Please use 'pina.adaptive_function' instead.",
15 | DeprecationWarning,
16 | )
17 |
--------------------------------------------------------------------------------
/pina/callback/__init__.py:
--------------------------------------------------------------------------------
1 | """Module for the Pina Callbacks."""
2 |
3 | __all__ = [
4 | "SwitchOptimizer",
5 | "R3Refinement",
6 | "MetricTracker",
7 | "PINAProgressBar",
8 | "LinearWeightUpdate",
9 | ]
10 |
11 | from .optimizer_callback import SwitchOptimizer
12 | from .adaptive_refinement_callback import R3Refinement
13 | from .processing_callback import MetricTracker, PINAProgressBar
14 | from .linear_weight_update_callback import LinearWeightUpdate
15 |
--------------------------------------------------------------------------------
/pina/callback/optimizer_callback.py:
--------------------------------------------------------------------------------
1 | """Module for the SwitchOptimizer callback."""
2 |
3 | from lightning.pytorch.callbacks import Callback
4 | from ..optim import TorchOptimizer
5 | from ..utils import check_consistency
6 |
7 |
8 | class SwitchOptimizer(Callback):
9 | """
10 | PINA Implementation of a Lightning Callback to switch optimizer during
11 | training.
12 | """
13 |
14 | def __init__(self, new_optimizers, epoch_switch):
15 | """
16 | This callback allows switching between different optimizers during
17 | training, enabling the exploration of multiple optimization strategies
18 | without interrupting the training process.
19 |
20 |         :param new_optimizers: The model optimizers to switch to. Can be a
21 |             single :class:`~pina.optim.TorchOptimizer` instance or a list of them
22 |             for multi-model solvers.
23 | :type new_optimizers: pina.optim.TorchOptimizer | list
24 | :param epoch_switch: The epoch at which the optimizer switch occurs.
25 | :type epoch_switch: int
26 |
27 | Example:
28 |             >>> switch_callback = SwitchOptimizer(new_optimizers=optimizer,
29 |             ...                                   epoch_switch=10)
30 | """
31 | super().__init__()
32 |
33 | if epoch_switch < 1:
34 |             raise ValueError("epoch_switch must be at least 1.")
35 |
36 | if not isinstance(new_optimizers, list):
37 | new_optimizers = [new_optimizers]
38 |
39 | # check type consistency
40 | for optimizer in new_optimizers:
41 | check_consistency(optimizer, TorchOptimizer)
42 | check_consistency(epoch_switch, int)
43 | # save new optimizers
44 | self._new_optimizers = new_optimizers
45 | self._epoch_switch = epoch_switch
46 |
47 | def on_train_epoch_start(self, trainer, __):
48 | """
49 | Switch the optimizer at the start of the specified training epoch.
50 |
51 | :param trainer: The trainer object managing the training process.
52 | :type trainer: pytorch_lightning.Trainer
53 |         :param __: Placeholder argument (not used).
54 |
55 | :return: None
56 | :rtype: None
57 | """
58 | if trainer.current_epoch == self._epoch_switch:
59 | optims = []
60 |
61 | for idx, optim in enumerate(self._new_optimizers):
62 | optim.hook(trainer.solver._pina_models[idx].parameters())
63 | optims.append(optim)
64 |
65 | trainer.solver._pina_optimizers = optims
66 |
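# Illustrative sketch (not part of this module): building the callback and
# using it during training. The ``TorchOptimizer(torch.optim.LBFGS, lr=1.0)``
# construction and the ``callbacks`` keyword of the PINA ``Trainer`` are
# assumptions based on the Lightning-style API, not definitions taken from
# this file.
if __name__ == "__main__":
    import torch
    from pina.optim import TorchOptimizer

    # switch from the solver's current optimizer to LBFGS at epoch 100
    lbfgs = TorchOptimizer(torch.optim.LBFGS, lr=1.0)
    switch = SwitchOptimizer(new_optimizers=lbfgs, epoch_switch=100)

    # the callback would then be handed to the trainer, e.g.
    # trainer = Trainer(solver=solver, callbacks=[switch], max_epochs=200)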
--------------------------------------------------------------------------------
/pina/callbacks/__init__.py:
--------------------------------------------------------------------------------
1 | """Old module for callbacks. Deprecated in 0.2.0."""
2 |
3 | import warnings
4 |
5 | from ..callback import *
6 | from ..utils import custom_warning_format
7 |
8 | # back-compatibility 0.1
9 | # Set the custom format for warnings
10 | warnings.formatwarning = custom_warning_format
11 | warnings.filterwarnings("always", category=DeprecationWarning)
12 | warnings.warn(
13 | "'pina.callbacks' is deprecated and will be removed "
14 | "in future versions. Please use 'pina.callback' instead.",
15 | DeprecationWarning,
16 | )
17 |
--------------------------------------------------------------------------------
/pina/condition/__init__.py:
--------------------------------------------------------------------------------
1 | """Module for PINA Conditions classes."""
2 |
3 | __all__ = [
4 | "Condition",
5 | "ConditionInterface",
6 | "DomainEquationCondition",
7 | "InputTargetCondition",
8 | "TensorInputTensorTargetCondition",
9 | "TensorInputGraphTargetCondition",
10 | "GraphInputTensorTargetCondition",
11 | "GraphInputGraphTargetCondition",
12 | "InputEquationCondition",
13 | "InputTensorEquationCondition",
14 | "InputGraphEquationCondition",
15 | "DataCondition",
16 | "GraphDataCondition",
17 | "TensorDataCondition",
18 | ]
19 |
20 | from .condition_interface import ConditionInterface
21 | from .condition import Condition
22 | from .domain_equation_condition import DomainEquationCondition
23 | from .input_target_condition import (
24 | InputTargetCondition,
25 | TensorInputTensorTargetCondition,
26 | TensorInputGraphTargetCondition,
27 | GraphInputTensorTargetCondition,
28 | GraphInputGraphTargetCondition,
29 | )
30 | from .input_equation_condition import (
31 | InputEquationCondition,
32 | InputTensorEquationCondition,
33 | InputGraphEquationCondition,
34 | )
35 | from .data_condition import (
36 | DataCondition,
37 | GraphDataCondition,
38 | TensorDataCondition,
39 | )
40 |
--------------------------------------------------------------------------------
/pina/condition/domain_equation_condition.py:
--------------------------------------------------------------------------------
1 | """Module for the DomainEquationCondition class."""
2 |
3 | from .condition_interface import ConditionInterface
4 | from ..utils import check_consistency
5 | from ..domain import DomainInterface
6 | from ..equation.equation_interface import EquationInterface
7 |
8 |
9 | class DomainEquationCondition(ConditionInterface):
10 | """
11 | Condition defined by a domain and an equation. It can be used in Physics
12 | Informed problems. Before using this condition, make sure that input data
13 | are correctly sampled from the domain.
14 | """
15 |
16 | __slots__ = ["domain", "equation"]
17 |
18 | def __init__(self, domain, equation):
19 | """
20 | Initialise the object by storing the domain and equation.
21 |
22 | :param DomainInterface domain: Domain object containing the domain data.
23 | :param EquationInterface equation: Equation object containing the
24 | equation data.
25 | """
26 | super().__init__()
27 | self.domain = domain
28 | self.equation = equation
29 |
30 | def __setattr__(self, key, value):
31 | if key == "domain":
32 | check_consistency(value, (DomainInterface, str))
33 | DomainEquationCondition.__dict__[key].__set__(self, value)
34 | elif key == "equation":
35 | check_consistency(value, (EquationInterface))
36 | DomainEquationCondition.__dict__[key].__set__(self, value)
37 |         elif key in ("_problem",):
38 | super().__setattr__(key, value)
39 |
--------------------------------------------------------------------------------
/pina/data/__init__.py:
--------------------------------------------------------------------------------
1 | """Module for data, data module, and dataset."""
2 |
3 | __all__ = ["PinaDataModule", "PinaDataset"]
4 |
5 |
6 | from .data_module import PinaDataModule
7 | from .dataset import PinaDataset
8 |
--------------------------------------------------------------------------------
/pina/domain/__init__.py:
--------------------------------------------------------------------------------
1 | """Module to create and handle domains."""
2 |
3 | __all__ = [
4 | "DomainInterface",
5 | "CartesianDomain",
6 | "EllipsoidDomain",
7 | "Union",
8 | "Intersection",
9 | "Exclusion",
10 | "Difference",
11 | "OperationInterface",
12 | "SimplexDomain",
13 | ]
14 |
15 | from .domain_interface import DomainInterface
16 | from .cartesian import CartesianDomain
17 | from .ellipsoid import EllipsoidDomain
18 | from .exclusion_domain import Exclusion
19 | from .intersection_domain import Intersection
20 | from .union_domain import Union
21 | from .difference_domain import Difference
22 | from .operation_interface import OperationInterface
23 | from .simplex import SimplexDomain
24 |
--------------------------------------------------------------------------------
/pina/domain/domain_interface.py:
--------------------------------------------------------------------------------
1 | """Module for the Domain Interface."""
2 |
3 | from abc import ABCMeta, abstractmethod
4 |
5 |
6 | class DomainInterface(metaclass=ABCMeta):
7 | """
8 | Abstract base class for geometric domains. All specific domain types should
9 | inherit from this class.
10 | """
11 |
12 | available_sampling_modes = ["random", "grid", "lh", "chebyshev", "latin"]
13 |
14 | @property
15 | @abstractmethod
16 | def sample_modes(self):
17 | """
18 | Abstract method defining sampling methods.
19 | """
20 |
21 | @property
22 | @abstractmethod
23 | def variables(self):
24 | """
25 | Abstract method returning the domain variables.
26 | """
27 |
28 | @sample_modes.setter
29 | def sample_modes(self, values):
30 | """
31 | Setter for the sample_modes property.
32 |
33 | :param values: Sampling modes to be set.
34 | :type values: str | list[str]
35 | :raises TypeError: Invalid sampling mode.
36 | """
37 | if not isinstance(values, (list, tuple)):
38 | values = [values]
39 | for value in values:
40 | if value not in DomainInterface.available_sampling_modes:
41 | raise TypeError(
42 | f"mode {value} not valid. Expected at least "
43 | "one in "
44 | f"{DomainInterface.available_sampling_modes}."
45 | )
46 |
47 | @abstractmethod
48 | def sample(self):
49 | """
50 | Abstract method for the sampling routine.
51 | """
52 |
53 | @abstractmethod
54 | def is_inside(self, point, check_border=False):
55 | """
56 | Abstract method for checking if a point is inside the domain.
57 |
58 | :param LabelTensor point: Point to be checked.
59 | :param bool check_border: If ``True``, the border is considered inside
60 | the domain. Default is ``False``.
61 | """
62 |
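# Illustrative sketch (not part of this module): a minimal concrete domain
# implementing the abstract interface above. It works on plain torch tensors
# for brevity; real PINA domains operate on LabelTensor objects and offer
# more sampling modes.
if __name__ == "__main__":
    import torch

    class UnitInterval(DomainInterface):
        """Toy 1D domain on the interval [0, 1]."""

        @property
        def sample_modes(self):
            # only uniform random sampling is implemented here
            return ["random"]

        @property
        def variables(self):
            return ["x"]

        def sample(self, n, mode="random"):
            # draw n uniform random points in [0, 1]
            return torch.rand(n, 1)

        def is_inside(self, point, check_border=False):
            if check_border:
                return bool(((point >= 0.0) & (point <= 1.0)).all())
            return bool(((point > 0.0) & (point < 1.0)).all())

    domain = UnitInterval()
    points = domain.sample(5)
    print(domain.variables, domain.is_inside(points))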
--------------------------------------------------------------------------------
/pina/domain/operation_interface.py:
--------------------------------------------------------------------------------
1 | """Module for the Operation Interface."""
2 |
3 | from abc import ABCMeta, abstractmethod
4 | from .domain_interface import DomainInterface
5 | from ..utils import check_consistency
6 |
7 |
8 | class OperationInterface(DomainInterface, metaclass=ABCMeta):
9 | """
10 | Abstract class for set operations defined on geometric domains.
11 | """
12 |
13 | def __init__(self, geometries):
14 | """
15 | Initialization of the :class:`OperationInterface` class.
16 |
17 | :param list[DomainInterface] geometries: A list of instances of the
18 | :class:`~pina.domain.domain_interface.DomainInterface` class on
19 | which the set operation is performed.
20 | """
21 | # check consistency geometries
22 | check_consistency(geometries, DomainInterface)
23 |
24 | # check we are passing always different
25 | # geometries with the same labels.
26 | self._check_dimensions(geometries)
27 |
28 | # assign geometries
29 | self._geometries = geometries
30 |
31 | @property
32 | def sample_modes(self):
33 | """
34 | List of available sampling modes.
35 |
36 | :return: List of available sampling modes.
37 | :rtype: list[str]
38 | """
39 | return ["random"]
40 |
41 | @property
42 | def geometries(self):
43 | """
44 | The domains on which to perform the set operation.
45 |
46 | :return: The domains on which to perform the set operation.
47 | :rtype: list[DomainInterface]
48 | """
49 | return self._geometries
50 |
51 | @property
52 | def variables(self):
53 | """
54 | List of variables of the domain.
55 |
56 | :return: List of variables of the domain.
57 | :rtype: list[str]
58 | """
59 | variables = []
60 | for geom in self.geometries:
61 | variables += geom.variables
62 | return sorted(list(set(variables)))
63 |
64 | @abstractmethod
65 | def is_inside(self, point, check_border=False):
66 | """
67 | Abstract method to check if a point lies inside the resulting domain
68 | after performing the set operation.
69 |
70 | :param LabelTensor point: Point to be checked.
71 | :param bool check_border: If ``True``, the border is considered inside
72 | the resulting domain. Default is ``False``.
73 | :return: ``True`` if the point is inside the domain,
74 | ``False`` otherwise.
75 | :rtype: bool
76 | """
77 |
78 | def _check_dimensions(self, geometries):
79 | """
80 | Check if the dimensions of the geometries are consistent.
81 |
82 | :param list[DomainInterface] geometries: Domains to be checked.
83 | :raises NotImplementedError: If the dimensions of the geometries are not
84 | consistent.
85 | """
86 | for geometry in geometries:
87 | if geometry.variables != geometries[0].variables:
88 | raise NotImplementedError(
89 | "The geometries need to have same dimensions and labels."
90 | )
91 |
--------------------------------------------------------------------------------
/pina/equation/__init__.py:
--------------------------------------------------------------------------------
1 | """Module to define equations and systems of equations."""
2 |
3 | __all__ = [
4 | "SystemEquation",
5 | "Equation",
6 | "FixedValue",
7 | "FixedGradient",
8 | "FixedFlux",
9 | "Laplace",
10 | ]
11 |
12 | from .equation import Equation
13 | from .equation_factory import FixedFlux, FixedGradient, Laplace, FixedValue
14 | from .system_equation import SystemEquation
15 |
--------------------------------------------------------------------------------
/pina/equation/equation.py:
--------------------------------------------------------------------------------
1 | """Module for the Equation."""
2 |
3 | from .equation_interface import EquationInterface
4 |
5 |
6 | class Equation(EquationInterface):
7 | """
8 | Implementation of the Equation class. Every ``equation`` passed to a
9 | :class:`~pina.condition.condition.Condition` object must be either an
10 | instance of :class:`Equation` or
11 | :class:`~pina.equation.system_equation.SystemEquation`.
12 | """
13 |
14 | def __init__(self, equation):
15 | """
16 | Initialization of the :class:`Equation` class.
17 |
18 | :param Callable equation: A ``torch`` callable function used to compute
19 | the residual of a mathematical equation.
20 | :raises ValueError: If the equation is not a callable function.
21 | """
22 | if not callable(equation):
23 | raise ValueError(
24 | "equation must be a callable function."
25 | "Expected a callable function, got "
26 | f"{equation}"
27 | )
28 | self.__equation = equation
29 |
30 | def residual(self, input_, output_, params_=None):
31 | """
32 | Compute the residual of the equation.
33 |
34 | :param LabelTensor input_: Input points where the equation is evaluated.
35 | :param LabelTensor output_: Output tensor, eventually produced by a
36 | :class:`torch.nn.Module` instance.
37 | :param dict params_: Dictionary of unknown parameters, associated with a
38 | :class:`~pina.problem.inverse_problem.InverseProblem` instance.
39 | If the equation is not related to a
40 | :class:`~pina.problem.inverse_problem.InverseProblem` instance, the
41 | parameters must be initialized to ``None``. Default is ``None``.
42 | :return: The computed residual of the equation.
43 | :rtype: LabelTensor
44 | """
45 | if params_ is None:
46 | result = self.__equation(input_, output_)
47 | else:
48 | result = self.__equation(input_, output_, params_)
49 | return result
50 |
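# Illustrative sketch (not part of this module): wrapping a residual callable
# in Equation and evaluating it. Plain torch tensors are used for brevity;
# inside a PINA problem the arguments are typically LabelTensor objects
# obtained by sampling a domain and evaluating a model.
if __name__ == "__main__":
    import torch

    def toy_residual(input_, output_):
        # residual of the toy equation u(x) - x^2 = 0
        return output_ - input_**2

    equation = Equation(toy_residual)

    x = torch.rand(4, 1)
    u = x**2  # exact solution of the toy equation
    print(equation.residual(x, u))  # residual is zero for the exact solution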
--------------------------------------------------------------------------------
/pina/equation/equation_interface.py:
--------------------------------------------------------------------------------
1 | """Module for the Equation Interface."""
2 |
3 | from abc import ABCMeta, abstractmethod
4 |
5 |
6 | class EquationInterface(metaclass=ABCMeta):
7 | """
8 | Abstract base class for equations.
9 |
10 | Equations in PINA simplify the training process. When defining a problem,
11 | each equation passed to a :class:`~pina.condition.condition.Condition`
12 | object must be either an :class:`~pina.equation.equation.Equation` or a
13 | :class:`~pina.equation.system_equation.SystemEquation` instance.
14 |
15 | An :class:`~pina.equation.equation.Equation` is a wrapper for a callable
16 | function, while :class:`~pina.equation.system_equation.SystemEquation`
17 | wraps a list of callable functions. To streamline code writing, PINA
18 | provides a diverse set of pre-implemented equations, such as
19 | :class:`~pina.equation.equation_factory.FixedValue`,
20 | :class:`~pina.equation.equation_factory.FixedGradient`, and many others.
21 | """
22 |
23 | @abstractmethod
24 | def residual(self, input_, output_, params_):
25 | """
26 | Abstract method to compute the residual of an equation.
27 |
28 | :param LabelTensor input_: Input points where the equation is evaluated.
29 | :param LabelTensor output_: Output tensor, eventually produced by a
30 | :class:`torch.nn.Module` instance.
31 | :param dict params_: Dictionary of unknown parameters, associated with a
32 | :class:`~pina.problem.inverse_problem.InverseProblem` instance.
33 | :return: The computed residual of the equation.
34 | :rtype: LabelTensor
35 | """
36 |
--------------------------------------------------------------------------------
/pina/geometry/__init__.py:
--------------------------------------------------------------------------------
1 | """Old module for geometry classes and functions. Deprecated in 0.2.0."""
2 |
3 | import warnings
4 |
5 | from ..domain import *
6 | from ..utils import custom_warning_format
7 |
8 | # back-compatibility 0.1
9 | # creating alias
10 | Location = DomainInterface
11 |
12 | # Set the custom format for warnings
13 | warnings.formatwarning = custom_warning_format
14 | warnings.filterwarnings("always", category=DeprecationWarning)
15 | warnings.warn(
16 | "'pina.geometry' is deprecated and will be removed "
17 | "in future versions. Please use 'pina.domain' instead. "
18 | "Location moved to DomainInferface object.",
19 | DeprecationWarning,
20 | )
21 |
--------------------------------------------------------------------------------
/pina/loss/__init__.py:
--------------------------------------------------------------------------------
1 | """Module for loss functions and weighting functions."""
2 |
3 | __all__ = [
4 | "LossInterface",
5 | "LpLoss",
6 | "PowerLoss",
7 | "WeightingInterface",
8 | "ScalarWeighting",
9 | "NeuralTangentKernelWeighting",
10 | ]
11 |
12 | from .loss_interface import LossInterface
13 | from .power_loss import PowerLoss
14 | from .lp_loss import LpLoss
15 | from .weighting_interface import WeightingInterface
16 | from .scalar_weighting import ScalarWeighting
17 | from .ntk_weighting import NeuralTangentKernelWeighting
18 |
--------------------------------------------------------------------------------
/pina/loss/loss_interface.py:
--------------------------------------------------------------------------------
1 | """Module for the Loss Interface."""
2 |
3 | from abc import ABCMeta, abstractmethod
4 | from torch.nn.modules.loss import _Loss
5 | import torch
6 |
7 |
8 | class LossInterface(_Loss, metaclass=ABCMeta):
9 | """
10 | Abstract base class for all losses. All classes defining a loss function
11 | should inherit from this interface.
12 | """
13 |
14 | def __init__(self, reduction="mean"):
15 | """
16 | Initialization of the :class:`LossInterface` class.
17 |
18 | :param str reduction: The reduction method for the loss.
19 | Available options: ``none``, ``mean``, ``sum``.
20 | If ``none``, no reduction is applied. If ``mean``, the sum of the
21 | loss values is divided by the number of values. If ``sum``, the loss
22 | values are summed. Default is ``mean``.
23 | """
24 | super().__init__(reduction=reduction, size_average=None, reduce=None)
25 |
26 | @abstractmethod
27 | def forward(self, input, target):
28 | """
29 | Forward method of the loss function.
30 |
31 | :param torch.Tensor input: Input tensor from real data.
32 | :param torch.Tensor target: Model tensor output.
33 | """
34 |
35 | def _reduction(self, loss):
36 | """
37 | Apply the reduction to the loss.
38 |
39 | :param torch.Tensor loss: The tensor containing the pointwise losses.
40 | :raises ValueError: If the reduction method is not valid.
41 | :return: Reduced loss.
42 | :rtype: torch.Tensor
43 | """
44 | if self.reduction == "none":
45 | ret = loss
46 | elif self.reduction == "mean":
47 | ret = torch.mean(loss, keepdim=True, dim=-1)
48 | elif self.reduction == "sum":
49 | ret = torch.sum(loss, keepdim=True, dim=-1)
50 | else:
51 | raise ValueError(self.reduction + " is not valid")
52 | return ret
53 |
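A minimal sketch of a concrete loss built on LossInterface; the absolute-error loss below is illustrative and not a PINA class, but it shows how ``_reduction`` is meant to be reused by subclasses.

import torch
from pina.loss import LossInterface


class AbsoluteLoss(LossInterface):
    def forward(self, input, target):
        # pointwise L1 error per sample, then the configured reduction
        return self._reduction(torch.abs(input - target).mean(-1))


loss = AbsoluteLoss(reduction="mean")
value = loss(torch.rand(8, 3), torch.rand(8, 3))  # tensor of shape (1,)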
--------------------------------------------------------------------------------
/pina/loss/lp_loss.py:
--------------------------------------------------------------------------------
1 | """Module for the LpLoss class."""
2 |
3 | import torch
4 |
5 | from ..utils import check_consistency
6 | from .loss_interface import LossInterface
7 |
8 |
9 | class LpLoss(LossInterface):
10 | r"""
11 | Implementation of the Lp Loss. It defines a criterion to measure the
12 | pointwise Lp error between values in the input :math:`x` and values in the
13 | target :math:`y`.
14 |
15 | If ``reduction`` is set to ``none``, the loss can be written as:
16 |
17 | .. math::
18 | \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
19 | l_n = \left[\sum_{i=1}^{D} \left| x_n^i - y_n^i \right|^p \right],
20 |
21 | If ``relative`` is set to ``True``, the relative Lp error is computed:
22 |
23 | .. math::
24 | \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
25 | l_n = \frac{ [\sum_{i=1}^{D} | x_n^i - y_n^i|^p] }
26 | {[\sum_{i=1}^{D}|y_n^i|^p]},
27 |
28 | where :math:`N` is the batch size.
29 |
30 | If ``reduction`` is not ``none``, then:
31 |
32 | .. math::
33 | \ell(x, y) =
34 | \begin{cases}
35 | \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
36 | \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
37 | \end{cases}
38 | """
39 |
40 | def __init__(self, p=2, reduction="mean", relative=False):
41 | """
42 | Initialization of the :class:`LpLoss` class.
43 |
44 | :param int p: Degree of the Lp norm. It specifies the norm to be
45 | computed. Default is ``2`` (Euclidean norm).
46 | :param str reduction: The reduction method for the loss.
47 | Available options: ``none``, ``mean``, ``sum``.
48 | If ``none``, no reduction is applied. If ``mean``, the sum of the
49 | loss values is divided by the number of values. If ``sum``, the loss
50 | values are summed. Default is ``mean``.
51 | :param bool relative: If ``True``, the relative error is computed.
52 | Default is ``False``.
53 | """
54 | super().__init__(reduction=reduction)
55 |
56 | # check consistency
57 | check_consistency(p, (str, int, float))
58 | check_consistency(relative, bool)
59 |
60 | self.p = p
61 | self.relative = relative
62 |
63 | def forward(self, input, target):
64 | """
65 | Forward method of the loss function.
66 |
67 | :param torch.Tensor input: Input tensor from real data.
68 | :param torch.Tensor target: Model tensor output.
69 | :return: Loss evaluation.
70 | :rtype: torch.Tensor
71 | """
72 | loss = torch.linalg.norm((input - target), ord=self.p, dim=-1)
73 | if self.relative:
74 | loss = loss / torch.linalg.norm(target, ord=self.p, dim=-1)
75 | return self._reduction(loss)
76 |
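A short usage sketch of ``LpLoss``; the tensors are random and only the shapes matter here.

import torch
from pina.loss import LpLoss

x = torch.rand(10, 4)  # 10 samples with 4 components each
y = torch.rand(10, 4)

loss_none = LpLoss(p=2, reduction="none")
loss_mean = LpLoss(p=2, reduction="mean", relative=True)

print(loss_none(x, y).shape)  # torch.Size([10]): one value per sample
print(loss_mean(x, y).shape)  # torch.Size([1]): reduced to a single value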
--------------------------------------------------------------------------------
/pina/loss/ntk_weighting.py:
--------------------------------------------------------------------------------
1 | """Module for Neural Tangent Kernel Class"""
2 |
3 | import torch
4 | from torch.nn import Module
5 | from .weighting_interface import WeightingInterface
6 | from ..utils import check_consistency
7 |
8 |
9 | class NeuralTangentKernelWeighting(WeightingInterface):
10 | """
11 | A neural tangent kernel scheme for weighting different losses to
12 | boost the convergence.
13 |
14 | .. seealso::
15 |
16 | **Original reference**: Wang, Sifan, Xinling Yu, and
17 | Paris Perdikaris. *When and why PINNs fail to train:
18 | A neural tangent kernel perspective*. Journal of
19 | Computational Physics 449 (2022): 110768.
20 | DOI: `10.1016 `_.
21 |
22 | """
23 |
24 | def __init__(self, model, alpha=0.5):
25 | """
26 | Initialization of the :class:`NeuralTangentKernelWeighting` class.
27 |
28 | :param torch.nn.Module model: The neural network model.
29 | :param float alpha: Moving-average coefficient for the weight update.
30 |
31 | :raises ValueError: If ``alpha`` is not between 0 and 1 (inclusive).
32 | """
33 |
34 | super().__init__()
35 | check_consistency(alpha, float)
36 | check_consistency(model, Module)
37 | if alpha < 0 or alpha > 1:
38 | raise ValueError("alpha should be a value between 0 and 1")
39 | self.alpha = alpha
40 | self.model = model
41 | self.weights = {}
42 | self.default_value_weights = 1
43 |
44 | def aggregate(self, losses):
45 | """
46 | Weight the losses according to the Neural Tangent Kernel
47 | algorithm.
48 |
49 | :param dict losses: The dictionary of losses.
50 | :return: The aggregated losses, as a scalar tensor.
51 | :rtype: torch.Tensor
52 | """
53 | losses_norm = {}
54 | for condition in losses:
55 | losses[condition].backward(retain_graph=True)
56 | grads = []
57 | for param in self.model.parameters():
58 | grads.append(param.grad.view(-1))
59 | grads = torch.cat(grads)
60 | losses_norm[condition] = torch.norm(grads)
61 | self.weights = {
62 | condition: self.alpha
63 | * self.weights.get(condition, self.default_value_weights)
64 | + (1 - self.alpha)
65 | * losses_norm[condition]
66 | / sum(losses_norm.values())
67 | for condition in losses
68 | }
69 | return sum(
70 | self.weights[condition] * loss for condition, loss in losses.items()
71 | )
72 |
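A minimal sketch of the weighting scheme above applied to two loss terms; the tiny linear model and the loss definitions are made up for illustration.

import torch
from pina.loss import NeuralTangentKernelWeighting

model = torch.nn.Linear(2, 1)
x = torch.rand(16, 2)
losses = {
    "physics": (model(x) ** 2).mean(),
    "data": (model(x) - 1.0).abs().mean(),
}

weighting = NeuralTangentKernelWeighting(model=model, alpha=0.5)
total = weighting.aggregate(losses)  # scalar tensor combining both terms
total.backward()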
--------------------------------------------------------------------------------
/pina/loss/power_loss.py:
--------------------------------------------------------------------------------
1 | """Module for the PowerLoss class."""
2 |
3 | import torch
4 |
5 | from ..utils import check_consistency
6 | from .loss_interface import LossInterface
7 |
8 |
9 | class PowerLoss(LossInterface):
10 | r"""
11 | Implementation of the Power Loss. It defines a criterion to measure the
12 | pointwise error between values in the input :math:`x` and values in the
13 | target :math:`y`.
14 |
15 | If ``reduction`` is set to ``none``, the loss can be written as:
16 |
17 | .. math::
18 | \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
19 | l_n = \frac{1}{D}\left[\sum_{i=1}^{D}
20 | \left| x_n^i - y_n^i \right|^p\right],
21 |
22 | If ``relative`` is set to ``True``, the relative error is computed:
23 |
24 | .. math::
25 | \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
26 | l_n = \frac{ \sum_{i=1}^{D} | x_n^i - y_n^i|^p }
27 | {\sum_{i=1}^{D}|y_n^i|^p},
28 |
29 | where :math:`N` is the batch size.
30 |
31 | If ``reduction`` is not ``none``, then:
32 |
33 | .. math::
34 | \ell(x, y) =
35 | \begin{cases}
36 | \operatorname{mean}(L), & \text{if reduction} = \text{`mean';}\\
37 | \operatorname{sum}(L), & \text{if reduction} = \text{`sum'.}
38 | \end{cases}
39 | """
40 |
41 | def __init__(self, p=2, reduction="mean", relative=False):
42 | """
43 | Initialization of the :class:`PowerLoss` class.
44 |
45 | :param int p: Degree of the Lp norm. It specifies the norm to be
46 | computed. Default is ``2`` (Euclidean norm).
47 | :param str reduction: The reduction method for the loss.
48 | Available options: ``none``, ``mean``, ``sum``.
49 | If ``none``, no reduction is applied. If ``mean``, the sum of the
50 | loss values is divided by the number of values. If ``sum``, the loss
51 | values are summed. Default is ``mean``.
52 | :param bool relative: If ``True``, the relative error is computed.
53 | Default is ``False``.
54 | """
55 | super().__init__(reduction=reduction)
56 |
57 | # check consistency
58 | check_consistency(p, (str, int, float))
59 | check_consistency(relative, bool)
60 |
61 | self.p = p
62 | self.relative = relative
63 |
64 | def forward(self, input, target):
65 | """
66 | Forward method of the loss function.
67 |
68 | :param torch.Tensor input: Input tensor from real data.
69 | :param torch.Tensor target: Model tensor output.
70 | :return: Loss evaluation.
71 | :rtype: torch.Tensor
72 | """
73 | loss = torch.abs((input - target)).pow(self.p).mean(-1)
74 | if self.relative:
75 | loss = loss / torch.abs(target).pow(self.p).mean(-1)
76 | return self._reduction(loss)
77 |
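A short usage sketch of ``PowerLoss``, analogous to the ``LpLoss`` one above; it averages the p-th power of the error over the components instead of taking a norm.

import torch
from pina.loss import PowerLoss

x = torch.rand(10, 4)
y = torch.rand(10, 4)

loss = PowerLoss(p=2, reduction="mean")
rel_loss = PowerLoss(p=2, reduction="none", relative=True)

print(loss(x, y).shape)      # torch.Size([1])
print(rel_loss(x, y).shape)  # torch.Size([10])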
--------------------------------------------------------------------------------
/pina/loss/scalar_weighting.py:
--------------------------------------------------------------------------------
1 | """Module for the Scalar Weighting."""
2 |
3 | from .weighting_interface import WeightingInterface
4 | from ..utils import check_consistency
5 |
6 |
7 | class _NoWeighting(WeightingInterface):
8 | """
9 | Weighting scheme that does not apply any weighting to the losses.
10 | """
11 |
12 | def aggregate(self, losses):
13 | """
14 | Aggregate the losses.
15 |
16 | :param dict losses: The dictionary of losses.
17 | :return: The aggregated losses.
18 | :rtype: torch.Tensor
19 | """
20 | return sum(losses.values())
21 |
22 |
23 | class ScalarWeighting(WeightingInterface):
24 | """
25 | Weighting scheme that assigns a scalar weight to each loss term.
26 | """
27 |
28 | def __init__(self, weights):
29 | """
30 | Initialization of the :class:`ScalarWeighting` class.
31 |
32 | :param weights: The weights to be assigned to each loss term.
33 | If a single scalar value is provided, it is assigned to all loss
34 | terms. If a dictionary is provided, the keys are the conditions and
35 | the values are the weights. If a condition is not present in the
36 | dictionary, the default value is used.
37 | :type weights: float | int | dict
38 | """
39 | super().__init__()
40 | check_consistency([weights], (float, dict, int))
41 | if isinstance(weights, (float, int)):
42 | self.default_value_weights = weights
43 | self.weights = {}
44 | else:
45 | self.default_value_weights = 1
46 | self.weights = weights
47 |
48 | def aggregate(self, losses):
49 | """
50 | Aggregate the losses.
51 |
52 | :param dict losses: The dictionary of losses.
53 | :return: The aggregated losses.
54 | :rtype: torch.Tensor
55 | """
56 | return sum(
57 | self.weights.get(condition, self.default_value_weights) * loss
58 | for condition, loss in losses.items()
59 | )
60 |
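A minimal sketch of ``ScalarWeighting`` with per-condition weights; the condition names and values are illustrative.

import torch
from pina.loss import ScalarWeighting

losses = {"physics": torch.tensor(0.8), "data": torch.tensor(0.2)}

weighting = ScalarWeighting(weights={"physics": 10.0})  # "data" falls back to 1
total = weighting.aggregate(losses)  # 10.0 * 0.8 + 1 * 0.2 = 8.2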
--------------------------------------------------------------------------------
/pina/loss/weighting_interface.py:
--------------------------------------------------------------------------------
1 | """Module for the Weighting Interface."""
2 |
3 | from abc import ABCMeta, abstractmethod
4 |
5 |
6 | class WeightingInterface(metaclass=ABCMeta):
7 | """
8 | Abstract base class for all loss weighting schemas. All weighting schemas
9 | should inherit from this class.
10 | """
11 |
12 | def __init__(self):
13 | """
14 | Initialization of the :class:`WeightingInterface` class.
15 | """
16 | self.condition_names = None
17 |
18 | @abstractmethod
19 | def aggregate(self, losses):
20 | """
21 | Aggregate the losses.
22 |
23 | :param dict losses: The dictionary of losses.
24 | """
25 |
--------------------------------------------------------------------------------
/pina/model/__init__.py:
--------------------------------------------------------------------------------
1 | """Module for the Neural model classes."""
2 |
3 | __all__ = [
4 | "FeedForward",
5 | "ResidualFeedForward",
6 | "MultiFeedForward",
7 | "DeepONet",
8 | "MIONet",
9 | "FNO",
10 | "FourierIntegralKernel",
11 | "KernelNeuralOperator",
12 | "AveragingNeuralOperator",
13 | "LowRankNeuralOperator",
14 | "Spline",
15 | "GraphNeuralOperator",
16 | ]
17 |
18 | from .feed_forward import FeedForward, ResidualFeedForward
19 | from .multi_feed_forward import MultiFeedForward
20 | from .deeponet import DeepONet, MIONet
21 | from .fourier_neural_operator import FNO, FourierIntegralKernel
22 | from .kernel_neural_operator import KernelNeuralOperator
23 | from .average_neural_operator import AveragingNeuralOperator
24 | from .low_rank_neural_operator import LowRankNeuralOperator
25 | from .spline import Spline
26 | from .graph_neural_operator import GraphNeuralOperator
27 |
--------------------------------------------------------------------------------
/pina/model/block/__init__.py:
--------------------------------------------------------------------------------
1 | """Module for the building blocks of the neural models."""
2 |
3 | __all__ = [
4 | "ContinuousConvBlock",
5 | "ResidualBlock",
6 | "EnhancedLinear",
7 | "SpectralConvBlock1D",
8 | "SpectralConvBlock2D",
9 | "SpectralConvBlock3D",
10 | "FourierBlock1D",
11 | "FourierBlock2D",
12 | "FourierBlock3D",
13 | "PODBlock",
14 | "OrthogonalBlock",
15 | "PeriodicBoundaryEmbedding",
16 | "FourierFeatureEmbedding",
17 | "AVNOBlock",
18 | "LowRankBlock",
19 | "RBFBlock",
20 | "GNOBlock",
21 | ]
22 |
23 | from .convolution_2d import ContinuousConvBlock
24 | from .residual import ResidualBlock, EnhancedLinear
25 | from .spectral import (
26 | SpectralConvBlock1D,
27 | SpectralConvBlock2D,
28 | SpectralConvBlock3D,
29 | )
30 | from .fourier_block import FourierBlock1D, FourierBlock2D, FourierBlock3D
31 | from .pod_block import PODBlock
32 | from .orthogonal import OrthogonalBlock
33 | from .embedding import PeriodicBoundaryEmbedding, FourierFeatureEmbedding
34 | from .average_neural_operator_block import AVNOBlock
35 | from .low_rank_block import LowRankBlock
36 | from .rbf_block import RBFBlock
37 | from .gno_block import GNOBlock
38 |
--------------------------------------------------------------------------------
/pina/model/block/average_neural_operator_block.py:
--------------------------------------------------------------------------------
1 | """Module for the Averaging Neural Operator Block class."""
2 |
3 | import torch
4 | from torch import nn
5 | from ...utils import check_consistency
6 |
7 |
8 | class AVNOBlock(nn.Module):
9 | r"""
10 | The inner block of the Averaging Neural Operator.
11 |
12 | The operator layer performs an affine transformation where the convolution
13 | is approximated with a local average. Given the input function
14 | :math:`v(x)\in\mathbb{R}^{\rm{emb}}` the layer computes the operator update
15 | :math:`K(v)` as:
16 |
17 | .. math::
18 | K(v) = \sigma\left(Wv(x) + b + \frac{1}{|\mathcal{A}|}\int v(y)dy\right)
19 |
20 | where:
21 |
22 | * :math:`\mathbb{R}^{\rm{emb}}` is the embedding (hidden) size
23 | corresponding to the ``hidden_size`` object
24 | * :math:`\sigma` is a non-linear activation, corresponding to the
25 | ``func`` object
26 | * :math:`W\in\mathbb{R}^{\rm{emb}\times\rm{emb}}` is a tunable matrix.
27 | * :math:`b\in\mathbb{R}^{\rm{emb}}` is a tunable bias.
28 |
29 | .. seealso::
30 |
31 | **Original reference**: Lanthaler, S., Li, Z., Stuart, A. (2023).
32 | *The Nonlocal Neural Operator: Universal Approximation*.
33 | DOI: `arXiv preprint arXiv:2304.13221
34 | <https://arxiv.org/abs/2304.13221>`_.
35 | """
36 |
37 | def __init__(self, hidden_size=100, func=nn.GELU):
38 | """
39 | Initialization of the :class:`AVNOBlock` class.
40 |
41 | :param int hidden_size: The size of the hidden layer.
42 | Default is ``100``.
43 | :param func: The activation function.
44 | Default is :class:`torch.nn.GELU`.
45 | """
46 | super().__init__()
47 |
48 | # Check type consistency
49 | check_consistency(hidden_size, int)
50 | check_consistency(func, nn.Module, subclass=True)
51 | # Assignment
52 | self._nn = nn.Linear(hidden_size, hidden_size)
53 | self._func = func()
54 |
55 | def forward(self, x):
56 | r"""
57 | Forward pass of the block. It performs a sum of local average and an
58 | affine transformation of the field.
59 |
60 | :param torch.Tensor x: The input tensor for performing the computation.
61 | :return: The output tensor.
62 | :rtype: torch.Tensor
63 | """
64 | return self._func(self._nn(x) + torch.mean(x, dim=1, keepdim=True))
65 |
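A short usage sketch of ``AVNOBlock``; it acts on tensors of shape ``(batch, points, hidden_size)`` and averages over the points dimension, as in the forward method above.

import torch
from pina.model.block import AVNOBlock

block = AVNOBlock(hidden_size=32, func=torch.nn.GELU)
v = torch.rand(4, 100, 32)  # 4 functions sampled at 100 points, embedding 32
out = block(v)              # same shape as the input: (4, 100, 32)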
--------------------------------------------------------------------------------
/pina/model/block/integral.py:
--------------------------------------------------------------------------------
1 | """Module to perform integration for continuous convolution."""
2 |
3 | import torch
4 |
5 |
6 | class Integral:
7 | """
8 | Class allowing integration for continuous convolution.
9 | """
10 |
11 | def __init__(self, param):
12 | """
13 | Initialization of the :class:`Integral` class.
14 |
15 | :param param: The type of continuous convolution.
16 | :type param: str
17 | :raises TypeError: If the parameter is neither ``discrete``
18 | nor ``continuous``.
19 | """
20 | if param == "discrete":
21 | self.make_integral = self.integral_param_disc
22 | elif param == "continuous":
23 | self.make_integral = self.integral_param_cont
24 | else:
25 | raise TypeError
26 |
27 | def __call__(self, *args, **kwds):
28 | """
29 | Call the integral function.
30 | 
31 | :param list args: Arguments for the integral function.
32 | :param dict kwds: Keyword arguments for the integral function.
33 | :return: The integral of the input.
34 | :rtype: torch.Tensor
35 | """
36 | return self.make_integral(*args, **kwds)
37 |
38 | def _prepend_zero(self, x):
39 | """
40 | Create bins to perform integration.
41 |
42 | :param torch.Tensor x: The input tensor.
43 | :return: The bins for the integral.
44 | :rtype: torch.Tensor
45 | """
46 | return torch.cat((torch.zeros(1, dtype=x.dtype, device=x.device), x))
47 |
48 | def integral_param_disc(self, x, y, idx):
49 | """
50 | Perform discrete integration with discrete parameters.
51 |
52 | :param torch.Tensor x: The first input tensor.
53 | :param torch.Tensor y: The second input tensor.
54 | :param list[int] idx: The indices for different strides.
55 | :return: The discrete integral.
56 | :rtype: torch.Tensor
57 | """
58 | cs_idxes = self._prepend_zero(torch.cumsum(torch.tensor(idx), 0))
59 | cs = self._prepend_zero(torch.cumsum(x.flatten() * y.flatten(), 0))
60 | return cs[cs_idxes[1:]] - cs[cs_idxes[:-1]]
61 |
62 | def integral_param_cont(self, x, y, idx):
63 | """
64 | Perform continuous integration with continuous parameters.
65 |
66 | :param torch.Tensor x: The first input tensor.
67 | :param torch.Tensor y: The second input tensor.
68 | :param list[int] idx: The indices for different strides.
69 | :raises NotImplementedError: The method is not implemented.
70 | """
71 | raise NotImplementedError
72 |
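A minimal sketch of the discrete integration path; the values and the stride split are made up.

import torch
from pina.model.block.integral import Integral

integral = Integral("discrete")
x = torch.rand(6)             # field values
y = torch.rand(6)             # filter values
idx = [3, 3]                  # three points fall into each of the two strides
result = integral(x, y, idx)  # one integral value per stride, shape (2,)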
--------------------------------------------------------------------------------
/pina/model/block/stride.py:
--------------------------------------------------------------------------------
1 | """Module for the Stride class."""
2 |
3 | import torch
4 |
5 |
6 | class Stride:
7 | """
8 | Stride class for continuous convolution.
9 | """
10 |
11 | def __init__(self, dict_):
12 | """
13 | Initialization of the :class:`Stride` class.
14 |
15 | :param dict dict_: Dictionary having as keys the domain size ``domain``,
16 | the starting position of the filter ``start``, the jump size for the
17 | filter ``jump``, and the direction of the filter ``direction``.
18 | """
19 |
20 | self._dict_stride = dict_
21 | self._stride_continuous = None
22 | self._stride_discrete = self._create_stride_discrete(dict_)
23 |
24 | def _create_stride_discrete(self, my_dict):
25 | """
26 | Create a tensor of positions where to apply the filter.
27 |
28 | :param dict my_dict: Dictionary having as keys the domain size
29 | ``domain``, the starting position of the filter ``start``, the jump
30 | size for the filter ``jump``, and the direction of the filter
31 | ``direction``.
32 | :raises IndexError: Values in the dict must all have the same length.
33 | :raises ValueError: Domain values must be greater than 0.
34 | :raises ValueError: Direction must be either equal to ``1``, ``-1`` or
35 | ``0``.
36 | :raises IndexError: Direction and jumps must be zero in the same index.
37 | :return: The positions for the filter
38 | :rtype: torch.Tensor
39 |
40 | :Example:
41 |
42 | >>> stride_dict = {
43 | ... "domain": [4, 4],
44 | ... "start": [-4, 2],
45 | ... "jump": [2, 2],
46 | ... "direction": [1, 1],
47 | ... }
48 | >>> Stride(stride_dict)
49 | """
50 | # we must check boundaries of the input as well
51 | domain, start, jumps, direction = my_dict.values()
52 |
53 | # checking
54 | if not all(len(s) == len(domain) for s in my_dict.values()):
55 | raise IndexError("Values in the dict must all have the same length")
56 |
57 | if not all(v >= 0 for v in domain):
58 | raise ValueError("Domain values must be greater than 0")
59 |
60 | if not all(v in (0, -1, 1) for v in direction):
61 | raise ValueError("Direction must be either equal to 1, -1 or 0")
62 |
63 | seq_jumps = [i for i, e in enumerate(jumps) if e == 0]
64 | seq_direction = [i for i, e in enumerate(direction) if e == 0]
65 |
66 | if seq_direction != seq_jumps:
67 | raise IndexError(
68 | "Direction and jumps must have zero in the same index"
69 | )
70 |
71 | if seq_jumps:
72 | for i in seq_jumps:
73 | jumps[i] = domain[i]
74 | direction[i] = 1
75 |
76 | # creating the stride grid
77 | values_mesh = [
78 | torch.arange(0, i, step).float() for i, step in zip(domain, jumps)
79 | ]
80 |
81 | values_mesh = [
82 | single * dim for single, dim in zip(values_mesh, direction)
83 | ]
84 |
85 | mesh = torch.meshgrid(values_mesh)
86 | coordinates_mesh = [x.reshape(-1, 1) for x in mesh]
87 |
88 | stride = torch.cat(coordinates_mesh, dim=1) + torch.tensor(start)
89 |
90 | return stride
91 |
--------------------------------------------------------------------------------
/pina/model/block/utils_convolution.py:
--------------------------------------------------------------------------------
1 | """Module for utility functions for the convolutional layer."""
2 |
3 | import torch
4 |
5 |
6 | def check_point(x, current_stride, dim):
7 | """
8 | Check if the point is in the current stride.
9 |
10 | :param torch.Tensor x: The input data.
11 | :param int current_stride: The current stride.
12 | :param int dim: The shape of the filter.
13 | :return: The indices of the points in the current stride.
14 | :rtype: torch.Tensor
15 | """
16 | max_stride = current_stride + dim
17 | indices = torch.logical_and(
18 | x[..., :-1] < max_stride, x[..., :-1] >= current_stride
19 | ).all(dim=-1)
20 | return indices
21 |
22 |
23 | def map_points_(x, filter_position):
24 | """
25 | The mapping function for the n-dimensional case.
26 | 
27 | :param torch.Tensor x: The two-dimensional input data.
28 | :param list[int] filter_position: The position of the filter.
29 | :return: The data mapped in-place.
30 | :rtype: torch.Tensor
31 | """
32 | x.add_(-filter_position)
33 |
34 | return x
35 |
36 |
37 | def optimizing(f):
38 | """
39 | Decorator tracking whether the forward and inverse passes have already run.
40 |
41 | :param f: python function
42 | :type f: Callable
43 | """
44 |
45 | def wrapper(*args, **kwargs):
46 | """
47 | Wrapper function.
48 |
49 | :param args: The arguments of the function.
50 | :param kwargs: The keyword arguments of the function.
51 | """
52 | if kwargs["type_"] == "forward":
53 | if not wrapper.has_run_inverse:
54 | wrapper.has_run_inverse = True
55 | return f(*args, **kwargs)
56 |
57 | if kwargs["type_"] == "inverse":
58 | if not wrapper.has_run:
59 | wrapper.has_run = True
60 | return f(*args, **kwargs)
61 |
62 | return f(*args, **kwargs)
63 |
64 | wrapper.has_run_inverse = False
65 | wrapper.has_run = False
66 |
67 | return wrapper
68 |
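A minimal sketch of ``check_point`` on a tiny 2D point cloud whose last column is the field value; the numbers are illustrative.

import torch
from pina.model.block.utils_convolution import check_point

# three points with coordinates (x, y) and a trailing field value
points = torch.tensor([[0.2, 0.3, 1.0], [0.9, 0.1, 2.0], [1.5, 0.5, 3.0]])
current_stride = torch.tensor([0.0, 0.0])  # window origin
dim = torch.tensor([1.0, 1.0])             # window size
mask = check_point(points, current_stride, dim)  # tensor([True, True, False])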
--------------------------------------------------------------------------------
/pina/model/layers/__init__.py:
--------------------------------------------------------------------------------
1 | """Old layers module, deprecated in 0.2.0."""
2 |
3 | import warnings
4 |
5 | from ..block import *
6 | from ...utils import custom_warning_format
7 |
8 | # back-compatibility 0.1
9 | # Set the custom format for warnings
10 | warnings.formatwarning = custom_warning_format
11 | warnings.filterwarnings("always", category=DeprecationWarning)
12 | warnings.warn(
13 | "'pina.model.layers' is deprecated and will be removed "
14 | "in future versions. Please use 'pina.model.block' instead.",
15 | DeprecationWarning,
16 | )
17 |
--------------------------------------------------------------------------------
/pina/model/multi_feed_forward.py:
--------------------------------------------------------------------------------
1 | """Module for the Multi Feed Forward model class."""
2 |
3 | from abc import ABC, abstractmethod
4 | import torch
5 | from .feed_forward import FeedForward
6 |
7 |
8 | class MultiFeedForward(torch.nn.Module, ABC):
9 | """
10 | Multi Feed Forward neural network model class.
11 |
12 | This model combines multiple Feed Forward neural networks into a single
13 | network. The user is required to define the ``forward`` method to choose
14 | how to combine the networks.
15 | """
16 |
17 | def __init__(self, ffn_dict):
18 | """
19 | Initialization of the :class:`MultiFeedForward` class.
20 |
21 | :param dict ffn_dict: A dictionary containing the Feed Forward neural
22 | networks to be combined.
23 | :raises TypeError: If the input is not a dictionary.
24 | """
25 | super().__init__()
26 |
27 | if not isinstance(ffn_dict, dict):
28 | raise TypeError
29 |
30 | for name, constructor_args in ffn_dict.items():
31 | setattr(self, name, FeedForward(**constructor_args))
32 |
33 | @abstractmethod
34 | def forward(self, *args, **kwargs):
35 | """
36 | Forward pass for the :class:`MultiFeedForward` model.
37 |
38 | The user is required to define this method to choose how to combine the
39 | networks.
40 | """
41 |
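A minimal sketch of a concrete ``MultiFeedForward``; the ``FeedForward`` keyword arguments used below (``input_dimensions``, ``output_dimensions``) are assumed from the PINA feed-forward model and should be adapted if its constructor differs.

import torch
from pina.model import MultiFeedForward


class TwoBranchNet(MultiFeedForward):
    def forward(self, x):
        # combine the two sub-networks by summing their outputs
        return self.branch_a(x) + self.branch_b(x)


net = TwoBranchNet(
    {
        "branch_a": {"input_dimensions": 2, "output_dimensions": 1},
        "branch_b": {"input_dimensions": 2, "output_dimensions": 1},
    }
)
out = net(torch.rand(10, 2))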
--------------------------------------------------------------------------------
/pina/operators.py:
--------------------------------------------------------------------------------
1 | """Old module for operators. Deprecated in 0.2.0."""
2 |
3 | import warnings
4 |
5 | from .operator import *
6 | from .utils import custom_warning_format
7 |
8 | # back-compatibility 0.1
9 | # Set the custom format for warnings
10 | warnings.formatwarning = custom_warning_format
11 | warnings.filterwarnings("always", category=DeprecationWarning)
12 | warnings.warn(
13 | "'pina.operators' is deprecated and will be removed "
14 | "in future versions. Please use 'pina.operator' instead.",
15 | DeprecationWarning,
16 | )
17 |
--------------------------------------------------------------------------------
/pina/optim/__init__.py:
--------------------------------------------------------------------------------
1 | """Module for the Optimizers and Schedulers."""
2 |
3 | __all__ = [
4 | "Optimizer",
5 | "TorchOptimizer",
6 | "Scheduler",
7 | "TorchScheduler",
8 | ]
9 |
10 | from .optimizer_interface import Optimizer
11 | from .torch_optimizer import TorchOptimizer
12 | from .scheduler_interface import Scheduler
13 | from .torch_scheduler import TorchScheduler
14 |
--------------------------------------------------------------------------------
/pina/optim/optimizer_interface.py:
--------------------------------------------------------------------------------
1 | """Module for the PINA Optimizer."""
2 |
3 | from abc import ABCMeta, abstractmethod
4 |
5 |
6 | class Optimizer(metaclass=ABCMeta):
7 | """
8 | Abstract base class for defining an optimizer. All specific optimizers
9 | should inherit from this class and implement the required methods.
10 | """
11 |
12 | @property
13 | @abstractmethod
14 | def instance(self):
15 | """
16 | Abstract property to retrieve the optimizer instance.
17 | """
18 |
19 | @abstractmethod
20 | def hook(self):
21 | """
22 | Abstract method to define the hook logic for the optimizer.
23 | """
24 |
--------------------------------------------------------------------------------
/pina/optim/scheduler_interface.py:
--------------------------------------------------------------------------------
1 | """Module for the PINA Scheduler."""
2 |
3 | from abc import ABCMeta, abstractmethod
4 |
5 |
6 | class Scheduler(metaclass=ABCMeta):
7 | """
8 | Abstract base class for defining a scheduler. All specific schedulers should
9 | inherit from this class and implement the required methods.
10 | """
11 |
12 | @property
13 | @abstractmethod
14 | def instance(self):
15 | """
16 | Abstract property to retrieve the scheduler instance.
17 | """
18 |
19 | @abstractmethod
20 | def hook(self):
21 | """
22 | Abstract method to define the hook logic for the scheduler.
23 | """
24 |
--------------------------------------------------------------------------------
/pina/optim/torch_optimizer.py:
--------------------------------------------------------------------------------
1 | """Module for the PINA Torch Optimizer"""
2 |
3 | import torch
4 |
5 | from ..utils import check_consistency
6 | from .optimizer_interface import Optimizer
7 |
8 |
9 | class TorchOptimizer(Optimizer):
10 | """
11 | A wrapper class for using PyTorch optimizers.
12 | """
13 |
14 | def __init__(self, optimizer_class, **kwargs):
15 | """
16 | Initialization of the :class:`TorchOptimizer` class.
17 |
18 | :param torch.optim.Optimizer optimizer_class: A
19 | :class:`torch.optim.Optimizer` class.
20 | :param dict kwargs: Additional parameters passed to ``optimizer_class``,
21 | see more
22 | `here `_.
23 | """
24 | check_consistency(optimizer_class, torch.optim.Optimizer, subclass=True)
25 |
26 | self.optimizer_class = optimizer_class
27 | self.kwargs = kwargs
28 | self._optimizer_instance = None
29 |
30 | def hook(self, parameters):
31 | """
32 | Initialize the optimizer instance with the given parameters.
33 |
34 | :param dict parameters: The parameters of the model to be optimized.
35 | """
36 | self._optimizer_instance = self.optimizer_class(
37 | parameters, **self.kwargs
38 | )
39 |
40 | @property
41 | def instance(self):
42 | """
43 | Get the optimizer instance.
44 |
45 | :return: The optimizer instance.
46 | :rtype: torch.optim.Optimizer
47 | """
48 | return self._optimizer_instance
49 |
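A short usage sketch of ``TorchOptimizer`` wrapping ``torch.optim.Adam``; the small linear model is illustrative.

import torch
from pina.optim import TorchOptimizer

model = torch.nn.Linear(3, 1)
optimizer = TorchOptimizer(torch.optim.Adam, lr=1e-3)
optimizer.hook(model.parameters())  # builds the underlying torch.optim.Adam
assert isinstance(optimizer.instance, torch.optim.Adam)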
--------------------------------------------------------------------------------
/pina/optim/torch_scheduler.py:
--------------------------------------------------------------------------------
1 | """Module for the PINA Torch Optimizer"""
2 |
3 | try:
4 | from torch.optim.lr_scheduler import LRScheduler # torch >= 2.0
5 | except ImportError:
6 | from torch.optim.lr_scheduler import (
7 | _LRScheduler as LRScheduler,
8 | ) # torch < 2.0
9 |
10 | from ..utils import check_consistency
11 | from .optimizer_interface import Optimizer
12 | from .scheduler_interface import Scheduler
13 |
14 |
15 | class TorchScheduler(Scheduler):
16 | """
17 | A wrapper class for using PyTorch schedulers.
18 | """
19 |
20 | def __init__(self, scheduler_class, **kwargs):
21 | """
22 | Initialization of the :class:`TorchScheduler` class.
23 |
24 | :param torch.optim.LRScheduler scheduler_class: A
25 | :class:`torch.optim.LRScheduler` class.
26 | :param dict kwargs: Additional parameters passed to ``scheduler_class``,
27 | see more
28 | `here `_.
29 | """
30 | check_consistency(scheduler_class, LRScheduler, subclass=True)
31 |
32 | self.scheduler_class = scheduler_class
33 | self.kwargs = kwargs
34 | self._scheduler_instance = None
35 |
36 | def hook(self, optimizer):
37 | """
38 | Initialize the scheduler instance with the given parameters.
39 |
40 | :param Optimizer optimizer: The optimizer to be scheduled.
41 | """
42 | check_consistency(optimizer, Optimizer)
43 | self._scheduler_instance = self.scheduler_class(
44 | optimizer.instance, **self.kwargs
45 | )
46 |
47 | @property
48 | def instance(self):
49 | """
50 | Get the scheduler instance.
51 |
52 | :return: The scheduler instance.
53 | :rtype: torch.optim.LRScheduler
54 | """
55 | return self._scheduler_instance
56 |
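A short usage sketch pairing ``TorchScheduler`` with the ``TorchOptimizer`` above; ``StepLR`` and the model are illustrative choices.

import torch
from pina.optim import TorchOptimizer, TorchScheduler

model = torch.nn.Linear(3, 1)
optimizer = TorchOptimizer(torch.optim.Adam, lr=1e-3)
optimizer.hook(model.parameters())

scheduler = TorchScheduler(torch.optim.lr_scheduler.StepLR, step_size=10)
scheduler.hook(optimizer)  # wraps optimizer.instance
assert isinstance(scheduler.instance, torch.optim.lr_scheduler.StepLR)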
--------------------------------------------------------------------------------
/pina/plotter.py:
--------------------------------------------------------------------------------
1 | """Module for Plotter"""
2 |
3 | raise ImportError("'pina.plotter' is deprecated and cannot be imported.")
4 |
--------------------------------------------------------------------------------
/pina/problem/__init__.py:
--------------------------------------------------------------------------------
1 | """Module for the Problems."""
2 |
3 | __all__ = [
4 | "AbstractProblem",
5 | "SpatialProblem",
6 | "TimeDependentProblem",
7 | "ParametricProblem",
8 | "InverseProblem",
9 | ]
10 |
11 | from .abstract_problem import AbstractProblem
12 | from .spatial_problem import SpatialProblem
13 | from .time_dependent_problem import TimeDependentProblem
14 | from .parametric_problem import ParametricProblem
15 | from .inverse_problem import InverseProblem
16 |
--------------------------------------------------------------------------------
/pina/problem/inverse_problem.py:
--------------------------------------------------------------------------------
1 | """Module for the InverseProblem class."""
2 |
3 | from abc import abstractmethod
4 | import torch
5 | from .abstract_problem import AbstractProblem
6 |
7 |
8 | class InverseProblem(AbstractProblem):
9 | """
10 | Class for defining inverse problems, where the objective is to determine
11 | unknown parameters through training, based on given data.
12 | """
13 |
14 | def __init__(self):
15 | """
16 | Initialization of the :class:`InverseProblem` class.
17 | """
18 | super().__init__()
19 | # storing unknown_parameters for optimization
20 | self.unknown_parameters = {}
21 | for var in self.unknown_variables:
22 | range_var = self.unknown_parameter_domain.range_[var]
23 | tensor_var = torch.rand(1, requires_grad=True) * (
24 | range_var[1] - range_var[0]
25 | ) + range_var[0]
26 | self.unknown_parameters[var] = torch.nn.Parameter(tensor_var)
27 |
28 | @abstractmethod
29 | def unknown_parameter_domain(self):
30 | """
31 | The domain of the unknown parameters of the problem.
32 | """
33 |
34 | @property
35 | def unknown_variables(self):
36 | """
37 | Get the unknown variables of the problem.
38 |
39 | :return: The unknown variables of the problem.
40 | :rtype: list[str]
41 | """
42 | return self.unknown_parameter_domain.variables
43 |
44 | @property
45 | def unknown_parameters(self):
46 | """
47 | Get the unknown parameters of the problem.
48 |
49 | :return: The unknown parameters of the problem.
50 | :rtype: torch.nn.Parameter
51 | """
52 | return self.__unknown_parameters
53 |
54 | @unknown_parameters.setter
55 | def unknown_parameters(self, value):
56 | """
57 | Set the unknown parameters of the problem.
58 |
59 | :param torch.nn.Parameter value: The unknown parameters of the problem.
60 | """
61 | self.__unknown_parameters = value
62 |
--------------------------------------------------------------------------------
/pina/problem/parametric_problem.py:
--------------------------------------------------------------------------------
1 | """Module for the ParametricProblem class."""
2 |
3 | from abc import abstractmethod
4 |
5 | from .abstract_problem import AbstractProblem
6 |
7 |
8 | class ParametricProblem(AbstractProblem):
9 | """
10 | Class for defining parametric problems, where certain input variables are
11 | treated as parameters that can vary, allowing the model to adapt to
12 | different scenarios based on the chosen parameters.
13 | """
14 |
15 | @abstractmethod
16 | def parameter_domain(self):
17 | """
18 | The domain of the parameters of the problem.
19 | """
20 |
21 | @property
22 | def parameters(self):
23 | """
24 | Get the parameters of the problem.
25 |
26 | :return: The parameters of the problem.
27 | :rtype: list[str]
28 | """
29 | return self.parameter_domain.variables
30 |
--------------------------------------------------------------------------------
/pina/problem/spatial_problem.py:
--------------------------------------------------------------------------------
1 | """Module for the SpatialProblem class."""
2 |
3 | from abc import abstractmethod
4 |
5 | from .abstract_problem import AbstractProblem
6 |
7 |
8 | class SpatialProblem(AbstractProblem):
9 | """
10 | Class for defining spatial problems, where the problem domain is defined in
11 | terms of spatial variables.
12 | """
13 |
14 | @abstractmethod
15 | def spatial_domain(self):
16 | """
17 | The spatial domain of the problem.
18 | """
19 |
20 | @property
21 | def spatial_variables(self):
22 | """
23 | Get the spatial input variables of the problem.
24 |
25 | :return: The spatial input variables of the problem.
26 | :rtype: list[str]
27 | """
28 | return self.spatial_domain.variables
29 |
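A minimal sketch of a concrete ``SpatialProblem`` on a 1D interval, following the structure of the problems in ``pina.problem.zoo``; the single fixed-value condition is illustrative only.

from pina import Condition
from pina.domain import CartesianDomain
from pina.equation import FixedValue
from pina.problem import SpatialProblem


class Interval1DProblem(SpatialProblem):
    output_variables = ["u"]
    spatial_domain = CartesianDomain({"x": [0, 1]})

    domains = {"D": CartesianDomain({"x": [0, 1]})}
    conditions = {"D": Condition(domain="D", equation=FixedValue(0.0))}


problem = Interval1DProblem()
print(problem.spatial_variables)  # the spatial input variables, i.e. ["x"]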
--------------------------------------------------------------------------------
/pina/problem/time_dependent_problem.py:
--------------------------------------------------------------------------------
1 | """Module for the TimeDependentProblem class."""
2 |
3 | from abc import abstractmethod
4 |
5 | from .abstract_problem import AbstractProblem
6 |
7 |
8 | class TimeDependentProblem(AbstractProblem):
9 | """
10 | Class for defining time-dependent problems, where the system's behavior
11 | changes with respect to time.
12 | """
13 |
14 | @abstractmethod
15 | def temporal_domain(self):
16 | """
17 | The temporal domain of the problem.
18 | """
19 |
20 | @property
21 | def temporal_variable(self):
22 | """
23 | Get the time variable of the problem.
24 |
25 | :return: The time variable of the problem.
26 | :rtype: list[str]
27 | """
28 | return self.temporal_domain.variables
29 |
--------------------------------------------------------------------------------
/pina/problem/zoo/__init__.py:
--------------------------------------------------------------------------------
1 | """Module for implemented problems."""
2 |
3 | __all__ = [
4 | "SupervisedProblem",
5 | "HelmholtzProblem",
6 | "AllenCahnProblem",
7 | "AdvectionProblem",
8 | "Poisson2DSquareProblem",
9 | "DiffusionReactionProblem",
10 | "InversePoisson2DSquareProblem",
11 | ]
12 |
13 | from .supervised_problem import SupervisedProblem
14 | from .helmholtz import HelmholtzProblem
15 | from .allen_cahn import AllenCahnProblem
16 | from .advection import AdvectionProblem
17 | from .poisson_2d_square import Poisson2DSquareProblem
18 | from .diffusion_reaction import DiffusionReactionProblem
19 | from .inverse_poisson_2d_square import InversePoisson2DSquareProblem
20 |
--------------------------------------------------------------------------------
/pina/problem/zoo/allen_cahn.py:
--------------------------------------------------------------------------------
1 | """Formulation of the Allen Cahn problem."""
2 |
3 | import torch
4 | from ... import Condition
5 | from ...equation import Equation
6 | from ...domain import CartesianDomain
7 | from ...operator import grad, laplacian
8 | from ...problem import SpatialProblem, TimeDependentProblem
9 |
10 |
11 | def allen_cahn_equation(input_, output_):
12 | """
13 | Implementation of the Allen Cahn equation.
14 |
15 | :param LabelTensor input_: Input data of the problem.
16 | :param LabelTensor output_: Output data of the problem.
17 | :return: The residual of the Allen Cahn equation.
18 | :rtype: LabelTensor
19 | """
20 | u_t = grad(output_, input_, components=["u"], d=["t"])
21 | u_xx = laplacian(output_, input_, components=["u"], d=["x"])
22 | return u_t - 0.0001 * u_xx + 5 * output_**3 - 5 * output_
23 |
24 |
25 | def initial_condition(input_, output_):
26 | """
27 | Definition of the initial condition of the Allen Cahn problem.
28 |
29 | :param LabelTensor input_: Input data of the problem.
30 | :param LabelTensor output_: Output data of the problem.
31 | :return: The residual of the initial condition.
32 | :rtype: LabelTensor
33 | """
34 | x = input_.extract("x")
35 | u_0 = x**2 * torch.cos(torch.pi * x)
36 | return output_ - u_0
37 |
38 |
39 | class AllenCahnProblem(TimeDependentProblem, SpatialProblem):
40 | r"""
41 | Implementation of the Allen Cahn problem in the spatial interval
42 | :math:`[-1, 1]` and temporal interval :math:`[0, 1]`.
43 |
44 | .. seealso::
45 | **Original reference**: Sokratis J. Anagnostopoulos, Juan D. Toscano,
46 | Nikolaos Stergiopulos, and George E. Karniadakis.
47 | *Residual-based attention and connection to information
48 | bottleneck theory in PINNs*.
49 | Computer Methods in Applied Mechanics and Engineering 421 (2024): 116805
50 | DOI: `10.1016/j.cma.2024.116805
51 | <https://doi.org/10.1016/j.cma.2024.116805>`_.
52 |
53 | :Example:
54 | >>> problem = AllenCahnProblem()
55 | """
56 |
57 | output_variables = ["u"]
58 | spatial_domain = CartesianDomain({"x": [-1, 1]})
59 | temporal_domain = CartesianDomain({"t": [0, 1]})
60 |
61 | domains = {
62 | "D": CartesianDomain({"x": [-1, 1], "t": [0, 1]}),
63 | "t0": CartesianDomain({"x": [-1, 1], "t": 0.0}),
64 | }
65 |
66 | conditions = {
67 | "D": Condition(domain="D", equation=Equation(allen_cahn_equation)),
68 | "t0": Condition(domain="t0", equation=Equation(initial_condition)),
69 | }
70 |
--------------------------------------------------------------------------------
/pina/problem/zoo/inverse_poisson_2d_square.py:
--------------------------------------------------------------------------------
1 | """Formulation of the inverse Poisson problem in a square domain."""
2 |
3 | import requests
4 | import torch
5 | from io import BytesIO
6 | from ... import Condition
7 | from ... import LabelTensor
8 | from ...operator import laplacian
9 | from ...domain import CartesianDomain
10 | from ...equation import Equation, FixedValue
11 | from ...problem import SpatialProblem, InverseProblem
12 |
13 |
14 | def laplace_equation(input_, output_, params_):
15 | """
16 | Implementation of the Laplace equation.
17 |
18 | :param LabelTensor input_: Input data of the problem.
19 | :param LabelTensor output_: Output data of the problem.
20 | :param dict params_: Parameters of the problem.
21 | :return: The residual of the Laplace equation.
22 | :rtype: LabelTensor
23 | """
24 | force_term = torch.exp(
25 | -2 * (input_.extract(["x"]) - params_["mu1"]) ** 2
26 | - 2 * (input_.extract(["y"]) - params_["mu2"]) ** 2
27 | )
28 | delta_u = laplacian(output_, input_, components=["u"], d=["x", "y"])
29 | return delta_u - force_term
30 |
31 |
32 | # URL of the file
33 | url = "https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial7/data/pts_0.5_0.5"
34 | # Download the file
35 | response = requests.get(url)
36 | response.raise_for_status()
37 | file_like_object = BytesIO(response.content)
38 | # Set the data
39 | input_data = LabelTensor(
40 | torch.load(file_like_object, weights_only=False).tensor.detach(),
41 | ["x", "y", "mu1", "mu2"],
42 | )
43 |
44 | # URL of the file
45 | url = "https://github.com/mathLab/PINA/raw/refs/heads/master/tutorials/tutorial7/data/pinn_solution_0.5_0.5"
46 | # Download the file
47 | response = requests.get(url)
48 | response.raise_for_status()
49 | file_like_object = BytesIO(response.content)
50 | # Set the data
51 | output_data = LabelTensor(
52 | torch.load(file_like_object, weights_only=False).tensor.detach(), ["u"]
53 | )
54 |
55 |
56 | class InversePoisson2DSquareProblem(SpatialProblem, InverseProblem):
57 | r"""
58 | Implementation of the inverse 2-dimensional Poisson problem in the square
59 | domain :math:`[-2, 2] \times [-2, 2]`,
60 | with unknown parameter domain :math:`[-1, 1] \times [-1, 1]`.
61 |
62 | :Example:
63 | >>> problem = InversePoisson2DSquareProblem()
64 | """
65 |
66 | output_variables = ["u"]
67 | x_min, x_max = -2, 2
68 | y_min, y_max = -2, 2
69 | spatial_domain = CartesianDomain({"x": [x_min, x_max], "y": [y_min, y_max]})
70 | unknown_parameter_domain = CartesianDomain({"mu1": [-1, 1], "mu2": [-1, 1]})
71 |
72 | domains = {
73 | "g1": CartesianDomain({"x": [x_min, x_max], "y": y_max}),
74 | "g2": CartesianDomain({"x": [x_min, x_max], "y": y_min}),
75 | "g3": CartesianDomain({"x": x_max, "y": [y_min, y_max]}),
76 | "g4": CartesianDomain({"x": x_min, "y": [y_min, y_max]}),
77 | "D": CartesianDomain({"x": [x_min, x_max], "y": [y_min, y_max]}),
78 | }
79 |
80 | conditions = {
81 | "g1": Condition(domain="g1", equation=FixedValue(0.0)),
82 | "g2": Condition(domain="g2", equation=FixedValue(0.0)),
83 | "g3": Condition(domain="g3", equation=FixedValue(0.0)),
84 | "g4": Condition(domain="g4", equation=FixedValue(0.0)),
85 | "D": Condition(domain="D", equation=Equation(laplace_equation)),
86 | "data": Condition(input=input_data, target=output_data),
87 | }
88 |
--------------------------------------------------------------------------------
/pina/problem/zoo/poisson_2d_square.py:
--------------------------------------------------------------------------------
1 | """Formulation of the Poisson problem in a square domain."""
2 |
3 | import torch
4 | from ... import Condition
5 | from ...operator import laplacian
6 | from ...problem import SpatialProblem
7 | from ...domain import CartesianDomain
8 | from ...equation import Equation, FixedValue
9 |
10 |
11 | def laplace_equation(input_, output_):
12 | """
13 | Implementation of the Laplace equation.
14 |
15 | :param LabelTensor input_: Input data of the problem.
16 | :param LabelTensor output_: Output data of the problem.
17 | :return: The residual of the Laplace equation.
18 | :rtype: LabelTensor
19 | """
20 | force_term = (
21 | torch.sin(input_.extract(["x"]) * torch.pi)
22 | * torch.sin(input_.extract(["y"]) * torch.pi)
23 | * (2 * torch.pi**2)
24 | )
25 | delta_u = laplacian(output_, input_, components=["u"], d=["x", "y"])
26 | return delta_u - force_term
27 |
28 |
29 | class Poisson2DSquareProblem(SpatialProblem):
30 | r"""
31 | Implementation of the 2-dimensional Poisson problem in the square domain
32 | :math:`[0, 1] \times [0, 1]`.
33 |
34 | :Example:
35 | >>> problem = Poisson2DSquareProblem()
36 | """
37 |
38 | output_variables = ["u"]
39 | spatial_domain = CartesianDomain({"x": [0, 1], "y": [0, 1]})
40 |
41 | domains = {
42 | "D": CartesianDomain({"x": [0, 1], "y": [0, 1]}),
43 | "g1": CartesianDomain({"x": [0, 1], "y": 1.0}),
44 | "g2": CartesianDomain({"x": [0, 1], "y": 0.0}),
45 | "g3": CartesianDomain({"x": 1.0, "y": [0, 1]}),
46 | "g4": CartesianDomain({"x": 0.0, "y": [0, 1]}),
47 | }
48 |
49 | conditions = {
50 | "g1": Condition(domain="g1", equation=FixedValue(0.0)),
51 | "g2": Condition(domain="g2", equation=FixedValue(0.0)),
52 | "g3": Condition(domain="g3", equation=FixedValue(0.0)),
53 | "g4": Condition(domain="g4", equation=FixedValue(0.0)),
54 | "D": Condition(domain="D", equation=Equation(laplace_equation)),
55 | }
56 |
57 | def solution(self, pts):
58 | """
59 | Implementation of the analytical solution of the Poisson problem.
60 |
61 | :param LabelTensor pts: Points where the solution is evaluated.
62 | :return: The analytical solution of the Poisson problem.
63 | :rtype: LabelTensor
64 | """
65 | sol = -(
66 | torch.sin(pts.extract(["x"]) * torch.pi)
67 | * torch.sin(pts.extract(["y"]) * torch.pi)
68 | )
69 | sol.labels = self.output_variables
70 | return sol
71 |
--------------------------------------------------------------------------------
/pina/problem/zoo/supervised_problem.py:
--------------------------------------------------------------------------------
1 | """Formulation of a Supervised Problem in PINA."""
2 |
3 | from ..abstract_problem import AbstractProblem
4 | from ... import Condition
5 |
6 |
7 | class SupervisedProblem(AbstractProblem):
8 | """
9 | Definition of a supervised-learning problem.
10 |
11 | This class provides a simple way to define a supervised problem
12 | using a single condition of type
13 | :class:`~pina.condition.input_target_condition.InputTargetCondition`.
14 |
15 | :Example:
16 | >>> import torch
17 | >>> input_data = torch.rand((100, 10))
18 | >>> output_data = torch.rand((100, 10))
19 | >>> problem = SupervisedProblem(input_data, output_data)
20 | """
21 |
22 | conditions = {}
23 | output_variables = None
24 | input_variables = None
25 |
26 | def __init__(
27 | self, input_, output_, input_variables=None, output_variables=None
28 | ):
29 | """
30 | Initialization of the :class:`SupervisedProblem` class.
31 |
32 | :param input_: Input data of the problem.
33 | :type input_: torch.Tensor | LabelTensor | Graph | Data
34 | :param output_: Output data of the problem.
35 | :type output_: torch.Tensor | LabelTensor | Graph | Data
36 | """
37 | # Set input and output variables
38 | self.input_variables = input_variables
39 | self.output_variables = output_variables
40 | # Set the condition
41 | self.conditions["data"] = Condition(input=input_, target=output_)
42 | super().__init__()
43 |
--------------------------------------------------------------------------------
/pina/solver/__init__.py:
--------------------------------------------------------------------------------
1 | """Module for the solver classes."""
2 |
3 | __all__ = [
4 | "SolverInterface",
5 | "SingleSolverInterface",
6 | "MultiSolverInterface",
7 | "PINNInterface",
8 | "PINN",
9 | "GradientPINN",
10 | "CausalPINN",
11 | "CompetitivePINN",
12 | "SelfAdaptivePINN",
13 | "RBAPINN",
14 | "SupervisedSolverInterface",
15 | "SupervisedSolver",
16 | "ReducedOrderModelSolver",
17 | "DeepEnsembleSolverInterface",
18 | "DeepEnsembleSupervisedSolver",
19 | "DeepEnsemblePINN",
20 | "GAROM",
21 | ]
22 |
23 | from .solver import SolverInterface, SingleSolverInterface, MultiSolverInterface
24 | from .physics_informed_solver import (
25 | PINNInterface,
26 | PINN,
27 | GradientPINN,
28 | CausalPINN,
29 | CompetitivePINN,
30 | SelfAdaptivePINN,
31 | RBAPINN,
32 | )
33 | from .supervised_solver import (
34 | SupervisedSolverInterface,
35 | SupervisedSolver,
36 | ReducedOrderModelSolver,
37 | )
38 | from .ensemble_solver import (
39 | DeepEnsembleSolverInterface,
40 | DeepEnsembleSupervisedSolver,
41 | DeepEnsemblePINN,
42 | )
43 | from .garom import GAROM
44 |
--------------------------------------------------------------------------------
/pina/solver/ensemble_solver/__init__.py:
--------------------------------------------------------------------------------
1 | """Module for the Ensemble solver classes."""
2 |
3 | __all__ = [
4 | "DeepEnsembleSolverInterface",
5 | "DeepEnsembleSupervisedSolver",
6 | "DeepEnsemblePINN",
7 | ]
8 |
9 | from .ensemble_solver_interface import DeepEnsembleSolverInterface
10 | from .ensemble_supervised import DeepEnsembleSupervisedSolver
11 | from .ensemble_pinn import DeepEnsemblePINN
12 |
--------------------------------------------------------------------------------
/pina/solver/physics_informed_solver/__init__.py:
--------------------------------------------------------------------------------
1 | """Module for the Physics-Informed solvers."""
2 |
3 | __all__ = [
4 | "PINNInterface",
5 | "PINN",
6 | "GradientPINN",
7 | "CausalPINN",
8 | "CompetitivePINN",
9 | "SelfAdaptivePINN",
10 | "RBAPINN",
11 | ]
12 |
13 | from .pinn_interface import PINNInterface
14 | from .pinn import PINN
15 | from .rba_pinn import RBAPINN
16 | from .causal_pinn import CausalPINN
17 | from .gradient_pinn import GradientPINN
18 | from .competitive_pinn import CompetitivePINN
19 | from .self_adaptive_pinn import SelfAdaptivePINN
20 |
--------------------------------------------------------------------------------
/pina/solver/supervised_solver/__init__.py:
--------------------------------------------------------------------------------
1 | """Module for the Supervised solvers."""
2 |
3 | __all__ = [
4 | "SupervisedSolverInterface",
5 | "SupervisedSolver",
6 | "ReducedOrderModelSolver",
7 | ]
8 |
9 | from .supervised_solver_interface import SupervisedSolverInterface
10 | from .supervised import SupervisedSolver
11 | from .reduced_order_model import ReducedOrderModelSolver
12 |
--------------------------------------------------------------------------------
/pina/solvers/__init__.py:
--------------------------------------------------------------------------------
1 | """Old module for solvers. Deprecated in 0.2.0 ."""
2 |
3 | import warnings
4 |
5 | from ..solver import *
6 | from ..utils import custom_warning_format
7 |
8 | # back-compatibility 0.1
9 | # Set the custom format for warnings
10 | warnings.formatwarning = custom_warning_format
11 | warnings.filterwarnings("always", category=DeprecationWarning)
12 | warnings.warn(
13 | "'pina.solvers' is deprecated and will be removed "
14 | "in future versions. Please use 'pina.solver' instead.",
15 | DeprecationWarning,
16 | )
17 |
--------------------------------------------------------------------------------
/pina/solvers/pinns/__init__.py:
--------------------------------------------------------------------------------
1 | """Old module for the PINNs solver. Deprecated in 0.2.0."""
2 |
3 | import warnings
4 |
5 | from ...solver.physics_informed_solver import *
6 | from ...utils import custom_warning_format
7 |
8 | # back-compatibility 0.1
9 | # Set the custom format for warnings
10 | warnings.formatwarning = custom_warning_format
11 | warnings.filterwarnings("always", category=DeprecationWarning)
12 | warnings.warn(
13 | "'pina.solvers.pinns' is deprecated and will be removed "
14 | "in future versions. Please use "
15 | "'pina.solver.physics_informed_solver' instead.",
16 | DeprecationWarning,
17 | )
18 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [project]
2 | name = "pina-mathlab"
3 | version = "0.2.1"
4 | description = "Physics Informed Neural networks for Advanced modeling."
5 | readme = "README.md"
6 | authors = [
7 | {name = "PINA Contributors", email = "pina.mathlab@gmail.com"}
8 | ]
9 | license = { text = "MIT" }
10 | keywords = [
11 | "machine-learning", "deep-learning", "modeling", "pytorch", "ode",
12 | "neural-networks", "differential-equations", "pde", "hacktoberfest",
13 | "pinn", "physics-informed", "physics-informed-neural-networks",
14 | "neural-operators", "equation-learning", "lightining"
15 | ]
16 | dependencies = [
17 | "torch",
18 | "lightning",
19 | "torch_geometric",
20 | "matplotlib",
21 | ]
22 | requires-python = ">=3.9"
23 |
24 | [project.optional-dependencies]
25 | doc = [
26 | "sphinx>5.0,<8.2",
27 | "sphinx_rtd_theme",
28 | "sphinx_copybutton",
29 | "sphinx_design",
30 | "pydata_sphinx_theme"
31 | ]
32 | test = [
33 | "pytest",
34 | "pytest-cov",
35 | "scipy"
36 | ]
37 | dev = [
38 | "black"
39 | ]
40 | tutorial = [
41 | "jupyter",
42 | "smithers",
43 | "torchvision",
44 | "tensorboard",
45 | "scipy",
46 | "numpy",
47 | ]
48 |
49 | [project.urls]
50 | Homepage = "https://mathlab.github.io/PINA/"
51 | Repository = "https://github.com/mathLab/PINA"
52 |
53 | [build-system]
54 | requires = [ "setuptools>=41", "wheel", "setuptools-git-versioning>=2.0,<3", ]
55 | build-backend = "setuptools.build_meta"
56 |
57 | [tool.setuptools.packages.find]
58 | include = ["pina*"]
59 |
60 | [tool.black]
61 | line-length = 80
62 |
63 | [tool.isort]
64 | profile = "black"
65 |
--------------------------------------------------------------------------------
/readme/PINA_API.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/readme/PINA_API.png
--------------------------------------------------------------------------------
/readme/pina_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/readme/pina_logo.png
--------------------------------------------------------------------------------
/tests/test_adaptive_function.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pytest
3 |
4 | from pina.adaptive_function import (
5 | AdaptiveReLU,
6 | AdaptiveSigmoid,
7 | AdaptiveTanh,
8 | AdaptiveSiLU,
9 | AdaptiveMish,
10 | AdaptiveELU,
11 | AdaptiveCELU,
12 | AdaptiveGELU,
13 | AdaptiveSoftmin,
14 | AdaptiveSoftmax,
15 | AdaptiveSIREN,
16 | AdaptiveExp,
17 | )
18 |
19 |
20 | adaptive_function = (
21 | AdaptiveReLU,
22 | AdaptiveSigmoid,
23 | AdaptiveTanh,
24 | AdaptiveSiLU,
25 | AdaptiveMish,
26 | AdaptiveELU,
27 | AdaptiveCELU,
28 | AdaptiveGELU,
29 | AdaptiveSoftmin,
30 | AdaptiveSoftmax,
31 | AdaptiveSIREN,
32 | AdaptiveExp,
33 | )
34 | x = torch.rand(10, requires_grad=True)
35 |
36 |
37 | @pytest.mark.parametrize("Func", adaptive_function)
38 | def test_constructor(Func):
39 | if Func.__name__ == "AdaptiveExp":
40 | # simple
41 | Func()
42 | # setting values
43 | af = Func(alpha=1.0, beta=2.0)
44 | assert af.alpha.requires_grad
45 | assert af.beta.requires_grad
46 | assert af.alpha == 1.0
47 | assert af.beta == 2.0
48 | else:
49 | # simple
50 | Func()
51 | # setting values
52 | af = Func(alpha=1.0, beta=2.0, gamma=3.0)
53 | assert af.alpha.requires_grad
54 | assert af.beta.requires_grad
55 | assert af.gamma.requires_grad
56 | assert af.alpha == 1.0
57 | assert af.beta == 2.0
58 | assert af.gamma == 3.0
59 |
60 | # fixed variables
61 | af = Func(alpha=1.0, beta=2.0, fixed=["alpha"])
62 | assert af.alpha.requires_grad is False
63 | assert af.beta.requires_grad
64 | assert af.alpha == 1.0
65 | assert af.beta == 2.0
66 |
67 | with pytest.raises(TypeError):
68 | Func(alpha=1.0, beta=2.0, fixed=["delta"])
69 |
70 | with pytest.raises(ValueError):
71 | Func(alpha="s")
72 | Func(alpha=1)
73 |
74 |
75 | @pytest.mark.parametrize("Func", adaptive_function)
76 | def test_forward(Func):
77 | af = Func()
78 | af(x)
79 |
80 |
81 | @pytest.mark.parametrize("Func", adaptive_function)
82 | def test_backward(Func):
83 | af = Func()
84 | y = af(x)
85 | y.mean().backward()
86 |
--------------------------------------------------------------------------------
/tests/test_blocks/test_fourier.py:
--------------------------------------------------------------------------------
1 | from pina.model.block import FourierBlock1D, FourierBlock2D, FourierBlock3D
2 | import torch
3 |
4 | input_numb_fields = 3
5 | output_numb_fields = 4
6 | batch = 5
7 |
8 |
9 | def test_constructor_1d():
10 | FourierBlock1D(
11 | input_numb_fields=input_numb_fields,
12 | output_numb_fields=output_numb_fields,
13 | n_modes=5,
14 | )
15 |
16 |
17 | def test_forward_1d():
18 | sconv = FourierBlock1D(
19 | input_numb_fields=input_numb_fields,
20 | output_numb_fields=output_numb_fields,
21 | n_modes=4,
22 | )
23 | x = torch.rand(batch, input_numb_fields, 10)
24 | sconv(x)
25 |
26 |
27 | def test_backward_1d():
28 | sconv = FourierBlock1D(
29 | input_numb_fields=input_numb_fields,
30 | output_numb_fields=output_numb_fields,
31 | n_modes=4,
32 | )
33 | x = torch.rand(batch, input_numb_fields, 10)
34 | x.requires_grad = True
35 | sconv(x)
36 | l = torch.mean(sconv(x))
37 | l.backward()
38 | assert x.grad.shape == torch.Size([5, 3, 10])
39 |
40 |
41 | def test_constructor_2d():
42 | FourierBlock2D(
43 | input_numb_fields=input_numb_fields,
44 | output_numb_fields=output_numb_fields,
45 | n_modes=[5, 4],
46 | )
47 |
48 |
49 | def test_forward_2d():
50 | sconv = FourierBlock2D(
51 | input_numb_fields=input_numb_fields,
52 | output_numb_fields=output_numb_fields,
53 | n_modes=[5, 4],
54 | )
55 | x = torch.rand(batch, input_numb_fields, 10, 10)
56 | sconv(x)
57 |
58 |
59 | def test_backward_2d():
60 | sconv = FourierBlock2D(
61 | input_numb_fields=input_numb_fields,
62 | output_numb_fields=output_numb_fields,
63 | n_modes=[5, 4],
64 | )
65 | x = torch.rand(batch, input_numb_fields, 10, 10)
66 | x.requires_grad = True
67 | sconv(x)
68 | l = torch.mean(sconv(x))
69 | l.backward()
70 | assert x.grad.shape == torch.Size([5, 3, 10, 10])
71 |
72 |
73 | def test_constructor_3d():
74 | FourierBlock3D(
75 | input_numb_fields=input_numb_fields,
76 | output_numb_fields=output_numb_fields,
77 | n_modes=[5, 4, 4],
78 | )
79 |
80 |
81 | def test_forward_3d():
82 | sconv = FourierBlock3D(
83 | input_numb_fields=input_numb_fields,
84 | output_numb_fields=output_numb_fields,
85 | n_modes=[5, 4, 4],
86 | )
87 | x = torch.rand(batch, input_numb_fields, 10, 10, 10)
88 | sconv(x)
89 |
90 |
91 | def test_backward_3d():
92 | sconv = FourierBlock3D(
93 | input_numb_fields=input_numb_fields,
94 | output_numb_fields=output_numb_fields,
95 | n_modes=[5, 4, 4],
96 | )
97 | x = torch.rand(batch, input_numb_fields, 10, 10, 10)
98 | x.requires_grad = True
99 | sconv(x)
100 | l = torch.mean(sconv(x))
101 | l.backward()
102 | assert x.grad.shape == torch.Size([5, 3, 10, 10, 10])
103 |
--------------------------------------------------------------------------------
/tests/test_blocks/test_low_rank_block.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pytest
3 |
4 | from pina.model.block import LowRankBlock
5 | from pina import LabelTensor
6 |
7 |
8 | input_dimensions = 2
9 | embedding_dimenion = 1
10 | rank = 4
11 | inner_size = 20
12 | n_layers = 2
13 | func = torch.nn.Tanh
14 | bias = True
15 |
16 |
17 | def test_constructor():
18 | LowRankBlock(
19 | input_dimensions=input_dimensions,
20 | embedding_dimenion=embedding_dimenion,
21 | rank=rank,
22 | inner_size=inner_size,
23 | n_layers=n_layers,
24 | func=func,
25 | bias=bias,
26 | )
27 |
28 |
29 | def test_constructor_wrong():
30 | with pytest.raises(ValueError):
31 | LowRankBlock(
32 | input_dimensions=input_dimensions,
33 | embedding_dimenion=embedding_dimenion,
34 | rank=0.5,
35 | inner_size=inner_size,
36 | n_layers=n_layers,
37 | func=func,
38 | bias=bias,
39 | )
40 |
41 |
42 | def test_forward():
43 | block = LowRankBlock(
44 | input_dimensions=input_dimensions,
45 | embedding_dimenion=embedding_dimenion,
46 | rank=rank,
47 | inner_size=inner_size,
48 | n_layers=n_layers,
49 | func=func,
50 | bias=bias,
51 | )
52 | data = LabelTensor(torch.rand(10, 30, 3), labels=["x", "y", "u"])
53 | block(data.extract("u"), data.extract(["x", "y"]))
54 |
55 |
56 | def test_backward():
57 | block = LowRankBlock(
58 | input_dimensions=input_dimensions,
59 | embedding_dimenion=embedding_dimenion,
60 | rank=rank,
61 | inner_size=inner_size,
62 | n_layers=n_layers,
63 | func=func,
64 | bias=bias,
65 | )
66 | data = LabelTensor(torch.rand(10, 30, 3), labels=["x", "y", "u"])
67 | data.requires_grad_(True)
68 | out = block(data.extract("u"), data.extract(["x", "y"]))
69 | loss = out.mean()
70 | loss.backward()
71 |
--------------------------------------------------------------------------------
/tests/test_blocks/test_orthogonal.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pytest
3 | from pina.model.block import OrthogonalBlock
4 |
5 | torch.manual_seed(111)
6 |
7 | list_matrices = [
8 | torch.randn(10, 3),
9 | torch.rand(100, 5),
10 | torch.randn(5, 5),
11 | ]
12 |
13 | list_prohibited_matrices_dim0 = list_matrices[:-1]
14 |
15 |
16 | @pytest.mark.parametrize("dim", [-1, 0, 1, None])
17 | @pytest.mark.parametrize("requires_grad", [True, False, None])
18 | def test_constructor(dim, requires_grad):
19 | if dim is None and requires_grad is None:
20 | block = OrthogonalBlock()
21 | elif dim is None:
22 | block = OrthogonalBlock(requires_grad=requires_grad)
23 | elif requires_grad is None:
24 | block = OrthogonalBlock(dim=dim)
25 | else:
26 | block = OrthogonalBlock(dim=dim, requires_grad=requires_grad)
27 |
28 | if dim is not None:
29 | assert block.dim == dim
30 | if requires_grad is not None:
31 | assert block.requires_grad == requires_grad
32 |
33 |
34 | def test_wrong_constructor():
35 | with pytest.raises(IndexError):
36 | OrthogonalBlock(2)
37 | with pytest.raises(ValueError):
38 | OrthogonalBlock("a")
39 |
40 |
41 | @pytest.mark.parametrize("V", list_matrices)
42 | def test_forward(V):
43 | orth = OrthogonalBlock()
44 | orth_row = OrthogonalBlock(0)
45 | V_orth = orth(V)
46 | V_orth_row = orth_row(V.T)
47 | assert torch.allclose(V_orth.T @ V_orth, torch.eye(V.shape[1]), atol=1e-6)
48 | assert torch.allclose(
49 | V_orth_row @ V_orth_row.T, torch.eye(V.shape[1]), atol=1e-6
50 | )
51 |
52 |
53 | @pytest.mark.parametrize("V", list_matrices)
54 | def test_backward(V):
55 | orth = OrthogonalBlock(requires_grad=True)
56 | V_orth = orth(V)
57 | loss = V_orth.mean()
58 | loss.backward()
59 |
60 |
61 | @pytest.mark.parametrize("V", list_matrices)
62 | def test_wrong_backward(V):
63 | orth = OrthogonalBlock(requires_grad=False)
64 | V_orth = orth(V)
65 | loss = V_orth.mean()
66 | with pytest.raises(RuntimeError):
67 | loss.backward()
68 |
69 |
70 | @pytest.mark.parametrize("V", list_prohibited_matrices_dim0)
71 | def test_forward_prohibited(V):
72 | orth = OrthogonalBlock(0)
73 | with pytest.raises(Warning):
74 | V_orth = orth(V)
75 | assert V.shape[0] > V.shape[1]
76 |
--------------------------------------------------------------------------------
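The forward tests above pin down the contract of OrthogonalBlock: with the default constructor the output has orthonormal columns, and with dim=0 the transposed input yields orthonormal rows. A standalone sketch of the same invariant using plain torch.linalg.qr (only an illustration of the property being asserted, not the block's implementation):

    import torch

    V = torch.randn(10, 3)     # tall matrix, same shape as the first test case
    Q, _ = torch.linalg.qr(V)  # reduced QR: Q has orthonormal columns
    assert torch.allclose(Q.T @ Q, torch.eye(3), atol=1e-6)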
/tests/test_blocks/test_spectral_convolution.py:
--------------------------------------------------------------------------------
1 | from pina.model.block import (
2 | SpectralConvBlock1D,
3 | SpectralConvBlock2D,
4 | SpectralConvBlock3D,
5 | )
6 | import torch
7 |
8 | input_numb_fields = 3
9 | output_numb_fields = 4
10 | batch = 5
11 |
12 |
13 | def test_constructor_1d():
14 | SpectralConvBlock1D(
15 | input_numb_fields=input_numb_fields,
16 | output_numb_fields=output_numb_fields,
17 | n_modes=5,
18 | )
19 |
20 |
21 | def test_forward_1d():
22 | sconv = SpectralConvBlock1D(
23 | input_numb_fields=input_numb_fields,
24 | output_numb_fields=output_numb_fields,
25 | n_modes=4,
26 | )
27 | x = torch.rand(batch, input_numb_fields, 10)
28 | sconv(x)
29 |
30 |
31 | def test_backward_1d():
32 | sconv = SpectralConvBlock1D(
33 | input_numb_fields=input_numb_fields,
34 | output_numb_fields=output_numb_fields,
35 | n_modes=4,
36 | )
37 | x = torch.rand(batch, input_numb_fields, 10)
38 | x.requires_grad = True
39 | sconv(x)
40 | l = torch.mean(sconv(x))
41 | l.backward()
42 | assert x.grad.shape == torch.Size([5, 3, 10])
43 |
44 |
45 | def test_constructor_2d():
46 | SpectralConvBlock2D(
47 | input_numb_fields=input_numb_fields,
48 | output_numb_fields=output_numb_fields,
49 | n_modes=[5, 4],
50 | )
51 |
52 |
53 | def test_forward_2d():
54 | sconv = SpectralConvBlock2D(
55 | input_numb_fields=input_numb_fields,
56 | output_numb_fields=output_numb_fields,
57 | n_modes=[5, 4],
58 | )
59 | x = torch.rand(batch, input_numb_fields, 10, 10)
60 | sconv(x)
61 |
62 |
63 | def test_backward_2d():
64 | sconv = SpectralConvBlock2D(
65 | input_numb_fields=input_numb_fields,
66 | output_numb_fields=output_numb_fields,
67 | n_modes=[5, 4],
68 | )
69 | x = torch.rand(batch, input_numb_fields, 10, 10)
70 | x.requires_grad = True
71 | sconv(x)
72 | l = torch.mean(sconv(x))
73 | l.backward()
74 | assert x.grad.shape == torch.Size([5, 3, 10, 10])
75 |
76 |
77 | def test_constructor_3d():
78 | SpectralConvBlock3D(
79 | input_numb_fields=input_numb_fields,
80 | output_numb_fields=output_numb_fields,
81 | n_modes=[5, 4, 4],
82 | )
83 |
84 |
85 | def test_forward_3d():
86 | sconv = SpectralConvBlock3D(
87 | input_numb_fields=input_numb_fields,
88 | output_numb_fields=output_numb_fields,
89 | n_modes=[5, 4, 4],
90 | )
91 | x = torch.rand(batch, input_numb_fields, 10, 10, 10)
92 | sconv(x)
93 |
94 |
95 | def test_backward_3d():
96 | sconv = SpectralConvBlock3D(
97 | input_numb_fields=input_numb_fields,
98 | output_numb_fields=output_numb_fields,
99 | n_modes=[5, 4, 4],
100 | )
101 | x = torch.rand(batch, input_numb_fields, 10, 10, 10)
102 | x.requires_grad = True
103 | sconv(x)
104 | l = torch.mean(sconv(x))
105 | l.backward()
106 | assert x.grad.shape == torch.Size([5, 3, 10, 10, 10])
107 |
--------------------------------------------------------------------------------
/tests/test_callback/test_adaptive_refinement_callback.py:
--------------------------------------------------------------------------------
1 | from pina.solver import PINN
2 | from pina.trainer import Trainer
3 | from pina.model import FeedForward
4 | from pina.problem.zoo import Poisson2DSquareProblem as Poisson
5 | from pina.callback import R3Refinement
6 |
7 |
8 | # make the problem
9 | poisson_problem = Poisson()
10 | boundaries = ["g1", "g2", "g3", "g4"]
11 | n = 10
12 | poisson_problem.discretise_domain(n, "grid", domains=boundaries)
13 | poisson_problem.discretise_domain(n, "grid", domains="D")
14 | model = FeedForward(
15 | len(poisson_problem.input_variables), len(poisson_problem.output_variables)
16 | )
17 |
18 | # make the solver
19 | solver = PINN(problem=poisson_problem, model=model)
20 |
21 |
22 | # def test_r3constructor():
23 | # R3Refinement(sample_every=10)
24 |
25 |
26 | # def test_r3refinment_routine():
27 | # # make the trainer
28 | # trainer = Trainer(solver=solver,
29 | # callback=[R3Refinement(sample_every=1)],
30 | # accelerator='cpu',
31 | # max_epochs=5)
32 | # trainer.train()
33 |
34 | # def test_r3refinment_routine():
35 | # model = FeedForward(len(poisson_problem.input_variables),
36 | # len(poisson_problem.output_variables))
37 | # solver = PINN(problem=poisson_problem, model=model)
38 | # trainer = Trainer(solver=solver,
39 | # callback=[R3Refinement(sample_every=1)],
40 | # accelerator='cpu',
41 | # max_epochs=5)
42 | # before_n_points = {loc : len(pts) for loc, pts in trainer.solver.problem.input_pts.items()}
43 | # trainer.train()
44 | # after_n_points = {loc : len(pts) for loc, pts in trainer.solver.problem.input_pts.items()}
45 | # assert before_n_points == after_n_points
46 |
--------------------------------------------------------------------------------
/tests/test_callback/test_metric_tracker.py:
--------------------------------------------------------------------------------
1 | from pina.solver import PINN
2 | from pina.trainer import Trainer
3 | from pina.model import FeedForward
4 | from pina.callback import MetricTracker
5 | from pina.problem.zoo import Poisson2DSquareProblem as Poisson
6 |
7 |
8 | # make the problem
9 | poisson_problem = Poisson()
10 | boundaries = ["g1", "g2", "g3", "g4"]
11 | n = 10
12 | poisson_problem.discretise_domain(n, "grid", domains=boundaries)
13 | poisson_problem.discretise_domain(n, "grid", domains="D")
14 | model = FeedForward(
15 | len(poisson_problem.input_variables), len(poisson_problem.output_variables)
16 | )
17 |
18 | # make the solver
19 | solver = PINN(problem=poisson_problem, model=model)
20 |
21 |
22 | def test_metric_tracker_constructor():
23 | MetricTracker()
24 |
25 |
26 | def test_metric_tracker_routine():
27 | # make the trainer
28 | trainer = Trainer(
29 | solver=solver,
30 | callbacks=[MetricTracker()],
31 | accelerator="cpu",
32 | max_epochs=5,
33 | log_every_n_steps=1,
34 | )
35 | trainer.train()
36 | # get the tracked metrics
37 | metrics = trainer.callbacks[0].metrics
38 | # assert the logged metrics are correct
39 | logged_metrics = sorted(list(metrics.keys()))
40 | assert logged_metrics == ["train_loss"]
41 |
--------------------------------------------------------------------------------
/tests/test_callback/test_optimizer_callback.py:
--------------------------------------------------------------------------------
1 | from pina.callback import SwitchOptimizer
2 | import torch
3 | import pytest
4 |
5 | from pina.solver import PINN
6 | from pina.trainer import Trainer
7 | from pina.model import FeedForward
8 | from pina.problem.zoo import Poisson2DSquareProblem as Poisson
9 | from pina.optim import TorchOptimizer
10 |
11 | # make the problem
12 | poisson_problem = Poisson()
13 | boundaries = ["g1", "g2", "g3", "g4"]
14 | n = 10
15 | poisson_problem.discretise_domain(n, "grid", domains=boundaries)
16 | poisson_problem.discretise_domain(n, "grid", domains="D")
17 | model = FeedForward(
18 | len(poisson_problem.input_variables), len(poisson_problem.output_variables)
19 | )
20 |
21 | # make the solver
22 | solver = PINN(problem=poisson_problem, model=model)
23 |
24 | adam = TorchOptimizer(torch.optim.Adam, lr=0.01)
25 | lbfgs = TorchOptimizer(torch.optim.LBFGS, lr=0.001)
26 |
27 |
28 | def test_switch_optimizer_constructor():
29 | SwitchOptimizer(adam, epoch_switch=10)
30 |
31 |
32 | def test_switch_optimizer_routine():
33 | # check initial optimizer
34 | solver.configure_optimizers()
35 | assert solver.optimizer.instance.__class__ == torch.optim.Adam
36 | # make the trainer
37 | switch_opt_callback = SwitchOptimizer(lbfgs, epoch_switch=3)
38 | trainer = Trainer(
39 | solver=solver,
40 | callbacks=[switch_opt_callback],
41 | accelerator="cpu",
42 | max_epochs=5,
43 | )
44 | trainer.train()
45 | assert solver.optimizer.instance.__class__ == torch.optim.LBFGS
46 |
--------------------------------------------------------------------------------
/tests/test_callback/test_progress_bar.py:
--------------------------------------------------------------------------------
1 | from pina.solver import PINN
2 | from pina.trainer import Trainer
3 | from pina.model import FeedForward
4 | from pina.callback.processing_callback import PINAProgressBar
5 | from pina.problem.zoo import Poisson2DSquareProblem as Poisson
6 |
7 |
8 | # make the problem
9 | poisson_problem = Poisson()
10 | boundaries = ["g1", "g2", "g3", "g4"]
11 | n = 10
12 | condition_names = list(poisson_problem.conditions.keys())
13 | poisson_problem.discretise_domain(n, "grid", domains=boundaries)
14 | poisson_problem.discretise_domain(n, "grid", domains="D")
15 | model = FeedForward(
16 | len(poisson_problem.input_variables), len(poisson_problem.output_variables)
17 | )
18 |
19 | # make the solver
20 | solver = PINN(problem=poisson_problem, model=model)
21 |
22 |
23 | def test_progress_bar_constructor():
24 | PINAProgressBar()
25 |
26 |
27 | def test_progress_bar_routine():
28 | # make the trainer
29 | trainer = Trainer(
30 | solver=solver,
31 | callbacks=[PINAProgressBar(["val", condition_names[0]])],
32 | accelerator="cpu",
33 | max_epochs=5,
34 | )
35 | trainer.train()
36 | # TODO there should be a check that the correct metrics are displayed
37 |
--------------------------------------------------------------------------------
/tests/test_data/test_tensor_dataset.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pytest
3 | from pina.data.dataset import PinaDatasetFactory, PinaTensorDataset
4 |
5 | input_tensor = torch.rand((100, 10))
6 | output_tensor = torch.rand((100, 2))
7 |
8 | input_tensor_2 = torch.rand((50, 10))
9 | output_tensor_2 = torch.rand((50, 2))
10 |
11 | conditions_dict_single = {
12 | "data": {
13 | "input": input_tensor,
14 | "target": output_tensor,
15 | }
16 | }
17 |
18 | conditions_dict_single_multi = {
19 | "data_1": {
20 | "input": input_tensor,
21 | "target": output_tensor,
22 | },
23 | "data_2": {
24 | "input": input_tensor_2,
25 | "target": output_tensor_2,
26 | },
27 | }
28 |
29 | max_conditions_lengths_single = {"data": 100}
30 |
31 | max_conditions_lengths_multi = {"data_1": 100, "data_2": 50}
32 |
33 |
34 | @pytest.mark.parametrize(
35 | "conditions_dict, max_conditions_lengths",
36 | [
37 | (conditions_dict_single, max_conditions_lengths_single),
38 | (conditions_dict_single_multi, max_conditions_lengths_multi),
39 | ],
40 | )
41 | def test_constructor_tensor(conditions_dict, max_conditions_lengths):
42 | dataset = PinaDatasetFactory(
43 | conditions_dict,
44 | max_conditions_lengths=max_conditions_lengths,
45 | automatic_batching=True,
46 | )
47 | assert isinstance(dataset, PinaTensorDataset)
48 |
49 |
50 | def test_getitem_single():
51 | dataset = PinaDatasetFactory(
52 | conditions_dict_single,
53 | max_conditions_lengths=max_conditions_lengths_single,
54 | automatic_batching=False,
55 | )
56 |
57 | tensors = dataset.fetch_from_idx_list([i for i in range(70)])
58 | assert isinstance(tensors, dict)
59 | assert list(tensors.keys()) == ["data"]
60 | assert sorted(list(tensors["data"].keys())) == ["input", "target"]
61 | assert isinstance(tensors["data"]["input"], torch.Tensor)
62 | assert tensors["data"]["input"].shape == torch.Size((70, 10))
63 | assert isinstance(tensors["data"]["target"], torch.Tensor)
64 | assert tensors["data"]["target"].shape == torch.Size((70, 2))
65 |
66 |
67 | def test_getitem_multi():
68 | dataset = PinaDatasetFactory(
69 | conditions_dict_single_multi,
70 | max_conditions_lengths=max_conditions_lengths_multi,
71 | automatic_batching=False,
72 | )
73 | tensors = dataset.fetch_from_idx_list([i for i in range(70)])
74 | assert isinstance(tensors, dict)
75 | assert list(tensors.keys()) == ["data_1", "data_2"]
76 | assert sorted(list(tensors["data_1"].keys())) == ["input", "target"]
77 | assert isinstance(tensors["data_1"]["input"], torch.Tensor)
78 | assert tensors["data_1"]["input"].shape == torch.Size((70, 10))
79 | assert isinstance(tensors["data_1"]["target"], torch.Tensor)
80 | assert tensors["data_1"]["target"].shape == torch.Size((70, 2))
81 |
82 | assert sorted(list(tensors["data_2"].keys())) == ["input", "target"]
83 | assert isinstance(tensors["data_2"]["input"], torch.Tensor)
84 | assert tensors["data_2"]["input"].shape == torch.Size((50, 10))
85 | assert isinstance(tensors["data_2"]["target"], torch.Tensor)
86 | assert tensors["data_2"]["target"].shape == torch.Size((50, 2))
87 |
--------------------------------------------------------------------------------
/tests/test_equations/test_equation.py:
--------------------------------------------------------------------------------
1 | from pina.equation import Equation
2 | from pina.operator import grad, laplacian
3 | from pina import LabelTensor
4 | import torch
5 | import pytest
6 |
7 |
8 | def eq1(input_, output_):
9 | u_grad = grad(output_, input_)
10 | u1_xx = grad(u_grad, input_, components=["du1dx"], d=["x"])
11 | u2_xy = grad(u_grad, input_, components=["du2dx"], d=["y"])
12 | return torch.hstack([u1_xx, u2_xy])
13 |
14 |
15 | def eq2(input_, output_):
16 | force_term = torch.sin(input_.extract(["x"]) * torch.pi) * torch.sin(
17 | input_.extract(["y"]) * torch.pi
18 | )
19 | delta_u = laplacian(output_.extract(["u1"]), input_)
20 | return delta_u - force_term
21 |
22 |
23 | def foo():
24 | pass
25 |
26 |
27 | def test_constructor():
28 | Equation(eq1)
29 | Equation(eq2)
30 | with pytest.raises(ValueError):
31 | Equation([1, 2, 4])
32 | with pytest.raises(ValueError):
33 | Equation(foo())
34 |
35 |
36 | def test_residual():
37 | eq_1 = Equation(eq1)
38 | eq_2 = Equation(eq2)
39 |
40 | pts = LabelTensor(torch.rand(10, 2), labels=["x", "y"])
41 | pts.requires_grad = True
42 | u = torch.pow(pts, 2)
43 | u.labels = ["u1", "u2"]
44 |
45 | eq_1_res = eq_1.residual(pts, u)
46 | eq_2_res = eq_2.residual(pts, u)
47 |
48 | assert eq_1_res.shape == torch.Size([10, 2])
49 | assert eq_2_res.shape == torch.Size([10, 1])
50 |
--------------------------------------------------------------------------------
/tests/test_equations/test_system_equation.py:
--------------------------------------------------------------------------------
1 | from pina.equation import SystemEquation
2 | from pina.operator import grad, laplacian
3 | from pina import LabelTensor
4 | import torch
5 | import pytest
6 |
7 |
8 | def eq1(input_, output_):
9 | u_grad = grad(output_, input_)
10 | u1_xx = grad(u_grad, input_, components=["du1dx"], d=["x"])
11 | u2_xy = grad(u_grad, input_, components=["du2dx"], d=["y"])
12 | return torch.hstack([u1_xx, u2_xy])
13 |
14 |
15 | def eq2(input_, output_):
16 | force_term = torch.sin(input_.extract(["x"]) * torch.pi) * torch.sin(
17 | input_.extract(["y"]) * torch.pi
18 | )
19 | delta_u = laplacian(output_.extract(["u1"]), input_)
20 | return delta_u - force_term
21 |
22 |
23 | def foo():
24 | pass
25 |
26 |
27 | def test_constructor():
28 | SystemEquation([eq1, eq2])
29 | SystemEquation([eq1, eq2], reduction="sum")
30 | with pytest.raises(NotImplementedError):
31 | SystemEquation([eq1, eq2], reduction="foo")
32 | with pytest.raises(ValueError):
33 | SystemEquation(foo)
34 |
35 |
36 | def test_residual():
37 |
38 | pts = LabelTensor(torch.rand(10, 2), labels=["x", "y"])
39 | pts.requires_grad = True
40 | u = torch.pow(pts, 2)
41 | u.labels = ["u1", "u2"]
42 |
43 | eq_1 = SystemEquation([eq1, eq2], reduction="mean")
44 | res = eq_1.residual(pts, u)
45 | assert res.shape == torch.Size([10])
46 |
47 | eq_1 = SystemEquation([eq1, eq2], reduction="sum")
48 | res = eq_1.residual(pts, u)
49 | assert res.shape == torch.Size([10])
50 |
51 | eq_1 = SystemEquation([eq1, eq2], reduction=None)
52 | res = eq_1.residual(pts, u)
53 | assert res.shape == torch.Size([10, 3])
54 |
55 | eq_1 = SystemEquation([eq1, eq2])
56 | res = eq_1.residual(pts, u)
57 | assert res.shape == torch.Size([10, 3])
58 |
--------------------------------------------------------------------------------
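The shape assertions in test_residual follow from how the two callables are stacked: eq1 contributes two residual columns (the horizontal stack of two derivative terms) and eq2 contributes one, so reduction=None keeps all three columns, while 'mean' and 'sum' collapse them to a single value per point. A shape-only sketch with placeholder tensors (pure bookkeeping, no autograd):

    import torch

    r1 = torch.zeros(10, 2)               # eq1: two residual columns
    r2 = torch.zeros(10, 1)               # eq2: one residual column
    stacked = torch.cat([r1, r2], dim=1)  # reduction=None -> torch.Size([10, 3])
    reduced = stacked.mean(dim=1)         # 'mean' / 'sum' -> torch.Size([10])
    assert stacked.shape == torch.Size([10, 3]) and reduced.shape == torch.Size([10])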
/tests/test_geometry/test_cartesian.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from pina import LabelTensor
4 | from pina.domain import CartesianDomain
5 |
6 |
7 | def test_constructor():
8 | CartesianDomain({"x": [0, 1], "y": [0, 1]})
9 |
10 |
11 | def test_is_inside_check_border():
12 | pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"])
13 | pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ["x", "y"])
14 | pt_3 = LabelTensor(torch.tensor([[1.5, 0.5]]), ["x", "y"])
15 | domain = CartesianDomain({"x": [0, 1], "y": [0, 1]})
16 | for pt, exp_result in zip([pt_1, pt_2, pt_3], [True, True, False]):
17 | assert domain.is_inside(pt, check_border=True) == exp_result
18 |
19 |
20 | def test_is_inside_not_check_border():
21 | pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"])
22 | pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ["x", "y"])
23 | pt_3 = LabelTensor(torch.tensor([[1.5, 0.5]]), ["x", "y"])
24 | domain = CartesianDomain({"x": [0, 1], "y": [0, 1]})
25 | for pt, exp_result in zip([pt_1, pt_2, pt_3], [True, False, False]):
26 | assert domain.is_inside(pt, check_border=False) == exp_result
27 |
28 |
29 | def test_is_inside_fixed_variables():
30 | pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"])
31 | pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ["x", "y"])
32 | pt_3 = LabelTensor(torch.tensor([[1.0, 1.5]]), ["x", "y"])
33 | domain = CartesianDomain({"x": 1, "y": [0, 1]})
34 | for pt, exp_result in zip([pt_1, pt_2, pt_3], [False, True, False]):
35 | assert domain.is_inside(pt, check_border=False) == exp_result
36 |
--------------------------------------------------------------------------------
/tests/test_geometry/test_difference.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from pina import LabelTensor
4 | from pina.domain import Difference, EllipsoidDomain, CartesianDomain
5 |
6 |
7 | def test_constructor_two_CartesianDomains():
8 | Difference(
9 | [
10 | CartesianDomain({"x": [0, 2], "y": [0, 2]}),
11 | CartesianDomain({"x": [1, 3], "y": [1, 3]}),
12 | ]
13 | )
14 |
15 |
16 | def test_constructor_two_3DCartesianDomain():
17 | Difference(
18 | [
19 | CartesianDomain({"x": [0, 2], "y": [0, 2], "z": [0, 2]}),
20 | CartesianDomain({"x": [1, 3], "y": [1, 3], "z": [1, 3]}),
21 | ]
22 | )
23 |
24 |
25 | def test_constructor_three_CartesianDomains():
26 | Difference(
27 | [
28 | CartesianDomain({"x": [0, 2], "y": [0, 2]}),
29 | CartesianDomain({"x": [1, 3], "y": [1, 3]}),
30 | CartesianDomain({"x": [2, 4], "y": [2, 4]}),
31 | ]
32 | )
33 |
34 |
35 | def test_is_inside_two_CartesianDomains():
36 | pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"])
37 | pt_2 = LabelTensor(torch.tensor([[-1, -0.5]]), ["x", "y"])
38 | domain = Difference(
39 | [
40 | CartesianDomain({"x": [0, 2], "y": [0, 2]}),
41 | CartesianDomain({"x": [1, 3], "y": [1, 3]}),
42 | ]
43 | )
44 | assert domain.is_inside(pt_1) == True
45 | assert domain.is_inside(pt_2) == False
46 |
47 |
48 | def test_is_inside_two_3DCartesianDomain():
49 | pt_1 = LabelTensor(torch.tensor([[0.5, 0.5, 0.5]]), ["x", "y", "z"])
50 | pt_2 = LabelTensor(torch.tensor([[-1, -0.5, -0.5]]), ["x", "y", "z"])
51 | domain = Difference(
52 | [
53 | CartesianDomain({"x": [0, 2], "y": [0, 2], "z": [0, 2]}),
54 | CartesianDomain({"x": [1, 3], "y": [1, 3], "z": [1, 3]}),
55 | ]
56 | )
57 | assert domain.is_inside(pt_1) == True
58 | assert domain.is_inside(pt_2) == False
59 |
60 |
61 | def test_sample():
62 | n = 100
63 | domain = Difference(
64 | [
65 | EllipsoidDomain({"x": [-1, 1], "y": [-1, 1]}),
66 | CartesianDomain({"x": [-0.5, 0.5], "y": [-0.5, 0.5]}),
67 | ]
68 | )
69 | pts = domain.sample(n)
70 | assert isinstance(pts, LabelTensor)
71 | assert pts.shape[0] == n
72 |
73 | n = 105
74 | pts = domain.sample(n)
75 | assert isinstance(pts, LabelTensor)
76 | assert pts.shape[0] == n
77 |
--------------------------------------------------------------------------------
/tests/test_geometry/test_ellipsoid.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pytest
3 |
4 | from pina import LabelTensor
5 | from pina.domain import EllipsoidDomain
6 |
7 |
8 | def test_constructor():
9 | EllipsoidDomain({"x": [0, 1], "y": [0, 1]})
10 | EllipsoidDomain({"x": [0, 1], "y": [0, 1]}, sample_surface=True)
11 |
12 |
13 | def test_is_inside_sample_surface_false():
14 | domain = EllipsoidDomain({"x": [0, 1], "y": [0, 1]}, sample_surface=False)
15 | pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"])
16 | pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ["x", "y"])
17 | pt_3 = LabelTensor(torch.tensor([[1.5, 0.5]]), ["x", "y"])
18 | for pt, exp_result in zip([pt_1, pt_2, pt_3], [True, False, False]):
19 | assert domain.is_inside(pt) == exp_result
20 | for pt, exp_result in zip([pt_1, pt_2, pt_3], [True, True, False]):
21 | assert domain.is_inside(pt, check_border=True) == exp_result
22 |
23 |
24 | def test_is_inside_sample_surface_true():
25 | domain = EllipsoidDomain({"x": [0, 1], "y": [0, 1]}, sample_surface=True)
26 | pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"])
27 | pt_2 = LabelTensor(torch.tensor([[1.0, 0.5]]), ["x", "y"])
28 | pt_3 = LabelTensor(torch.tensor([[1.5, 0.5]]), ["x", "y"])
29 | for pt, exp_result in zip([pt_1, pt_2, pt_3], [False, True, False]):
30 | assert domain.is_inside(pt) == exp_result
31 |
--------------------------------------------------------------------------------
/tests/test_geometry/test_exclusion.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from pina import LabelTensor
4 | from pina.domain import Exclusion, EllipsoidDomain, CartesianDomain
5 |
6 |
7 | def test_constructor_two_CartesianDomains():
8 | Exclusion(
9 | [
10 | CartesianDomain({"x": [0, 2], "y": [0, 2]}),
11 | CartesianDomain({"x": [1, 3], "y": [1, 3]}),
12 | ]
13 | )
14 |
15 |
16 | def test_constructor_two_3DCartesianDomain():
17 | Exclusion(
18 | [
19 | CartesianDomain({"x": [0, 2], "y": [0, 2], "z": [0, 2]}),
20 | CartesianDomain({"x": [1, 3], "y": [1, 3], "z": [1, 3]}),
21 | ]
22 | )
23 |
24 |
25 | def test_constructor_three_CartesianDomains():
26 | Exclusion(
27 | [
28 | CartesianDomain({"x": [0, 2], "y": [0, 2]}),
29 | CartesianDomain({"x": [1, 3], "y": [1, 3]}),
30 | CartesianDomain({"x": [2, 4], "y": [2, 4]}),
31 | ]
32 | )
33 |
34 |
35 | def test_is_inside_two_CartesianDomains():
36 | pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"])
37 | pt_2 = LabelTensor(torch.tensor([[-1, -0.5]]), ["x", "y"])
38 | domain = Exclusion(
39 | [
40 | CartesianDomain({"x": [0, 2], "y": [0, 2]}),
41 | CartesianDomain({"x": [1, 3], "y": [1, 3]}),
42 | ]
43 | )
44 | assert domain.is_inside(pt_1) == True
45 | assert domain.is_inside(pt_2) == False
46 |
47 |
48 | def test_is_inside_two_3DCartesianDomain():
49 | pt_1 = LabelTensor(torch.tensor([[0.5, 0.5, 0.5]]), ["x", "y", "z"])
50 | pt_2 = LabelTensor(torch.tensor([[-1, -0.5, -0.5]]), ["x", "y", "z"])
51 | domain = Exclusion(
52 | [
53 | CartesianDomain({"x": [0, 2], "y": [0, 2], "z": [0, 2]}),
54 | CartesianDomain({"x": [1, 3], "y": [1, 3], "z": [1, 3]}),
55 | ]
56 | )
57 | assert domain.is_inside(pt_1) == True
58 | assert domain.is_inside(pt_2) == False
59 |
60 |
61 | def test_sample():
62 | n = 100
63 | domain = Exclusion(
64 | [
65 | EllipsoidDomain({"x": [-1, 1], "y": [-1, 1]}),
66 | CartesianDomain({"x": [0.3, 1.5], "y": [0.3, 1.5]}),
67 | ]
68 | )
69 | pts = domain.sample(n)
70 | assert isinstance(pts, LabelTensor)
71 | assert pts.shape[0] == n
72 |
73 | n = 105
74 | pts = domain.sample(n)
75 | assert isinstance(pts, LabelTensor)
76 | assert pts.shape[0] == n
77 |
--------------------------------------------------------------------------------
/tests/test_geometry/test_intersection.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from pina import LabelTensor
4 | from pina.domain import Intersection, EllipsoidDomain, CartesianDomain
5 |
6 |
7 | def test_constructor_two_CartesianDomains():
8 | Intersection(
9 | [
10 | CartesianDomain({"x": [0, 2], "y": [0, 2]}),
11 | CartesianDomain({"x": [1, 3], "y": [1, 3]}),
12 | ]
13 | )
14 |
15 |
16 | def test_constructor_two_3DCartesianDomain():
17 | Intersection(
18 | [
19 | CartesianDomain({"x": [0, 2], "y": [0, 2], "z": [0, 2]}),
20 | CartesianDomain({"x": [1, 3], "y": [1, 3], "z": [1, 3]}),
21 | ]
22 | )
23 |
24 |
25 | def test_constructor_three_CartesianDomains():
26 | Intersection(
27 | [
28 | CartesianDomain({"x": [0, 2], "y": [0, 2]}),
29 | CartesianDomain({"x": [1, 3], "y": [1, 3]}),
30 | CartesianDomain({"x": [2, 4], "y": [2, 4]}),
31 | ]
32 | )
33 |
34 |
35 | def test_is_inside_two_CartesianDomains():
36 | pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"])
37 | pt_2 = LabelTensor(torch.tensor([[-1, -0.5]]), ["x", "y"])
38 | pt_3 = LabelTensor(torch.tensor([[1.5, 1.5]]), ["x", "y"])
39 |
40 | domain = Intersection(
41 | [
42 | CartesianDomain({"x": [0, 2], "y": [0, 2]}),
43 | CartesianDomain({"x": [1, 3], "y": [1, 3]}),
44 | ]
45 | )
46 | assert domain.is_inside(pt_1) == False
47 | assert domain.is_inside(pt_2) == False
48 | assert domain.is_inside(pt_3) == True
49 |
50 |
51 | def test_is_inside_two_3DCartesianDomain():
52 | pt_1 = LabelTensor(torch.tensor([[0.5, 0.5, 0.5]]), ["x", "y", "z"])
53 | pt_2 = LabelTensor(torch.tensor([[-1, -0.5, -0.5]]), ["x", "y", "z"])
54 | pt_3 = LabelTensor(torch.tensor([[1.5, 1.5, 1.5]]), ["x", "y", "z"])
55 | domain = Intersection(
56 | [
57 | CartesianDomain({"x": [0, 2], "y": [0, 2], "z": [0, 2]}),
58 | CartesianDomain({"x": [1, 3], "y": [1, 3], "z": [1, 3]}),
59 | ]
60 | )
61 | assert domain.is_inside(pt_1) == False
62 | assert domain.is_inside(pt_2) == False
63 | assert domain.is_inside(pt_3) == True
64 |
65 |
66 | def test_sample():
67 | n = 100
68 | domain = Intersection(
69 | [
70 | EllipsoidDomain({"x": [-1, 1], "y": [-1, 1]}),
71 | CartesianDomain({"x": [-0.5, 0.5], "y": [-0.5, 0.5]}),
72 | ]
73 | )
74 | pts = domain.sample(n)
75 | assert isinstance(pts, LabelTensor)
76 | assert pts.shape[0] == n
77 |
78 | n = 105
79 | pts = domain.sample(n)
80 | assert isinstance(pts, LabelTensor)
81 | assert pts.shape[0] == n
82 |
--------------------------------------------------------------------------------
/tests/test_geometry/test_union.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from pina import LabelTensor
4 | from pina.domain import Union, EllipsoidDomain, CartesianDomain
5 |
6 |
7 | def test_constructor_two_CartesianDomains():
8 | Union(
9 | [
10 | CartesianDomain({"x": [0, 1], "y": [0, 1]}),
11 | CartesianDomain({"x": [0.5, 2], "y": [-1, 0.1]}),
12 | ]
13 | )
14 |
15 |
16 | def test_constructor_two_EllipsoidDomains():
17 | Union(
18 | [
19 | EllipsoidDomain({"x": [-1, 1], "y": [-1, 1], "z": [-1, 1]}),
20 | EllipsoidDomain(
21 | {"x": [-0.5, 0.5], "y": [-0.5, 0.5], "z": [-0.5, 0.5]}
22 | ),
23 | ]
24 | )
25 |
26 |
27 | def test_constructor_EllipsoidDomain_CartesianDomain():
28 | Union(
29 | [
30 | EllipsoidDomain({"x": [-1, 1], "y": [-1, 1]}),
31 | CartesianDomain({"x": [-0.5, 0.5], "y": [-0.5, 0.5]}),
32 | ]
33 | )
34 |
35 |
36 | def test_is_inside_two_CartesianDomains():
37 | pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"])
38 | pt_2 = LabelTensor(torch.tensor([[-1, -1]]), ["x", "y"])
39 | domain = Union(
40 | [
41 | CartesianDomain({"x": [0, 1], "y": [0, 1]}),
42 | CartesianDomain({"x": [0.5, 2], "y": [-1, 0.1]}),
43 | ]
44 | )
45 | assert domain.is_inside(pt_1) == True
46 | assert domain.is_inside(pt_2) == False
47 |
48 |
49 | def test_is_inside_two_EllipsoidDomains():
50 | pt_1 = LabelTensor(torch.tensor([[0.5, 0.5, 0.5]]), ["x", "y", "z"])
51 | pt_2 = LabelTensor(torch.tensor([[-1, -1, -1]]), ["x", "y", "z"])
52 | domain = Union(
53 | [
54 | EllipsoidDomain({"x": [-1, 1], "y": [-1, 1], "z": [-1, 1]}),
55 | EllipsoidDomain(
56 | {"x": [-0.5, 0.5], "y": [-0.5, 0.5], "z": [-0.5, 0.5]}
57 | ),
58 | ]
59 | )
60 | assert domain.is_inside(pt_1) == True
61 | assert domain.is_inside(pt_2) == False
62 |
63 |
64 | def test_is_inside_EllipsoidDomain_CartesianDomain():
65 | pt_1 = LabelTensor(torch.tensor([[0.5, 0.5]]), ["x", "y"])
66 | pt_2 = LabelTensor(torch.tensor([[-1, -1]]), ["x", "y"])
67 | domain = Union(
68 | [
69 | EllipsoidDomain(
70 | {
71 | "x": [-1, 1],
72 | "y": [-1, 1],
73 | }
74 | ),
75 | CartesianDomain({"x": [0.6, 1.5], "y": [-2, 0]}),
76 | ]
77 | )
78 | assert domain.is_inside(pt_1) == True
79 | assert domain.is_inside(pt_2) == False
80 |
81 |
82 | def test_sample():
83 | n = 100
84 | domain = Union(
85 | [
86 | EllipsoidDomain({"x": [-1, 1], "y": [-1, 1]}),
87 | CartesianDomain({"x": [-0.5, 0.5], "y": [-0.5, 0.5]}),
88 | ]
89 | )
90 | pts = domain.sample(n)
91 | assert isinstance(pts, LabelTensor)
92 | assert pts.shape[0] == n
93 |
94 | n = 105
95 | pts = domain.sample(n)
96 | assert isinstance(pts, LabelTensor)
97 | assert pts.shape[0] == n
98 |
--------------------------------------------------------------------------------
/tests/test_loss/test_lp_loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | from pina.loss import LpLoss
4 |
5 | input = torch.tensor([[3.0], [1.0], [-8.0]])
6 | target = torch.tensor([[6.0], [4.0], [2.0]])
7 | available_reductions = ["sum", "mean", "none"]
8 |
9 |
10 | def test_LpLoss_constructor():
11 | # test reduction
12 | for reduction in available_reductions:
13 | LpLoss(reduction=reduction)
14 | # test p
15 | for p in [float("inf"), -float("inf"), 1, 10, -8]:
16 | LpLoss(p=p)
17 |
18 |
19 | def test_LpLoss_forward():
20 | # l2 loss
21 | loss = LpLoss(p=2, reduction="mean")
22 | l2_loss = torch.mean(torch.sqrt((input - target).pow(2)))
23 | assert loss(input, target) == l2_loss
24 | # l1 loss
25 | loss = LpLoss(p=1, reduction="sum")
26 | l1_loss = torch.sum(torch.abs(input - target))
27 | assert loss(input, target) == l1_loss
28 |
29 |
30 | def test_LpRelativeLoss_constructor():
31 | # test reduction
32 | for reduction in available_reductions:
33 | LpLoss(reduction=reduction, relative=True)
34 | # test p
35 | for p in [float("inf"), -float("inf"), 1, 10, -8]:
36 | LpLoss(p=p, relative=True)
37 |
38 |
39 | def test_LpRelativeLoss_forward():
40 | # l2 relative loss
41 | loss = LpLoss(p=2, reduction="mean", relative=True)
42 | l2_loss = torch.sqrt((input - target).pow(2)) / torch.sqrt(input.pow(2))
43 | assert loss(input, target) == torch.mean(l2_loss)
44 | # l1 relative loss
45 | loss = LpLoss(p=1, reduction="sum", relative=True)
46 | l1_loss = torch.abs(input - target) / torch.abs(input)
47 | assert loss(input, target) == torch.sum(l1_loss)
48 |
--------------------------------------------------------------------------------
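For reference, the expected values in test_LpLoss_forward reduce to simple arithmetic on the fixed tensors: the element-wise differences are -3, -3 and -10, so the per-sample absolute errors are 3, 3 and 10; the mean-reduced p=2 reference is therefore 16/3 and the sum-reduced p=1 reference is 16. A plain-torch recomputation of those two numbers (independent of the LpLoss implementation):

    import torch

    input_ = torch.tensor([[3.0], [1.0], [-8.0]])
    target = torch.tensor([[6.0], [4.0], [2.0]])

    abs_err = (input_ - target).abs()  # [[3.], [3.], [10.]]
    print(abs_err.mean().item())       # 16/3 ~ 5.3333  (p=2, 'mean' reference)
    print(abs_err.sum().item())        # 16.0           (p=1, 'sum' reference)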
/tests/test_loss/test_power_loss.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pytest
3 |
4 | from pina.loss import PowerLoss
5 |
6 | input = torch.tensor([[3.0], [1.0], [-8.0]])
7 | target = torch.tensor([[6.0], [4.0], [2.0]])
8 | available_reductions = ["sum", "mean", "none"]
9 |
10 |
11 | def test_PowerLoss_constructor():
12 | # test reduction
13 | for reduction in available_reductions:
14 | PowerLoss(reduction=reduction)
15 | # test p
16 | for p in [float("inf"), -float("inf"), 1, 10, -8]:
17 | PowerLoss(p=p)
18 |
19 |
20 | def test_PowerLoss_forward():
21 | # l2 loss
22 | loss = PowerLoss(p=2, reduction="mean")
23 | l2_loss = torch.mean((input - target).pow(2))
24 | assert loss(input, target) == l2_loss
25 | # l1 loss
26 | loss = PowerLoss(p=1, reduction="sum")
27 | l1_loss = torch.sum(torch.abs(input - target))
28 | assert loss(input, target) == l1_loss
29 |
30 |
31 | def test_PowerRelativeLoss_constructor():
32 | # test reduction
33 | for reduction in available_reductions:
34 | PowerLoss(reduction=reduction, relative=True)
35 | # test p
36 | for p in [float("inf"), -float("inf"), 1, 10, -8]:
37 | PowerLoss(p=p, relative=True)
38 |
39 |
40 | def test_PowerRelativeLoss_forward():
41 | # l2 relative loss
42 | loss = PowerLoss(p=2, reduction="mean", relative=True)
43 | l2_loss = (input - target).pow(2) / input.pow(2)
44 | assert loss(input, target) == torch.mean(l2_loss)
45 | # l1 relative loss
46 | loss = PowerLoss(p=1, reduction="sum", relative=True)
47 | l1_loss = torch.abs(input - target) / torch.abs(input)
48 | assert loss(input, target) == torch.sum(l1_loss)
49 |
--------------------------------------------------------------------------------
/tests/test_model/test_feed_forward.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pytest
3 |
4 | from pina.model import FeedForward
5 |
6 | data = torch.rand((20, 3))
7 | input_vars = 3
8 | output_vars = 4
9 |
10 |
11 | def test_constructor():
12 | FeedForward(input_vars, output_vars)
13 | FeedForward(input_vars, output_vars, inner_size=10, n_layers=20)
14 | FeedForward(input_vars, output_vars, layers=[10, 20, 5, 2])
15 | FeedForward(
16 | input_vars, output_vars, layers=[10, 20, 5, 2], func=torch.nn.ReLU
17 | )
18 | FeedForward(
19 | input_vars,
20 | output_vars,
21 | layers=[10, 20, 5, 2],
22 | func=[torch.nn.ReLU, torch.nn.ReLU, None, torch.nn.Tanh],
23 | )
24 |
25 |
26 | def test_constructor_wrong():
27 | with pytest.raises(RuntimeError):
28 | FeedForward(
29 | input_vars,
30 | output_vars,
31 | layers=[10, 20, 5, 2],
32 | func=[torch.nn.ReLU, torch.nn.ReLU],
33 | )
34 |
35 |
36 | def test_forward():
37 | dim_in, dim_out = 3, 2
38 | fnn = FeedForward(dim_in, dim_out)
39 | output_ = fnn(data)
40 | assert output_.shape == (data.shape[0], dim_out)
41 |
42 |
43 | def test_backward():
44 | dim_in, dim_out = 3, 2
45 | fnn = FeedForward(dim_in, dim_out)
46 | data.requires_grad = True
47 | output_ = fnn(data)
48 | l = torch.mean(output_)
49 | l.backward()
50 | assert data._grad.shape == torch.Size([20, 3])
51 |
--------------------------------------------------------------------------------
/tests/test_model/test_kernel_neural_operator.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from pina.model import KernelNeuralOperator, FeedForward
3 |
4 | input_dim = 2
5 | output_dim = 4
6 | embedding_dim = 24
7 | batch_size = 10
8 | numb = 256
9 | data = torch.rand(size=(batch_size, numb, input_dim), requires_grad=True)
10 | output_shape = torch.Size([batch_size, numb, output_dim])
11 |
12 |
13 | lifting_operator = FeedForward(
14 | input_dimensions=input_dim, output_dimensions=embedding_dim
15 | )
16 | projection_operator = FeedForward(
17 | input_dimensions=embedding_dim, output_dimensions=output_dim
18 | )
19 | integral_kernels = torch.nn.Sequential(
20 | FeedForward(
21 | input_dimensions=embedding_dim, output_dimensions=embedding_dim
22 | ),
23 | FeedForward(
24 | input_dimensions=embedding_dim, output_dimensions=embedding_dim
25 | ),
26 | )
27 |
28 |
29 | def test_constructor():
30 | KernelNeuralOperator(
31 | lifting_operator=lifting_operator,
32 | integral_kernels=integral_kernels,
33 | projection_operator=projection_operator,
34 | )
35 |
36 |
37 | def test_forward():
38 | operator = KernelNeuralOperator(
39 | lifting_operator=lifting_operator,
40 | integral_kernels=integral_kernels,
41 | projection_operator=projection_operator,
42 | )
43 | out = operator(data)
44 | assert out.shape == output_shape
45 |
46 |
47 | def test_backward():
48 | operator = KernelNeuralOperator(
49 | lifting_operator=lifting_operator,
50 | integral_kernels=integral_kernels,
51 | projection_operator=projection_operator,
52 | )
53 | out = operator(data)
54 | loss = torch.nn.functional.mse_loss(out, torch.zeros_like(out))
55 | loss.backward()
56 | grad = data.grad
57 | assert grad.shape == data.shape
58 |
--------------------------------------------------------------------------------
/tests/test_model/test_residual_feed_forward.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pytest
3 | from pina.model import ResidualFeedForward
4 |
5 |
6 | def test_constructor():
7 | # simple constructor
8 | ResidualFeedForward(input_dimensions=2, output_dimensions=1)
9 |
10 | # wrong transformer nets (not 2)
11 | with pytest.raises(ValueError):
12 | ResidualFeedForward(
13 | input_dimensions=2,
14 | output_dimensions=1,
15 | transformer_nets=[torch.nn.Linear(2, 20)],
16 | )
17 |
18 | # wrong transformer nets (not nn.Module)
19 | with pytest.raises(ValueError):
20 | ResidualFeedForward(
21 | input_dimensions=2, output_dimensions=1, transformer_nets=[2, 2]
22 | )
23 |
24 |
25 | def test_forward():
26 | x = torch.rand(10, 2)
27 | model = ResidualFeedForward(input_dimensions=2, output_dimensions=1)
28 | model(x)
29 |
30 |
31 | def test_backward():
32 | x = torch.rand(10, 2)
33 | x.requires_grad = True
34 | model = ResidualFeedForward(input_dimensions=2, output_dimensions=1)
35 | model(x)
36 | l = torch.mean(model(x))
37 | l.backward()
38 | assert x.grad.shape == torch.Size([10, 2])
39 |
--------------------------------------------------------------------------------
/tests/test_model/test_spline.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pytest
3 |
4 | from pina.model import Spline
5 |
6 | data = torch.rand((20, 3))
7 | input_vars = 3
8 | output_vars = 4
9 |
10 | valid_args = [
11 | {
12 | "knots": torch.tensor([0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 3.0, 3.0]),
13 | "control_points": torch.tensor([0.0, 0.0, 1.0, 0.0, 0.0]),
14 | "order": 3,
15 | },
16 | {
17 | "knots": torch.tensor(
18 | [-2.0, -2.0, -2.0, -2.0, -1.0, 0.0, 1.0, 2.0, 2.0, 2.0, 2.0]
19 | ),
20 | "control_points": torch.tensor([0.0, 0.0, 0.0, 6.0, 0.0, 0.0, 0.0]),
21 | "order": 4,
22 | },
23 | # {'control_points': {'n': 5, 'dim': 1}, 'order': 2},
24 | # {'control_points': {'n': 7, 'dim': 1}, 'order': 3}
25 | ]
26 |
27 |
28 | def scipy_check(model, x, y):
29 | from scipy.interpolate import BSpline
30 | import numpy as np
31 |
32 | spline = BSpline(
33 | t=model.knots.detach().numpy(),
34 | c=model.control_points.detach().numpy(),
35 | k=model.order - 1,
36 | )
37 | y_scipy = spline(x).flatten()
38 | y = y.detach().numpy()
39 | np.testing.assert_allclose(y, y_scipy, atol=1e-5)
40 |
41 |
42 | @pytest.mark.parametrize("args", valid_args)
43 | def test_constructor(args):
44 | Spline(**args)
45 |
46 |
47 | def test_constructor_wrong():
48 | with pytest.raises(ValueError):
49 | Spline()
50 |
51 |
52 | @pytest.mark.parametrize("args", valid_args)
53 | def test_forward(args):
54 | min_x = args["knots"][0]
55 | max_x = args["knots"][-1]
56 | xi = torch.linspace(min_x, max_x, 1000)
57 | model = Spline(**args)
58 | yi = model(xi).squeeze()
59 | scipy_check(model, xi, yi)
60 | return
61 |
62 |
63 | @pytest.mark.parametrize("args", valid_args)
64 | def test_backward(args):
65 | min_x = args["knots"][0]
66 | max_x = args["knots"][-1]
67 | xi = torch.linspace(min_x, max_x, 100)
68 | model = Spline(**args)
69 | yi = model(xi)
70 | fake_loss = torch.sum(yi)
71 | assert model.control_points.grad is None
72 | fake_loss.backward()
73 | assert model.control_points.grad is not None
74 |
75 | # dim_in, dim_out = 3, 2
76 | # fnn = FeedForward(dim_in, dim_out)
77 | # data.requires_grad = True
78 | # output_ = fnn(data)
79 | # l=torch.mean(output_)
80 | # l.backward()
81 | # assert data._grad.shape == torch.Size([20,3])
82 |
--------------------------------------------------------------------------------
/tests/test_optimizer.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pytest
3 | from pina.optim import TorchOptimizer
4 |
5 | opt_list = [
6 | torch.optim.Adam,
7 | torch.optim.AdamW,
8 | torch.optim.SGD,
9 | torch.optim.RMSprop,
10 | ]
11 |
12 |
13 | @pytest.mark.parametrize("optimizer_class", opt_list)
14 | def test_constructor(optimizer_class):
15 | TorchOptimizer(optimizer_class, lr=1e-3)
16 |
17 |
18 | @pytest.mark.parametrize("optimizer_class", opt_list)
19 | def test_hook(optimizer_class):
20 | opt = TorchOptimizer(optimizer_class, lr=1e-3)
21 | opt.hook(torch.nn.Linear(10, 10).parameters())
22 |
--------------------------------------------------------------------------------
/tests/test_package.py:
--------------------------------------------------------------------------------
1 | def test_import():
2 | import pina
3 |
--------------------------------------------------------------------------------
/tests/test_problem_zoo/test_advection.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pina.problem.zoo import AdvectionProblem
3 | from pina.problem import SpatialProblem, TimeDependentProblem
4 |
5 |
6 | @pytest.mark.parametrize("c", [1.5, 3])
7 | def test_constructor(c):
8 | print(f"Testing with c = {c} (type: {type(c)})")
9 | problem = AdvectionProblem(c=c)
10 | problem.discretise_domain(n=10, mode="random", domains="all")
11 | assert problem.are_all_domains_discretised
12 | assert isinstance(problem, SpatialProblem)
13 | assert isinstance(problem, TimeDependentProblem)
14 | assert hasattr(problem, "conditions")
15 | assert isinstance(problem.conditions, dict)
16 |
17 | with pytest.raises(ValueError):
18 | AdvectionProblem(c="a")
19 |
--------------------------------------------------------------------------------
/tests/test_problem_zoo/test_allen_cahn.py:
--------------------------------------------------------------------------------
1 | from pina.problem.zoo import AllenCahnProblem
2 | from pina.problem import SpatialProblem, TimeDependentProblem
3 |
4 |
5 | def test_constructor():
6 | problem = AllenCahnProblem()
7 | problem.discretise_domain(n=10, mode="random", domains="all")
8 | assert problem.are_all_domains_discretised
9 | assert isinstance(problem, SpatialProblem)
10 | assert isinstance(problem, TimeDependentProblem)
11 | assert hasattr(problem, "conditions")
12 | assert isinstance(problem.conditions, dict)
13 |
--------------------------------------------------------------------------------
/tests/test_problem_zoo/test_diffusion_reaction.py:
--------------------------------------------------------------------------------
1 | from pina.problem.zoo import DiffusionReactionProblem
2 | from pina.problem import TimeDependentProblem, SpatialProblem
3 |
4 |
5 | def test_constructor():
6 | problem = DiffusionReactionProblem()
7 | problem.discretise_domain(n=10, mode="random", domains="all")
8 | assert problem.are_all_domains_discretised
9 | assert isinstance(problem, TimeDependentProblem)
10 | assert isinstance(problem, SpatialProblem)
11 | assert hasattr(problem, "conditions")
12 | assert isinstance(problem.conditions, dict)
13 |
--------------------------------------------------------------------------------
/tests/test_problem_zoo/test_helmholtz.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pina.problem.zoo import HelmholtzProblem
3 | from pina.problem import SpatialProblem
4 |
5 |
6 | @pytest.mark.parametrize("alpha", [1.5, 3])
7 | def test_constructor(alpha):
8 | problem = HelmholtzProblem(alpha=alpha)
9 | problem.discretise_domain(n=10, mode="random", domains="all")
10 | assert problem.are_all_domains_discretised
11 | assert isinstance(problem, SpatialProblem)
12 | assert hasattr(problem, "conditions")
13 | assert isinstance(problem.conditions, dict)
14 |
15 | with pytest.raises(ValueError):
16 | HelmholtzProblem(alpha="a")
17 |
--------------------------------------------------------------------------------
/tests/test_problem_zoo/test_inverse_poisson_2d_square.py:
--------------------------------------------------------------------------------
1 | from pina.problem.zoo import InversePoisson2DSquareProblem
2 | from pina.problem import InverseProblem, SpatialProblem
3 |
4 |
5 | def test_constructor():
6 | problem = InversePoisson2DSquareProblem()
7 | problem.discretise_domain(n=10, mode="random", domains="all")
8 | assert problem.are_all_domains_discretised
9 | assert isinstance(problem, InverseProblem)
10 | assert isinstance(problem, SpatialProblem)
11 | assert hasattr(problem, "conditions")
12 | assert isinstance(problem.conditions, dict)
13 |
--------------------------------------------------------------------------------
/tests/test_problem_zoo/test_poisson_2d_square.py:
--------------------------------------------------------------------------------
1 | from pina.problem.zoo import Poisson2DSquareProblem
2 | from pina.problem import SpatialProblem
3 |
4 |
5 | def test_constructor():
6 | problem = Poisson2DSquareProblem()
7 | problem.discretise_domain(n=10, mode="random", domains="all")
8 | assert problem.are_all_domains_discretised
9 | assert isinstance(problem, SpatialProblem)
10 | assert hasattr(problem, "conditions")
11 | assert isinstance(problem.conditions, dict)
12 |
--------------------------------------------------------------------------------
/tests/test_problem_zoo/test_supervised_problem.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from pina.problem import AbstractProblem
3 | from pina.condition import InputTargetCondition
4 | from pina.problem.zoo.supervised_problem import SupervisedProblem
5 | from pina.graph import RadiusGraph
6 |
7 |
8 | def test_constructor():
9 | input_ = torch.rand((100, 10))
10 | output_ = torch.rand((100, 10))
11 | problem = SupervisedProblem(input_=input_, output_=output_)
12 | assert isinstance(problem, AbstractProblem)
13 | assert hasattr(problem, "conditions")
14 | assert isinstance(problem.conditions, dict)
15 | assert list(problem.conditions.keys()) == ["data"]
16 | assert isinstance(problem.conditions["data"], InputTargetCondition)
17 |
18 |
19 | def test_constructor_graph():
20 | x = torch.rand((20, 100, 10))
21 | pos = torch.rand((20, 100, 2))
22 | input_ = [
23 | RadiusGraph(x=x_, pos=pos_, radius=0.2, edge_attr=True)
24 | for x_, pos_ in zip(x, pos)
25 | ]
26 | output_ = torch.rand((20, 100, 10))
27 | problem = SupervisedProblem(input_=input_, output_=output_)
28 | assert isinstance(problem, AbstractProblem)
29 | assert hasattr(problem, "conditions")
30 | assert isinstance(problem.conditions, dict)
31 | assert list(problem.conditions.keys()) == ["data"]
32 | assert isinstance(problem.conditions["data"], InputTargetCondition)
33 | assert isinstance(problem.conditions["data"].input, list)
34 | assert isinstance(problem.conditions["data"].target, torch.Tensor)
35 |
--------------------------------------------------------------------------------
/tests/test_scheduler.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import pytest
3 | from pina.optim import TorchOptimizer, TorchScheduler
4 |
5 | opt_list = [
6 | torch.optim.Adam,
7 | torch.optim.AdamW,
8 | torch.optim.SGD,
9 | torch.optim.RMSprop,
10 | ]
11 |
12 | sch_list = [torch.optim.lr_scheduler.ConstantLR]
13 |
14 |
15 | @pytest.mark.parametrize("scheduler_class", sch_list)
16 | def test_constructor(scheduler_class):
17 | TorchScheduler(scheduler_class)
18 |
19 |
20 | @pytest.mark.parametrize("optimizer_class", opt_list)
21 | @pytest.mark.parametrize("scheduler_class", sch_list)
22 | def test_hook(optimizer_class, scheduler_class):
23 | opt = TorchOptimizer(optimizer_class, lr=1e-3)
24 | opt.hook(torch.nn.Linear(10, 10).parameters())
25 | sch = TorchScheduler(scheduler_class)
26 | sch.hook(opt)
27 |
--------------------------------------------------------------------------------
/tests/test_utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
3 | import pytest
4 |
5 | from pina import LabelTensor
6 | from pina.utils import merge_tensors
7 | from pina.utils import check_consistency
8 | from pina.domain import EllipsoidDomain, CartesianDomain
9 | from pina.domain import DomainInterface
10 |
11 |
12 | def test_merge_tensors():
13 | tensor1 = LabelTensor(torch.rand((20, 3)), ["a", "b", "c"])
14 | tensor2 = LabelTensor(torch.zeros((20, 3)), ["d", "e", "f"])
15 | tensor3 = LabelTensor(torch.ones((30, 3)), ["g", "h", "i"])
16 |
17 | merged_tensor = merge_tensors((tensor1, tensor2, tensor3))
18 | assert tuple(merged_tensor.labels) == (
19 | "a",
20 | "b",
21 | "c",
22 | "d",
23 | "e",
24 | "f",
25 | "g",
26 | "h",
27 | "i",
28 | )
29 | assert merged_tensor.shape == (20 * 20 * 30, 9)
30 | assert torch.all(merged_tensor.extract(("d", "e", "f")) == 0)
31 | assert torch.all(merged_tensor.extract(("g", "h", "i")) == 1)
32 |
33 |
34 | def test_check_consistency_correct():
35 | ellipsoid1 = EllipsoidDomain({"x": [1, 2], "y": [-2, 1]})
36 | example_input_pts = LabelTensor(torch.tensor([[0, 0, 0]]), ["x", "y", "z"])
37 |
38 | check_consistency(example_input_pts, torch.Tensor)
39 | check_consistency(CartesianDomain, DomainInterface, subclass=True)
40 | check_consistency(ellipsoid1, DomainInterface)
41 |
42 |
43 | def test_check_consistency_incorrect():
44 | ellipsoid1 = EllipsoidDomain({"x": [1, 2], "y": [-2, 1]})
45 | example_input_pts = LabelTensor(torch.tensor([[0, 0, 0]]), ["x", "y", "z"])
46 |
47 | with pytest.raises(ValueError):
48 | check_consistency(example_input_pts, DomainInterface)
49 | with pytest.raises(ValueError):
50 | check_consistency(torch.Tensor, DomainInterface, subclass=True)
51 | with pytest.raises(ValueError):
52 | check_consistency(ellipsoid1, torch.Tensor)
53 |
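The shape assertion (20 * 20 * 30, 9) in test_merge_tensors indicates that merge_tensors combines its inputs as a cartesian product of their rows, concatenating the labelled columns side by side, rather than stacking tensors row-wise. The plain-torch sketch below reproduces that shape; cartesian_merge is a hypothetical helper, not PINA's implementation.

import torch

def cartesian_merge(tensors):
    # Index grids over the rows of each tensor: every combination of one row
    # from each input appears exactly once; columns are concatenated.
    sizes = [t.shape[0] for t in tensors]
    grids = torch.meshgrid(*[torch.arange(n) for n in sizes], indexing="ij")
    return torch.cat([t[g.reshape(-1)] for t, g in zip(tensors, grids)], dim=-1)

a, b, c = torch.rand(20, 3), torch.zeros(20, 3), torch.ones(30, 3)
merged = cartesian_merge((a, b, c))
assert merged.shape == (20 * 20 * 30, 9)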
--------------------------------------------------------------------------------
/tests/test_weighting/test_ntk_weighting.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from pina import Trainer
3 | from pina.solver import PINN
4 | from pina.model import FeedForward
5 | from pina.problem.zoo import Poisson2DSquareProblem
6 | from pina.loss import NeuralTangentKernelWeighting
7 |
8 | problem = Poisson2DSquareProblem()
9 | condition_names = problem.conditions.keys()
10 |
11 |
12 | @pytest.mark.parametrize(
13 | "model,alpha",
14 | [
15 | (
16 | FeedForward(
17 | len(problem.input_variables), len(problem.output_variables)
18 | ),
19 | 0.5,
20 | )
21 | ],
22 | )
23 | def test_constructor(model, alpha):
24 | NeuralTangentKernelWeighting(model=model, alpha=alpha)
25 |
26 |
27 | @pytest.mark.parametrize("model", [0.5])
28 | def test_wrong_constructor1(model):
29 | with pytest.raises(ValueError):
30 | NeuralTangentKernelWeighting(model)
31 |
32 |
33 | @pytest.mark.parametrize(
34 | "model,alpha",
35 | [
36 | (
37 | FeedForward(
38 | len(problem.input_variables), len(problem.output_variables)
39 | ),
40 | 1.2,
41 | )
42 | ],
43 | )
44 | def test_wrong_constructor2(model, alpha):
45 | with pytest.raises(ValueError):
46 | NeuralTangentKernelWeighting(model, alpha)
47 |
48 |
49 | @pytest.mark.parametrize(
50 | "model,alpha",
51 | [
52 | (
53 | FeedForward(
54 | len(problem.input_variables), len(problem.output_variables)
55 | ),
56 | 0.5,
57 | )
58 | ],
59 | )
60 | def test_train_aggregation(model, alpha):
61 | weighting = NeuralTangentKernelWeighting(model=model, alpha=alpha)
62 | problem.discretise_domain(50)
63 | solver = PINN(problem=problem, model=model, weighting=weighting)
64 | trainer = Trainer(solver=solver, max_epochs=5, accelerator="cpu")
65 | trainer.train()
66 |
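The failing-constructor tests above pin down the interface: model must be a network and alpha must lie in [0, 1] (0.5 is accepted, 1.2 raises ValueError). As background, the sketch below shows a generic gradient-norm-based loss balancing in which alpha acts as a moving-average factor; it illustrates why a model is required, but it is not necessarily the exact update rule used by NeuralTangentKernelWeighting.

import torch

def update_weights(model, losses, weights, alpha=0.5):
    # Per-condition gradient norms with respect to the model parameters.
    norms = {}
    for name, loss in losses.items():
        grads = torch.autograd.grad(
            loss, model.parameters(), retain_graph=True, allow_unused=True
        )
        norms[name] = torch.cat([g.reshape(-1) for g in grads if g is not None]).norm()
    total = sum(norms.values())
    # Moving average between old and gradient-based weights; alpha in [0, 1].
    return {
        name: alpha * weights.get(name, 1.0) + (1 - alpha) * norms[name] / total
        for name in losses
    }

# Hypothetical two-condition example.
model = torch.nn.Linear(3, 1)
x = torch.rand(8, 3)
losses = {
    "physics": model(x).pow(2).mean(),
    "boundary": (model(x) - 1).pow(2).mean(),
}
print(update_weights(model, losses, weights={}, alpha=0.5))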
--------------------------------------------------------------------------------
/tests/test_weighting/test_standard_weighting.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import torch
3 |
4 | from pina import Trainer
5 | from pina.solver import PINN
6 | from pina.model import FeedForward
7 | from pina.problem.zoo import Poisson2DSquareProblem
8 | from pina.loss import ScalarWeighting
9 |
10 | problem = Poisson2DSquareProblem()
11 | model = FeedForward(len(problem.input_variables), len(problem.output_variables))
12 | condition_names = problem.conditions.keys()
13 | # condition names are reused below to build the dict-based weight cases
14 |
15 |
16 | @pytest.mark.parametrize(
17 | "weights", [1, 1.0, dict(zip(condition_names, [1] * len(condition_names)))]
18 | )
19 | def test_constructor(weights):
20 | ScalarWeighting(weights=weights)
21 |
22 |
23 | @pytest.mark.parametrize("weights", ["a", [1, 2, 3]])
24 | def test_wrong_constructor(weights):
25 | with pytest.raises(ValueError):
26 | ScalarWeighting(weights=weights)
27 |
28 |
29 | @pytest.mark.parametrize(
30 | "weights", [1, 1.0, dict(zip(condition_names, [1] * len(condition_names)))]
31 | )
32 | def test_aggregate(weights):
33 | weighting = ScalarWeighting(weights=weights)
34 | losses = dict(
35 | zip(
36 | condition_names,
37 | [torch.randn(1) for _ in range(len(condition_names))],
38 | )
39 | )
40 | weighting.aggregate(losses=losses)
41 |
42 |
43 | @pytest.mark.parametrize(
44 | "weights", [1, 1.0, dict(zip(condition_names, [1] * len(condition_names)))]
45 | )
46 | def test_train_aggregation(weights):
47 | weighting = ScalarWeighting(weights=weights)
48 | problem.discretise_domain(50)
49 | solver = PINN(problem=problem, model=model, weighting=weighting)
50 | trainer = Trainer(solver=solver, max_epochs=5, accelerator="cpu")
51 | trainer.train()
52 |
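test_aggregate above passes ScalarWeighting.aggregate a dict of per-condition losses, with weights given either as one scalar or as a dict keyed by condition name. Conceptually the aggregation is a weighted sum; the sketch below shows that reduction in plain torch with hypothetical condition names, and is not PINA's implementation.

import torch

def weighted_sum(losses, weights, default_weight=1.0):
    # One scalar applies to every condition; a dict maps condition names to
    # scalars, with unlisted names falling back to the default weight.
    if isinstance(weights, (int, float)):
        weights = {name: weights for name in losses}
    return sum(
        weights.get(name, default_weight) * loss for name, loss in losses.items()
    )

losses = {"physics": torch.randn(1), "boundary": torch.randn(1)}
print(weighted_sum(losses, weights=1.0))
print(weighted_sum(losses, weights={"physics": 2.0, "boundary": 0.5}))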
--------------------------------------------------------------------------------
/tutorials/static/API_color.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/tutorials/static/API_color.png
--------------------------------------------------------------------------------
/tutorials/static/deep_ensemble.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/tutorials/static/deep_ensemble.png
--------------------------------------------------------------------------------
/tutorials/static/logging.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/tutorials/static/logging.png
--------------------------------------------------------------------------------
/tutorials/static/neural_operator.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/tutorials/static/neural_operator.png
--------------------------------------------------------------------------------
/tutorials/static/pina_logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/tutorials/static/pina_logo.png
--------------------------------------------------------------------------------
/tutorials/static/pina_wokflow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/tutorials/static/pina_wokflow.png
--------------------------------------------------------------------------------
/tutorials/tutorial10/data/Data_KS.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/tutorials/tutorial10/data/Data_KS.mat
--------------------------------------------------------------------------------
/tutorials/tutorial10/data/Data_KS2.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/tutorials/tutorial10/data/Data_KS2.mat
--------------------------------------------------------------------------------
/tutorials/tutorial5/Data_Darcy.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/tutorials/tutorial5/Data_Darcy.mat
--------------------------------------------------------------------------------
/tutorials/tutorial7/data/pinn_solution_0.5_0.5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/tutorials/tutorial7/data/pinn_solution_0.5_0.5
--------------------------------------------------------------------------------
/tutorials/tutorial7/data/pts_0.5_0.5:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mathLab/PINA/6b355b45de4d8a9449b9f8451b146f11df6e9fe4/tutorials/tutorial7/data/pts_0.5_0.5
--------------------------------------------------------------------------------