├── .DS_Store ├── .github └── workflows │ ├── docker-publish.yml │ ├── main.yml │ ├── pytest.yml │ ├── python-package.yml │ └── python-publish.yml ├── .gitignore ├── .travis.yml ├── Dockerfile ├── Pipfile ├── Pipfile.lock ├── README.md ├── environment.yml ├── examples ├── AC-SA.py ├── AC-baseline.py ├── AC-discovery.py ├── AC-dist-new.py ├── AC-dist.py ├── AC-inference.py ├── AC.mat ├── CH-discovery.py ├── CH.pkl ├── burgers-assimilate.py ├── burgers-new.py ├── burgers_shock.mat ├── steady-state-poisson.py ├── steady-state.py ├── testing.py ├── testing1D-AC.py ├── testing1D.py └── transfer-learn.py ├── logo.png ├── requirements.txt ├── setup.py ├── tdq-banner.png ├── tensordiffeq.egg-info ├── PKG-INFO ├── SOURCES.txt ├── dependency_links.txt ├── requires.txt └── top_level.txt ├── tensordiffeq ├── __init__.py ├── __pycache__ │ ├── __init__.cpython-36.pyc │ ├── __init__.cpython-37.pyc │ ├── fit.cpython-36.pyc │ ├── fit.cpython-37.pyc │ ├── helpers.cpython-36.pyc │ ├── helpers.cpython-37.pyc │ ├── models.cpython-36.pyc │ ├── models.cpython-37.pyc │ ├── networks.cpython-36.pyc │ ├── networks.cpython-37.pyc │ ├── plotting.cpython-36.pyc │ ├── plotting.cpython-37.pyc │ ├── utils.cpython-36.pyc │ └── utils.cpython-37.pyc ├── archive │ └── models.py ├── boundaries.py ├── domains.py ├── fit.py ├── helpers.py ├── models.py ├── networks.py ├── optimizers.py ├── output.py ├── plotting.py ├── sampling.py └── utils.py └── test ├── AC2test.py ├── Burgers2test.py ├── test_AC_distributed.py ├── test_AC_distributed_minibatch.py ├── test_AC_nonDistributed.py ├── test_AC_nonDistributed_minibatch.py ├── test_Burgers_distributed.py ├── test_Burgers_distributed_minibatch.py ├── test_Burgers_nonDistributed.py └── test_Burgers_nonDistributed_minibatch.py /.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/.DS_Store -------------------------------------------------------------------------------- /.github/workflows/docker-publish.yml: -------------------------------------------------------------------------------- 1 | name: Docker Release 2 | 3 | on: 4 | push: 5 | # Publish `main` as Docker `latest` image. 6 | branches: 7 | - main 8 | 9 | # Publish `v1.2.3` tags as releases. 10 | tags: 11 | - v* 12 | 13 | # Run tests for any PRs. 14 | pull_request: 15 | 16 | env: 17 | # TODO: Change variable to your image's name. 18 | IMAGE_NAME: image 19 | 20 | jobs: 21 | # Run tests. 22 | # See also https://docs.docker.com/docker-hub/builds/automated-testing/ 23 | test: 24 | runs-on: ubuntu-latest 25 | 26 | steps: 27 | - uses: actions/checkout@v2 28 | 29 | - name: Run tests 30 | run: | 31 | if [ -f docker-compose.test.yml ]; then 32 | docker-compose --file docker-compose.test.yml build 33 | docker-compose --file docker-compose.test.yml run sut 34 | else 35 | docker build . --file Dockerfile 36 | fi 37 | 38 | # Push image to GitHub Packages. 39 | # See also https://docs.docker.com/docker-hub/builds/ 40 | push: 41 | # Ensure test job passes before pushing image. 42 | needs: test 43 | 44 | runs-on: ubuntu-latest 45 | if: github.event_name == 'push' 46 | 47 | steps: 48 | - uses: actions/checkout@v2 49 | 50 | - name: Build image 51 | run: docker build . 
--file Dockerfile --tag $IMAGE_NAME 52 | 53 | - name: Log into registry 54 | run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login docker.pkg.github.com -u ${{ github.actor }} --password-stdin 55 | 56 | - name: Push image 57 | run: | 58 | IMAGE_ID=docker.pkg.github.com/${{ github.repository }}/$IMAGE_NAME 59 | 60 | # Change all uppercase to lowercase 61 | IMAGE_ID=$(echo $IMAGE_ID | tr '[A-Z]' '[a-z]') 62 | 63 | # Strip git ref prefix from version 64 | VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') 65 | 66 | # Strip "v" prefix from tag name 67 | [[ "${{ github.ref }}" == "refs/tags/"* ]] && VERSION=$(echo $VERSION | sed -e 's/^v//') 68 | 69 | # Use Docker `latest` tag convention 70 | [ "$VERSION" == "main" ] && VERSION=latest 71 | 72 | echo IMAGE_ID=$IMAGE_ID 73 | echo VERSION=$VERSION 74 | 75 | docker tag $IMAGE_NAME $IMAGE_ID:$VERSION 76 | docker push $IMAGE_ID:$VERSION 77 | -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Codecov 2 | on: [push] 3 | jobs: 4 | run: 5 | runs-on: ${{ matrix.os }} 6 | strategy: 7 | matrix: 8 | os: [ubuntu-latest] 9 | env: 10 | OS: ${{ matrix.os }} 11 | PYTHON: '3.7' 12 | steps: 13 | - uses: actions/checkout@master 14 | - name: Setup Python 15 | uses: actions/setup-python@master 16 | with: 17 | python-version: 3.7 18 | - name: Generate coverage report 19 | run: | 20 | pip install pytest 21 | pip install pytest-cov 22 | pytest --cov=./ --cov-report=xml 23 | - name: Upload coverage to Codecov 24 | uses: codecov/codecov-action@v2 25 | with: 26 | # token: ${{ secrets.CODECOV_TOKEN }} 27 | directory: ./coverage/reports/ 28 | env_vars: OS,PYTHON 29 | fail_ci_if_error: true 30 | files: ./coverage1.xml,./coverage2.xml 31 | flags: unittests 32 | name: codecov-umbrella 33 | path_to_write_report: ./coverage/codecov_report.txt 34 | verbose: true 35 | -------------------------------------------------------------------------------- /.github/workflows/pytest.yml: -------------------------------------------------------------------------------- 1 | # .github/workflows/app.yaml 2 | # https://blog.dennisokeeffe.com/blog/2021-08-08-pytest-with-github-actions 3 | name: PyTest 4 | on: push 5 | 6 | jobs: 7 | test: 8 | runs-on: ubuntu-latest 9 | timeout-minutes: 45 10 | 11 | steps: 12 | - name: Check out repository code 13 | uses: actions/checkout@v2 14 | 15 | # Setup Python (faster than using Python container) 16 | - name: Setup Python 17 | uses: actions/setup-python@v2 18 | with: 19 | python-version: "3.8" 20 | 21 | - name: Install pipenv 22 | run: | 23 | python -m pip install --upgrade pipenv wheel 24 | - id: cache-pipenv 25 | uses: actions/cache@v1 26 | with: 27 | path: ~/.local/share/virtualenvs 28 | key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }} 29 | 30 | - name: Install dependencies 31 | if: steps.cache-pipenv.outputs.cache-hit != 'true' 32 | run: | 33 | pipenv install --deploy --dev 34 | # - name: Run test suite 35 | # run: | 36 | # pipenv run test -v 37 | 38 | - name: Run tests and generate coverage report 39 | run: | 40 | python -m pip install tensordiffeq 41 | pip install pytest 42 | pip install pytest-cov 43 | pytest --cov=./ --cov-report=xml 44 | - name: Upload coverage to Codecov 45 | uses: codecov/codecov-action@v2 46 | with: 47 | token: ${{ secrets.CODECOV_TOKEN }} 48 | version: "v0.1.15" 49 | directory: ./coverage/reports/ 50 | env_vars: OS,PYTHON 51 | fail_ci_if_error: true 52 | 
files: ./coverage1.xml,./coverage2.xml
53 |           flags: unittests
54 |           name: codecov-umbrella
55 |           # path_to_write_report: ./coverage/codecov_report.txt
56 |           verbose: true
57 | 
--------------------------------------------------------------------------------
/.github/workflows/python-package.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 | 
4 | name: Package Build
5 | 
6 | on:
7 |   push:
8 |     branches: [ main ]
9 |   pull_request:
10 |     branches: [ main ]
11 | 
12 | jobs:
13 |   build:
14 | 
15 |     runs-on: ubuntu-latest
16 |     strategy:
17 |       matrix:
18 |         python-version: [3.7, 3.8, 3.9]
19 | 
20 |     steps:
21 |     - uses: actions/checkout@v2
22 |     - name: Set up Python ${{ matrix.python-version }}
23 |       uses: actions/setup-python@v2
24 |       with:
25 |         python-version: ${{ matrix.python-version }}
26 |     - name: Install dependencies
27 |       run: |
28 |         python -m pip install --upgrade pip
29 |         python -m pip install flake8 pytest
30 |         if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
31 |     - name: Lint with flake8
32 |       run: |
33 |         # stop the build if there are Python syntax errors or undefined names
34 |         flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
35 |         # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
36 |         flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
37 |     - name: Test with pytest
38 |       run: |
39 |         python -c "import tensordiffeq"
40 | 
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will upload a Python Package using Twine when a release is created
2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3 | 
4 | name: Package Release
5 | 
6 | on:
7 |   release:
8 |     types: [published]
9 | 
10 | jobs:
11 |   deploy:
12 | 
13 |     runs-on: ubuntu-latest
14 | 
15 |     steps:
16 |     - uses: actions/checkout@v2
17 |     - name: Set up Python
18 |       uses: actions/setup-python@v2
19 |       with:
20 |         python-version: '3.x'
21 |     - name: Install dependencies
22 |       run: |
23 |         python -m pip install --upgrade pip
24 |         pip install setuptools wheel twine
25 |     - name: Build and publish
26 |       env:
27 |         TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
28 |         TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
29 |       run: |
30 |         python setup.py sdist bdist_wheel
31 |         twine upload dist/*
32 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | cache/*
2 | tensordiffeq/__pycache__/*
3 | venv/*
4 | .idea/*
5 | dist/*
6 | examples/*.ipynb
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 | # ===== Linux ======
3 | dist: xenial
4 | python:
5 |   - 3.5
6 |   - 3.6
7 |   - 3.7
8 | matrix:
9 |   include:
10 |     # ======= OSX ========
11 |     # ----- changes in Travis images mean this doesn't work for versions before 3.7.5 ---
12 |     # - name: "Python 2.7.14 on macOS 10.13"
13 |     #   os: osx
14 |     #   osx_image: xcode9.3  # Python 2.7.14_2 running on macOS 10.13
15 |     #   language: shell  # 'language: python' is an error on Travis CI macOS
16 |     #   before_install:
17 |     #     - python --version
18 |     #     # - pip install -U pip
19 |     #     # - python -m pip install --upgrade pip
20 |     #     - pip install pytest --user
21 |     #     - pip install codecov --user
22 |     #   install: pip install ".[test]" --user
23 |     #   script: python -m pytest
24 |     #   after_success: python -m codecov
25 |     # - name: "Python 3.6.5 on macOS 10.13"
26 |     #   os: osx
27 |     #   osx_image: xcode9.4  # Python 3.6.5 running on macOS 10.13
28 |     #   language: shell  # 'language: python' is an error on Travis CI macOS
29 |     #   before_install:
30 |     #     - python3 --version
31 |     #     - pip3 install -U pip
32 |     #     - pip3 install -U pytest
33 |     #     - pip3 install codecov
34 |     #   script: python3 -m pytest
35 |     #   after_success: python3 -m codecov
36 |     - name: "Python 3.7.5 on macOS 10.14"
37 |       os: osx
38 |       osx_image: xcode10.2  # Python 3.7.5 running on macOS 10.14.3
39 |       language: shell  # 'language: python' is an error on Travis CI macOS
40 |       before_install:
41 |         - python3 --version
42 |         - pip3 install -U pip
43 |         - pip3 install -U pytest
44 |         - pip3 install codecov
45 |       script: python3 -m pytest
46 |       after_success: python3 -m codecov
47 |     - name: "Python 3.8.0 on macOS 10.14"
48 |       os: osx
49 |       osx_image: xcode11.3  # Python 3.8.0 running on macOS 10.14.6
50 |       language: shell  # 'language: python' is an error on Travis CI macOS
51 |       before_install:
52 |         - python3 --version
53 |         - pip3 install -U pip
54 |         - pip3 install -U pytest
55 |         - pip3 install codecov
56 |       script: python3 -m pytest
57 |       after_success: python3 -m codecov
58 |     # ====== WINDOWS =========
59 |     - name: "Python 2.7 on Windows"
60 |       os: windows  # Windows 10.0.17134 N/A Build 17134
61 |       language: shell  # 'language: python' is an error on Travis CI Windows
62 |       before_install:
63 |         - choco install python2
64 |         - python --version
65 |         - python -m pip install --upgrade pip
66 |         - pip install --upgrade pytest
67 |         - pip install codecov
68 |       env: PATH=/c/Python27:/c/Python27/Scripts:$PATH
69 |     - name: "Python 3.5.4 on Windows"
70 |       os: windows  # Windows 10.0.17134 N/A Build 17134
71 |       language: shell  # 'language: python' is an error on Travis CI Windows
72 |       before_install:
73 |         - choco install python --version 3.5.4
74 |         - python --version
75 |         - python -m pip install --upgrade pip
76 |         - pip3 install --upgrade pytest
77 |         - pip3 install codecov
78 |       env: PATH=/c/Python35:/c/Python35/Scripts:$PATH
79 |     - name: "Python 3.6.8 on Windows"
80 |       os: windows  # Windows 10.0.17134 N/A Build 17134
81 |       language: shell  # 'language: python' is an error on Travis CI Windows
82 |       before_install:
83 |         - choco install python --version 3.6.8
84 |         - python --version
85 |         - python -m pip install --upgrade pip
86 |         - pip3 install --upgrade pytest
87 |         - pip3 install codecov
88 |       env: PATH=/c/Python36:/c/Python36/Scripts:$PATH
89 |     - name: "Python 3.7.4 on Windows"
90 |       os: windows  # Windows 10.0.17134 N/A Build 17134
91 |       language: shell  # 'language: python' is an error on Travis CI Windows
92 |       before_install:
93 |         - choco install python --version 3.7.4
94 |         - python --version
95 |         - python -m pip install --upgrade pip
96 |         - pip3 install --upgrade pytest
97 |         - pip3 install codecov
98 |       env: PATH=/c/Python37:/c/Python37/Scripts:$PATH
99 | before_install:
100 |   - python --version
101 |   - pip install -U pip
102 |   - pip install -U pytest
103 |   - pip install codecov
104 | install:
105 |   - pip install ".[test]"
106 | script: pytest
107 | after_success:
108 | - codecov # submit coverage 109 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM tensorflow/tensorflow:nightly-gpu 2 | 3 | ARG DEBIAN_FRONTEND=noninteractive 4 | 5 | RUN apt-get update && apt-get install -y eog python3-tk python-yaml texlive-full openssh-server sudo x11-apps && apt-get clean && rm -rf /var/lib/apt/lists 6 | 7 | ENV DISPLAY=:0 8 | 9 | RUN pip install librosa pytz matplotlib scikit-learn Pillow pandas progress openpyxl numpy pyDOE numba tensordiffeq 10 | 11 | RUN useradd -rm -d /home/ubuntu -s /bin/bash -g root -G sudo -u 1000 test 12 | 13 | RUN echo 'test:test' | chpasswd 14 | 15 | RUN service ssh start 16 | 17 | EXPOSE 22 18 | 19 | ENV QT_X11_NO_MITSHM=1 20 | -------------------------------------------------------------------------------- /Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | url = "https://pypi.org/simple" 3 | verify_ssl = true 4 | name = "pypi" 5 | 6 | [packages] 7 | matplotlib = "*" 8 | numpy = "*" 9 | scipy = "*" 10 | tensorflow = "*" 11 | tensorflow-probability = "*" 12 | pyfiglet = "*" 13 | tqdm = "*" 14 | pyDOE2 = "*" 15 | requests = "*" 16 | 17 | [dev-packages] 18 | tensordiffeq = {editable = true, path = "."} 19 | pytest = "*" 20 | 21 | [requires] 22 | python_version = "3.8" 23 | 24 | [scripts] 25 | test = "pytest" 26 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | ![TensorDiffEq logo](tdq-banner.png) 3 | 4 | 5 | ![Package Build](https://github.com/tensordiffeq/TensorDiffEq/workflows/Package%20Build/badge.svg) 6 | ![Package Release](https://github.com/tensordiffeq/TensorDiffEq/workflows/Package%20Release/badge.svg) 7 | ![pypi](https://img.shields.io/pypi/v/tensordiffeq) 8 | ![downloads](https://img.shields.io/pypi/dm/tensordiffeq) 9 | ![python versions](https://img.shields.io/pypi/pyversions/tensordiffeq) 10 | 11 | ### Notice: Support for Python 3.6 will be dropped in v.0.2.1, please plan accordingly! 12 | 13 | ## Efficient and Scalable Physics-Informed Deep Learning 14 | 15 | #### Collocation-based PINN PDE solvers for prediction and discovery methods on top of [Tensorflow](https://github.com/tensorflow/tensorflow) 2.X for multi-worker distributed computing. 16 | 17 | Use TensorDiffEq if you require: 18 | - A meshless PINN solver that can distribute over multiple workers (GPUs) for 19 | forward problems (inference) and inverse problems (discovery) 20 | - Scalable domains - Iterated solver construction allows for N-D spatio-temporal support 21 | - support for N-D spatial domains with no time element is included 22 | - Self-Adaptive Collocation methods for forward and inverse PINNs 23 | - Intuitive user interface allowing for explicit definitions of variable domains, 24 | boundary conditions, initial conditions, and strong-form PDEs 25 | 26 | 27 | What makes TensorDiffEq different? 
28 | - Completely open-source
29 | - [Self-Adaptive Solvers](https://arxiv.org/abs/2009.04544) for forward and inverse problems, leading to increased accuracy of the solution and stability in training, resulting in
30 | less overall training time
31 | - Multi-GPU distributed training for large or fine-grain spatio-temporal domains
32 | - Built on top of Tensorflow 2.0 for increased support in new functionality exclusive to recent TF releases, such as [XLA support](https://www.tensorflow.org/xla),
33 | [autograph](https://blog.tensorflow.org/2018/07/autograph-converts-python-into-tensorflow-graphs.html) for efficient graph-building, and [grappler support](https://www.tensorflow.org/guide/graph_optimization)
34 | for graph optimization* - with no chance of the source code being sunset in a further Tensorflow version release
35 | 
36 | - Intuitive interface - defining domains, BCs, ICs, and strong-form PDEs in "plain english"
37 | 
38 | 
39 | *In development
40 | 
41 | If you use TensorDiffEq in your work, please cite it via:
42 | 
43 | ```code
44 | @article{mcclenny2021tensordiffeq,
45 | title={TensorDiffEq: Scalable Multi-GPU Forward and Inverse Solvers for Physics Informed Neural Networks},
46 | author={McClenny, Levi D and Haile, Mulugeta A and Braga-Neto, Ulisses M},
47 | journal={arXiv preprint arXiv:2103.16034},
48 | year={2021}
49 | }
50 | ```
51 | 
52 | ### Thanks to our additional contributors:
53 | @marcelodallaqua, @ragusa, @emiliocoutinho
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | matplotlib
2 | numpy
3 | scipy
4 | tensorflow_probability
5 | pyDOE2
6 | pyfiglet
7 | tqdm
--------------------------------------------------------------------------------
/examples/AC-SA.py:
--------------------------------------------------------------------------------
1 | import scipy.io
2 | import math
3 | import tensordiffeq as tdq
4 | from tensordiffeq.models import CollocationSolverND
5 | from tensordiffeq.boundaries import *
6 | 
7 | Domain = DomainND(["x", "t"], time_var='t')
8 | 
9 | Domain.add("x", [-1.0, 1.0], 512)
10 | Domain.add("t", [0.0, 1.0], 201)
11 | 
12 | N_f = 50000
13 | Domain.generate_collocation_points(N_f)
14 | 
15 | 
16 | def func_ic(x):
17 |     return x ** 2 * np.cos(math.pi * x)
18 | 
19 | 
20 | # Conditions to be considered at the boundaries for the periodic BC
21 | def deriv_model(u_model, x, t):
22 |     u = u_model(tf.concat([x, t], 1))
23 |     u_x = tf.gradients(u, x)[0]
24 |     # u_xx = tf.gradients(u_x, x)[0]
25 |     # u_xxx = tf.gradients(u_xx, x)[0]
26 |     # u_xxxx = tf.gradients(u_xxx, x)[0]
27 |     return u, u_x
28 | 
29 | 
30 | init = IC(Domain, [func_ic], var=[['x']])
31 | x_periodic = periodicBC(Domain, ['x'], [deriv_model])
32 | 
33 | BCs = [init, x_periodic]
34 | 
35 | 
36 | def f_model(u_model, x, t):
37 |     u = u_model(tf.concat([x, t], 1))
38 |     u_x = tf.gradients(u, x)
39 |     u_xx = tf.gradients(u_x, x)
40 |     u_t = tf.gradients(u, t)
41 |     c1 = tdq.utils.constant(.0001)
42 |     c2 = tdq.utils.constant(5.0)
43 |     f_u = u_t - c1 * u_xx + c2 * u * u * u - c2 * u
44 |     return f_u
45 | 
46 | ## Which loss functions will have adaptive weights
47 | # "residual" should be a tuple for the case of multiple residual equations
48 | # BCs have to follow the same order as the previously defined BCs list
49 | dict_adaptive = {"residual": [True],
50 |                  "BCs": [True, False]}
51 | 
52 | ## Weights initialization
53 | # dictionary with keys "residual" and "BCs".
Values must be a tuple with dimension 54 | # equal to the number of residuals and boundary conditions, respectively 55 | init_weights = {"residual": [tf.random.uniform([N_f, 1])], 56 | "BCs": [100 * tf.random.uniform([512, 1]), None]} 57 | 58 | 59 | layer_sizes = [2, 128, 128, 128, 128, 1] 60 | 61 | model = CollocationSolverND() 62 | model.compile(layer_sizes, f_model, Domain, BCs, isAdaptive=True, 63 | dict_adaptive=dict_adaptive, init_weights=init_weights) 64 | 65 | model.fit(tf_iter=10000, newton_iter=10000) 66 | 67 | # Load high-fidelity data for error calculation 68 | data = scipy.io.loadmat('AC.mat') 69 | 70 | Exact = data['uu'] 71 | Exact_u = np.real(Exact) 72 | 73 | 74 | 75 | x = Domain.domaindict[0]['xlinspace'] 76 | t = Domain.domaindict[1]["tlinspace"] 77 | 78 | # create mesh for plotting 79 | 80 | X, T = np.meshgrid(x, t) 81 | 82 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 83 | u_star = Exact_u.T.flatten()[:, None] 84 | 85 | # forward pass through model 86 | u_pred, f_u_pred = model.predict(X_star) 87 | 88 | error_u = tdq.helpers.find_L2_error(u_pred, u_star) 89 | print('Error u: %e' % (error_u)) 90 | 91 | U_pred = tdq.plotting.get_griddata(X_star, u_pred.flatten(), (X, T)) 92 | FU_pred = tdq.plotting.get_griddata(X_star, f_u_pred.flatten(), (X, T)) 93 | 94 | lb = np.array([-1.0, 0.0]) 95 | ub = np.array([1.0, 1]) 96 | 97 | tdq.plotting.plot_solution_domain1D(model, [x, t], ub=ub, lb=lb, Exact_u=Exact_u) -------------------------------------------------------------------------------- /examples/AC-baseline.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import scipy.io 4 | 5 | import tensordiffeq as tdq 6 | from tensordiffeq.boundaries import * 7 | from tensordiffeq.models import CollocationSolverND 8 | 9 | Domain = DomainND(["x", "t"], time_var='t') 10 | 11 | Domain.add("x", [-1.0, 1.0], 512) 12 | Domain.add("t", [0.0, 1.0], 201) 13 | 14 | N_f = 50000 15 | Domain.generate_collocation_points(N_f) 16 | 17 | 18 | def func_ic(x): 19 | return x ** 2 * np.cos(math.pi * x) 20 | 21 | 22 | # Conditions to be considered at the boundaries for the periodic BC 23 | def deriv_model(u_model, x, t): 24 | u = u_model(tf.concat([x, t], 1)) 25 | u_x = tf.gradients(u, x)[0] 26 | u_xx = tf.gradients(u_x, x)[0] 27 | u_xxx = tf.gradients(u_xx, x)[0] 28 | u_xxxx = tf.gradients(u_xxx, x)[0] 29 | return u, u_x, u_xxx, u_xxxx 30 | 31 | 32 | init = IC(Domain, [func_ic], var=[['x']]) 33 | x_periodic = periodicBC(Domain, ['x'], [deriv_model]) 34 | 35 | BCs = [init, x_periodic] 36 | 37 | 38 | def f_model(u_model, x, t): 39 | u = u_model(tf.concat([x, t], 1)) 40 | u_x = tf.gradients(u, x) 41 | u_xx = tf.gradients(u_x, x) 42 | u_t = tf.gradients(u, t) 43 | c1 = tdq.utils.constant(.0001) 44 | c2 = tdq.utils.constant(5.0) 45 | f_u = u_t - c1 * u_xx + c2 * u * u * u - c2 * u 46 | return f_u 47 | 48 | layer_sizes = [2, 128, 128, 128, 128, 1] 49 | 50 | model = CollocationSolverND() 51 | model.compile(layer_sizes, f_model, Domain, BCs) 52 | model.fit(tf_iter=10000, newton_iter=10000) 53 | 54 | # Load high-fidelity data for error calculation 55 | data = scipy.io.loadmat('AC.mat') 56 | 57 | Exact = data['uu'] 58 | Exact_u = np.real(Exact) 59 | 60 | # t = data['tt'].flatten()[:,None] 61 | # x = data['x'].flatten()[:,None] 62 | 63 | x = Domain.domaindict[0]['xlinspace'] 64 | t = Domain.domaindict[1]["tlinspace"] 65 | 66 | # create mesh for plotting 67 | 68 | X, T = np.meshgrid(x, t) 69 | 70 | X_star = np.hstack((X.flatten()[:, None], 
T.flatten()[:, None]))
71 | u_star = Exact_u.T.flatten()[:, None]
72 | 
73 | # forward pass through model
74 | u_pred, f_u_pred = model.predict(X_star)
75 | 
76 | error_u = tdq.find_L2_error(u_pred, u_star)
77 | print('Error u: %e' % (error_u))
78 | 
79 | U_pred = tdq.get_griddata(X_star, u_pred.flatten(), (X, T))
80 | FU_pred = tdq.get_griddata(X_star, f_u_pred.flatten(), (X, T))
81 | 
82 | lb = np.array([-1.0, 0.0])
83 | ub = np.array([1.0, 1])
84 | 
85 | tdq.plotting.plot_solution_domain1D(model, [x, t], ub=ub, lb=lb, Exact_u=Exact_u)
86 | 
--------------------------------------------------------------------------------
/examples/AC-discovery.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | import scipy.io
4 | import tensordiffeq as tdq
5 | from tensordiffeq.models import DiscoveryModel
6 | from tensordiffeq.utils import tensor
7 | 
8 | #####################
9 | ## Discovery Model ##
10 | #####################
11 | 
12 | 
13 | # Put params into a list
14 | params = [tf.Variable(0.0, dtype=tf.float32), tf.Variable(0.0, dtype=tf.float32)]
15 | 
16 | 
17 | # Define f_model, note the `vars` argument. Inputs must follow this order!
18 | def f_model(u_model, var, x, t):
19 |     u = u_model(tf.concat([x, t], 1))
20 |     u_x = tf.gradients(u, x)
21 |     u_xx = tf.gradients(u_x, x)
22 |     u_t = tf.gradients(u, t)
23 |     c1 = var[0] # tunable param 1
24 |     c2 = var[1] # tunable param 2
25 |     f_u = u_t - c1 * u_xx + c2 * u * u * u - c2 * u
26 |     return f_u
27 | 
28 | 
29 | # Import data, same data as Raissi et al
30 | 
31 | data = scipy.io.loadmat('AC.mat')
32 | 
33 | t = data['tt'].flatten()[:, None]
34 | x = data['x'].flatten()[:, None]
35 | Exact = data['uu']
36 | Exact_u = np.real(Exact)
37 | 
38 | # generate all combinations of x and t
39 | X, T = np.meshgrid(x, t)
40 | 
41 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None]))
42 | u_star = Exact_u.T.flatten()[:, None]
43 | 
44 | x = X_star[:, 0:1]
45 | t = X_star[:, 1:2]
46 | 
47 | # append to a list for input to model.fit
48 | X = [x, t]
49 | 
50 | # define col_weights for SA discovery model, can be removed
51 | col_weights = tf.Variable(tf.random.uniform([np.shape(x)[0], 1]))
52 | 
53 | # define MLP depth and layer width
54 | layer_sizes = [2, 128, 128, 128, 128, 1]
55 | 
56 | # initialize, compile, train model
57 | model = DiscoveryModel()
58 | model.compile(layer_sizes, f_model, X, u_star, params,
59 |               col_weights=col_weights) # baseline discovery approach can be done by simply removing the col_weights arg
60 | 
61 | # an example as to how one could modify an optimizer, in this case the col_weights optimizer
62 | model.tf_optimizer_weights = tf.keras.optimizers.Adam(lr=0.005,
63 |                                                       beta_1=.95)
64 | 
65 | # train loop
66 | model.fit(tf_iter=10000)
67 | 
68 | # doesn't work quite yet
69 | tdq.plotting.plot_weights(model, scale=10.0)
70 | 
--------------------------------------------------------------------------------
/examples/AC-dist-new.py:
--------------------------------------------------------------------------------
1 | import math
2 | 
3 | import scipy.io
4 | 
5 | import tensordiffeq as tdq
6 | from tensordiffeq.boundaries import *
7 | from tensordiffeq.models import CollocationSolverND
8 | 
9 | Domain = DomainND(["x", "t"], time_var='t')
10 | 
11 | Domain.add("x", [-1.0, 1.0], 512)
12 | Domain.add("t", [0.0, 1.0], 201)
13 | 
14 | N_f = 500000
15 | Domain.generate_collocation_points(N_f)
16 | 
17 | 
18 | def func_ic(x):
19 |     return x ** 2 * np.cos(math.pi * x)
20 | 
21 | 
22 | # Conditions to be considered at the boundaries for the periodic BC
23 | def deriv_model(u_model, x, t):
24 |     u = u_model(tf.concat([x, t], 1))
25 |     u_x = tf.gradients(u, x)[0]
26 |     u_xx = tf.gradients(u_x, x)[0]
27 |     u_xxx = tf.gradients(u_xx, x)[0]
28 |     u_xxxx = tf.gradients(u_xxx, x)[0]
29 |     return u, u_x, u_xxx, u_xxxx
30 | 
31 | 
32 | init = IC(Domain, [func_ic], var=[['x']])
33 | x_periodic = periodicBC(Domain, ['x'], [deriv_model])
34 | 
35 | BCs = [init, x_periodic]
36 | 
37 | 
38 | def f_model(u_model, x, t):
39 |     u = u_model(tf.concat([x, t], 1))
40 |     u_x = tf.gradients(u, x)
41 |     u_xx = tf.gradients(u_x, x)
42 |     u_t = tf.gradients(u, t)
43 |     c1 = tdq.utils.constant(.0001)
44 |     c2 = tdq.utils.constant(5.0)
45 |     f_u = u_t - c1 * u_xx + c2 * u * u * u - c2 * u
46 |     return f_u
47 | 
48 | layer_sizes = [2, 128, 128, 128, 128, 1]
49 | 
50 | model = CollocationSolverND()
51 | model.compile(layer_sizes, f_model, Domain, BCs, dist=True)
52 | model.fit(tf_iter=1001)
53 | print("training pass 1 completed")
54 | model.fit(tf_iter=1001)
55 | 
56 | # Load high-fidelity data for error calculation
57 | data = scipy.io.loadmat('AC.mat')
58 | 
59 | Exact = data['uu']
60 | Exact_u = np.real(Exact)
61 | 
62 | # t = data['tt'].flatten()[:,None]
63 | # x = data['x'].flatten()[:,None]
64 | 
65 | x = Domain.domaindict[0]['xlinspace']
66 | t = Domain.domaindict[1]["tlinspace"]
67 | 
68 | # create mesh for plotting
69 | 
70 | X, T = np.meshgrid(x, t)
71 | 
72 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None]))
73 | u_star = Exact_u.T.flatten()[:, None]
74 | 
75 | # forward pass through model
76 | u_pred, f_u_pred = model.predict(X_star)
77 | 
78 | error_u = tdq.find_L2_error(u_pred, u_star)
79 | print('Error u: %e' % (error_u))
80 | 
81 | U_pred = tdq.get_griddata(X_star, u_pred.flatten(), (X, T))
82 | FU_pred = tdq.get_griddata(X_star, f_u_pred.flatten(), (X, T))
83 | 
84 | lb = np.array([-1.0, 0.0])
85 | ub = np.array([1.0, 1])
86 | 
87 | tdq.plotting.plot_solution_domain1D(model, [x, t], ub=ub, lb=lb, Exact_u=Exact_u)
88 | 
--------------------------------------------------------------------------------
/examples/AC-dist.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | import scipy.io
4 | import tensordiffeq as tdq
5 | from tensordiffeq.models import CollocationSolver1D
6 | 
7 | def f_model(u_model, x, t):
8 |     tf.print(np.shape(x))
9 |     u = u_model(tf.concat([x,t],1))
10 |     u_x = tf.gradients(u, x)
11 |     u_xx = tf.gradients(u_x, x)
12 |     u_t = tf.gradients(u,t)
13 |     c1 = tdq.utils.constant(.0001)
14 |     c2 = tdq.utils.constant(5.0)
15 |     f_u = u_t - c1*u_xx + c2*u*u*u - c2*u
16 |     return f_u
17 | 
18 | def u_x_model(u_model, x, t):
19 |     u = u_model(tf.concat([x,t], 1))
20 |     u_x = tf.gradients(u, x)
21 |     return u, u_x
22 | 
23 | 
24 | N0 = 200
25 | NS = 200
26 | N_b = 100
27 | N_f = 500000
28 | 
29 | col_weights = tf.random.uniform([N_f, 1])
30 | u_weights = tf.Variable(100*tf.random.uniform([N0, 1]))
31 | 
32 | # Grab collocation points using Latin hypercube sampling
33 | xlimits = np.array([[-1.0, 1.0], [0.0, 1.0]])
34 | X_f = tdq.LatinHypercubeSample(N_f, xlimits) #x_f, t_f
35 | 
36 | 
37 | lb = np.array([-1.0])
38 | ub = np.array([1.0])
39 | 
40 | # Import data, same data as Raissi et al
41 | 
42 | data = scipy.io.loadmat('AC.mat')
43 | 
44 | t = data['tt'].flatten()[:,None]
45 | x = data['x'].flatten()[:,None]
46 | Exact = data['uu']
47 | Exact_u = np.real(Exact)
48 | 
49 | #grab training points from domain
50 | idx_x = np.random.choice(x.shape[0], N0, replace=False)
51 | x0 = x[idx_x,:]
52 | u0 = tf.cast(Exact_u[idx_x,0:1], dtype = tf.float32)
53 | 
54 | idx_xs = np.random.choice(x.shape[0], NS, replace=False) #need multiple Xs
55 | idx_ts = 100 #for 1 t value
56 | y_s = Exact[idx_xs, idx_ts]
57 | x_s = x[idx_xs,:]
58 | t_s = np.repeat(t[idx_ts,:], len(x_s))
59 | 
60 | y_s = tf.cast(tf.reshape(y_s, (-1,1)), dtype = tf.float32) #tensors need to be of shape (NS, 1), not (NS, )
61 | x_s = tdq.tensor(x_s)
62 | t_s = tf.cast(tf.reshape(t_s, (-1,1)), dtype = tf.float32)
63 | 
64 | idx_t = np.random.choice(t.shape[0], N_b, replace=False)
65 | tb = t[idx_t,:]
66 | 
67 | # Convert the sampled collocation points to tensors
68 | x_f = tf.convert_to_tensor(X_f[:,0:1], dtype=tf.float32)
69 | t_f = tf.convert_to_tensor(np.abs(X_f[:,1:2]), dtype=tf.float32)
70 | 
71 | 
72 | X0 = np.concatenate((x0, 0*x0), 1) # (x0, 0)
73 | X_lb = np.concatenate((0*tb + lb[0], tb), 1) # (lb[0], tb)
74 | X_ub = np.concatenate((0*tb + ub[0], tb), 1) # (ub[0], tb)
75 | 
76 | 
77 | x0 = tf.cast(X0[:,0:1], dtype = tf.float32)
78 | t0 = tf.cast(X0[:,1:2], dtype = tf.float32)
79 | 
80 | x_lb = tf.convert_to_tensor(X_lb[:,0:1], dtype=tf.float32)
81 | t_lb = tf.convert_to_tensor(X_lb[:,1:2], dtype=tf.float32)
82 | 
83 | x_ub = tf.convert_to_tensor(X_ub[:,0:1], dtype=tf.float32)
84 | t_ub = tf.convert_to_tensor(X_ub[:,1:2], dtype=tf.float32)
85 | 
86 | layer_sizes = [2, 128, 128, 128, 128, 1]
87 | model = CollocationSolver1D()
88 | 
89 | def g(lam):
90 |     return lam**2
91 | 
92 | #model.compile(layer_sizes, f_model, x_f, t_f, x0, t0, u0, x_lb, t_lb, x_ub, t_ub, isPeriodic=True, u_x_model=u_x_model, isAdaptive = True, col_weights = col_weights, u_weights = u_weights, g = g, dist = True)
93 | model.compile(layer_sizes, f_model, x_f, t_f, x0, t0, u0, x_lb, t_lb, x_ub, t_ub, isPeriodic=True, u_x_model=u_x_model, dist = True)
94 | #train loop
95 | init = model.col_weights
96 | model.fit(tf_iter = 301, newton_iter = 100, batch_sz = 500000)
97 | 
98 | #generate meshgrid for forward pass of u_pred
99 | X, T = np.meshgrid(x,t)
100 | 
101 | X_star = np.hstack((X.flatten()[:,None], T.flatten()[:,None]))
102 | u_star = Exact_u.T.flatten()[:,None]
103 | 
104 | u_pred, f_u_pred = model.predict(X_star)
105 | 
106 | error_u = tdq.find_L2_error(u_pred, u_star)
107 | print('Error u: %e' % (error_u))
108 | 
109 | U_pred = tdq.get_griddata(X_star, u_pred.flatten(), (X,T))
110 | 
111 | FU_pred = tdq.get_griddata(X_star, f_u_pred.flatten(), (X,T))
112 | 
113 | lb = np.array([-1.0, 0.0])
114 | ub = np.array([1.0, 1])
115 | 
116 | # print(np.shape(init))
117 | # print(np.shape(model.col_weights))
118 | # plt.scatter(x_f, t_f, c = ((init - model.col_weights)/200))
119 | # print(np.mean(init - model.col_weights))
120 | # plt.show()
121 | 
122 | 
123 | tdq.plotting.plot_solution_domain1D(model, [x, t], ub = ub, lb = lb, Exact_u=Exact_u)
124 | 
125 | tdq.plotting.plot_weights(model)
126 | 
127 | tdq.plotting.plot_glam_values(model)
128 | 
129 | extent = [0.0, 1.0, -1.0, 1.0]
130 | tdq.plotting.plot_residuals(FU_pred, extent)
131 | 
--------------------------------------------------------------------------------
/examples/AC-inference.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | import scipy.io
4 | import tensordiffeq as tdq
5 | from tensordiffeq.models import DiscoveryModel
6 | from tensordiffeq.utils import tensor
7 | 
8 | #####################
9 | ## Discovery Model ##
10 | #####################
11 | 
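# Summary of the inverse (discovery) setup used below: the PDE coefficients are
# wrapped in trainable tf.Variables (`params`), the residual f_model receives
# them through its `var` argument, and DiscoveryModel fits them jointly with
# the network weights against the observed solution u_star. Since the forward
# Allen-Cahn examples in this repo hard-code c1 = 0.0001 and c2 = 5.0 for
# AC.mat, the recovered values can be sanity-checked after training, e.g.
# (a minimal sketch, assuming the `params` list defined below):
#   c1_hat, c2_hat = [p.numpy() for p in params]
#   print('recovered c1 = %e, c2 = %e' % (c1_hat, c2_hat))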
12 | 
13 | # Put params into a list
14 | params = [tf.Variable(0.0, dtype=tf.float32), tf.Variable(0.0, dtype=tf.float32)]
15 | 
16 | 
17 | # Define f_model, note the `vars` argument. Inputs must follow this order!
18 | def f_model(u_model, var, x, t):
19 |     u = u_model(tf.concat([x, t], 1))
20 |     u_x = tf.gradients(u, x)
21 |     u_xx = tf.gradients(u_x, x)
22 |     u_t = tf.gradients(u, t)
23 |     c1 = var[0] # tunable param 1
24 |     c2 = var[1] # tunable param 2
25 |     f_u = u_t - c1 * u_xx + c2 * u * u * u - c2 * u
26 |     return f_u
27 | 
28 | 
29 | # Import data, same data as Raissi et al
30 | 
31 | data = scipy.io.loadmat('AC.mat')
32 | 
33 | t = data['tt'].flatten()[:, None]
34 | x = data['x'].flatten()[:, None]
35 | Exact = data['uu']
36 | Exact_u = np.real(Exact)
37 | 
38 | # define MLP depth and layer width
39 | layer_sizes = [2, 128, 128, 128, 128, 1]
40 | 
41 | # generate all combinations of x and t
42 | X, T = np.meshgrid(x, t)
43 | 
44 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None]))
45 | u_star = Exact_u.T.flatten()[:, None]
46 | 
47 | x = X_star[:, 0:1]
48 | t = X_star[:, 1:2]
49 | 
50 | print(np.shape(x))
51 | # append to a list for input to model.fit
52 | X = [x, t]
53 | 
54 | # define col_weights for SA discovery model
55 | col_weights = tf.Variable(tf.random.uniform([np.shape(x)[0], 1]))
56 | 
57 | # initialize, compile, train model
58 | model = DiscoveryModel()
59 | model.compile(layer_sizes, f_model, X, u_star, params,
60 |               col_weights=col_weights) # baseline approach can be done by simply removing the col_weights arg
61 | model.tf_optimizer_weights = tf.keras.optimizers.Adam(lr=0.005,
62 |                                                       beta_1=.95) # an example as to how one could modify an optimizer, in this case the col_weights optimizer
63 | 
64 | # train loop
65 | model.fit(tf_iter=10000)
66 | 
67 | # doesn't work quite yet
68 | tdq.plotting.plot_weights(model, scale=10.0)
69 | 
--------------------------------------------------------------------------------
/examples/AC.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/examples/AC.mat
--------------------------------------------------------------------------------
/examples/CH-discovery.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | import scipy.io
4 | import tensordiffeq as tdq
5 | from tensordiffeq.models import DiscoveryModel
6 | from tensordiffeq.utils import tensor
7 | import pickle
8 | 
9 | #####################
10 | ## Discovery Model ##
11 | #####################
12 | 
13 | 
14 | # Put params into a list
15 | params = [tf.Variable(0.0001, dtype=tf.float32), tf.Variable(0.0001, dtype=tf.float32)]
16 | 
17 | 
18 | # Define f_model, note the `vars` argument. Inputs must follow this order!
19 | 
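# In this Cahn-Hilliard discovery problem, var = [g1, g2] enter through the
# chemical potential mu = g1 * (u**3 - u) - g2 * u_xx; the residual below
# computes f_u = u_t - mu_xx by taking two further x-gradients of `tmp`,
# i.e. it enforces u_t = (g1 * (u**3 - u) - g2 * u_xx)_xx.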
20 | def f_model(u_model, var, x, t):
21 |     # keep track of our gradients
22 |     g1 = var[0]
23 |     g2 = var[1]
24 | 
25 |     u = u_model(tf.concat([x, t], 1))
26 |     u_x = tf.gradients(u, x)
27 |     u_t = tf.gradients(u, t)
28 |     u_xx = tf.gradients(u_x, x)
29 | 
30 |     tmp = g1 * (u ** 3 - u) - g2 * u_xx
31 |     tmp_x = tf.gradients(tmp, x)[0]
32 |     tmp_xx = tf.gradients(tmp_x, x)[0]
33 | 
34 |     f_u = u_t - tmp_xx
35 |     return f_u
36 | 
37 | # Import data, same data as Raissi et al
38 | 
39 | with open('CH.pkl', 'rb') as f:
40 |     data = pickle.load(f)
41 | 
42 | Exact_u = data
43 | 
44 | x = np.linspace(-1, 1, np.shape(data)[1])
45 | t = np.linspace(0, 1, np.shape(data)[0])
46 | 
47 | # generate all combinations of x and t
48 | X, T = np.meshgrid(x, t)
49 | 
50 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None]))
51 | u_star = tensor(Exact_u.T.flatten()[:, None])
52 | 
53 | x = X_star[:, 0:1]
54 | t = X_star[:, 1:2]
55 | 
56 | # append to a list for input to model.fit
57 | X = [x, t]
58 | 
59 | # define col_weights for SA discovery model, can be removed
60 | col_weights = tf.Variable(tf.random.uniform([np.shape(x)[0], 1]))
61 | 
62 | # define MLP depth and layer width
63 | layer_sizes = [2, 128, 128, 128, 128, 1]
64 | 
65 | # initialize, compile, train model
66 | model = DiscoveryModel()
67 | model.compile(layer_sizes, f_model, X, u_star, params) # baseline discovery approach can be done by simply removing the col_weights arg
68 | 
69 | # train loop
70 | model.fit(tf_iter=10000)
71 | 
72 | # doesn't work quite yet
73 | tdq.plotting.plot_weights(model, scale=10.0)
74 | 
--------------------------------------------------------------------------------
/examples/CH.pkl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/examples/CH.pkl
--------------------------------------------------------------------------------
/examples/burgers-assimilate.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | import scipy.io
4 | import tensordiffeq as tdq
5 | import math
6 | from tensordiffeq.models import CollocationSolver1D
7 | 
8 | 
9 | def f_model(u_model, x, t):
10 |     u = u_model(tf.concat([x,t],1))
11 |     u_x = tf.gradients(u, x)
12 |     u_xx = tf.gradients(u_x, x)
13 |     u_t = tf.gradients(u, t)
14 | 
15 |     f_u = u_t + u*u_x - (0.05/tf.constant(math.pi))*u_xx
16 | 
17 |     return f_u
18 | 
19 | 
20 | 
21 | lb = np.array([-1.0])
22 | ub = np.array([1.0])
23 | 
24 | N0 = 60
25 | N_b = 20 # 20 per upper and lower boundary, so 40 total
26 | N_f = 10000
27 | NS = 200
28 | 
29 | data = scipy.io.loadmat('burgers_shock.mat')
30 | 
31 | t = data['t'].flatten()[:,None]
32 | x = data['x'].flatten()[:,None]
33 | Exact = data['usol']
34 | Exact_u = Exact
35 | 
36 | idx_xs = np.random.choice(x.shape[0], NS, replace=False) #need multiple Xs
37 | idx_ts = 75 #for 1 t value
38 | y_s = Exact[idx_xs, idx_ts]
39 | x_s = x[idx_xs,:]
40 | t_s = np.repeat(t[idx_ts,:], len(x_s))
41 | 
42 | y_s = tf.cast(tf.reshape(y_s, (-1,1)), dtype = tf.float32) #tensors need to be of shape (NS, 1), not (NS, )
43 | x_s = tdq.tensor(x_s)
44 | t_s = tf.cast(tf.reshape(t_s, (-1,1)), dtype = tf.float32)
45 | 
46 | idx_x = np.random.choice(x.shape[0], N0, replace=False)
47 | x0 = x[idx_x,:]
48 | u0 = Exact_u[idx_x,0:1]
49 | 
50 | u0 = tf.cast(u0, tf.float32)
51 | idx_t = np.random.choice(t.shape[0], N_b, replace=False)
52 | tb = t[idx_t,:]
53 | 
54 | xlimits = np.array([[-1.0,
1.0], [0.0, 1.0]]) 55 | X_f = tdq.LatinHypercubeSample(N_f, xlimits) 56 | 57 | x_f = tf.convert_to_tensor(X_f[:,0:1], dtype=tf.float32) 58 | t_f = tf.convert_to_tensor(np.abs(X_f[:,1:2]), dtype=tf.float32) 59 | 60 | X0 = np.concatenate((x0, 0*x0), 1) # (x0, 0) 61 | X_lb = np.concatenate((0*tb + lb[0], tb), 1) # (lb[0], tb) 62 | X_ub = np.concatenate((0*tb + ub[0], tb), 1) # (ub[0], tb) 63 | 64 | x0 = X0[:,0:1] 65 | t0 = X0[:,1:2] 66 | 67 | x_lb = tf.convert_to_tensor(X_lb[:,0:1], dtype=tf.float32) 68 | t_lb = tf.convert_to_tensor(X_lb[:,1:2], dtype=tf.float32) 69 | 70 | x_ub = tf.convert_to_tensor(X_ub[:,0:1], dtype=tf.float32) 71 | t_ub = tf.convert_to_tensor(X_ub[:,1:2], dtype=tf.float32) 72 | 73 | 74 | t_b_zeros = tf.cast(tf.reshape(np.repeat(0.0, N0),(N0,1)), tf.float32) 75 | u_ub = tf.cast(tf.reshape(np.repeat(0.0, N_b),(N_b,1)), tf.float32) 76 | u_lb = tf.cast(tf.reshape(np.repeat(0.0, N_b),(N_b,1)), tf.float32) 77 | 78 | 79 | 80 | layer_sizes = [2, 128, 128, 128, 128, 1] 81 | model = CollocationSolver1D(assimilate = True) 82 | #model.compile(layer_sizes, f_model, x_f, t_f, x0, t0, u0, x_lb, t_lb, x_ub, t_ub, isAdaptive=True, col_weights=col_weights, u_weights=u_weights, g = g) 83 | model.compile(layer_sizes, f_model, x_f, t_f, x0, t0, u0, x_lb, t_lb, x_ub, t_ub, u_lb = u_lb, u_ub = u_ub) 84 | model.compile_data(x_s, t_s, y_s) 85 | #train loops 86 | model.fit(tf_iter = 100, newton_iter =100) 87 | 88 | X, T = np.meshgrid(x,t) 89 | 90 | X_star = np.hstack((X.flatten()[:,None], T.flatten()[:,None])) 91 | u_star = Exact_u.T.flatten()[:,None] 92 | 93 | u_pred, f_u_pred = model.predict(X_star) 94 | 95 | error_u = tdq.find_L2_error(u_pred, u_star) 96 | print('Error u: %e' % (error_u)) 97 | 98 | 99 | U_pred = tdq.get_griddata(X_star, u_pred.flatten(), (X,T)) 100 | FU_pred = tdq.get_griddata(X_star, f_u_pred.flatten(), (X,T)) 101 | 102 | lb = np.array([-1.0, 0.0]) 103 | ub = np.array([1.0, 1]) 104 | 105 | tdq.plotting.plot_solution_domain1D(model, [x, t], ub = ub, lb = lb, Exact_u=Exact_u) 106 | -------------------------------------------------------------------------------- /examples/burgers-new.py: -------------------------------------------------------------------------------- 1 | import math 2 | import scipy.io 3 | import tensordiffeq as tdq 4 | from tensordiffeq.boundaries import * 5 | from tensordiffeq.models import CollocationSolverND 6 | 7 | Domain = DomainND(["x", "t"], time_var='t') 8 | 9 | Domain.add("x", [-1.0, 1.0], 256) 10 | Domain.add("t", [0.0, 1.0], 100) 11 | 12 | N_f = 10000 13 | Domain.generate_collocation_points(N_f) 14 | 15 | 16 | def func_ic(x): 17 | return -np.sin(x * math.pi) 18 | 19 | init = IC(Domain, [func_ic], var=[['x']]) 20 | upper_x = dirichletBC(Domain, val=0.0, var='x', target="upper") 21 | lower_x = dirichletBC(Domain, val=0.0, var='x', target="lower") 22 | 23 | BCs = [init, upper_x, lower_x] 24 | 25 | 26 | def f_model(u_model, x, t): 27 | u = u_model(tf.concat([x, t], 1)) 28 | u_x = tf.gradients(u, x) 29 | u_xx = tf.gradients(u_x, x) 30 | u_t = tf.gradients(u, t) 31 | f_u = u_t + u * u_x - (0.01 / tf.constant(math.pi)) * u_xx 32 | return f_u 33 | 34 | 35 | layer_sizes = [2, 20, 20, 20, 20, 20, 20, 20, 20, 1] 36 | 37 | model = CollocationSolverND() 38 | model.compile(layer_sizes, f_model, Domain, BCs) 39 | 40 | # to reproduce results from Raissi and the SA-PINNs paper, train for 10k newton and 10k adam 41 | model.fit(tf_iter=10000, newton_iter=10000) 42 | 43 | 44 | ####################################################### 45 | #################### PLOTTING 
######################### 46 | ####################################################### 47 | 48 | data = scipy.io.loadmat('burgers_shock.mat') 49 | 50 | Exact = data['usol'] 51 | Exact_u = np.real(Exact) 52 | 53 | # t = data['tt'].flatten()[:,None] 54 | # x = data['x'].flatten()[:,None] 55 | 56 | x = Domain.domaindict[0]['xlinspace'] 57 | t = Domain.domaindict[1]["tlinspace"] 58 | 59 | X, T = np.meshgrid(x, t) 60 | 61 | # print(np.shape((X,T))) #2, 100, 256 62 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 63 | u_star = Exact_u.T.flatten()[:, None] 64 | 65 | u_pred, f_u_pred = model.predict(X_star) 66 | 67 | error_u = tdq.helpers.find_L2_error(u_pred, u_star) 68 | print('Error u: %e' % (error_u)) 69 | 70 | U_pred = tdq.plotting.get_griddata(X_star, u_pred.flatten(), (X, T)) 71 | FU_pred = tdq.plotting.get_griddata(X_star, f_u_pred.flatten(), (X, T)) 72 | 73 | lb = np.array([-1.0, 0.0]) 74 | ub = np.array([1.0, 1]) 75 | 76 | tdq.plotting.plot_solution_domain1D(model, [x, t], ub=ub, lb=lb, Exact_u=Exact_u) 77 | -------------------------------------------------------------------------------- /examples/burgers_shock.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/examples/burgers_shock.mat -------------------------------------------------------------------------------- /examples/steady-state-poisson.py: -------------------------------------------------------------------------------- 1 | import math 2 | import matplotlib.pyplot as plt 3 | import tensorflow as tf 4 | import tensordiffeq as tdq 5 | from tensordiffeq.boundaries import * 6 | from tensordiffeq.models import CollocationSolverND 7 | from tensorflow.math import sin 8 | from tensordiffeq.utils import constant 9 | 10 | Domain = DomainND(["x", "y"]) 11 | 12 | Domain.add("x", [0, 1.0], 11) 13 | Domain.add("y", [0, 1.0], 11) 14 | 15 | N_f = 100 16 | Domain.generate_collocation_points(N_f) 17 | 18 | 19 | def f_model(u_model, x, y): 20 | u = u_model(tf.concat([x, y], 1)) 21 | u_x = tf.gradients(u, x)[0] 22 | u_y = tf.gradients(u, y)[0] 23 | u_xx = tf.gradients(u_x, x)[0] 24 | u_yy = tf.gradients(u_y, y)[0] 25 | 26 | a1 = constant(1.0) 27 | a2 = constant(1.0) 28 | pi = constant(math.pi) 29 | 30 | # we use this specific forcing term because we have an exact analytical solution for this case 31 | # to compare the results of the PINN solution 32 | # note that we must use tensorflow math primitives such as sin, cos, etc! 
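# Method of manufactured solutions: with this forcing, u_xx + u_yy = forcing
# is solved exactly by u(x, y) = sin(a1*pi*x) * sin(a2*pi*y) / ((a1**2 + a2**2) * pi**2),
# since u_xx + u_yy = -(a1**2 + a2**2) * pi**2 * u. With a1 = a2 = 1 this is the
# Exact_u = sin(pi*x) * sin(pi*y) / (2 * pi**2) used for comparison further down.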
33 | forcing = - sin(a1 * pi * x) * sin(a2 * pi * y) 34 | 35 | f_u = u_xx + u_yy - forcing # = 0 36 | 37 | return f_u 38 | 39 | 40 | def func_upper_x(y): 41 | return -sin(constant(math.pi) * y) * sin(constant(math.pi)) 42 | 43 | 44 | def func_upper_y(x): 45 | return -sin(constant(math.pi) * x) * sin(constant(math.pi)) 46 | 47 | 48 | lower_x = dirichletBC(Domain, val=0.0, var='x', target="upper") 49 | upper_x = FunctionDirichletBC(Domain, fun=[func_upper_x], var='x', target="upper", func_inputs=["y"], n_values=10) 50 | upper_y = FunctionDirichletBC(Domain, fun=[func_upper_y], var='y', target="upper", func_inputs=["x"], n_values=10) 51 | lower_y = dirichletBC(Domain, val=0.0, var='y', target="lower") 52 | 53 | BCs = [upper_x, lower_x, upper_y, lower_y] 54 | 55 | layer_sizes = [2, 16, 16, 1] 56 | 57 | model = CollocationSolverND() 58 | model.compile(layer_sizes, f_model, Domain, BCs) 59 | model.tf_optimizer = tf.keras.optimizers.Adam(lr=.005) 60 | model.fit(tf_iter=4000) 61 | 62 | # get exact solution 63 | nx, ny = (11, 11) 64 | x = np.linspace(0, 1, nx) 65 | y = np.linspace(0, 1, ny) 66 | 67 | xv, yv = np.meshgrid(x, y) 68 | 69 | x = np.reshape(x, (-1, 1)) 70 | y = np.reshape(y, (-1, 1)) 71 | 72 | # Exact analytical soln is available: 73 | Exact_u = (np.sin(math.pi * xv) * np.sin(math.pi * yv)) / (2*math.pi**2) 74 | 75 | # Flatten for use 76 | u_star = Exact_u.flatten()[:, None] 77 | 78 | # Plotting 79 | x = Domain.domaindict[0]['xlinspace'] 80 | y = Domain.domaindict[1]["ylinspace"] 81 | 82 | X, Y = np.meshgrid(x, y) 83 | 84 | # print(np.shape((X,Y))) # 2, 256, 256 85 | X_star = np.hstack((X.flatten()[:, None], Y.flatten()[:, None])) 86 | 87 | lb = np.array([0.0, 0.0]) 88 | ub = np.array([1.0, 1]) 89 | 90 | u_pred, f_u_pred = model.predict(X_star) 91 | 92 | #error_u = tdq.helpers.find_L2_error(u_pred, u_star) 93 | #print('Error u: %e' % (error_u)) 94 | 95 | U_pred = tdq.plotting.get_griddata(X_star, u_pred.flatten(), (X, Y)) 96 | FU_pred = tdq.plotting.get_griddata(X_star, f_u_pred.flatten(), (X, Y)) 97 | 98 | lb = np.array([0.0, 0.0]) 99 | ub = np.array([1.0, 1.0]) 100 | 101 | tdq.plotting.plot_solution_domain1D(model, [x, y], ub=ub, lb=lb, Exact_u=Exact_u) 102 | -------------------------------------------------------------------------------- /examples/steady-state.py: -------------------------------------------------------------------------------- 1 | import math 2 | import matplotlib.pyplot as plt 3 | import tensorflow as tf 4 | import tensordiffeq as tdq 5 | from tensordiffeq.boundaries import * 6 | from tensordiffeq.models import CollocationSolverND 7 | from tensorflow.math import sin 8 | from tensordiffeq.utils import constant 9 | 10 | Domain = DomainND(["x", "y"]) 11 | 12 | Domain.add("x", [-1.0, 1.0], 1001) 13 | Domain.add("y", [-1.0, 1.0], 1001) 14 | 15 | N_f = 10000 16 | Domain.generate_collocation_points(N_f) 17 | 18 | 19 | def f_model(u_model, x, y): 20 | u = u_model(tf.concat([x, y], 1)) 21 | u_x = tf.gradients(u, x)[0] 22 | u_y = tf.gradients(u, y)[0] 23 | u_xx = tf.gradients(u_x, x)[0] 24 | u_yy = tf.gradients(u_y, y)[0] 25 | 26 | a1 = constant(1.0) 27 | a2 = constant(4.0) 28 | ksq = constant(1.0) 29 | pi = constant(math.pi) 30 | 31 | # we use this specific forcing term because we have an exact analytical solution for this case 32 | # to compare the results of the PINN solution 33 | # note that we must use tensorflow math primitives such as sin, cos, etc! 
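# Method of manufactured solutions for this Helmholtz problem: the forcing is
# assembled so that u(x, y) = sin(a1*pi*x) * sin(a2*pi*y) satisfies
# u_xx + u_yy + ksq*u = forcing exactly, using u_xx = -(a1*pi)**2 * u and
# u_yy = -(a2*pi)**2 * u. With a1 = 1, a2 = 4 this matches the
# Exact_u = sin(pi*x) * sin(4*pi*y) used for the error check below.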
34 | forcing = - (a1 * pi) ** 2 * sin(a1 * pi * x) * sin(a2 * pi * y) - \ 35 | (a2 * pi) ** 2 * sin(a1 * pi * x) * sin(a2 * pi * y) + \ 36 | ksq * sin(a1 * pi * x) * sin(a2 * pi * y) 37 | 38 | f_u = u_xx + u_yy + ksq * u - forcing # = 0 39 | 40 | return f_u 41 | 42 | 43 | upper_x = dirichletBC(Domain, val=0.0, var='x', target="upper") 44 | lower_x = dirichletBC(Domain, val=0.0, var='x', target="lower") 45 | upper_y = dirichletBC(Domain, val=0.0, var='y', target="upper") 46 | lower_y = dirichletBC(Domain, val=0.0, var='y', target="lower") 47 | 48 | BCs = [upper_x, lower_x, upper_y, lower_y] 49 | 50 | layer_sizes = [2, 50, 50, 50, 50, 1] 51 | 52 | model = CollocationSolverND() 53 | model.compile(layer_sizes, f_model, Domain, BCs) 54 | 55 | model.fit(tf_iter=10000, newton_iter=10000) 56 | 57 | # get exact solution 58 | nx, ny = (1001, 1001) 59 | x = np.linspace(-1, 1, nx) 60 | y = np.linspace(-1, 1, ny) 61 | 62 | xv, yv = np.meshgrid(x, y) 63 | 64 | x = np.reshape(x, (-1, 1)) 65 | y = np.reshape(y, (-1, 1)) 66 | 67 | # Exact analytical soln is available: 68 | Exact_u = np.sin(math.pi * xv) * np.sin(4 * math.pi * yv) 69 | 70 | # Flatten for use 71 | u_star = Exact_u.flatten()[:, None] 72 | 73 | # Plotting 74 | x = Domain.domaindict[0]['xlinspace'] 75 | y = Domain.domaindict[1]["ylinspace"] 76 | 77 | X, Y = np.meshgrid(x, y) 78 | 79 | # print(np.shape((X,Y))) # 2, 256, 256 80 | X_star = np.hstack((X.flatten()[:, None], Y.flatten()[:, None])) 81 | 82 | lb = np.array([-1.0, -1.0]) 83 | ub = np.array([1.0, 1]) 84 | 85 | u_pred, f_u_pred = model.predict(X_star) 86 | 87 | error_u = tdq.helpers.find_L2_error(u_pred, u_star) 88 | print('Error u: %e' % (error_u)) 89 | 90 | U_pred = tdq.plotting.get_griddata(X_star, u_pred.flatten(), (X, Y)) 91 | FU_pred = tdq.plotting.get_griddata(X_star, f_u_pred.flatten(), (X, Y)) 92 | 93 | lb = np.array([-1.0, -1.0]) 94 | ub = np.array([1.0, 1.0]) 95 | 96 | tdq.plotting.plot_solution_domain1D(model, [x, y], ub=ub, lb=lb, Exact_u=Exact_u) 97 | -------------------------------------------------------------------------------- /examples/testing.py: -------------------------------------------------------------------------------- 1 | import scipy.io 2 | import math 3 | import tensordiffeq as tdq 4 | from tensordiffeq.models import CollocationSolverND 5 | # from tensordiffeq import DomainND 6 | from tensordiffeq.boundaries import * 7 | 8 | Domain = DomainND(["x", "y", "t"], time_var='t') 9 | 10 | Domain.add("x", [-1.0, 1.0], 256) 11 | Domain.add("y", [-1.0, 1.0], 256) 12 | Domain.add("t", [0.0, 1.0], 100) 13 | 14 | N_f = 20000 15 | Domain.generate_collocation_points(N_f) 16 | 17 | 18 | def func_ic_xy(x, y): 19 | return -np.sin(x * math.pi) + -np.sin(y * math.pi) 20 | 21 | init = IC(Domain, [func_ic_xy], var=[['x', 'y']]) 22 | 23 | 24 | # Conditions to be considered at the boundaries for the periodic BC 25 | def deriv_model(u_model, x, y, t): 26 | u = u_model(tf.concat([x, y, t], 1)) 27 | u_x = tf.gradients(u, x)[0] 28 | u_y = tf.gradients(u, y)[0] 29 | u_xx = tf.gradients(u_x, x)[0] 30 | u_yx = tf.gradients(u_y, x)[0] 31 | u_yy = tf.gradients(u_y, y)[0] 32 | u_xy = tf.gradients(u_x, y)[0] 33 | return u, u_x, u_y, u_xx, u_yy, u_xy, u_yx 34 | 35 | 36 | x_periodic = periodicBC(Domain, ["x", "y"], [deriv_model]) 37 | 38 | 39 | # upper_x = dirichlectBC(Domain, val=0.0, var='x',target="upper") 40 | # 41 | # lower_x = dirichlectBC(Domain, val=0.0, var='x', target="lower") 42 | 43 | BCs = [init, x_periodic] 44 | 45 | 46 | def f_model(u_model, x, y, t): 47 | u = 
u_model(tf.concat([x, y, t], 1)) 48 | u_x = tf.gradients(u, x) 49 | u_xx = tf.gradients(u_x, x) 50 | u_t = tf.gradients(u, t) 51 | 52 | f_u = u_t + u * u_x - (0.05 / tf.constant(math.pi)) * u_xx 53 | 54 | return f_u 55 | 56 | 57 | layer_sizes = [3, 128, 128, 128, 128, 1] 58 | 59 | model = CollocationSolverND() 60 | model.compile(layer_sizes, f_model, Domain, BCs) 61 | model.fit(tf_iter=1000, newton_iter=1000) 62 | 63 | data = scipy.io.loadmat('burgers_shock.mat') 64 | 65 | Exact = data['usol'] 66 | Exact_u = np.real(Exact) 67 | 68 | 69 | x = Domain.domaindict[0]['xlinspace'] 70 | t = Domain.domaindict[1]["tlinspace"] 71 | 72 | X, T = np.meshgrid(x, t) 73 | 74 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 75 | u_star = Exact_u.T.flatten()[:, None] 76 | 77 | u_pred, f_u_pred = model.predict(X_star) 78 | 79 | error_u = tdq.find_L2_error(u_pred, u_star) 80 | print('Error u: %e' % (error_u)) 81 | 82 | U_pred = tdq.get_griddata(X_star, u_pred.flatten(), (X, T)) 83 | FU_pred = tdq.get_griddata(X_star, f_u_pred.flatten(), (X, T)) 84 | 85 | lb = np.array([-1.0, 0.0]) 86 | ub = np.array([1.0, 1]) 87 | 88 | tdq.plotting.plot_solution_domain1D(model, [x, t], ub=ub, lb=lb, Exact_u=Exact_u) 89 | -------------------------------------------------------------------------------- /examples/testing1D-AC.py: -------------------------------------------------------------------------------- 1 | import scipy.io 2 | import math 3 | import tensordiffeq as tdq 4 | from tensordiffeq.models import CollocationSolverND 5 | from tensordiffeq.boundaries import * 6 | 7 | Domain = DomainND(["x", "t"], time_var='t') 8 | 9 | Domain.add("x", [-1.0, 1.0], 256) 10 | Domain.add("t", [0.0, 1.0], 100) 11 | 12 | N_f = 20000 13 | Domain.generate_collocation_points(N_f) 14 | 15 | 16 | def func_ic(x): 17 | return x**2*np.cos(math.pi*x) 18 | 19 | 20 | init = IC(Domain, [func_ic], var=[['x']]) 21 | 22 | # Conditions to be considered at the boundaries for the periodic BC 23 | def deriv_model(u_model, x, t): 24 | u = u_model(tf.concat([x, t], 1)) 25 | u_x = tf.gradients(u, x)[0] 26 | return u, u_x 27 | 28 | x_periodic = periodicBC(Domain, ["x"], [deriv_model]) 29 | 30 | 31 | BCs = [init, x_periodic] 32 | 33 | 34 | def f_model(u_model, x, t): 35 | tf.print(np.shape(x)) 36 | u = u_model(tf.concat([x,t],1)) 37 | u_x = tf.gradients(u, x) 38 | u_xx = tf.gradients(u_x, x) 39 | u_t = tf.gradients(u,t) 40 | c1 = tdq.utils.constant(.0001) 41 | c2 = tdq.utils.constant(5.0) 42 | f_u = u_t - c1*u_xx + c2*u*u*u - c2*u 43 | return f_u 44 | 45 | 46 | layer_sizes = [2, 128, 128, 128, 128, 1] 47 | 48 | model = CollocationSolverND() 49 | model.compile(layer_sizes, f_model, Domain, BCs) 50 | model.fit(tf_iter=1000, newton_iter=1000) 51 | 52 | 53 | data = scipy.io.loadmat('burgers_shock.mat') 54 | 55 | Exact = data['usol'] 56 | Exact_u = np.real(Exact) 57 | 58 | # t = data['tt'].flatten()[:,None] 59 | # x = data['x'].flatten()[:,None] 60 | 61 | x = Domain.domaindict[0]['xlinspace'] 62 | t = Domain.domaindict[1]["tlinspace"] 63 | 64 | 65 | print(x,t) 66 | 67 | X, T = np.meshgrid(x, t) 68 | 69 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 70 | u_star = Exact_u.T.flatten()[:, None] 71 | 72 | u_pred, f_u_pred = model.predict(X_star) 73 | 74 | error_u = tdq.find_L2_error(u_pred, u_star) 75 | print('Error u: %e' % (error_u)) 76 | 77 | U_pred = tdq.get_griddata(X_star, u_pred.flatten(), (X, T)) 78 | FU_pred = tdq.get_griddata(X_star, f_u_pred.flatten(), (X, T)) 79 | 80 | lb = np.array([-1.0, 0.0]) 81 | ub = np.array([1.0, 1]) 
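# Note: this smoke test trains on the Allen-Cahn residual but compares against
# burgers_shock.mat loaded above, so the L2 error printed above exercises the
# predict/plotting plumbing rather than measuring Allen-Cahn accuracy; see
# AC-baseline.py, which compares against AC.mat, for a meaningful error.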
82 | 83 | tdq.plotting.plot_solution_domain1D(model, [x, t], ub=ub, lb=lb, Exact_u=Exact_u) 84 | -------------------------------------------------------------------------------- /examples/testing1D.py: -------------------------------------------------------------------------------- 1 | import scipy.io 2 | import math 3 | import tensordiffeq as tdq 4 | from tensordiffeq.models import CollocationSolverND 5 | from tensordiffeq.boundaries import * 6 | 7 | Domain = DomainND(["x", "t"], time_var='t') 8 | 9 | Domain.add("x", [-1.0, 1.0], 512) 10 | Domain.add("t", [0.0, 1.0], 201) 11 | 12 | N_f = 50000 13 | Domain.generate_collocation_points(N_f) 14 | 15 | 16 | def func_ic(x): 17 | return x ** 2 * np.cos(math.pi * x) 18 | 19 | 20 | # Conditions to be considered at the boundaries for the periodic BC 21 | def deriv_model(u_model, x, t): 22 | u = u_model(tf.concat([x, t], 1)) 23 | u_x = tf.gradients(u, x)[0] 24 | u_xx = tf.gradients(u_x, x)[0] 25 | u_xxx = tf.gradients(u_xx, x)[0] 26 | u_xxxx = tf.gradients(u_xxx, x)[0] 27 | return u, u_x, u_xxx, u_xxxx 28 | 29 | 30 | init = IC(Domain, [func_ic], var=[['x']]) 31 | x_periodic = periodicBC(Domain, ['x'], [deriv_model]) 32 | 33 | BCs = [init, x_periodic] 34 | 35 | 36 | def f_model(u_model, x, t): 37 | u = u_model(tf.concat([x, t], 1)) 38 | u_x = tf.gradients(u, x) 39 | u_xx = tf.gradients(u_x, x) 40 | u_t = tf.gradients(u, t) 41 | c1 = tdq.utils.constant(.0001) 42 | c2 = tdq.utils.constant(5.0) 43 | f_u = u_t - c1 * u_xx + c2 * u * u * u - c2 * u 44 | return f_u 45 | 46 | 47 | col_weights = tf.Variable(tf.random.uniform([N_f, 1]), trainable=True, dtype=tf.float32) 48 | u_weights = tf.Variable(100 * tf.random.uniform([512, 1]), trainable=True, dtype=tf.float32) 49 | 50 | layer_sizes = [2, 128, 128, 128, 128, 1] 51 | 52 | model = CollocationSolverND() 53 | model.compile(layer_sizes, f_model, Domain, BCs, isAdaptive=True, col_weights=col_weights, u_weights=u_weights) 54 | model.fit(tf_iter=10000, newton_iter=10000) 55 | 56 | # Load high-fidelity data for error calculation 57 | data = scipy.io.loadmat('AC.mat') 58 | 59 | Exact = data['uu'] 60 | Exact_u = np.real(Exact) 61 | 62 | # t = data['tt'].flatten()[:,None] 63 | # x = data['x'].flatten()[:,None] 64 | 65 | x = Domain.domaindict[0]['xlinspace'] 66 | t = Domain.domaindict[1]["tlinspace"] 67 | 68 | # create mesh for plotting 69 | 70 | X, T = np.meshgrid(x, t) 71 | 72 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 73 | u_star = Exact_u.T.flatten()[:, None] 74 | 75 | # forward pass through model 76 | u_pred, f_u_pred = model.predict(X_star) 77 | 78 | error_u = tdq.find_L2_error(u_pred, u_star) 79 | print('Error u: %e' % (error_u)) 80 | 81 | U_pred = tdq.get_griddata(X_star, u_pred.flatten(), (X, T)) 82 | FU_pred = tdq.get_griddata(X_star, f_u_pred.flatten(), (X, T)) 83 | 84 | lb = np.array([-1.0, 0.0]) 85 | ub = np.array([1.0, 1]) 86 | 87 | tdq.plotting.plot_solution_domain1D(model, [x, t], ub=ub, lb=lb, Exact_u=Exact_u) 88 | -------------------------------------------------------------------------------- /examples/transfer-learn.py: -------------------------------------------------------------------------------- 1 | import scipy.io 2 | import math 3 | import tensorflow as tf 4 | import tensordiffeq as tdq 5 | from tensordiffeq.models import CollocationSolverND 6 | from tensordiffeq.boundaries import * 7 | 8 | Domain = DomainND(["x", "t"], time_var='t') 9 | 10 | Domain.add("x", [-1.0, 1.0], 512) 11 | Domain.add("t", [0.0, 1.0], 201) 12 | 13 | N_f = 50000 14 | 
Domain.generate_collocation_points(N_f) 15 | 16 | 17 | def func_ic(x): 18 | return x ** 2 * np.cos(math.pi * x) 19 | 20 | 21 | # Conditions to be considered at the boundaries for the periodic BC 22 | def deriv_model(u_model, x, t): 23 | u = u_model(tf.concat([x, t], 1)) 24 | u_x = tf.gradients(u, x)[0] 25 | # u_xx = tf.gradients(u_x, x)[0] 26 | # u_xxx = tf.gradients(u_xx, x)[0] 27 | # u_xxxx = tf.gradients(u_xxx, x)[0] 28 | return u, u_x 29 | 30 | 31 | init = IC(Domain, [func_ic], var=[['x']]) 32 | x_periodic = periodicBC(Domain, ['x'], [deriv_model]) 33 | 34 | BCs = [init, x_periodic] 35 | 36 | 37 | def f_model(u_model, x, t): 38 | u = u_model(tf.concat([x, t], 1)) 39 | u_x = tf.gradients(u, x) 40 | u_xx = tf.gradients(u_x, x) 41 | u_t = tf.gradients(u, t) 42 | c1 = tdq.utils.constant(.0001) 43 | c2 = tdq.utils.constant(5.0) 44 | f_u = u_t - c1 * u_xx + c2 * u * u * u - c2 * u 45 | return f_u 46 | 47 | 48 | col_weights = tf.Variable(tf.random.uniform([N_f, 1]), trainable=True, dtype=tf.float32) 49 | u_weights = tf.Variable(100 * tf.random.uniform([512, 1]), trainable=True, dtype=tf.float32) 50 | 51 | layer_sizes = [2, 128, 128, 128, 128, 1] 52 | 53 | model = CollocationSolverND() 54 | model.compile(layer_sizes, f_model, Domain, BCs, isAdaptive=True, col_weights=col_weights, u_weights=u_weights) 55 | model.fit(tf_iter=5000) 56 | model.save("test_model") 57 | 58 | # Must re-initialize the model class in order to effectively transfer learn or resume training 59 | model = CollocationSolverND() 60 | model.compile(layer_sizes, f_model, Domain, BCs, isAdaptive=True, col_weights=col_weights, u_weights=u_weights) 61 | model.tf_optimizer = tf.keras.optimizers.Adam(.0001) 62 | model.tf_optimizer_weights= tf.keras.optimizers.Adam(.0001) 63 | model.load_model("test_model") 64 | model.fit(tf_iter=5000) 65 | 66 | # Must re-initialize the model class in order to effectively transfer learn or resume training 67 | model = CollocationSolverND() 68 | model.compile(layer_sizes, f_model, Domain, BCs, isAdaptive=True, col_weights=col_weights, u_weights=u_weights) 69 | model.tf_optimizer = tf.keras.optimizers.Adam(.00001) 70 | model.tf_optimizer_weights= tf.keras.optimizers.Adam(.00001) 71 | model.load_model("test_model") 72 | model.fit(tf_iter=5000) 73 | 74 | # Load high-fidelity data for error calculation 75 | data = scipy.io.loadmat('AC.mat') 76 | 77 | Exact = data['uu'] 78 | Exact_u = np.real(Exact) 79 | 80 | 81 | 82 | x = Domain.domaindict[0]['xlinspace'] 83 | t = Domain.domaindict[1]["tlinspace"] 84 | 85 | # create mesh for plotting 86 | 87 | X, T = np.meshgrid(x, t) 88 | 89 | X_star = np.hstack((X.flatten()[:, None], T.flatten()[:, None])) 90 | u_star = Exact_u.T.flatten()[:, None] 91 | 92 | # forward pass through model 93 | u_pred, f_u_pred = model.predict(X_star) 94 | 95 | error_u = tdq.helpers.find_L2_error(u_pred, u_star) 96 | print('Error u: %e' % (error_u)) 97 | 98 | U_pred = tdq.plotting.get_griddata(X_star, u_pred.flatten(), (X, T)) 99 | FU_pred = tdq.plotting.get_griddata(X_star, f_u_pred.flatten(), (X, T)) 100 | 101 | lb = np.array([-1.0, 0.0]) 102 | ub = np.array([1.0, 1]) 103 | 104 | tdq.plotting.plot_solution_domain1D(model, [x, t], ub=ub, lb=lb, Exact_u=Exact_u) 105 | -------------------------------------------------------------------------------- /logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/logo.png 
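
**Editor's note:** the adaptive examples above pass `col_weights`/`u_weights` into `CollocationSolverND.compile`, while the `compile` signature in `tensordiffeq/models.py` later in this listing accepts `dict_adaptive` and `init_weights` instead. The sketch below is a minimal, hedged translation of `examples/testing1D.py` onto that newer interface; the exact container types expected by `initialize_weights_loss` (defined in `tensordiffeq/utils.py`, not shown in this section), including whether `None` is accepted for a non-adaptive term, are assumptions.

```python
import math
import numpy as np
import tensorflow as tf
import tensordiffeq as tdq
from tensordiffeq.models import CollocationSolverND
from tensordiffeq.boundaries import *  # exposes DomainND, IC, periodicBC

# Domain and collocation points, mirroring examples/testing1D.py
Domain = DomainND(["x", "t"], time_var='t')
Domain.add("x", [-1.0, 1.0], 512)
Domain.add("t", [0.0, 1.0], 201)
N_f = 50000
Domain.generate_collocation_points(N_f)

def func_ic(x):
    return x ** 2 * np.cos(math.pi * x)

def deriv_model(u_model, x, t):
    u = u_model(tf.concat([x, t], 1))
    u_x = tf.gradients(u, x)[0]
    return u, u_x

def f_model(u_model, x, t):
    # Allen-Cahn residual: u_t - 0.0001*u_xx + 5*u^3 - 5*u
    u = u_model(tf.concat([x, t], 1))
    u_x = tf.gradients(u, x)
    u_xx = tf.gradients(u_x, x)
    u_t = tf.gradients(u, t)
    c1 = tdq.utils.constant(.0001)
    c2 = tdq.utils.constant(5.0)
    return u_t - c1 * u_xx + c2 * u * u * u - c2 * u

BCs = [IC(Domain, [func_ic], var=[['x']]),
       periodicBC(Domain, ['x'], [deriv_model])]

# One adaptive residual, adaptive IC, non-adaptive periodic BC
# (update_loss() in models.py rejects adaptive periodic BCs).
dict_adaptive = {"residual": [True], "BCs": [True, False]}
init_weights = {"residual": [tf.random.uniform([N_f, 1])],
                "BCs": [100 * tf.random.uniform([512, 1]), None]}

model = CollocationSolverND()
model.compile([2, 128, 128, 128, 128, 1], f_model, Domain, BCs,
              isAdaptive=True, dict_adaptive=dict_adaptive,
              init_weights=init_weights)
model.fit(tf_iter=10000, newton_iter=10000)
```

The split into `dict_adaptive` (which terms are adaptive) and `init_weights` (their initial values) lets the same `compile` call cover any mix of adaptive residuals and boundary conditions, which the older per-vector keyword interface could not express.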
-------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | matplotlib 2 | numpy 3 | scipy 4 | tensorflow 5 | tensorflow_probability 6 | pyDOE2 7 | pyfiglet 8 | tqdm -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | import os 3 | import sys 4 | 5 | if sys.platform == 'darwin': 6 | os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9' 7 | 8 | with open("README.md", "r") as fh: 9 | long_description = fh.read() 10 | 11 | with open("requirements.txt", "r") as f: 12 | install_requires = [x.strip() for x in f.readlines()] 13 | 14 | setuptools.setup( 15 | name="tensordiffeq", 16 | version="0.2.0", 17 | author="Levi McClenny", 18 | author_email="levimcclenny@tamu.edu", 19 | description="Distributed PDE Solver in Tensorflow", 20 | long_description=long_description, 21 | long_description_content_type="text/markdown", 22 | url="https://github.com/tensordiffeq/tensordiffeq", 23 | download_url="https://github.com/tensordiffeq/tensordiffeq/tarball/v0.2.0", 24 | install_requires=install_requires, 25 | packages=setuptools.find_packages(), 26 | include_package_data=True, 27 | classifiers=[ 28 | "Programming Language :: Python :: 3", 29 | "Programming Language :: Python :: 3.6", 30 | "Programming Language :: Python :: 3.7", 31 | "Programming Language :: Python :: 3.8", 32 | "License :: OSI Approved :: MIT License", 33 | "Operating System :: OS Independent", 34 | "Environment :: GPU :: NVIDIA CUDA", 35 | "Intended Audience :: Developers", 36 | "Intended Audience :: Education", 37 | "Intended Audience :: Science/Research", 38 | "Topic :: Scientific/Engineering", 39 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 40 | "Topic :: Scientific/Engineering :: Mathematics", 41 | "Topic :: Scientific/Engineering :: Physics", 42 | "Topic :: Software Development :: Libraries", 43 | "Topic :: Software Development :: Libraries :: Python Modules" 44 | ], 45 | python_requires='>=3.6', 46 | ) 47 | -------------------------------------------------------------------------------- /tdq-banner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/tdq-banner.png -------------------------------------------------------------------------------- /tensordiffeq.egg-info/PKG-INFO: -------------------------------------------------------------------------------- 1 | Metadata-Version: 2.1 2 | Name: tensordiffeq 3 | Version: 0.1.9 4 | Summary: Distributed PDE Solver in Tensorflow 5 | Home-page: https://github.com/tensordiffeq/tensordiffeq 6 | Author: Levi McClenny 7 | Author-email: levimcclenny@tamu.edu 8 | License: UNKNOWN 9 | Download-URL: https://github.com/tensordiffeq/tensordiffeq/tarball/v0.1.9 10 | Platform: UNKNOWN 11 | Classifier: Programming Language :: Python :: 3 12 | Classifier: Programming Language :: Python :: 3.6 13 | Classifier: Programming Language :: Python :: 3.7 14 | Classifier: Programming Language :: Python :: 3.8 15 | Classifier: License :: OSI Approved :: MIT License 16 | Classifier: Operating System :: OS Independent 17 | Classifier: Environment :: GPU :: NVIDIA CUDA 18 | Classifier: Intended Audience :: Developers 19 | Classifier: Intended Audience :: Education 20 | Classifier: Intended Audience :: 
Science/Research 21 | Classifier: Topic :: Scientific/Engineering 22 | Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence 23 | Classifier: Topic :: Scientific/Engineering :: Mathematics 24 | Classifier: Topic :: Scientific/Engineering :: Physics 25 | Classifier: Topic :: Software Development :: Libraries 26 | Classifier: Topic :: Software Development :: Libraries :: Python Modules 27 | Requires-Python: >=3.6 28 | Description-Content-Type: text/markdown 29 | 30 | 31 | ![TensorDiffEq logo](tdq-banner.png) 32 | 33 | 34 | ![Package Build](https://github.com/tensordiffeq/TensorDiffEq/workflows/Package%20Build/badge.svg) 35 | ![Package Release](https://github.com/tensordiffeq/TensorDiffEq/workflows/Package%20Release/badge.svg) 36 | ![pypi](https://img.shields.io/pypi/v/tensordiffeq) 37 | ![downloads](https://img.shields.io/pypi/dm/tensordiffeq) 38 | ![python versions](https://img.shields.io/pypi/pyversions/tensordiffeq) 39 | 40 | ## Efficient and Scalable Physics-Informed Deep Learning 41 | 42 | #### Collocation-based PINN PDE solvers for prediction and discovery methods on top of [Tensorflow](https://github.com/tensorflow/tensorflow) 2.X for multi-worker distributed computing. 43 | 44 | Use TensorDiffEq if you require: 45 | - A meshless PINN solver that can distribute over multiple workers (GPUs) for 46 | forward problems (inference) and inverse problems (discovery) 47 | - Scalable domains - Iterated solver construction allows for N-D spatio-temporal support 48 | - support for N-D spatial domains with no time element is included 49 | - Self-Adaptive Collocation methods for forward and inverse PINNs 50 | - Intuitive user interface allowing for explicit definitions of variable domains, 51 | boundary conditions, initial conditions, and strong-form PDEs 52 | 53 | 54 | What makes TensorDiffEq different? 
55 | - Completely open-source
56 | - [Self-Adaptive Solvers](https://arxiv.org/abs/2009.04544) for forward and inverse problems, leading to increased accuracy of the solution and stability in training, resulting in
57 | less overall training time
58 | - Multi-GPU distributed training for large or fine-grained spatio-temporal domains
59 | - Built on top of Tensorflow 2.0 for increased support of new functionality exclusive to recent TF releases, such as [XLA support](https://www.tensorflow.org/xla),
60 | [autograph](https://blog.tensorflow.org/2018/07/autograph-converts-python-into-tensorflow-graphs.html) for efficient graph-building, and [grappler support](https://www.tensorflow.org/guide/graph_optimization)
61 | for graph optimization* - with no chance of the source code being sunset in a future Tensorflow release
62 | 
63 | - Intuitive interface - defining domains, BCs, ICs, and strong-form PDEs in "plain English"
64 | 
65 | 
66 | *In development
67 | 
68 | If you use TensorDiffEq in your work, please cite it via:
69 | 
70 | ```bibtex
71 | @article{mcclenny2021tensordiffeq,
72 |   title={TensorDiffEq: Scalable Multi-GPU Forward and Inverse Solvers for Physics Informed Neural Networks},
73 |   author={McClenny, Levi D and Haile, Mulugeta A and Braga-Neto, Ulisses M},
74 |   journal={arXiv preprint arXiv:2103.16034},
75 |   year={2021}
76 | }
77 | ```
78 | 
79 | ### Thanks to our additional contributors:
80 | @marcelodallaqua, @ragusa, @emiliocoutinho
81 | 
82 | 
83 | 
--------------------------------------------------------------------------------
/tensordiffeq.egg-info/SOURCES.txt:
--------------------------------------------------------------------------------
1 | README.md
2 | pyproject.toml
3 | setup.py
4 | tensordiffeq/__init__.py
5 | tensordiffeq/boundaries.py
6 | tensordiffeq/domains.py
7 | tensordiffeq/fit.py
8 | tensordiffeq/helpers.py
9 | tensordiffeq/models.py
10 | tensordiffeq/networks.py
11 | tensordiffeq/optimizers.py
12 | tensordiffeq/output.py
13 | tensordiffeq/plotting.py
14 | tensordiffeq/sampling.py
15 | tensordiffeq/utils.py
16 | tensordiffeq.egg-info/PKG-INFO
17 | tensordiffeq.egg-info/SOURCES.txt
18 | tensordiffeq.egg-info/dependency_links.txt
19 | tensordiffeq.egg-info/requires.txt
20 | tensordiffeq.egg-info/top_level.txt
21 | test/test_AC_distributed.py
22 | test/test_AC_distributed_minibatch.py
23 | test/test_AC_nonDistributed.py
24 | test/test_AC_nonDistributed_minibatch.py
25 | test/test_Burgers_distributed.py
26 | test/test_Burgers_distributed_minibatch.py
27 | test/test_Burgers_nonDistributed.py
28 | test/test_Burgers_nonDistributed_minibatch.py
--------------------------------------------------------------------------------
/tensordiffeq.egg-info/dependency_links.txt:
--------------------------------------------------------------------------------
1 | 
2 | 
--------------------------------------------------------------------------------
/tensordiffeq.egg-info/requires.txt:
--------------------------------------------------------------------------------
1 | matplotlib
2 | numpy
3 | scipy
4 | tensorflow
5 | tensorflow_probability
6 | pyDOE2
7 | pyfiglet
8 | tqdm
9 | 
--------------------------------------------------------------------------------
/tensordiffeq.egg-info/top_level.txt:
--------------------------------------------------------------------------------
1 | tensordiffeq
2 | 
--------------------------------------------------------------------------------
/tensordiffeq/__init__.py:
--------------------------------------------------------------------------------
1 | 
from __future__ import absolute_import 2 | 3 | from tensordiffeq import models, optimizers, networks, plotting, utils, domains, boundaries, fit, helpers, sampling 4 | 5 | # from .models import CollocationSolverND, DiscoveryModel 6 | # from .boundaries import dirichletBC, periodicBC, IC 7 | # from .utils import constant, LatinHypercubeSample, tensor 8 | # from .plotting import newfig, get_griddata 9 | # from .helpers import find_L2_error 10 | # from .optimizers import graph_lbfgs, eager_lbfgs 11 | 12 | 13 | __all__ = [ 14 | "models", 15 | "networks", 16 | "plotting", 17 | "utils", 18 | "helpers", 19 | "optimizers", 20 | "boundaries", 21 | "domains", 22 | "fit", 23 | "sampling" 24 | ] 25 | -------------------------------------------------------------------------------- /tensordiffeq/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/tensordiffeq/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /tensordiffeq/__pycache__/__init__.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/tensordiffeq/__pycache__/__init__.cpython-37.pyc -------------------------------------------------------------------------------- /tensordiffeq/__pycache__/fit.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/tensordiffeq/__pycache__/fit.cpython-36.pyc -------------------------------------------------------------------------------- /tensordiffeq/__pycache__/fit.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/tensordiffeq/__pycache__/fit.cpython-37.pyc -------------------------------------------------------------------------------- /tensordiffeq/__pycache__/helpers.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/tensordiffeq/__pycache__/helpers.cpython-36.pyc -------------------------------------------------------------------------------- /tensordiffeq/__pycache__/helpers.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/tensordiffeq/__pycache__/helpers.cpython-37.pyc -------------------------------------------------------------------------------- /tensordiffeq/__pycache__/models.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/tensordiffeq/__pycache__/models.cpython-36.pyc -------------------------------------------------------------------------------- /tensordiffeq/__pycache__/models.cpython-37.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/tensordiffeq/__pycache__/models.cpython-37.pyc -------------------------------------------------------------------------------- /tensordiffeq/__pycache__/networks.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/tensordiffeq/__pycache__/networks.cpython-36.pyc -------------------------------------------------------------------------------- /tensordiffeq/__pycache__/networks.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/tensordiffeq/__pycache__/networks.cpython-37.pyc -------------------------------------------------------------------------------- /tensordiffeq/__pycache__/plotting.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/tensordiffeq/__pycache__/plotting.cpython-36.pyc -------------------------------------------------------------------------------- /tensordiffeq/__pycache__/plotting.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/tensordiffeq/__pycache__/plotting.cpython-37.pyc -------------------------------------------------------------------------------- /tensordiffeq/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/tensordiffeq/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /tensordiffeq/__pycache__/utils.cpython-37.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tensordiffeq/TensorDiffEq/7633927b8471a4150ea25972fbf41902af01070d/tensordiffeq/__pycache__/utils.cpython-37.pyc -------------------------------------------------------------------------------- /tensordiffeq/archive/models.py: -------------------------------------------------------------------------------- 1 | from tensordiffeq.fit import * 2 | 3 | class CollocationSolver1D(): 4 | def __init__(self, assimilate = False): 5 | self.assimilate = assimilate 6 | self.periodicBC = False 7 | 8 | 9 | def compile(self, layer_sizes, f_model, x_f, t_f, x0, t0, u0, x_lb, t_lb, x_ub, t_ub, u_ub = None, u_lb = None, isPeriodic = False, u_x_model = None, isAdaptive = False, col_weights = None, u_weights = None, g = None, dist = False): 10 | self.layer_sizes = layer_sizes 11 | self.sizes_w, self.sizes_b = get_sizes(layer_sizes) 12 | self.x0 = x0 13 | self.t0 = t0 14 | self.u0 = u0 15 | self.x_lb = x_lb 16 | self.t_lb = t_lb 17 | self.u_lb = u_lb 18 | self.x_ub = x_ub 19 | self.t_ub = t_ub 20 | self.u_ub = u_ub 21 | self.x_f = x_f 22 | self.t_f = t_f 23 | self.f_model = get_tf_model(f_model) 24 | self.isAdaptive = False 25 | self.g = g 26 | self.dist = dist 27 | #self.u_x_model = get_tf_model(u_x_model) 28 | if isPeriodic: 29 | self.periodicBC = True 30 | if not u_x_model: 31 | raise Exception("Periodic BC is listed but no u_x model is defined!") 32 
| else: 33 | self.u_x_model = get_tf_model(u_x_model) 34 | 35 | self.col_weights = col_weights 36 | self.u_weights = u_weights 37 | 38 | if isAdaptive: 39 | self.isAdaptive = True 40 | if self.col_weights is None and self.u_weights is None: 41 | raise Exception("Adaptive weights selected but no inputs were specified!") 42 | if not isAdaptive: 43 | if self.col_weights is not None and self.u_weights is not None: 44 | raise Exception("Adaptive weights are turned off but weight vectors were provided. Set the weight vectors to \"none\" to continue") 45 | 46 | def compile_data(self, x, t, y): 47 | if not self.assimilate: 48 | raise Exception("Assimilate needs to be set to 'true' for data assimilation. Re-initialize CollocationSolver1D with assimilate=True.") 49 | self.data_x = x 50 | self.data_t = t 51 | self.data_s = y 52 | 53 | def loss(self): 54 | if self.dist: 55 | 56 | f_u_pred = self.f_model(self.u_model, self.dist_x_f, self.dist_t_f) 57 | else: 58 | f_u_pred = self.f_model(self.u_model, self.x_f, self.t_f) 59 | 60 | u0_pred = self.u_model(tf.concat([self.x0, self.t0],1)) 61 | 62 | if self.periodicBC: 63 | u_lb_pred, u_x_lb_pred = self.u_x_model(self.u_model, self.x_lb, self.t_lb) 64 | u_ub_pred, u_x_ub_pred = self.u_x_model(self.u_model, self.x_ub, self.t_ub) 65 | mse_b_u = MSE(u_lb_pred,u_ub_pred) + MSE(u_x_lb_pred, u_x_ub_pred) 66 | else: 67 | u_lb_pred = self.u_model(tf.concat([self.x_lb, self.t_lb],1)) 68 | u_ub_pred = self.u_model(tf.concat([self.x_ub, self.t_ub],1)) 69 | mse_b_u = MSE(u_lb_pred, self.u_lb) + MSE(u_ub_pred, self.u_ub) 70 | 71 | mse_0_u = MSE(u0_pred, self.u0, self.u_weights) 72 | 73 | if self.g is not None: 74 | if self.dist: 75 | mse_f_u = g_MSE(f_u_pred, constant(0.0), self.g(self.dist_col_weights)) 76 | else: 77 | mse_f_u = g_MSE(f_u_pred, constant(0.0), self.g(self.col_weights)) 78 | 79 | else: 80 | mse_f_u = MSE(f_u_pred, constant(0.0)) 81 | 82 | if self.assimilate: 83 | s_pred = self.u_model(tf.concat([self.data_x, self.data_t],1)) 84 | mse_s_u = MSE(s_pred, self.data_s) 85 | return mse_0_u + mse_b_u + mse_f_u + mse_s_u, mse_0_u, mse_b_u, mse_f_u 86 | else: 87 | return mse_0_u + mse_b_u + mse_f_u, mse_0_u, mse_b_u, mse_f_u 88 | 89 | def grad(self): 90 | with tf.GradientTape() as tape: 91 | loss_value, mse_0, mse_b, mse_f = self.loss() 92 | grads = tape.gradient(loss_value, self.variables) 93 | return loss_value, grads 94 | 95 | 96 | def fit(self, tf_iter, newton_iter, batch_sz = None, newton_eager = True): 97 | if(self.isAdaptive and (batch_sz is not None)): 98 | raise Exception("Currently we dont support minibatching for adaptive PINNs") 99 | if self.dist: 100 | fit_dist(self, tf_iter = tf_iter, newton_iter = newton_iter, batch_sz = batch_sz, newton_eager = newton_eager) 101 | else: 102 | fit(self, tf_iter = tf_iter, newton_iter = newton_iter, batch_sz = batch_sz, newton_eager = newton_eager) 103 | 104 | 105 | #L-BFGS implementation from https://github.com/pierremtb/PINNs-TF2.0 106 | def get_loss_and_flat_grad(self): 107 | def loss_and_flat_grad(w): 108 | with tf.GradientTape() as tape: 109 | set_weights(self.u_model, w, self.sizes_w, self.sizes_b) 110 | loss_value, _, _, _ = self.loss() 111 | grad = tape.gradient(loss_value, self.u_model.trainable_variables) 112 | grad_flat = [] 113 | for g in grad: 114 | grad_flat.append(tf.reshape(g, [-1])) 115 | grad_flat = tf.concat(grad_flat, 0) 116 | #print(loss_value, grad_flat) 117 | return loss_value, grad_flat 118 | 119 | return loss_and_flat_grad 120 | 121 | 122 | def predict(self, X_star): 123 | X_star = 
convertTensor(X_star) 124 | u_star = self.u_model(X_star) 125 | 126 | f_u_star = self.f_model(self.u_model, X_star[:,0:1], 127 | X_star[:,1:2]) 128 | 129 | return u_star.numpy(), f_u_star.numpy() 130 | 131 | 132 | class CollocationSolver2D(CollocationSolver1D): 133 | 134 | def compile(self, layer_sizes, f_model, x_f, y_f, t_f, x0, t0, u0, x_lb, y_lb, t_lb, x_ub, y_ub, t_ub, isPeriodic = False, u_x_model = None, isAdaptive = False, col_weights = None, u_weights = None, g = None): 135 | CollocationSolver1D.compile(layer_sizes, f_model, x_f, t_f, x0, t0, u0, x_lb, t_lb, x_ub, t_ub, isPeriodic, u_x_model, isAdaptive, col_weights, u_weights, g) 136 | self.y_lb = y_lb 137 | self.y_ub = y_ub 138 | self.y_f = y_f 139 | 140 | def loss(self): 141 | f_u_pred = self.f_model(self.u_model, self.x_f, self.y_f, self.t_f) 142 | u0_pred = self.u_model(tf.concat([self.x0, self.y0, self.t0],1)) 143 | 144 | u_lb_pred, u_x_lb_pred, u_y_lb_pred = self.u_x_model(self.u_model, self.x_lb, self.y_lb, self.t_lb) 145 | u_ub_pred, u_x_ub_pred, u_y_ub_pred = self.u_x_model(self.u_model, self.x_ub, self.y_ub, self.t_ub) 146 | 147 | mse_b_u = MSE(u_lb_pred,u_ub_pred) + MSE(u_x_lb_pred, u_x_ub_pred) + MSE(u_y_lb_pred, u_y_ub_pred) 148 | 149 | mse_0_u = MSE(u0_pred, self.u0, self.u_weights) 150 | 151 | if self.g is not None: 152 | mse_f_u = g_MSE(f_u_pred, constant(0.0), self.g(self.col_weights)) 153 | else: 154 | mse_f_u = MSE(f_u_pred, constant(0.0)) 155 | 156 | return mse_0_u + mse_b_u + mse_f_u , mse_0_u, mse_b_u, mse_f_u 157 | 158 | class DiscoveryModel(): 159 | def compile(self, layer_sizes, f_model, X, u, vars, col_weights = None): 160 | self.layer_sizes = layer_sizes 161 | self.f_model = f_model 162 | self.X = X 163 | self.x_f = X[:,0:1] 164 | self.t_f = X[:,1:2] 165 | self.u = u 166 | self.vars = vars 167 | self.u_model = neural_net(self.layer_sizes) 168 | self.tf_optimizer = tf.keras.optimizers.Adam(lr = 0.005, beta_1=.99) 169 | self.tf_optimizer_vars = tf.keras.optimizers.Adam(lr = 0.0005, beta_1=.99) 170 | self.tf_optimizer_weights = tf.keras.optimizers.Adam(lr = 0.005, beta_1=.99) 171 | self.col_weights = col_weights 172 | 173 | def loss(self): 174 | u_pred = self.u_model(self.X) 175 | f_u_pred, self.vars = self.f_model(self.u_model, self.x_f, self.t_f, self.vars) 176 | 177 | if self.col_weights is not None: 178 | return MSE(u_pred, self.u) + g_MSE(f_u_pred, constant(0.0), self.col_weights**2) 179 | else: 180 | return MSE(u_pred, self.u) + MSE(f_u_pred, constant(0.0)) 181 | 182 | 183 | def grad(self): 184 | with tf.GradientTape() as tape: 185 | loss_value = self.loss() 186 | grads = tape.gradient(loss_value, self.variables) 187 | return loss_value, grads 188 | 189 | @tf.function 190 | def train_op(self): 191 | if self.col_weights is not None: 192 | len_ = len(self.vars) 193 | self.variables = self.u_model.trainable_variables 194 | self.variables.extend([self.col_weights]) 195 | self.variables.extend(self.vars) 196 | loss_value, grads = self.grad() 197 | self.tf_optimizer.apply_gradients(zip(grads[:-(len_+2)], self.u_model.trainable_variables)) 198 | self.tf_optimizer_weights.apply_gradients(zip([-grads[-(len_+1)]], [self.col_weights])) 199 | self.tf_optimizer_vars.apply_gradients(zip(grads[-len_:], self.vars)) 200 | else: 201 | self.variables = self.u_model.trainable_variables 202 | loss_value, mse_0, mse_b, mse_f, grads = self.grad() 203 | self.tf_optimizer.apply_gradients(zip(grads, self.u_model.trainable_variables)) 204 | 205 | return loss_value 206 | 207 | 208 | def train_loop(self, tf_iter): 209 | 
start_time = time.time() 210 | for i in range(tf_iter): 211 | loss_value = self.train_op() 212 | if i % 100 == 0: 213 | elapsed = time.time() - start_time 214 | print('It: %d, Time: %.2f' % (i, elapsed)) 215 | tf.print(f"total loss: {loss_value}") 216 | var = [var.numpy() for var in self.vars] 217 | print("vars estimate(s):", var) 218 | start_time = time.time() 219 | -------------------------------------------------------------------------------- /tensordiffeq/boundaries.py: -------------------------------------------------------------------------------- 1 | from tensordiffeq.domains import DomainND 2 | import numpy as np 3 | import tensorflow as tf 4 | from .utils import multimesh, flatten_and_stack, MSE, convertTensor, get_tf_model 5 | 6 | 7 | def get_linspace(dict_): 8 | lin_key = "linspace" 9 | return [val for key, val in dict_.items() if lin_key in key][0] 10 | 11 | 12 | class BC(DomainND): 13 | def __init__(self): 14 | self.isPeriodic = False 15 | self.isInit = False 16 | self.isNeumann = False 17 | 18 | def compile(self): 19 | self.input = self.create_input() 20 | 21 | def get_dict(self, var): 22 | return next(item for item in self.domain.domaindict if item["identifier"] == var) 23 | 24 | def get_not_dims(self, var): 25 | self.dicts_ = [item for item in self.domain.domaindict if item['identifier'] != var] 26 | return [get_linspace(dict_) for dict_ in self.dicts_] 27 | 28 | def create_target_input_repeat(self, var, target): 29 | fidelity_key = "fidelity" 30 | fids = [] 31 | for dict_ in self.dicts_: 32 | res = [val for key, val in dict_.items() if fidelity_key in key] 33 | fids.append(res) 34 | reps = np.prod(fids) 35 | if type(target) is str: 36 | return np.repeat(self.dict_[(var + target)], reps) 37 | else: 38 | return np.repeat(target, reps) 39 | 40 | 41 | class dirichletBC(BC): 42 | def __init__(self, domain, val, var, target): 43 | self.domain = domain 44 | self.val = val 45 | self.var = var 46 | self.target = target 47 | super().__init__() 48 | self.dicts_ = [item for item in self.domain.domaindict if item['identifier'] != self.var] 49 | self.dict_ = next(item for item in self.domain.domaindict if item["identifier"] == self.var) 50 | self.target = self.dict_[var+target] 51 | self.compile() 52 | self.isDirichlect = True 53 | 54 | def create_input(self): 55 | repeated_value = self.create_target_input_repeat(self.var, self.target) 56 | repeated_value = np.reshape(repeated_value, (-1, 1)) 57 | mesh = flatten_and_stack(multimesh(self.get_not_dims(self.var))) 58 | mesh = np.insert(mesh, self.domain.vars.index(self.var), repeated_value.flatten(), axis=1) 59 | return mesh 60 | 61 | 62 | class FunctionDirichletBC(BC): 63 | def __init__(self, domain, fun, var, target, func_inputs, n_values=None): 64 | self.domain = domain 65 | self.fun = fun 66 | self.var = var 67 | self.target = target 68 | self.func_inputs = func_inputs 69 | self.n_values = n_values 70 | self.dicts_ = [item for item in self.domain.domaindict if item['identifier'] != self.var] 71 | self.dict_ = next(item for item in self.domain.domaindict if item["identifier"] == self.var) 72 | print(self.dict_) 73 | super().__init__() 74 | self.targets = self.dict_[var+target] 75 | self.compile() 76 | self.create_target() 77 | self.isDirichlect = True 78 | 79 | def create_input(self): 80 | dims = self.get_not_dims(self.var) 81 | #dims = [get_linspace(dim) for dim in self.vars] 82 | # vals = np.reshape(fun_vals, (-1, len(self.vars))) 83 | mesh = flatten_and_stack(multimesh(dims)) 84 | # dim_repeat = np.repeat(0.0, len(mesh)) 85 | 
dim_repeat = self.create_target_input_repeat(self.var, self.target) 86 | mesh = np.insert(mesh, self.domain.vars.index(self.var), dim_repeat.flatten(), axis=1) 87 | if self.n_values is not None: 88 | self.nums = np.random.randint(0, high=len(mesh), size=self.n_values) 89 | mesh = mesh[self.nums] 90 | return mesh 91 | 92 | def create_target(self): 93 | fun_vals = [] 94 | for i, var_ in enumerate(self.func_inputs): 95 | arg_list = [] 96 | for j, var in enumerate(var_): 97 | var_dict = self.get_dict(var) 98 | arg_list.append(get_linspace(var_dict)) 99 | inp = flatten_and_stack(multimesh(arg_list)) 100 | fun_vals.append(self.fun[i](*inp.T)) 101 | self.val = convertTensor(np.reshape(fun_vals, (-1, 1))[self.nums]) 102 | 103 | class FunctionNeumannBC(BC): 104 | def __init__(self, domain, fun, var, target, deriv_model, func_inputs, n_values=None): 105 | self.n_values = n_values 106 | self.domain = domain 107 | self.fun = fun 108 | self.var = var 109 | self.target = target 110 | super().__init__() 111 | self.deriv_model = [get_tf_model(model) for model in deriv_model] 112 | self.isNeumann = True 113 | self.func_inputs = func_inputs 114 | self.compile() 115 | self.create_target() 116 | 117 | def get_input_upper_lower(self, var): 118 | self.repeat = self.create_target_input_repeat(var, self.target) 119 | 120 | def compile(self): 121 | self.input = [] 122 | for var in self.var: 123 | self.dicts_ = [item for item in self.domain.domaindict if item["identifier"] != var] 124 | self.dict_ = next(item for item in self.domain.domaindict if item["identifier"] == var) 125 | self.get_input_upper_lower(var) 126 | mesh = flatten_and_stack(multimesh(self.get_not_dims(var))) 127 | self.input.append(np.insert(mesh, self.domain.vars.index(var), self.repeat.flatten(), axis=1)) 128 | 129 | if self.n_values is not None: 130 | self.nums = np.random.randint(0, high=len(self.input[0]), size=self.n_values) 131 | else: 132 | self.nums = np.random.randint(0, high=len(self.input[0]), size=len(self.input[0])) 133 | 134 | self.input = self.unroll(self.input) 135 | # self.lower = self.unroll(self.lower) 136 | 137 | def u_x_model(self, u_model, inputs): 138 | return [model(u_model, *inputs) for model in self.deriv_model] 139 | 140 | def unroll(self, inp): 141 | outer = [] 142 | for _, lst in enumerate(inp): 143 | tmp = [np.reshape(vec, (-1, 1))[self.nums] for vec in lst.T] 144 | outer.append(np.asarray(tmp)) 145 | return outer 146 | 147 | def create_target(self): 148 | fun_vals = [] 149 | for i, var_ in enumerate(self.func_inputs): 150 | arg_list = [] 151 | for j, var in enumerate(var_): 152 | var_dict = self.get_dict(var) 153 | arg_list.append(get_linspace(var_dict)) 154 | inp = flatten_and_stack(multimesh(arg_list)) 155 | fun_vals.append(self.fun[i](*inp.T)) 156 | self.val = convertTensor(np.reshape(fun_vals, (-1, 1))[self.nums]) 157 | 158 | def get_function_out(func, var, dict_): 159 | linspace = get_linspace(dict_) 160 | return func(linspace) 161 | 162 | 163 | class IC(BC): 164 | def __init__(self, domain, fun, var, n_values=None): 165 | self.isInit = True 166 | self.n_values = n_values 167 | self.domain = domain 168 | self.fun = fun 169 | self.vars = var 170 | super().__init__() 171 | self.isInit = True 172 | self.dicts_ = [item for item in self.domain.domaindict if item['identifier'] != self.domain.time_var] 173 | self.dict_ = next(item for item in self.domain.domaindict if item["identifier"] == self.domain.time_var) 174 | self.compile() 175 | self.create_target() 176 | 177 | def create_input(self): 178 | dims = 
self.get_not_dims(self.domain.time_var) 179 | mesh = flatten_and_stack(multimesh(dims)) 180 | t_repeat = np.repeat(0.0, len(mesh)) 181 | 182 | mesh = np.concatenate((mesh, np.reshape(t_repeat, (-1, 1))), axis=1) 183 | if self.n_values is not None: 184 | self.nums = np.random.randint(0, high=len(mesh), size=self.n_values) 185 | mesh = mesh[self.nums] 186 | return mesh 187 | 188 | def create_target(self): 189 | fun_vals = [] 190 | for i, var_ in enumerate(self.vars): 191 | arg_list = [] 192 | for j, var in enumerate(var_): 193 | var_dict = self.get_dict(var) 194 | arg_list.append(get_linspace(var_dict)) 195 | inp = flatten_and_stack(multimesh(arg_list)) 196 | fun_vals.append(self.fun[i](*inp.T)) 197 | if self.n_values is not None: 198 | self.val = convertTensor(np.reshape(fun_vals, (-1, 1))[self.nums]) 199 | else: 200 | self.val = convertTensor(np.reshape(fun_vals, (-1, 1))) 201 | 202 | def loss(self): 203 | return MSE(self.preds, self.val) 204 | 205 | class periodicBC(BC): 206 | def __init__(self, domain, var, deriv_model, n_values=None): 207 | self.n_values = n_values 208 | self.domain = domain 209 | self.var = var 210 | super().__init__() 211 | self.deriv_model = [get_tf_model(model) for model in deriv_model] 212 | self.isPeriodic = True 213 | self.compile() 214 | 215 | def get_input_upper_lower(self, var): 216 | self.upper_repeat = self.create_target_input_repeat(var, self.dict_["range"][1]) 217 | self.lower_repeat = self.create_target_input_repeat(var, self.dict_["range"][0]) 218 | 219 | def compile(self): 220 | self.upper = [] 221 | self.lower = [] 222 | for var in self.var: 223 | self.dicts_ = [item for item in self.domain.domaindict if item["identifier"] != var] 224 | self.dict_ = next(item for item in self.domain.domaindict if item["identifier"] == var) 225 | self.get_input_upper_lower(var) 226 | mesh = flatten_and_stack(multimesh(self.get_not_dims(var))) 227 | self.upper.append(np.insert(mesh, self.domain.vars.index(var), self.upper_repeat.flatten(), axis=1)) 228 | self.lower.append(np.insert(mesh, self.domain.vars.index(var), self.lower_repeat.flatten(), axis=1)) 229 | 230 | if self.n_values is not None: 231 | self.nums = np.random.randint(0, high=len(self.upper[0]), size=self.n_values) 232 | else: 233 | self.nums = np.random.randint(0, high=len(self.upper[0]), size=len(self.upper[0])) 234 | 235 | self.upper = self.unroll(self.upper) 236 | self.lower = self.unroll(self.lower) 237 | 238 | def u_x_model(self, u_model, inputs): 239 | return [model(u_model, *inputs) for model in self.deriv_model] 240 | 241 | def unroll(self, inp): 242 | outer = [] 243 | for _, lst in enumerate(inp): 244 | tmp = [np.reshape(vec, (-1, 1))[self.nums] for vec in lst.T] 245 | outer.append(np.asarray(tmp)) 246 | return outer 247 | 248 | 249 | 250 | -------------------------------------------------------------------------------- /tensordiffeq/domains.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from .utils import LatinHypercubeSample 3 | 4 | 5 | class DomainND: 6 | def __init__(self, var, time_var=None): 7 | self.vars = var 8 | self.domaindict = [] 9 | self.domain_ids = [] 10 | self.time_var = time_var 11 | 12 | def generate_collocation_points(self, N_f): 13 | range_list = [ 14 | [val for key, val in dict_.items() if "range" in key][0] 15 | for dict_ in self.domaindict 16 | ] 17 | 18 | limits = np.array(range_list) # x,t domain 19 | X_f = LatinHypercubeSample(N_f, limits) 20 | self.X_f = X_f 21 | 22 | def add(self, token, vals, fidel): 23 | 
self.domain_ids.append(token) 24 | self.domaindict.append({ 25 | "identifier": token, 26 | "range": vals, 27 | (token + "fidelity"): fidel, 28 | (token + "linspace"): np.linspace(vals[0], vals[1], fidel), 29 | (token + "upper"): vals[1], 30 | (token + "lower"): vals[0] 31 | }) 32 | -------------------------------------------------------------------------------- /tensordiffeq/fit.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | from .networks import * 4 | from .models import * 5 | from .utils import * 6 | from .optimizers import * 7 | from .output import print_screen 8 | import time 9 | import os 10 | from tqdm.auto import tqdm, trange 11 | from random import random, randint 12 | import sys 13 | 14 | os.environ["TF_GPU_THREAD_MODE"] = "gpu_private" 15 | 16 | 17 | def fit(obj, tf_iter=0, newton_iter=0, newton_eager=True): 18 | 19 | start_time = time.time() 20 | 21 | # these cant be tf.functions on initialization since the distributed strategy requires its own 22 | # graph using grad and adaptgrad, so they cant be compiled as tf.functions until we know dist/non-dist 23 | obj.grad = tf.function(obj.grad) 24 | if obj.verbose: print_screen(obj) 25 | 26 | print("Starting Adam training") 27 | # tf.profiler.experimental.start('../cache/tblogdir1') 28 | train_op_fn = train_op_inner(obj) 29 | with trange(tf_iter) as t: 30 | for epoch in t: 31 | loss_value = train_op_fn(obj) 32 | # Description will be displayed on the left 33 | t.set_description('Adam epoch %i' % (epoch + 1)) 34 | # Postfix will be displayed on the right, 35 | # formatted automatically based on argument's datatype 36 | if epoch % 10 == 0: 37 | t.set_postfix(loss=loss_value.numpy()) 38 | 39 | 40 | # tf.profiler.experimental.stop() 41 | 42 | # tf.profiler.experimental.start('../cache/tblogdir1') 43 | if newton_iter > 0: 44 | obj.n_batches = 1 45 | print("Starting L-BFGS training") 46 | if newton_eager: 47 | print("Executing eager-mode L-BFGS") 48 | loss_and_flat_grad = obj.get_loss_and_flat_grad() 49 | eager_lbfgs(loss_and_flat_grad, 50 | get_weights(obj.u_model), 51 | Struct(), maxIter=newton_iter, learningRate=0.8) 52 | 53 | else: 54 | print("Executing graph-mode L-BFGS\n Building graph...") 55 | print("Warning: Depending on your CPU/GPU setup, eager-mode L-BFGS may prove faster. 
If the computational " 56 | "graph takes a long time to build, or the computation is slow, try eager-mode L-BFGS (enabled by " 57 | "default)") 58 | 59 | lbfgs_train(obj, newton_iter) 60 | 61 | # tf.profiler.experimental.stop() 62 | 63 | 64 | # @tf.function 65 | def lbfgs_train(obj, newton_iter): 66 | func = graph_lbfgs(obj.u_model, obj.update_loss) 67 | 68 | init_params = tf.dynamic_stitch(func.idx, obj.u_model.trainable_variables) 69 | 70 | lbfgs_op(func, init_params, newton_iter) 71 | 72 | 73 | @tf.function 74 | def lbfgs_op(func, init_params, newton_iter): 75 | return tfp.optimizer.lbfgs_minimize( 76 | value_and_gradients_function=func, 77 | initial_position=init_params, 78 | max_iterations=newton_iter, 79 | tolerance=1e-20, 80 | ) 81 | 82 | 83 | def train_op_inner(obj): 84 | @tf.function 85 | def apply_grads(obj=obj): 86 | if obj.n_batches > 1: 87 | obj.batch_indx_map = np.random.choice(obj.X_f_len[0], size=obj.X_f_len[0], replace=False) 88 | 89 | for i in range(obj.n_batches): 90 | # unstack = tf.unstack(obj.u_model.trainable_variables, axis = 2) 91 | obj.batch = i 92 | obj.variables = obj.u_model.trainable_variables 93 | obj.variables = obj.u_model.trainable_variables 94 | if obj.isAdaptive: 95 | obj.variables.extend(obj.lambdas) 96 | loss_value, grads = obj.grad() 97 | 98 | n_lambdas = len(obj.lambdas) 99 | graph_w = grads[:-n_lambdas] 100 | grads_lambda = grads[-n_lambdas:] 101 | grad_neg = [-x for x in grads_lambda] 102 | 103 | obj.tf_optimizer.apply_gradients(zip(graph_w, obj.u_model.trainable_variables)) 104 | obj.tf_optimizer_weights.apply_gradients(zip(grad_neg, obj.lambdas)) 105 | else: 106 | loss_value, grads = obj.grad() 107 | obj.tf_optimizer.apply_gradients(zip(grads, obj.u_model.trainable_variables)) 108 | 109 | obj.batch = None 110 | 111 | return loss_value 112 | 113 | return apply_grads 114 | 115 | 116 | def fit_dist(obj, tf_iter, newton_iter, batch_sz=None, newton_eager=True): 117 | def train_epoch(dataset, STEPS): 118 | total_loss = 0.0 119 | num_batches = 0.0 120 | # dist_col_weights = iter(col_weights) 121 | dist_dataset_iterator = iter(dataset) 122 | for _ in range(STEPS): 123 | total_loss += distributed_train_step(obj, next(dist_dataset_iterator)) 124 | num_batches += 1 125 | train_loss = total_loss / num_batches 126 | return train_loss 127 | 128 | def train_step(obj, inputs): 129 | obj.dist_X_f = inputs 130 | # obj.dist_col_weights = col_weights 131 | if obj.isAdaptive: 132 | obj.variables = obj.u_model.trainable_variables 133 | obj.dist_col_weights = tf.gather(obj.col_weights, col_idx) 134 | print(obj.dist_col_weights) 135 | obj.variables.extend([obj.u_weights, obj.dist_col_weights]) 136 | loss_value, grads = obj.grad() 137 | obj.tf_optimizer.apply_gradients(zip(grads[:-2], obj.u_model.trainable_variables)) 138 | print([grads[-2], grads[-1]]) 139 | obj.tf_optimizer_weights.apply_gradients( 140 | zip([-grads[-2], -grads[-1]], [obj.u_weights, obj.dist_col_weights])) 141 | # TODO collocation weight splitting across replicas 142 | # tf.scatter_nd_add(obj.col_weights, col_idx, obj.dist_col_weights) 143 | else: 144 | obj.variables = obj.u_model.trainable_variables 145 | loss_value, grads = obj.grad() 146 | obj.tf_optimizer.apply_gradients(zip(grads, obj.u_model.trainable_variables)) 147 | return loss_value 148 | 149 | @tf.function 150 | def distributed_train_step(obj, dataset_inputs): 151 | per_replica_losses = obj.strategy.run(train_step, args=(obj, dataset_inputs)) 152 | return obj.strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_losses, 153 | axis=None) 
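    # --- Editor's note (added commentary, not original code) ---
    # distributed_train_step above runs train_step once per replica via
    # strategy.run, then combines the per-replica losses with ReduceOp.SUM.
    # The loops below therefore accumulate a *summed* replica loss per step;
    # dist_loop averages over the number of steps taken, not over the number
    # of replicas, so reported loss values scale with the replica count.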
154 | 155 | @tf.function 156 | def dist_loop(obj, STEPS): 157 | total_loss = 0.0 158 | num_batches = 0.0 159 | # dist_col_weights = iter(col_weights) 160 | dist_dataset_iterator = iter(obj.train_dist_dataset) 161 | for _ in range(STEPS): 162 | total_loss += distributed_train_step(obj, next(dist_dataset_iterator)) 163 | num_batches += 1 164 | train_loss = total_loss / num_batches 165 | 166 | return train_loss 167 | 168 | def train_loop(obj, tf_iter, STEPS): 169 | print_screen(obj) 170 | start_time = time.time() 171 | with trange(tf_iter) as t: 172 | for epoch in t: 173 | loss = dist_loop(obj, STEPS) 174 | t.set_description('Adam epoch %i' % (epoch + 1)) 175 | if epoch % 10 == 0: 176 | elapsed = time.time() - start_time 177 | t.set_postfix(loss=loss.numpy()) 178 | # print('It: %d, Time: %.2f, loss: %.9f' % (epoch, elapsed, tf.get_static_value(loss))) 179 | start_time = time.time() 180 | 181 | print("starting Adam training") 182 | STEPS = np.max((obj.n_batches // obj.strategy.num_replicas_in_sync, 1)) 183 | # tf.profiler.experimental.start('../cache/tblogdir1') 184 | train_loop(obj, tf_iter, STEPS) 185 | # tf.profiler.experimental.stop() 186 | 187 | # l-bfgs-b optimization 188 | print("Starting L-BFGS training") 189 | # lbfgs_train(obj, newton_iter) 190 | # tf.profiler.experimental.stop() 191 | -------------------------------------------------------------------------------- /tensordiffeq/helpers.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def find_L2_error(u_pred, u_star): 4 | return np.linalg.norm(u_star-u_pred,2)/np.linalg.norm(u_star,2) 5 | -------------------------------------------------------------------------------- /tensordiffeq/models.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import time 4 | from .utils import * 5 | from .networks import * 6 | from .plotting import * 7 | from .fit import * 8 | from tqdm.auto import tqdm, trange 9 | from .output import print_screen 10 | 11 | 12 | class CollocationSolverND: 13 | def __init__(self, assimilate=False, verbose=True): 14 | self.assimilate = assimilate 15 | self.verbose = verbose 16 | 17 | def compile(self, layer_sizes, f_model, domain, bcs, isAdaptive=False, 18 | dict_adaptive=None, init_weights=None, g=None, dist=False): 19 | """ 20 | Args: 21 | layer_sizes: A list of layer sizes, can be overwritten via resetting u_model to a keras model 22 | f_model: PDE definition 23 | domain: a Domain object containing the information on the domain of the system 24 | bcs: a list of ICs/BCs for the problem 25 | isAdaptive: Boolean value determining whether to implement self-adaptive solving 26 | dict_adaptive: a dictionary with boollean indicating adaptive loss for every loss function 27 | init_weights: a dictionary with keys "residual" and "BCs". Values must be a tuple with dimension 28 | equal to the number of residuals and boundares conditions, respectively 29 | g: a function in terms of `lambda` for self-adapting solving. 
Defaults to lambda^2 30 | dist: A boolean value determining whether the solving will be distributed across multiple GPUs 31 | 32 | Returns: 33 | None 34 | """ 35 | self.tf_optimizer = tf.keras.optimizers.Adam(lr=0.005, beta_1=.99) 36 | self.tf_optimizer_weights = tf.keras.optimizers.Adam(lr=0.005, beta_1=.99) 37 | self.layer_sizes = layer_sizes 38 | self.sizes_w, self.sizes_b = get_sizes(layer_sizes) 39 | self.bcs = bcs 40 | self.f_model = get_tf_model(f_model) 41 | self.g = g 42 | self.domain = domain 43 | self.dist = dist 44 | self.X_f_dims = tf.shape(self.domain.X_f) 45 | self.X_f_len = tf.slice(self.X_f_dims, [0], [1]).numpy() 46 | # must explicitly cast data into tf.float32 for stability 47 | # tmp = [tf.cast(np.reshape(vec, (-1, 1)), tf.float32) for i, vec in enumerate(self.domain.X_f.T)] 48 | # self.X_f_in = np.asarray(tmp) 49 | self.X_f_in = [tf.cast(np.reshape(vec, (-1, 1)), tf.float32) for i, vec in enumerate(self.domain.X_f.T)] 50 | self.u_model = neural_net(self.layer_sizes) 51 | self.batch = None 52 | self.batch_indx_map = None 53 | self.lambdas = self.dict_adaptive = self.lambdas_map = None 54 | self.isAdaptive = isAdaptive 55 | 56 | if self.isAdaptive: 57 | self.dict_adaptive = dict_adaptive 58 | self.lambdas, self.lambdas_map = initialize_weights_loss(init_weights) 59 | 60 | if dict_adaptive is None and init_weights is None: 61 | raise Exception("Adaptive weights selected but no inputs were specified!") 62 | if ( 63 | self.isAdaptive is False 64 | and self.dict_adaptive is not None 65 | and self.lambdas is not None 66 | ): 67 | raise Exception( 68 | "Adaptive weights are turned off but weight vectors were provided. Set the weight vectors to " 69 | "\"none\" to continue") 70 | 71 | def compile_data(self, x, t, y): 72 | if not self.assimilate: 73 | raise Exception( 74 | "Assimilate needs to be set to 'true' for data assimilation. Re-initialize CollocationSolver1D with " 75 | "assimilate=True.") 76 | self.data_x = x 77 | self.data_t = t 78 | self.data_s = y 79 | 80 | def update_loss(self): 81 | loss_bcs = 0. 82 | 83 | ##################################### 84 | # BOUNDARIES and INIT conditions 85 | ##################################### 86 | # Check if adaptive is allowed 87 | if self.isAdaptive: 88 | if len(self.lambdas_map['bcs']) > 0: 89 | idx_lambda_bcs = self.lambdas_map['bcs'][0] 90 | 91 | for counter_bc, bc in enumerate(self.bcs): 92 | loss_bc = 0. 
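            # --- Editor's sketch (assumed shapes, based on the compile() docstring) ---
            # For BCs = [init, x_periodic] with one residual, a typical configuration is
            #     dict_adaptive = {"residual": [True], "BCs": [True, False]}
            # with self.lambdas holding one tf.Variable per adaptive term, and
            # self.lambdas_map["bcs"] / self.lambdas_map["residual"] giving their
            # positions in self.lambdas; idx_lambda_bcs below walks the BC entries
            # in order, advancing only when a BC is marked adaptive.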
93 |             # Check if the current BC is adaptive
94 |             if self.isAdaptive:
95 |                 isBC_adaptive = self.dict_adaptive["BCs"][counter_bc]
96 |             else:
97 |                 isBC_adaptive = False
98 | 
99 |             # Periodic BC iteration for all components of deriv_model
100 |             if bc.isPeriodic:
101 |                 if isBC_adaptive:
102 |                     # TODO: support adaptive periodic boundary conditions
103 |                     raise Exception('TensorDiffEq does not currently support adaptive periodic boundary conditions')
104 |                 else:
105 |                     for i, dim in enumerate(bc.var):
106 |                         for j, lst in enumerate(dim):
107 |                             for k, tup in enumerate(lst):
108 |                                 upper = bc.u_x_model(self.u_model, bc.upper[i])[j][k]
109 |                                 lower = bc.u_x_model(self.u_model, bc.lower[i])[j][k]
110 |                                 msq = MSE(upper, lower)
111 |                                 loss_bc = tf.math.add(loss_bc, msq)
112 |             # initial conditions, with optional adaptive weighting
113 |             elif bc.isInit:
114 |                 if isBC_adaptive:
115 |                     loss_bc = MSE(self.u_model(bc.input), bc.val, self.lambdas[idx_lambda_bcs])
116 |                     idx_lambda_bcs += 1
117 |                 else:
118 |                     loss_bc = MSE(self.u_model(bc.input), bc.val)
119 |             # Neumann BCs
120 |             elif bc.isNeumann:
121 |                 if isBC_adaptive:
122 |                     # TODO: support adaptive Neumann boundary conditions
123 |                     raise Exception('TensorDiffEq does not currently support adaptive Neumann boundary conditions')
124 |                 else:
125 |                     for i, dim in enumerate(bc.var):
126 |                         for j, lst in enumerate(dim):
127 |                             for k, tup in enumerate(lst):
128 |                                 target = tf.cast(bc.u_x_model(self.u_model, bc.input[i])[j][k], dtype=tf.float32)
129 |                                 msq = MSE(bc.val, target)
130 |                                 loss_bc = tf.math.add(loss_bc, msq)
131 | 
132 |             elif bc.isDirichlect:
133 |                 if isBC_adaptive:
134 |                     loss_bc = MSE(self.u_model(bc.input), bc.val, self.lambdas[idx_lambda_bcs])
135 |                     idx_lambda_bcs += 1
136 |                 else:
137 |                     loss_bc = MSE(self.u_model(bc.input), bc.val)
138 | 
139 |             else:
140 |                 raise Exception('Boundary condition type is not recognized')
141 | 
142 |             loss_bcs = tf.add(loss_bcs, loss_bc)
143 | 
144 |         #####################################
145 |         # Residual Equations
146 |         #####################################
147 |         # pass through the forward method
148 |         if self.n_batches > 1:
149 |             # The collocation points will be split based on the batch_indx_map
150 |             # generated at the beginning of this epoch in fit.train_op_inner.apply_grads
151 |             X_batch = []
152 |             for x_in in self.X_f_in:
153 |                 indx_on_batch = self.batch_indx_map[self.batch * self.batch_sz:(self.batch + 1) * self.batch_sz]
154 |                 X_batch.append(tf.gather(x_in, indx_on_batch))
155 |             f_u_preds = self.f_model(self.u_model, *X_batch)
156 |         else:
157 |             f_u_preds = self.f_model(self.u_model, *self.X_f_in)
158 | 
159 |         # If there is only one residual, convert it to a one-element tuple
160 |         if not isinstance(f_u_preds, tuple):
161 |             f_u_preds = (f_u_preds,)
162 | 
163 |         loss_res = 0.
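        # --- Editor's note (added commentary, not original code) ---
        # For an adaptive residual, the loop below weights the squared residual
        # pointwise: with a user-supplied g, g_MSE computes mean(g(lambda) * f_u^2),
        # recovering the lambda^2 weighting of self-adaptive PINNs
        # (arXiv:2009.04544) when g = lambda l: l ** 2; when g is None, MSE applies
        # the raw lambda weights directly. Under minibatching, tf.gather selects
        # the subset of weights matching this batch's collocation points.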
164 | for counter_res, f_u_pred in enumerate(f_u_preds): 165 | # Check if the current Residual is adaptive 166 | if self.isAdaptive: 167 | isRes_adaptive = self.dict_adaptive["residual"][counter_res] 168 | if isRes_adaptive: 169 | idx_lambda_res = self.lambdas_map['residual'][0] 170 | lambdas2loss = self.lambdas[idx_lambda_res] 171 | 172 | if self.n_batches > 1: 173 | # select lambdas on minebatch 174 | lambdas2loss = tf.gather(lambdas2loss,indx_on_batch) 175 | 176 | if self.g is not None: 177 | loss_r = g_MSE(f_u_pred, constant(0.0), self.g(lambdas2loss)) 178 | else: 179 | loss_r = MSE(f_u_pred, constant(0.0), lambdas2loss) 180 | idx_lambda_res += 1 181 | else: 182 | # In the case where the model is Adaptive but the residual 183 | # is not adaptive, the residual loss should be computed. 184 | loss_r = MSE(f_u_pred, constant(0.0)) 185 | else: 186 | loss_r = MSE(f_u_pred, constant(0.0)) 187 | 188 | loss_res = tf.math.add(loss_r, loss_res) 189 | 190 | loss_total = tf.math.add(loss_res, loss_bcs) 191 | 192 | return loss_total 193 | 194 | # @tf.function 195 | def grad(self): 196 | with tf.GradientTape() as tape: 197 | loss_value = self.update_loss() 198 | grads = tape.gradient(loss_value, self.variables) 199 | return loss_value, grads 200 | 201 | def fit(self, tf_iter=0, newton_iter=0, batch_sz=None, newton_eager=True): 202 | 203 | # Can adjust batch size for collocation points, here we set it to N_f 204 | N_f = self.X_f_len[0] 205 | self.batch_sz = batch_sz if batch_sz is not None else N_f 206 | self.n_batches = N_f // self.batch_sz 207 | 208 | if self.isAdaptive and self.dist: 209 | raise Exception("Currently we dont support distributed training for adaptive PINNs") 210 | 211 | if self.n_batches > 1 and self.dist: 212 | raise Exception("Currently we dont support distributed minibatching training") 213 | 214 | if self.dist: 215 | BUFFER_SIZE = len(self.X_f_in[0]) 216 | EPOCHS = tf_iter 217 | # devices = ['/gpu:0', '/gpu:1','/gpu:2', '/gpu:3'], 218 | try: 219 | self.strategy = tf.distribute.MirroredStrategy() 220 | except: 221 | print( 222 | "Looks like we cant find any GPUs available, or your GPUs arent responding to Tensorflow's API. 
If " 223 | "you're receiving this in error, check that your CUDA, " 224 | "CUDNN, and other GPU dependencies are installed correctly with correct versioning based on your " 225 | "version of Tensorflow") 226 | 227 | print("Number of GPU devices: {}".format(self.strategy.num_replicas_in_sync)) 228 | 229 | BATCH_SIZE_PER_REPLICA = self.batch_sz 230 | GLOBAL_BATCH_SIZE = BATCH_SIZE_PER_REPLICA * self.strategy.num_replicas_in_sync 231 | 232 | # options = tf.data.Options() 233 | # options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.DATA 234 | 235 | self.train_dataset = tf.data.Dataset.from_tensor_slices( 236 | self.X_f_in).batch(GLOBAL_BATCH_SIZE) 237 | 238 | # self.train_dataset = self.train_dataset.with_options(options) 239 | 240 | self.train_dist_dataset = self.strategy.experimental_distribute_dataset(self.train_dataset) 241 | 242 | start_time = time.time() 243 | 244 | with self.strategy.scope(): 245 | self.u_model = neural_net(self.layer_sizes) 246 | self.tf_optimizer = tf.keras.optimizers.Adam(lr=0.005, beta_1=.99) 247 | self.tf_optimizer_weights = tf.keras.optimizers.Adam(lr=0.005, beta_1=.99) 248 | # self.dist_col_weights = tf.Variable(tf.zeros(batch_sz), validate_shape=True) 249 | 250 | if self.isAdaptive: 251 | # self.col_weights = tf.Variable(tf.random.uniform([self.batch_sz, 1])) 252 | self.u_weights = tf.Variable(self.u_weights) 253 | 254 | fit_dist(self, tf_iter=tf_iter, newton_iter=newton_iter, batch_sz=batch_sz, newton_eager=newton_eager) 255 | 256 | else: 257 | fit(self, tf_iter=tf_iter, newton_iter=newton_iter, newton_eager=newton_eager) 258 | 259 | # L-BFGS implementation from https://github.com/pierremtb/PINNs-TF2.0 260 | def get_loss_and_flat_grad(self): 261 | def loss_and_flat_grad(w): 262 | with tf.GradientTape() as tape: 263 | set_weights(self.u_model, w, self.sizes_w, self.sizes_b) 264 | loss_value = self.update_loss() 265 | grad = tape.gradient(loss_value, self.u_model.trainable_variables) 266 | grad_flat = [] 267 | for g in grad: 268 | grad_flat.append(tf.reshape(g, [-1])) 269 | grad_flat = tf.concat(grad_flat, 0) 270 | return loss_value, grad_flat 271 | 272 | return loss_and_flat_grad 273 | 274 | def predict(self, X_star): 275 | # predict using concatenated data 276 | u_star = self.u_model(X_star) 277 | # split data into tuples for ND support 278 | # must explicitly cast data into tf.float32 for stability 279 | # tmp = [tf.cast(np.reshape(vec, (-1, 1)), tf.float32) for i, vec in enumerate(X_star.T)] 280 | # X_star = np.asarray(tmp) 281 | # X_star = tuple(X_star) 282 | X_star = [tf.cast(np.reshape(vec, (-1, 1)), tf.float32) for i, vec in enumerate(X_star.T)] 283 | f_u_star = self.f_model(self.u_model, *X_star) 284 | return u_star.numpy(), f_u_star.numpy() 285 | 286 | def save(self, path): 287 | self.u_model.save(path) 288 | 289 | def load_model(self, path, compile_model=False): 290 | self.u_model = tf.keras.models.load_model(path, compile=compile_model) 291 | 292 | 293 | # WIP 294 | # TODO Distributed Discovery Model 295 | class DiscoveryModel(): 296 | def compile(self, layer_sizes, f_model, X, u, var, col_weights=None): 297 | self.layer_sizes = layer_sizes 298 | self.f_model = get_tf_model(f_model) 299 | self.X = X 300 | self.u = u 301 | self.vars = var 302 | self.len_ = len(var) 303 | self.u_model = neural_net(self.layer_sizes) 304 | self.tf_optimizer = tf.keras.optimizers.Adam(lr=0.005, beta_1=.99) 305 | self.tf_optimizer_vars = tf.keras.optimizers.Adam(lr=0.005, beta_1=.99) 306 | self.tf_optimizer_weights = 
tf.keras.optimizers.Adam(lr=0.005, beta_1=.99) 307 | self.col_weights = col_weights 308 | # tmp = [np.reshape(vec, (-1,1)) for i, vec in enumerate(self.X)] 309 | self.X_in = tuple(X) 310 | # self.X_in = np.asarray(tmp).T 311 | 312 | # print(np.shape(self.X_in)) 313 | 314 | @tf.function 315 | def loss(self): 316 | u_pred = self.u_model(tf.concat(self.X, 1)) 317 | f_u_pred = self.f_model(self.u_model, self.vars, *self.X_in) 318 | if self.col_weights is not None: 319 | return MSE(u_pred, self.u) + g_MSE(tf.cast(f_u_pred, dtype=tf.float32), constant(0.0), self.col_weights ** 2) 320 | else: 321 | return MSE(u_pred, self.u) + MSE(tf.cast(f_u_pred, dtype=tf.float32), constant(0.0)) 322 | 323 | @tf.function 324 | def grad(self): 325 | with tf.GradientTape() as tape: 326 | loss_value = self.loss() 327 | grads = tape.gradient(loss_value, self.variables) 328 | return loss_value, grads 329 | 330 | @tf.function 331 | def train_op(self): 332 | self.variables = self.u_model.trainable_variables 333 | len_ = self.len_ 334 | if self.col_weights is not None: 335 | 336 | self.variables.extend([self.col_weights]) 337 | self.variables.extend(self.vars) 338 | loss_value, grads = self.grad() 339 | self.tf_optimizer.apply_gradients(zip(grads[:-(len_ + 1)], self.u_model.trainable_variables))  # network grads exclude col_weights and the PDE-parameter grads 340 | self.tf_optimizer_weights.apply_gradients(zip([-grads[-(len_ + 1)]], [self.col_weights]))  # negated: the collocation weights are trained by gradient ascent 341 | self.tf_optimizer_vars.apply_gradients(zip(grads[-len_:], self.vars)) 342 | else: 343 | self.variables.extend(self.vars) 344 | loss_value, grads = self.grad() 345 | 346 | self.tf_optimizer.apply_gradients(zip(grads[:-len_], self.u_model.trainable_variables))  # everything before the PDE-parameter grads belongs to the network 347 | 348 | self.tf_optimizer_vars.apply_gradients(zip(grads[-len_:], self.vars)) 349 | 350 | return loss_value 351 | 352 | def fit(self, tf_iter): 353 | self.train_loop(tf_iter) 354 | 355 | def train_loop(self, tf_iter): # sourcery skip: move-assign 356 | start_time = time.time() 357 | print_screen(self, discovery_model=True) 358 | with trange(tf_iter) as t: 359 | for i in t: 360 | loss_value = self.train_op() 361 | if i % 10 == 0: 362 | # elapsed = time.time() - start_time 363 | # print('It: %d, Time: %.2f' % (i, elapsed)) 364 | # tf.print(f"loss_value: {loss_value}") 365 | var = [var.numpy() for var in self.vars] 366 | t.set_postfix(loss=loss_value.numpy(), 367 | vars=var) 368 | # tf.print(f"vars estimate(s): {var}") 369 | # start_time = time.time() 370 | -------------------------------------------------------------------------------- /tensordiffeq/networks.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensorflow.keras.models import Sequential 3 | from tensorflow.keras.layers import Dense, Input 4 | from tensorflow.keras import layers, activations 5 | 6 | 7 | # define the baseline FC neural network model 8 | # information about how to define custom neural networks is available 9 | # in the docs - https://docs.tensordiffeq.io/hacks/networks/index.html 10 | def neural_net(layer_sizes): 11 | model = Sequential() 12 | model.add(layers.InputLayer(input_shape=(layer_sizes[0],))) 13 | for width in layer_sizes[1:-1]: 14 | model.add(layers.Dense( 15 | width, activation=tf.nn.tanh, 16 | kernel_initializer="glorot_normal")) 17 | model.add(layers.Dense( 18 | layer_sizes[-1], activation=None, 19 | kernel_initializer="glorot_normal")) 20 | return model 21 | -------------------------------------------------------------------------------- /tensordiffeq/optimizers.py: 
-------------------------------------------------------------------------------- 1 | # from https://gist.github.com/piyueh/712ec7d4540489aad2dcfb80f9a54993 2 | 3 | import numpy 4 | import tensorflow as tf 5 | import tensorflow_probability as tfp 6 | from matplotlib import pyplot 7 | from tqdm.auto import tqdm, trange 8 | import time 9 | 10 | def graph_lbfgs2(obj): 11 | """A factory to create a function required by tfp.optimizer.lbfgs_minimize. 12 | Args: 13 | obj [in]: a solver object exposing `u_model`, `update_loss`, and 14 | `get_trainable_variables()`. 15 | Returns: 16 | A function that has a signature of: 17 | loss_value, gradients = f(model_parameters). 18 | """ 19 | model = obj.u_model 20 | loss = obj.update_loss 21 | variables, dict_variables = obj.get_trainable_variables() 22 | obj.variables = variables 23 | # obtain the shapes of all trainable parameters in the model 24 | shapes = tf.shape_n(variables) 25 | n_tensors = len(shapes) 26 | 27 | # we'll use tf.dynamic_stitch and tf.dynamic_partition later, so we need to 28 | # prepare required information first 29 | count = 0 30 | idx = [] # stitch indices 31 | part = [] # partition indices 32 | start_time = time.time() 33 | 34 | for i, shape in enumerate(shapes): 35 | n = numpy.prod(shape) 36 | idx.append(tf.reshape(tf.range(count, count + n, dtype=tf.int32), shape)) 37 | part.extend([i] * n) 38 | count += n 39 | 40 | part = tf.constant(part) 41 | 42 | @tf.function 43 | def assign_new_model_parameters(params_1d): 44 | """A function updating the model's parameters with a 1D tf.Tensor. 45 | Args: 46 | params_1d [in]: a 1D tf.Tensor representing the model's trainable parameters. 47 | """ 48 | 49 | params = tf.dynamic_partition(params_1d, part, n_tensors) 50 | for i, (shape, param) in enumerate(zip(shapes, params)): 51 | #model.trainable_variables[i].assign(tf.reshape(param, shape)) 52 | obj.variables[i].assign(tf.reshape(param, shape)) 53 | 54 | if obj.diffAdaptive_type > 0: 55 | obj.diff_list.append(obj.variables[dict_variables['nn_weights']:dict_variables['diffusion']][0].numpy()) 56 | 57 | # now create a function that will be returned by this factory 58 | @tf.function 59 | def f(params_1d): 60 | """A function that can be used by tfp.optimizer.lbfgs_minimize. 61 | This function is created by function_factory. 62 | Args: 63 | params_1d [in]: a 1D tf.Tensor. 64 | Returns: 65 | A scalar loss and the gradients w.r.t. the `params_1d`. 66 | """ 67 | # use GradientTape so that we can calculate the gradient of loss w.r.t. 
parameters 68 | with tf.GradientTape() as tape: 69 | # update the parameters in the model 70 | assign_new_model_parameters(params_1d) 71 | # calculate the loss 72 | loss_value = loss() 73 | 74 | # calculate gradients and convert to 1D tf.Tensor 75 | grads = tape.gradient(loss_value, obj.variables) 76 | 77 | # Extracting the correct gradient for each set of variables 78 | if obj.isAdaptive: 79 | grads_lambdas = grads[dict_variables['nn_weights']:dict_variables['lambdas']] 80 | grads_lambdas_neg = [-x for x in grads_lambdas] 81 | grads[dict_variables['nn_weights']:dict_variables['lambdas']] = grads_lambdas_neg 82 | 83 | grads = tf.dynamic_stitch(idx, grads) 84 | 85 | # print out iteration & loss 86 | f.iter.assign_add(1) 87 | 88 | if f.iter % 30 == 0: 89 | elapsed = tf.timestamp() - f.start_time 90 | 91 | tf.print('LBFGS iter', f.iter // 3, '-> loss:', loss_value, 'time:', elapsed, 'seconds')  # tf.print reports run-time tensor values; an f-string would bake in trace-time placeholders 92 | f.start_time.assign(tf.timestamp()) 93 | 94 | # store loss value so we can retrieve later 95 | tf.py_function(f.history.append, inp=[loss_value], Tout=[]) 96 | 97 | if loss_value < obj.min_loss['l-bfgs']: 98 | # Keep the information of the best model trained (lowest loss function value) 99 | obj.best_model['l-bfgs'] = obj.u_model # best model 100 | obj.min_loss['l-bfgs'] = loss_value.numpy() # loss value 101 | obj.best_epoch['l-bfgs'] = f.iter.numpy() # best epoch 102 | obj.best_diff['l-bfgs'] = obj.diffusion[0].numpy() 103 | 104 | return loss_value, grads 105 | 106 | # store this information as members so we can use it outside the scope 107 | f.iter = tf.Variable(0) 108 | f.idx = idx 109 | f.part = part 110 | f.shapes = shapes 111 | f.assign_new_model_parameters = assign_new_model_parameters 112 | f.history = [] 113 | f.start_time = tf.Variable(tf.timestamp()) 114 | 115 | return f 116 | 117 | 118 | def graph_lbfgs(model, loss): 119 | """A factory to create a function required by tfp.optimizer.lbfgs_minimize. 120 | Args: 121 | model [in]: an instance of `tf.keras.Model` or its subclasses. 122 | loss [in]: a function with signature loss_value = loss(pred_y, true_y). 123 | Returns: 124 | A function that has a signature of: 125 | loss_value, gradients = f(model_parameters). 126 | """ 127 | 128 | # obtain the shapes of all trainable parameters in the model 129 | shapes = tf.shape_n(model.trainable_variables) 130 | n_tensors = len(shapes) 131 | 132 | # we'll use tf.dynamic_stitch and tf.dynamic_partition later, so we need to 133 | # prepare required information first 134 | count = 0 135 | idx = [] # stitch indices 136 | part = [] # partition indices 137 | start_time = time.time() 138 | 139 | for i, shape in enumerate(shapes): 140 | n = numpy.prod(shape) 141 | idx.append(tf.reshape(tf.range(count, count + n, dtype=tf.int32), shape)) 142 | part.extend([i] * n) 143 | count += n 144 | 145 | part = tf.constant(part) 146 | 147 | @tf.function 148 | def assign_new_model_parameters(params_1d): 149 | """A function updating the model's parameters with a 1D tf.Tensor. 150 | Args: 151 | params_1d [in]: a 1D tf.Tensor representing the model's trainable parameters. 152 | """ 153 | 154 | params = tf.dynamic_partition(params_1d, part, n_tensors) 155 | for i, (shape, param) in enumerate(zip(shapes, params)): 156 | model.trainable_variables[i].assign(tf.reshape(param, shape)) 157 | 158 | # now create a function that will be returned by this factory 159 | @tf.function 160 | def f(params_1d): 161 | """A function that can be used by tfp.optimizer.lbfgs_minimize. 162 | This function is created by function_factory. 
163 | Args: 164 | params_1d [in]: a 1D tf.Tensor. 165 | Returns: 166 | A scalar loss and the gradients w.r.t. the `params_1d`. 167 | """ 168 | # use GradientTape so that we can calculate the gradient of loss w.r.t. parameters 169 | with tf.GradientTape() as tape: 170 | # update the parameters in the model 171 | assign_new_model_parameters(params_1d) 172 | # calculate the loss 173 | loss_value = loss() 174 | 175 | # calculate gradients and convert to 1D tf.Tensor 176 | grads = tape.gradient(loss_value, model.trainable_variables) 177 | grads = tf.dynamic_stitch(idx, grads) 178 | 179 | # print out iteration & loss 180 | f.iter.assign_add(1) 181 | 182 | if f.iter % 300 == 0: 183 | elapsed = tf.timestamp() - f.start_time 184 | 185 | tf.print("Iter:", f.iter // 3, "loss:", loss_value, "time:", elapsed) 186 | f.start_time.assign(tf.timestamp()) 187 | 188 | # store loss value so we can retrieve later 189 | tf.py_function(f.history.append, inp=[loss_value], Tout=[]) 190 | 191 | return loss_value, grads 192 | 193 | # store this information as members so we can use it outside the scope 194 | f.iter = tf.Variable(0) 195 | f.idx = idx 196 | f.part = part 197 | f.shapes = shapes 198 | f.assign_new_model_parameters = assign_new_model_parameters 199 | f.history = [] 200 | f.start_time = tf.Variable(tf.timestamp()) 201 | 202 | return f 203 | 204 | 205 | def dot(a, b): 206 | """Dot product function since TensorFlow doesn't have one.""" 207 | return tf.reduce_sum(a * b) 208 | 209 | 210 | def verbose_func(s): 211 | print(s) 212 | 213 | 214 | def eager_lbfgs(opfunc, x, state, maxIter=100, learningRate=1, do_verbose=True): 215 | """Port of lbfgs.lua, using TensorFlow eager mode. 216 | """ 217 | 218 | global final_loss, times 219 | 220 | maxEval = maxIter * 1.25 221 | tolFun = 1e-12 222 | tolX = 1e-12 223 | nCorrection = 50 224 | isverbose = False 225 | state.start_time = time.time() 226 | 227 | # verbose function 228 | if isverbose: 229 | verbose = verbose_func 230 | else: 231 | verbose = lambda x: None 232 | 233 | f, g = opfunc(x) 234 | g_old = g 235 | f_old = f 236 | 237 | f_hist = [f] 238 | currentFuncEval = 1 239 | state.funcEval = state.funcEval + 1 240 | p = g.shape[0] 241 | 242 | # check optimality of initial point 243 | tmp1 = tf.abs(g) 244 | if tf.reduce_sum(tmp1) <= tolFun: 245 | verbose("optimality condition below tolFun") 246 | return x, f_hist 247 | 248 | # optimize for a max of maxIter iterations 249 | nIter = 0 250 | times = [] 251 | with trange(maxIter) as t_: 252 | for epoch in t_: 253 | start_time = time.time() 254 | if state.nIter == 1: 255 | tmp1 = tf.abs(g) 256 | t = min(1, 1 / tf.reduce_sum(tmp1)) 257 | else: 258 | t = learningRate 259 | # keep track of nb of iterations 260 | nIter = nIter + 1 261 | state.nIter = state.nIter + 1 262 | 263 | ############################################################ 264 | ## compute gradient descent direction 265 | ############################################################ 266 | if state.nIter == 1: 267 | d = -g 268 | old_dirs = [] 269 | old_stps = [] 270 | Hdiag = 1 271 | else: 272 | # do lbfgs update (update memory) 273 | y = g - g_old 274 | s = d * t 275 | ys = dot(y, s) 276 | 277 | if ys > 1e-10: 278 | # updating memory 279 | if len(old_dirs) == nCorrection: 280 | # shift history by one (limited-memory) 281 | del old_dirs[0] 282 | del old_stps[0] 283 | 284 | # store new direction/step 285 | old_dirs.append(s) 286 | old_stps.append(y) 287 | 288 | # update scale of initial Hessian approximation 289 | Hdiag = ys / dot(y, y) 290 | 291 | # compute the 
approximate (L-BFGS) inverse Hessian 292 | # multiplied by the gradient 293 | k = len(old_dirs) 294 | 295 | # need to be accessed element-by-element, so don't re-type tensor: 296 | ro = [0] * nCorrection 297 | for i in range(k): 298 | ro[i] = 1 / dot(old_stps[i], old_dirs[i]) 299 | 300 | # iteration in L-BFGS loop collapsed to use just one buffer 301 | # need to be accessed element-by-element, so don't re-type tensor: 302 | al = [0] * nCorrection 303 | 304 | q = -g 305 | for i in range(k - 1, -1, -1): 306 | al[i] = dot(old_dirs[i], q) * ro[i] 307 | q = q - al[i] * old_stps[i] 308 | 309 | # multiply by initial Hessian 310 | r = q * Hdiag 311 | for i in range(k): 312 | be_i = dot(old_stps[i], r) * ro[i] 313 | r += (al[i] - be_i) * old_dirs[i] 314 | 315 | d = r 316 | # final direction is in r/d (same object) 317 | 318 | g_old = g 319 | f_old = f 320 | 321 | ############################################################ 322 | ## compute step length 323 | ############################################################ 324 | # directional derivative 325 | gtd = dot(g, d) 326 | 327 | # check that progress can be made along that direction 328 | if gtd > -tolX: 329 | verbose("Cannot make progress along direction.") 330 | break 331 | 332 | # reset initial guess for step size 333 | if state.nIter == 1: 334 | tmp1 = tf.abs(g) 335 | t = min(1, 1 / tf.reduce_sum(tmp1)) 336 | else: 337 | t = learningRate 338 | 339 | x += t * d 340 | 341 | if nIter != maxIter: 342 | # re-evaluate function only if not in last iteration 343 | # the reason we do this: in a stochastic setting, 344 | # no use to re-evaluate that function here 345 | f, g = opfunc(x) 346 | 347 | lsFuncEval = 1 348 | f_hist.append(f) 349 | 350 | # update func eval 351 | currentFuncEval = currentFuncEval + lsFuncEval 352 | state.funcEval = state.funcEval + lsFuncEval 353 | 354 | ############################################################ 355 | ## check conditions 356 | ############################################################ 357 | if nIter == maxIter: 358 | break 359 | 360 | if currentFuncEval >= maxEval: 361 | # max nb of function evals 362 | print('max nb of function evals') 363 | break 364 | 365 | tmp1 = tf.abs(g) 366 | if tf.reduce_sum(tmp1) <= tolFun: 367 | # check optimality 368 | print('optimality condition below tolFun') 369 | break 370 | 371 | tmp1 = tf.abs(d * t) 372 | if tf.reduce_sum(tmp1) <= tolX: 373 | # step size below tolX 374 | print('step size below tolX') 375 | break 376 | 377 | if tf.abs(f - f_old) < tolX: 378 | # function value changing less than tolX 379 | print('function value changing less than tolX' + str(tf.abs(f - f_old))) 380 | break 381 | 382 | t_.set_description('L-BFGS epoch %i' % (nIter+1)) 383 | if do_verbose: 384 | if nIter % 10 == 0: 385 | t_.set_postfix(loss=f.numpy()) 386 | elapsed = time.time() - state.start_time 387 | #print("Step: %3d, loss: %9.8f, time: " % (nIter, f.numpy()), elapsed) 388 | state.start_time = time.time() 389 | 390 | if nIter == maxIter - 1: 391 | final_loss = f.numpy() 392 | 393 | # save state 394 | state.old_dirs = old_dirs 395 | state.old_stps = old_stps 396 | state.Hdiag = Hdiag 397 | state.g_old = g_old 398 | state.f_old = f_old 399 | state.t = t 400 | state.d = d 401 | 402 | return x, f_hist, currentFuncEval 403 | 404 | 405 | # dummy/Struct gives Lua-like struct object with 0 defaults 406 | class dummy(object): 407 | pass 408 | 409 | 410 | class Struct(dummy): 411 | def __getattribute__(self, key): 412 | if key == '__dict__': 413 | return super(dummy, 
self).__getattribute__('__dict__') 414 | return self.__dict__.get(key, 0) 415 | -------------------------------------------------------------------------------- /tensordiffeq/output.py: -------------------------------------------------------------------------------- 1 | from pyfiglet import Figlet 2 | from os import system, name 3 | import sys 4 | 5 | def print_screen(model, discovery_model=False): 6 | f = Figlet(font='slant') 7 | print(f.renderText('TensorDiffEq')) 8 | if discovery_model: 9 | print("Running Discovery Model for Parameter Estimation\n\n") 10 | print("Neural Network Model Summary\n") 11 | model.u_model.summary()  # summary() prints directly; wrapping it in print() would also emit "None" -------------------------------------------------------------------------------- /tensordiffeq/plotting.py: -------------------------------------------------------------------------------- 1 | # Raissi et al plotting scripts - https://github.com/maziarraissi/PINNs/blob/master/Utilities/plotting.py 2 | # All code in this script is credited to Raissi et al 3 | 4 | 5 | import matplotlib as mpl 6 | import numpy as np 7 | from scipy.interpolate import griddata 8 | import matplotlib.gridspec as gridspec 9 | from mpl_toolkits.axes_grid1 import make_axes_locatable 10 | 11 | def figsize(scale, nplots = 1): 12 | fig_width_pt = 390.0 # Get this from LaTeX using \the\textwidth 13 | inches_per_pt = 1.0/72.27 # Convert pt to inch 14 | golden_mean = (np.sqrt(5.0)-1.0)/2.0 # Aesthetic ratio (you could change this) 15 | fig_width = fig_width_pt*inches_per_pt*scale # width in inches 16 | fig_height = nplots*fig_width*golden_mean # height in inches 17 | fig_size = [fig_width, fig_height] 18 | return fig_size 19 | 20 | 21 | import matplotlib.pyplot as plt 22 | 23 | def newfig(width, nplots = 1): 24 | fig = plt.figure(figsize=figsize(width, nplots)) 25 | ax = fig.add_subplot(111) 26 | return fig, ax 27 | 28 | 29 | def plot_solution_domain1D(model, domain, ub, lb, Exact_u=None, u_transpose=False): 30 | """ 31 | Plot a 1D solution Domain 32 | Arguments 33 | --------- 34 | model : model 35 | a `model` class which contains the PDE solution 36 | domain : Domain 37 | a `Domain` object containing the x,t pairs 38 | ub: list 39 | a list of floats containing the upper boundaries of the plot 40 | lb : list 41 | a list of floats containing the lower boundaries of the plot 42 | Exact_u : list 43 | a list of the exact values of the solution for comparison 44 | u_transpose : Boolean 45 | a `bool` describing whether or not to transpose the solution plot of the domain 46 | Returns 47 | ------- 48 | None 49 | """ 50 | X, T = np.meshgrid(domain[0],domain[1]) 51 | 52 | X_star = np.hstack((X.flatten()[:,None], T.flatten()[:,None])) 53 | if Exact_u is not None: 54 | u_star = Exact_u.T.flatten()[:,None] 55 | 56 | u_pred, f_u_pred = model.predict(X_star) 57 | if u_transpose: 58 | U_pred = griddata(X_star, u_pred.T.flatten(), (X, T), method='cubic') 59 | else: 60 | U_pred = griddata(X_star, u_pred.flatten(), (X, T), method='cubic') 61 | 62 | fig, ax = newfig(1.3, 1.0) 63 | 64 | ax.axis('off') 65 | 66 | ####### Row 0: h(t,x) ################## 67 | gs0 = gridspec.GridSpec(1, 2) 68 | gs0.update(top=1-0.06, bottom=1-1/3, left=0.15, right=0.85, wspace=0) 69 | ax = plt.subplot(gs0[:, :]) 70 | 71 | h = ax.imshow(U_pred.T, interpolation='nearest', cmap='YlGnBu', 72 | extent=[lb[1], ub[1], lb[0], ub[0]], 73 | origin='lower', aspect='auto') 74 | divider = make_axes_locatable(ax) 75 | cax = divider.append_axes("right", size="5%", pad=0.05) 76 | fig.colorbar(h, cax=cax) 77 | len_ = len(domain[1])//4 78 | 79 | line 
= np.linspace(domain[0].min(), domain[0].max(), 2)[:,None] 80 | ax.plot(domain[1][len_]*np.ones((2,1)), line, 'k--', linewidth = 1) 81 | ax.plot(domain[1][2*len_]*np.ones((2,1)), line, 'k--', linewidth = 1) 82 | ax.plot(domain[1][3*len_]*np.ones((2,1)), line, 'k--', linewidth = 1) 83 | 84 | ax.set_xlabel('t') 85 | ax.set_ylabel('x') 86 | leg = ax.legend(frameon=False, loc = 'best') 87 | # plt.setp(leg.get_texts(), color='w') 88 | ax.set_title('u(t,x)', fontsize = 10) 89 | 90 | ####### Row 1: h(t,x) slices ################## 91 | gs1 = gridspec.GridSpec(1, 3) 92 | gs1.update(top=1-1/3, bottom=0, left=0.1, right=0.9, wspace=0.5) 93 | 94 | ax = plt.subplot(gs1[0, 0]) 95 | ax.plot(domain[0],Exact_u[:,len_], 'b-', linewidth = 2, label = 'Exact') 96 | ax.plot(domain[0],U_pred[len_,:], 'r--', linewidth = 2, label = 'Prediction') 97 | ax.set_xlabel('x') 98 | ax.set_ylabel('u(t,x)') 99 | ax.set_title('t = %.2f' % (domain[1][len_]), fontsize = 10) 100 | ax.axis('square') 101 | ax.set_xlim([-1.1,1.1]) 102 | ax.set_ylim([-1.1,1.1]) 103 | 104 | ax = plt.subplot(gs1[0, 1]) 105 | ax.plot(domain[0],Exact_u[:,2*len_], 'b-', linewidth = 2, label = 'Exact') 106 | ax.plot(domain[0],U_pred[2*len_,:], 'r--', linewidth = 2, label = 'Prediction') 107 | ax.set_xlabel('x') 108 | ax.set_ylabel('u(t,x)') 109 | ax.axis('square') 110 | ax.set_xlim([-1.1,1.1]) 111 | ax.set_ylim([-1.1,1.1]) 112 | ax.set_title('t = %.2f' % (domain[1][2*len_]), fontsize = 10) 113 | ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.3), ncol=5, frameon=False) 114 | 115 | ax = plt.subplot(gs1[0, 2]) 116 | ax.plot(domain[0],Exact_u[:,3*len_], 'b-', linewidth = 2, label = 'Exact') 117 | ax.plot(domain[0],U_pred[3*len_,:], 'r--', linewidth = 2, label = 'Prediction') 118 | ax.set_xlabel('x') 119 | ax.set_ylabel('u(t,x)') 120 | ax.axis('square') 121 | ax.set_xlim([-1.1,1.1]) 122 | ax.set_ylim([-1.1,1.1]) 123 | ax.set_title('t = %.2f' % (domain[1][3*len_]), fontsize = 10) 124 | 125 | plt.show() 126 | 127 | 128 | def plot_weights(model, scale = 1): 129 | plt.scatter(model.domain.X_f[:,1], model.domain.X_f[:,0], c = model.lambdas[0].numpy(), s = model.lambdas[0].numpy()/float(scale)) 130 | plt.xlabel(model.domain.domain_ids[1]) 131 | plt.ylabel(model.domain.domain_ids[0]) 132 | plt.show() 133 | 134 | def plot_glam_values(model, scale = 1): 135 | plt.scatter(model.t_f, model.x_f, c = model.g(model.col_weights).numpy(), s = model.g(model.col_weights).numpy()/float(scale)) 136 | plt.show() 137 | 138 | def plot_residuals(FU_pred, extent): 139 | fig, ax = plt.subplots() 140 | ec = plt.imshow(FU_pred.T, interpolation='nearest', cmap='rainbow', 141 | extent=extent, 142 | origin='lower', aspect='auto') 143 | 144 | #ax.add_collection(ec) 145 | ax.autoscale_view() 146 | ax.set_xlabel('x') 147 | ax.set_ylabel('t') 148 | cbar = plt.colorbar(ec) 149 | cbar.set_label(r'$\overline{f}_u$ prediction')  # raw string with mathtext delimiters 150 | plt.show() 151 | 152 | def get_griddata(grid, data, dims): 153 | return griddata(grid, data, dims, method='cubic') 154 | -------------------------------------------------------------------------------- /tensordiffeq/sampling.py: -------------------------------------------------------------------------------- 1 | # From smt, ported due to installation errors 2 | # https://github.com/SMTorg/smt/blob/master/smt/sampling_methods/sampling_method.py 3 | # https://github.com/SMTorg/smt/blob/master/smt/utils/options_dictionary.py 4 | # https://github.com/SMTorg/smt/blob/master/smt/sampling_methods/lhs.py 5 | # Citations to smt will be allocated appropriately. 
All code here is credited to Dr. John T. Hwang 6 | 7 | from abc import ABCMeta, abstractmethod 8 | import numpy as np 9 | from pyDOE2 import lhs 10 | from scipy.spatial.distance import pdist, cdist 11 | 12 | 13 | 14 | class OptionsDictionary(object): 15 | """ 16 | Generalization of the dictionary that allows for declaring keys. 17 | Attributes 18 | ---------- 19 | _dict : dict 20 | Dictionary of option values keyed by option names. 21 | _declared_entries : dict 22 | Dictionary of declared entries. 23 | """ 24 | 25 | def __init__(self): 26 | self._dict = {} 27 | self._declared_entries = {} 28 | 29 | def clone(self): 30 | """ 31 | Return a clone of this object. 32 | Returns 33 | ------- 34 | OptionsDictionary 35 | Deep-copied clone. 36 | """ 37 | clone = self.__class__() 38 | clone._dict = dict(self._dict) 39 | clone._declared_entries = dict(self._declared_entries) 40 | return clone 41 | 42 | def __getitem__(self, name): 43 | """ 44 | Get an option that was previously declared and optionally set. 45 | Arguments 46 | --------- 47 | name : str 48 | The name of the option. 49 | Returns 50 | ------- 51 | object 52 | Value of the option. 53 | """ 54 | return self._dict[name] 55 | 56 | def __setitem__(self, name, value): 57 | """ 58 | Set an option that was previously declared. 59 | The value argument must be valid, which means it must satisfy the following: 60 | 1. If values and not types was given when declaring, value must be in values. 61 | 2. If types and not values was given when declaring, type(value) must be in types. 62 | 3. If values and types were given when declaring, either of the above must be true. 63 | Arguments 64 | --------- 65 | name : str 66 | The name of the option. 67 | value : object 68 | The value to set. 69 | """ 70 | assert name in self._declared_entries, "Option %s has not been declared" % name 71 | self._assert_valid(name, value) 72 | self._dict[name] = value 73 | 74 | def __contains__(self, key): 75 | return key in self._dict 76 | 77 | def is_declared(self, key): 78 | return key in self._declared_entries 79 | 80 | def _assert_valid(self, name, value): 81 | values = self._declared_entries[name]["values"] 82 | types = self._declared_entries[name]["types"] 83 | 84 | if values is not None and types is not None: 85 | assert value in values or isinstance( 86 | value, types 87 | ), "Option %s: value and type of %s are both invalid - " % ( 88 | name, 89 | value, 90 | ) + "value must be %s or type must be %s" % ( 91 | values, 92 | types, 93 | ) 94 | elif values is not None: 95 | assert value in values, "Option %s: value %s is invalid - must be %s" % ( 96 | name, 97 | value, 98 | values, 99 | ) 100 | elif types is not None: 101 | assert isinstance( 102 | value, types 103 | ), "Option %s: type of %s is invalid - must be %s" % (name, value, types) 104 | 105 | def update(self, dict_): 106 | """ 107 | Loop over and set all the entries in the given dictionary into self. 108 | Arguments 109 | --------- 110 | dict_ : dict 111 | The given dictionary. All keys must have been declared. 112 | """ 113 | for name in dict_: 114 | self[name] = dict_[name] 115 | 116 | def declare(self, name, default=None, values=None, types=None, desc=""): 117 | """ 118 | Declare an option. 119 | The value of the option must satisfy the following: 120 | 1. If values and not types was given when declaring, value must be in values. 121 | 2. If types and not values was given when declaring, type(value) must be in types. 122 | 3. 
If values and types were given when declaring, either of the above must be true. 123 | Arguments 124 | --------- 125 | name : str 126 | Name of the option. 127 | default : object 128 | Optional default value that must be valid under the above 3 conditions. 129 | values : list 130 | Optional list of acceptable option values. 131 | types : type or list of types 132 | Optional list of acceptable option types. 133 | desc : str 134 | Optional description of the option. 135 | """ 136 | self._declared_entries[name] = { 137 | "values": values, 138 | "types": types, 139 | "default": default, 140 | "desc": desc, 141 | } 142 | 143 | if default is not None: 144 | self._assert_valid(name, default) 145 | 146 | self._dict[name] = default 147 | 148 | class SamplingMethod(object, metaclass=ABCMeta): 149 | def __init__(self, **kwargs): 150 | 151 | self.options = OptionsDictionary() 152 | self.options.declare( 153 | "xlimits", 154 | types=np.ndarray, 155 | desc="The interval of the domain in each dimension with shape nx x 2 (required)", 156 | ) 157 | self._initialize() 158 | self.options.update(kwargs) 159 | 160 | def _initialize(self) -> None: 161 | """ 162 | Implemented by sampling methods to declare options (optional). 163 | Examples 164 | -------- 165 | self.options.declare('option_name', default_value, types=(bool, int), desc='description') 166 | """ 167 | pass 168 | 169 | def __call__(self, nt: int) -> np.ndarray: 170 | """ 171 | Compute the requested number of sampling points. 172 | The number of dimensions (nx) is determined based on `xlimits.shape[0]`. 173 | Arguments 174 | --------- 175 | nt : int 176 | Number of points requested. 177 | Returns 178 | ------- 179 | ndarray[nt, nx] 180 | The sampling locations in the input space. 181 | """ 182 | return self._compute(nt) 183 | 184 | @abstractmethod 185 | def _compute(self, nt: int) -> np.ndarray: 186 | """ 187 | Implemented by sampling methods to compute the requested number of sampling points. 188 | The number of dimensions (nx) is determined based on `xlimits.shape[0]`. 189 | Arguments 190 | --------- 191 | nt : int 192 | Number of points requested. 193 | Returns 194 | ------- 195 | ndarray[nt, nx] 196 | The sampling locations in the input space. 197 | """ 198 | raise Exception("This sampling method has not been implemented correctly") 199 | 200 | 201 | class ScaledSamplingMethod(SamplingMethod): 202 | """ This class describes a sampling method which generates samples in the unit hypercube. 203 | The __call__ method scales the generated samples according to the defined xlimits. 204 | """ 205 | 206 | def __call__(self, nt: int) -> np.ndarray: 207 | """ 208 | Compute the requested number of sampling points. 209 | The number of dimensions (nx) is determined based on `xlimits.shape[0]`. 210 | Arguments 211 | --------- 212 | nt : int 213 | Number of points requested. 214 | Returns 215 | ------- 216 | ndarray[nt, nx] 217 | The sampling locations in the input space. 218 | """ 219 | return _scale_to_xlimits(self._compute(nt), self.options["xlimits"]) 220 | 221 | @abstractmethod 222 | def _compute(self, nt: int) -> np.ndarray: 223 | """ 224 | Implemented by sampling methods to compute the requested number of sampling points. 225 | The number of dimensions (nx) is determined based on `xlimits.shape[0]`. 226 | Arguments 227 | --------- 228 | nt : int 229 | Number of points requested. 230 | Returns 231 | ------- 232 | ndarray[nt, nx] 233 | The sampling locations in the unit hypercube. 
234 | """ 235 | raise Exception("This sampling method has not been implemented correctly") 236 | 237 | 238 | def _scale_to_xlimits(samples: np.ndarray, xlimits: np.ndarray) -> np.ndarray: 239 | """ Scales the samples from the unit hypercube to the specified limits. 240 | Parameters 241 | ---------- 242 | samples : np.ndarray 243 | The samples with coordinates in [0,1] 244 | xlimits : np.ndarray 245 | The xlimits 246 | Returns 247 | ------- 248 | np.ndarray 249 | The scaled samples. 250 | """ 251 | nx = xlimits.shape[0] 252 | for kx in range(nx): 253 | samples[:, kx] = xlimits[kx, 0] + samples[:, kx] * (xlimits[kx, 1] - xlimits[kx, 0]) 254 | return samples 255 | 256 | class LHS(ScaledSamplingMethod): 257 | def _initialize(self): 258 | self.options.declare( 259 | "criterion", 260 | "c", 261 | values=[ 262 | "center", 263 | "maximin", 264 | "centermaximin", 265 | "correlation", 266 | "c", 267 | "m", 268 | "cm", 269 | "corr", 270 | "ese", 271 | ], 272 | types=str, 273 | desc="criterion used to construct the LHS design " 274 | + "c, m, cm and corr are abbreviation of center, maximin, centermaximin and correlation, respectively", 275 | ) 276 | self.options.declare( 277 | "random_state", 278 | types=(type(None), int, np.random.RandomState), 279 | desc="Numpy RandomState object or seed number which controls random draws", 280 | ) 281 | 282 | def _compute(self, nt): 283 | """ 284 | Implemented by sampling methods to compute the requested number of sampling points. 285 | The number of dimensions (nx) is determined based on `xlimits.shape[0]`. 286 | Arguments 287 | --------- 288 | nt : int 289 | Number of points requested. 290 | Returns 291 | ------- 292 | ndarray[nt, nx] 293 | The sampling locations in the unit hypercube. 294 | """ 295 | xlimits = self.options["xlimits"] 296 | nx = xlimits.shape[0] 297 | 298 | if isinstance(self.options["random_state"], np.random.RandomState): 299 | self.random_state = self.options["random_state"] 300 | elif isinstance(self.options["random_state"], int): 301 | self.random_state = np.random.RandomState(self.options["random_state"]) 302 | else: 303 | self.random_state = np.random.RandomState() 304 | 305 | if self.options["criterion"] != "ese": 306 | return lhs( 307 | nx, 308 | samples=nt, 309 | criterion=self.options["criterion"], 310 | random_state=self.random_state, 311 | ) 312 | elif self.options["criterion"] == "ese": 313 | return self._ese(nx, nt) 314 | 315 | def _maximinESE( 316 | self, 317 | X, 318 | T0=None, 319 | outer_loop=None, 320 | inner_loop=None, 321 | J=20, 322 | tol=1e-3, 323 | p=10, 324 | return_hist=False, 325 | fixed_index=[], 326 | ): 327 | """ 328 | Returns an optimized design starting from design X. For more information, 329 | see R. Jin, W. Chen and A. Sudjianto (2005): 330 | An efficient algorithm for constructing optimal design of computer 331 | experiments. Journal of Statistical Planning and Inference, 134:268-287. 332 | Parameters 333 | ---------- 334 | X : array 335 | The design to be optimized 336 | T0 : double, optional 337 | Initial temperature of the algorithm. 338 | If set to None, a standard temperature is used. 339 | outer_loop : integer, optional 340 | The number of iterations of the outer loop. If None, set to 341 | min(1.5*dimension of LHS, 30) 342 | inner_loop : integer, optional 343 | The number of iterations of the inner loop. If None, set to 344 | min(20*dimension of LHS, 100) 345 | J : integer, optional 346 | Number of replications of the plan in the inner loop. 
Defaults to 20 347 | tol : double, optional 348 | Tolerance for modification of Temperature T. Defaults to 0.001 349 | p : integer, optional 350 | Power used in the calculation of the PhiP criterion. Defaults to 10 351 | return_hist : boolean, optional 352 | If set to True, the function returns information about the behaviour of 353 | temperature, PhiP criterion and probability of acceptance during the 354 | process of optimization. Defaults to False 355 | Returns 356 | ------- 357 | X_best : array 358 | The optimized design 359 | hist : dictionary 360 | If return_hist is set to True, returns a dictionary containing the phiP 361 | ('PhiP') criterion, the temperature ('T') and the probability of 362 | acceptance ('proba') during the optimization. 363 | """ 364 | 365 | # Initialize parameters if not defined 366 | if T0 is None: 367 | T0 = 0.005 * self._PhiP(X, p=p) 368 | if inner_loop is None: 369 | inner_loop = min(20 * X.shape[1], 100) 370 | if outer_loop is None: 371 | outer_loop = min(int(1.5 * X.shape[1]), 30) 372 | 373 | T = T0 374 | X_ = X[:] # copy of initial plan 375 | X_best = X_[:] 376 | d = X.shape[1] 377 | PhiP_ = self._PhiP(X_best, p=p) 378 | PhiP_best = PhiP_ 379 | 380 | hist_T = list() 381 | hist_proba = list() 382 | hist_PhiP = list() 383 | hist_PhiP.append(PhiP_best) 384 | 385 | # Outer loop 386 | for z in range(outer_loop): 387 | PhiP_oldbest = PhiP_best 388 | n_acpt = 0 389 | n_imp = 0 390 | 391 | # Inner loop 392 | for i in range(inner_loop): 393 | 394 | modulo = (i + 1) % d 395 | l_X = list() 396 | l_PhiP = list() 397 | 398 | # Build J different plans with a single exchange procedure 399 | # See description of PhiP_exchange procedure 400 | for j in range(J): 401 | l_X.append(X_.copy()) 402 | l_PhiP.append( 403 | self._PhiP_exchange( 404 | l_X[j], k=modulo, PhiP_=PhiP_, p=p, fixed_index=fixed_index 405 | ) 406 | ) 407 | 408 | l_PhiP = np.asarray(l_PhiP) 409 | k = np.argmin(l_PhiP) 410 | PhiP_try = l_PhiP[k] 411 | 412 | # Threshold of acceptance 413 | if PhiP_try - PhiP_ <= T * self.random_state.rand(1)[0]: 414 | PhiP_ = PhiP_try 415 | n_acpt = n_acpt + 1 416 | X_ = l_X[k] 417 | 418 | # Best plan retained 419 | if PhiP_ < PhiP_best: 420 | X_best = X_ 421 | PhiP_best = PhiP_ 422 | n_imp = n_imp + 1 423 | 424 | hist_PhiP.append(PhiP_best) 425 | 426 | p_accpt = float(n_acpt) / inner_loop # probability of acceptance 427 | p_imp = float(n_imp) / inner_loop # probability of improvement 428 | 429 | hist_T.extend(inner_loop * [T]) 430 | hist_proba.extend(inner_loop * [p_accpt]) 431 | 432 | if PhiP_best - PhiP_oldbest < tol: 433 | # flag_imp = 1 434 | if p_accpt >= 0.1 and p_imp < p_accpt: 435 | T = 0.8 * T 436 | elif p_accpt >= 0.1 and p_imp == p_accpt: 437 | pass 438 | else: 439 | T = T / 0.8 440 | else: 441 | # flag_imp = 0 442 | if p_accpt <= 0.1: 443 | T = T / 0.7 444 | else: 445 | T = 0.9 * T 446 | 447 | hist = {"PhiP": hist_PhiP, "T": hist_T, "proba": hist_proba} 448 | 449 | if return_hist: 450 | return X_best, hist 451 | else: 452 | return X_best 453 | 454 | def _PhiP(self, X, p=10): 455 | """ 456 | Calculates the PhiP criterion of the design X with power p. 457 | X : array_like 458 | The design where to calculate PhiP 459 | p : integer 460 | The power used for the calculation of PhiP (default to 10) 461 | """ 462 | 463 | return ((pdist(X) ** (-p)).sum()) ** (1.0 / p) 464 | 465 | def _PhiP_exchange(self, X, k, PhiP_, p, fixed_index): 466 | """ 467 | Modifies X with a single exchange algorithm and calculates the corresponding 468 | PhiP criterion. Internal use. 
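Throughout this module, PhiP(X) = (sum over row pairs i < j of
d_ij ** (-p)) ** (1 / p), where d_ij are the pairwise distances between
the rows of X (see `_PhiP` above), so a lower PhiP value corresponds to a
more space-filling, maximin-like design.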
469 | Optimized calculation of the PhiP criterion. For more information, see: 470 | R. Jin, W. Chen and A. Sudjianto (2005): 471 | An efficient algorithm for constructing optimal design of computer 472 | experiments. Journal of Statistical Planning and Inference, 134:268-287. 473 | Parameters 474 | ---------- 475 | X : array_like 476 | The initial design (will be modified during procedure) 477 | k : integer 478 | The column where the exchange is proceeded 479 | PhiP_ : double 480 | The PhiP criterion of the initial design X 481 | p : integer 482 | The power used for the calculation of PhiP 483 | Returns 484 | ------ 485 | res : double 486 | The PhiP criterion of the modified design X 487 | """ 488 | 489 | # Choose two (different) random rows to perform the exchange 490 | i1 = self.random_state.randint(X.shape[0]) 491 | while i1 in fixed_index: 492 | i1 = self.random_state.randint(X.shape[0]) 493 | 494 | i2 = self.random_state.randint(X.shape[0]) 495 | while i2 == i1 or i2 in fixed_index: 496 | i2 = self.random_state.randint(X.shape[0]) 497 | 498 | X_ = np.delete(X, [i1, i2], axis=0) 499 | 500 | dist1 = cdist([X[i1, :]], X_) 501 | dist2 = cdist([X[i2, :]], X_) 502 | d1 = np.sqrt( 503 | dist1 ** 2 + (X[i2, k] - X_[:, k]) ** 2 - (X[i1, k] - X_[:, k]) ** 2 504 | ) 505 | d2 = np.sqrt( 506 | dist2 ** 2 - (X[i2, k] - X_[:, k]) ** 2 + (X[i1, k] - X_[:, k]) ** 2 507 | ) 508 | 509 | res = ( 510 | PhiP_ ** p + (d1 ** (-p) - dist1 ** (-p) + d2 ** (-p) - dist2 ** (-p)).sum() 511 | ) ** (1.0 / p) 512 | X[i1, k], X[i2, k] = X[i2, k], X[i1, k] 513 | 514 | return res 515 | 516 | def _ese(self, dim, nt): 517 | # Parameters of maximinESE procedure 518 | P0 = lhs(dim, nt, criterion=None, random_state=self.random_state) 519 | J = 20 520 | outer_loop = min(int(1.5 * dim), 30) 521 | inner_loop = min(20 * dim, 100) 522 | 523 | D0 = pdist(P0) 524 | R0 = np.corrcoef(P0) 525 | corr0 = np.max(np.abs(R0[R0 != 1])) 526 | phip0 = self._PhiP(P0) 527 | 528 | P, historic = self._maximinESE( 529 | P0, 530 | outer_loop=outer_loop, 531 | inner_loop=inner_loop, 532 | J=J, 533 | tol=1e-3, 534 | p=10, 535 | return_hist=True, 536 | ) 537 | return P -------------------------------------------------------------------------------- /tensordiffeq/utils.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tensordiffeq.sampling import LHS 3 | import time as time 4 | import numpy as np 5 | 6 | 7 | def set_weights(model, w, sizes_w, sizes_b): 8 | for i, layer in enumerate(model.layers[0:]): 9 | start_weights = sum(sizes_w[:i]) + sum(sizes_b[:i]) 10 | end_weights = sum(sizes_w[:i + 1]) + sum(sizes_b[:i]) 11 | weights = w[start_weights:end_weights] 12 | w_div = int(sizes_w[i] / sizes_b[i]) 13 | weights = tf.reshape(weights, [w_div, sizes_b[i]]) 14 | biases = w[end_weights:end_weights + sizes_b[i]] 15 | weights_biases = [weights, biases] 16 | layer.set_weights(weights_biases) 17 | 18 | 19 | def get_weights(model): 20 | w = [] 21 | for layer in model.layers[0:]: 22 | weights_biases = layer.get_weights() 23 | weights = weights_biases[0].flatten() 24 | biases = weights_biases[1] 25 | w.extend(weights) 26 | w.extend(biases) 27 | 28 | w = tf.convert_to_tensor(w) 29 | return w 30 | 31 | 32 | def get_sizes(layer_sizes): 33 | sizes_w = [layer_sizes[i] * layer_sizes[i - 1] for i in range(len(layer_sizes)) if i != 0] 34 | sizes_b = layer_sizes[1:] 35 | return sizes_w, sizes_b 36 | 37 | 38 | def MSE(pred, actual, weights=None): 39 | if weights is not None: 40 | return 
tf.reduce_mean(tf.square(weights * tf.math.subtract(pred, actual))) 41 | return tf.reduce_mean(tf.square(tf.math.subtract(pred, actual))) 42 | 43 | 44 | def g_MSE(pred, actual, g_lam): 45 | return tf.reduce_mean(g_lam * tf.square(tf.math.subtract(pred, actual))) 46 | 47 | 48 | def constant(val, dtype=tf.float32): 49 | return tf.constant(val, dtype=dtype) 50 | 51 | 52 | def convertTensor(val, dtype=tf.float32): 53 | return tf.cast(val, dtype=dtype) 54 | 55 | 56 | def LatinHypercubeSample(N_f, bounds): 57 | sampling = LHS(xlimits=bounds) 58 | return sampling(N_f) 59 | 60 | 61 | def get_tf_model(model): 62 | return tf.function(model) 63 | 64 | 65 | def tensor(x, dtype=tf.float32): 66 | return tf.convert_to_tensor(x, dtype=dtype) 67 | 68 | 69 | def multimesh(arrs): 70 | lens = list(map(len, arrs)) 71 | dim = len(arrs) 72 | 73 | sz = 1 74 | for s in lens: 75 | sz *= s 76 | 77 | ans = [] 78 | for i, arr in enumerate(arrs): 79 | slc = [1] * dim 80 | slc[i] = lens[i] 81 | arr2 = np.asarray(arr).reshape(slc) 82 | for j, sz in enumerate(lens): 83 | if j != i: 84 | arr2 = arr2.repeat(sz, axis=j) 85 | ans.append(arr2) 86 | 87 | return ans # returns like np.meshgrid 88 | 89 | 90 | # if desired, this flattens and hstacks the output dimensions for feeding into a tf/keras type neural network 91 | def flatten_and_stack(mesh): 92 | dims = np.shape(mesh) 93 | output = np.zeros((len(mesh), np.prod(dims[1:]))) 94 | for i, arr in enumerate(mesh): 95 | output[i] = arr.flatten() 96 | return output.T # returns in an [nxm] matrix 97 | 98 | 99 | def initialize_weights_loss(init_weights): 100 | lambdas = [] 101 | lambdas_map = {} 102 | counter = 0 103 | 104 | for key, values in init_weights.items(): 105 | idx_list = [] 106 | for value in values: 107 | if value is not None: 108 | lambdas.append(tf.Variable(value, trainable=True, dtype=tf.float32)) 109 | idx_list.append(counter) 110 | counter += 1 111 | lambdas_map[key.lower()] = idx_list 112 | return lambdas, lambdas_map 113 | -------------------------------------------------------------------------------- /test/AC2test.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import pytest 4 | import tensordiffeq as tdq 5 | from tensordiffeq.boundaries import * 6 | from tensordiffeq.models import CollocationSolverND 7 | import math 8 | 9 | 10 | def main(args): 11 | 12 | if args is None: 13 | args = {'layer_sizes': [2, 21, 21, 21, 21, 1], 14 | 15 | 'run_functions_eagerly': False, 16 | 'epoch_adam': 20, 17 | 'epoch_lbfgs': 20, 18 | 'lbfgs_eager': False, 19 | 'isAdaptive': True, 20 | 'dist_training': False, 21 | 'dict_adaptive': {"residual": [True], 22 | "BCs": [False, False]}, 23 | 'N_x': 100, 24 | 'N_t': 50, 25 | 'N_f': 5000, 26 | 'batch_sz': 200, 27 | } 28 | 29 | layer_sizes = args['layer_sizes'] 30 | run_functions_eagerly = args['run_functions_eagerly'] 31 | epoch_adam = args['epoch_adam'] 32 | epoch_lbfgs = args['epoch_lbfgs'] 33 | lbfgs_eager = args['lbfgs_eager'] 34 | isAdaptive = args['isAdaptive'] 35 | dist_training = args['dist_training'] 36 | dict_adaptive = args['dict_adaptive'] 37 | N_x = args['N_x'] 38 | N_t = args['N_t'] 39 | N_f = args['N_f'] 40 | batch_sz = args['batch_sz'] 41 | 42 | 43 | tf.config.run_functions_eagerly(run_functions_eagerly) 44 | 45 | Domain = DomainND(["x", "t"], time_var='t') 46 | 47 | Domain.add("x", [-1.0, 1.0], N_x) 48 | Domain.add("t", [0.0, 1.0], N_t) 49 | 50 | Domain.generate_collocation_points(N_f) 51 | 52 | 53 | def func_ic(x): 54 | return x ** 2 * np.cos(math.pi * x) 
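# func_ic above is the standard Allen-Cahn benchmark initial condition,
# u(x, 0) = x^2 * cos(pi * x); f_model further below encodes the matching
# Allen-Cahn residual u_t - 0.0001 * u_xx + 5 * u^3 - 5 * u = 0.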
56 | 57 | # Conditions to be considered at the boundaries for the periodic BC 58 | def deriv_model(u_model, x, t): 59 | u = u_model(tf.concat([x, t], 1)) 60 | u_x = tf.gradients(u, x)[0] 61 | 62 | return u, u_x 63 | 64 | 65 | init = IC(Domain, [func_ic], var=[['x']]) 66 | x_periodic = periodicBC(Domain, ['x'], [deriv_model]) 67 | 68 | BCs = [init, x_periodic] 69 | 70 | 71 | def f_model(u_model, x, t): 72 | u = u_model(tf.concat([x, t], 1)) 73 | u_x = tf.gradients(u, x) 74 | u_xx = tf.gradients(u_x, x) 75 | u_t = tf.gradients(u, t) 76 | c1 = tdq.utils.constant(.0001) 77 | c2 = tdq.utils.constant(5.0) 78 | f_u = u_t - c1 * u_xx + c2 * u * u * u - c2 * u 79 | return f_u 80 | 81 | ## Which loss functions will have adaptive weights 82 | # "residual" should be a tuple in the case of multiple residual equations 83 | # BCs have to follow the same order as the previously defined BCs list 84 | dict_adaptive = dict_adaptive 85 | 86 | ## Weights initialization 87 | # dictionary with keys "residual" and "BCs". Values must be a tuple with dimension 88 | # equal to the number of residuals and boundary conditions, respectively 89 | 90 | if dict_adaptive["residual"][0] == False: 91 | init_residual = None 92 | else: 93 | init_residual = tf.random.uniform([N_f, 1]) 94 | 95 | if dict_adaptive["BCs"][0] == False: 96 | init_IC = None 97 | else: 98 | init_IC = 100 * tf.random.uniform([N_x, 1]) 99 | 100 | if dict_adaptive["BCs"][1] == False: 101 | init_BC = None 102 | else: 103 | init_BC = tf.random.uniform([N_t, 1]) 104 | 105 | init_weights = {"residual": [init_residual], 106 | "BCs": [init_IC, init_BC]} 107 | 108 | 109 | 110 | model = CollocationSolverND() 111 | model.compile(layer_sizes, f_model, Domain, BCs, isAdaptive=isAdaptive, 112 | dict_adaptive=dict_adaptive, init_weights=init_weights, dist=dist_training) 113 | 114 | model.fit(tf_iter=epoch_adam, newton_iter=epoch_lbfgs, newton_eager=lbfgs_eager, batch_sz=batch_sz) 115 | 116 | return 117 | 118 | if __name__ == "__main__": 119 | main(args=None) 120 | -------------------------------------------------------------------------------- /test/Burgers2test.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | import tensordiffeq as tdq 4 | from tensordiffeq.boundaries import * 5 | from tensordiffeq.models import CollocationSolverND 6 | import math 7 | import pytest 8 | 9 | def main(args): 10 | 11 | if args is None: 12 | args = {'layer_sizes': [2, 21, 21, 21, 21, 1], 13 | 14 | 'run_functions_eagerly': True, 15 | 'epoch_adam': 20, 16 | 'epoch_lbfgs': 20, 17 | 'lbfgs_eager': False, 18 | 'isAdaptive': True, 19 | 'dist_training': False, 20 | 'dict_adaptive': {"residual": [True], 21 | "BCs": [True, False, False]}, 22 | 'N_x': 100, 23 | 'N_t': 50, 24 | 'N_f': 5000, 25 | 'batch_sz': 200, 26 | } 27 | 28 | layer_sizes = args['layer_sizes'] 29 | run_functions_eagerly = args['run_functions_eagerly'] 30 | epoch_adam = args['epoch_adam'] 31 | epoch_lbfgs = args['epoch_lbfgs'] 32 | lbfgs_eager = args['lbfgs_eager'] 33 | isAdaptive = args['isAdaptive'] 34 | dist_training = args['dist_training'] 35 | dict_adaptive = args['dict_adaptive'] 36 | N_x = args['N_x'] 37 | N_t = args['N_t'] 38 | N_f = args['N_f'] 39 | batch_sz = args['batch_sz'] 40 | 41 | 42 | tf.config.run_functions_eagerly(run_functions_eagerly) 43 | 44 | Domain = DomainND(["x", "t"], time_var='t') 45 | Domain.add("x", [-1.0, 1.0], N_x) 46 | Domain.add("t", [0.0, 1.0], N_t) 47 | Domain.generate_collocation_points(N_f) 48 | 49 | def func_ic(x): 50 | return -np.sin(x * math.pi) 51
| 52 | init = IC(Domain, [func_ic], var=[['x']]) 53 | upper_x = dirichletBC(Domain, val=0.0, var='x', target="upper") 54 | lower_x = dirichletBC(Domain, val=0.0, var='x', target="lower") 55 | 56 | BCs = [init, upper_x, lower_x] 57 | 58 | def f_model(u_model, x, t): 59 | with tf.GradientTape(persistent=True) as tape: 60 | tape.watch(x) 61 | tape.watch(t) 62 | u = u_model(tf.concat([x, t], 1)) 63 | u_x = tape.gradient(u, x) 64 | 65 | u_xx = tape.gradient(u_x, x) 66 | u_t = tape.gradient(u, t) 67 | 68 | f_u = u_t + u * u_x - 0.01 / tf.constant(math.pi) * u_xx 69 | 70 | return f_u 71 | 72 | ## Which loss functions will have adaptive weights 73 | # "residual" should be a tuple in the case of multiple residual equations 74 | # BCs have to follow the same order as the previously defined BCs list 75 | dict_adaptive = dict_adaptive 76 | 77 | ## Weights initialization 78 | # dictionary with keys "residual" and "BCs". Values must be a tuple with dimension 79 | # equal to the number of residuals and boundary conditions, respectively 80 | 81 | if dict_adaptive["residual"][0] == False: 82 | init_residual = None 83 | else: 84 | init_residual = tf.ones([N_f, 1]) 85 | 86 | if dict_adaptive["BCs"][0] == False: 87 | init_IC = None 88 | else: 89 | init_IC = tf.ones([N_x, 1]) 90 | 91 | if dict_adaptive["BCs"][1] == False: 92 | init_BC1 = None 93 | else: 94 | init_BC1 = tf.ones([N_t, 1]) 95 | 96 | if dict_adaptive["BCs"][2] == False: 97 | init_BC2 = None 98 | else: 99 | init_BC2 = tf.ones([N_t, 1]) 100 | 101 | init_weights = {"residual": [init_residual], 102 | "BCs": [init_IC, init_BC1, init_BC2]} 103 | 104 | model = CollocationSolverND() 105 | model.compile(layer_sizes, f_model, Domain, BCs, 106 | isAdaptive=isAdaptive, 107 | dict_adaptive=dict_adaptive, 108 | init_weights=init_weights, 109 | dist=dist_training) 110 | 111 | model.fit(tf_iter=epoch_adam, 112 | newton_iter=epoch_lbfgs, 113 | newton_eager=lbfgs_eager, 114 | batch_sz=batch_sz) 115 | 116 | return 117 | 118 | if __name__ == "__main__": 119 | main(args=None) 120 | -------------------------------------------------------------------------------- /test/test_AC_distributed.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from AC2test import * 3 | 4 | class TestACDistributed(): 5 | def init_args(self): 6 | self.args = {'layer_sizes': [2, 21, 21, 21, 21, 1], 7 | 'run_functions_eagerly': False, 8 | 'epoch_adam': 20, 9 | 'epoch_lbfgs': 20, 10 | 'dist_training': True, 11 | 'dict_adaptive': {"residual": [False], 12 | "BCs": [True, False]}, 13 | 'N_x': 100, 14 | 'N_t': 50, 15 | 'N_f': 5000, 16 | 'batch_sz': None, 17 | } 18 | 19 | def test_lbfgs_eager1(self): 20 | self.init_args() 21 | self.args['lbfgs_eager'] = True 22 | self.args['isAdaptive']= False 23 | try: 24 | main(self.args) 25 | assert True 26 | except Exception as inst: 27 | assert False 28 | 29 | def test_lbfgs_eager2(self): 30 | self.init_args() 31 | self.args['lbfgs_eager'] = False 32 | self.args['isAdaptive'] = False 33 | try: 34 | main(self.args) 35 | assert True 36 | except Exception as inst: 37 | assert False 38 | 39 | def test_adaptive1(self): 40 | self.init_args() 41 | self.args['lbfgs_eager'] = True 42 | self.args['isAdaptive'] = True 43 | self.args['dict_adaptive']= {"residual": [True], 44 | "BCs": [False, False]} 45 | try: 46 | main(self.args) 47 | assert True 48 | except Exception as inst: 49 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 50 | assert True 51 | else: 52 | assert False 53 | 54
| def test_adaptive2(self): 55 | self.init_args() 56 | self.args['lbfgs_eager'] = True 57 | self.args['isAdaptive'] = True 58 | self.args['dict_adaptive']= {"residual": [True], 59 | "BCs": [True, True]} 60 | try: 61 | main(self.args) 62 | assert True 63 | except Exception as inst: 64 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 65 | assert True 66 | else: 67 | assert False 68 | 69 | def test_adaptive3(self): 70 | self.init_args() 71 | self.args['lbfgs_eager'] = True 72 | self.args['isAdaptive'] = True 73 | self.args['dict_adaptive']= {"residual": [True], 74 | "BCs": [True, False]} 75 | try: 76 | main(self.args) 77 | assert True 78 | except Exception as inst: 79 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 80 | assert True 81 | else: 82 | assert False 83 | def test_adaptive4(self): 84 | self.init_args() 85 | self.args['lbfgs_eager'] = True 86 | self.args['isAdaptive'] = True 87 | self.args['dict_adaptive']= {"residual": [True], 88 | "BCs": [False, True]} 89 | try: 90 | main(self.args) 91 | assert True 92 | except Exception as inst: 93 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 94 | assert True 95 | else: 96 | assert False 97 | 98 | def test_adaptive5(self): 99 | self.init_args() 100 | self.args['lbfgs_eager'] = True 101 | self.args['isAdaptive'] = True 102 | self.args['dict_adaptive']= {"residual": [False], 103 | "BCs": [False, False]} 104 | try: 105 | main(self.args) 106 | assert True 107 | except Exception as inst: 108 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 109 | assert True 110 | else: 111 | assert False 112 | def test_adaptive6(self): 113 | self.init_args() 114 | self.args['lbfgs_eager'] = True 115 | self.args['isAdaptive'] = True 116 | self.args['dict_adaptive']= {"residual": [False], 117 | "BCs": [True, True]} 118 | try: 119 | main(self.args) 120 | assert True 121 | except Exception as inst: 122 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 123 | assert True 124 | else: 125 | assert False 126 | def test_adaptive7(self): 127 | self.init_args() 128 | self.args['lbfgs_eager'] = True 129 | self.args['isAdaptive'] = True 130 | self.args['dict_adaptive']= {"residual": [False], 131 | "BCs": [True, False]} 132 | try: 133 | main(self.args) 134 | assert True 135 | except Exception as inst: 136 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 137 | assert True 138 | else: 139 | assert False 140 | 141 | def test_adaptive8(self): 142 | self.init_args() 143 | self.args['lbfgs_eager'] = True 144 | self.args['isAdaptive'] = True 145 | self.args['dict_adaptive']= {"residual": [False], 146 | "BCs": [False, True]} 147 | try: 148 | main(self.args) 149 | assert True 150 | except Exception as inst: 151 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 152 | assert True 153 | else: 154 | assert False 155 | -------------------------------------------------------------------------------- /test/test_AC_distributed_minibatch.py: -------------------------------------------------------------------------------- 1 | from AC2test import * 2 | 3 | class TestACDistributedMinibatch(): 4 | def init_args(self): 5 | self.args = {'layer_sizes': [2, 21, 21, 21, 21, 1], 6 | 'run_functions_eagerly': False, 7 | 'epoch_adam': 20, 8 | 'epoch_lbfgs': 20, 9 | 'dist_training': True, 10 | 'dict_adaptive': {"residual": 
[False], 11 | "BCs": [True, False]}, 12 | 'N_x': 100, 13 | 'N_t': 50, 14 | 'N_f': 5000, 15 | 'batch_sz': 200, 16 | } 17 | 18 | def test_lbfgs_eager1(self): 19 | self.init_args() 20 | self.args['lbfgs_eager'] = True 21 | self.args['isAdaptive']= False 22 | try: 23 | main(self.args) 24 | assert True 25 | except Exception as inst: 26 | if "Currently we dont support distributed minibatching training" in inst.args[0]: 27 | assert True 28 | else: 29 | assert False 30 | 31 | 32 | def test_lbfgs_eager2(self): 33 | self.init_args() 34 | self.args['lbfgs_eager'] = False 35 | self.args['isAdaptive'] = False 36 | try: 37 | main(self.args) 38 | assert True 39 | except Exception as inst: 40 | if "Currently we dont support distributed minibatching training" in inst.args[0]: 41 | assert True 42 | else: 43 | assert False 44 | 45 | def test_adaptive1(self): 46 | self.init_args() 47 | self.args['lbfgs_eager'] = True 48 | self.args['isAdaptive'] = True 49 | self.args['dict_adaptive']= {"residual": [True], 50 | "BCs": [False, False]} 51 | try: 52 | main(self.args) 53 | assert True 54 | except Exception as inst: 55 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 56 | assert True 57 | else: 58 | assert False 59 | 60 | def test_adaptive2(self): 61 | self.init_args() 62 | self.args['lbfgs_eager'] = True 63 | self.args['isAdaptive'] = True 64 | self.args['dict_adaptive']= {"residual": [True], 65 | "BCs": [True, True]} 66 | try: 67 | main(self.args) 68 | assert True 69 | except Exception as inst: 70 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 71 | assert True 72 | else: 73 | assert False 74 | 75 | def test_adaptive3(self): 76 | self.init_args() 77 | self.args['lbfgs_eager'] = True 78 | self.args['isAdaptive'] = True 79 | self.args['dict_adaptive']= {"residual": [True], 80 | "BCs": [True, False]} 81 | try: 82 | main(self.args) 83 | assert True 84 | except Exception as inst: 85 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 86 | assert True 87 | else: 88 | assert False 89 | def test_adaptive4(self): 90 | self.init_args() 91 | self.args['lbfgs_eager'] = True 92 | self.args['isAdaptive'] = True 93 | self.args['dict_adaptive']= {"residual": [True], 94 | "BCs": [False, True]} 95 | try: 96 | main(self.args) 97 | assert True 98 | except Exception as inst: 99 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 100 | assert True 101 | else: 102 | assert False 103 | 104 | def test_adaptive5(self): 105 | self.init_args() 106 | self.args['lbfgs_eager'] = True 107 | self.args['isAdaptive'] = True 108 | self.args['dict_adaptive']= {"residual": [False], 109 | "BCs": [False, False]} 110 | try: 111 | main(self.args) 112 | assert True 113 | except Exception as inst: 114 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 115 | assert True 116 | else: 117 | assert False 118 | def test_adaptive6(self): 119 | self.init_args() 120 | self.args['lbfgs_eager'] = True 121 | self.args['isAdaptive'] = True 122 | self.args['dict_adaptive']= {"residual": [False], 123 | "BCs": [True, True]} 124 | try: 125 | main(self.args) 126 | assert True 127 | except Exception as inst: 128 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 129 | assert True 130 | else: 131 | assert False 132 | def test_adaptive7(self): 133 | self.init_args() 134 | self.args['lbfgs_eager'] = True 135 | self.args['isAdaptive'] = True 136
| self.args['dict_adaptive']= {"residual": [False], 137 | "BCs": [True, False]} 138 | try: 139 | main(self.args) 140 | assert True 141 | except Exception as inst: 142 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 143 | assert True 144 | else: 145 | assert False 146 | 147 | def test_adaptive8(self): 148 | self.init_args() 149 | self.args['lbfgs_eager'] = True 150 | self.args['isAdaptive'] = True 151 | self.args['dict_adaptive']= {"residual": [False], 152 | "BCs": [False, True]} 153 | try: 154 | main(self.args) 155 | assert True 156 | except Exception as inst: 157 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 158 | assert True 159 | else: 160 | assert False 161 | -------------------------------------------------------------------------------- /test/test_AC_nonDistributed.py: -------------------------------------------------------------------------------- 1 | from AC2test import * 2 | 3 | class TestACNonDistributed(): 4 | def init_args(self): 5 | self.args = {'layer_sizes': [2, 21, 21, 21, 21, 1], 6 | 'run_functions_eagerly': False, 7 | 'epoch_adam': 20, 8 | 'epoch_lbfgs': 20, 9 | 'dist_training': False, 10 | 'dict_adaptive': {"residual": [False], 11 | "BCs": [True, False]}, 12 | 'N_x': 100, 13 | 'N_t': 50, 14 | 'N_f': 5000, 15 | 'batch_sz': None, 16 | } 17 | 18 | def test_lbfgs_eager1(self): 19 | self.init_args() 20 | self.args['lbfgs_eager'] = True 21 | self.args['isAdaptive']= False 22 | try: 23 | main(self.args) 24 | assert True 25 | except Exception as inst: 26 | assert False 27 | 28 | def test_lbfgs_eager2(self): 29 | self.init_args() 30 | self.args['lbfgs_eager'] = False 31 | self.args['isAdaptive'] = False 32 | try: 33 | main(self.args) 34 | assert True 35 | except Exception as inst: 36 | assert False 37 | 38 | def test_adaptive1(self): 39 | self.init_args() 40 | self.args['lbfgs_eager'] = True 41 | self.args['isAdaptive'] = True 42 | self.args['dict_adaptive']= {"residual": [True], 43 | "BCs": [False, False]} 44 | try: 45 | main(self.args) 46 | assert True 47 | except Exception as inst: 48 | assert False 49 | 50 | def test_adaptive2(self): 51 | self.init_args() 52 | self.args['lbfgs_eager'] = True 53 | self.args['isAdaptive'] = True 54 | self.args['dict_adaptive']= {"residual": [True], 55 | "BCs": [True, True]} 56 | try: 57 | main(self.args) 58 | assert True 59 | except Exception as inst: 60 | if "TensorDiffEq is currently not accepting Adapative Periodic Boundaries Conditions" in inst.args[0]: 61 | assert True 62 | else: 63 | assert False 64 | 65 | def test_adaptive3(self): 66 | self.init_args() 67 | self.args['lbfgs_eager'] = True 68 | self.args['isAdaptive'] = True 69 | self.args['dict_adaptive']= {"residual": [True], 70 | "BCs": [True, False]} 71 | try: 72 | main(self.args) 73 | assert True 74 | except Exception as inst: 75 | assert False 76 | 77 | def test_adaptive4(self): 78 | self.init_args() 79 | self.args['lbfgs_eager'] = True 80 | self.args['isAdaptive'] = True 81 | self.args['dict_adaptive']= {"residual": [True], 82 | "BCs": [False, True]} 83 | try: 84 | main(self.args) 85 | assert True 86 | except Exception as inst: 87 | if "TensorDiffEq is currently not accepting Adapative Periodic Boundaries Conditions" in inst.args[0]: 88 | assert True 89 | else: 90 | assert False 91 | 92 | def test_adaptive5(self): 93 | self.init_args() 94 | self.args['lbfgs_eager'] = True 95 | self.args['isAdaptive'] = True 96 | self.args['dict_adaptive']= {"residual": [False], 97 | "BCs": [False, False]} 98 |
try: 99 | main(self.args) 100 | assert True 101 | except Exception as inst: 102 | assert False 103 | 104 | def test_adaptive6(self): 105 | self.init_args() 106 | self.args['lbfgs_eager'] = True 107 | self.args['isAdaptive'] = True 108 | self.args['dict_adaptive']= {"residual": [False], 109 | "BCs": [True, True]} 110 | try: 111 | main(self.args) 112 | assert True 113 | except Exception as inst: 114 | if "TensorDiffEq is currently not accepting Adapative Periodic Boundaries Conditions" in inst.args[0]: 115 | assert True 116 | else: 117 | assert False 118 | 119 | def test_adaptive7(self): 120 | self.init_args() 121 | self.args['lbfgs_eager'] = True 122 | self.args['isAdaptive'] = True 123 | self.args['dict_adaptive']= {"residual": [False], 124 | "BCs": [True, False]} 125 | try: 126 | main(self.args) 127 | assert True 128 | except Exception as inst: 129 | assert False 130 | 131 | def test_adaptive8(self): 132 | self.init_args() 133 | self.args['lbfgs_eager'] = True 134 | self.args['isAdaptive'] = True 135 | self.args['dict_adaptive']= {"residual": [False], 136 | "BCs": [False, True]} 137 | try: 138 | main(self.args) 139 | assert True 140 | except Exception as inst: 141 | if "TensorDiffEq is currently not accepting Adapative Periodic Boundaries Conditions" in inst.args[0]: 142 | assert True 143 | else: 144 | assert False 145 | -------------------------------------------------------------------------------- /test/test_AC_nonDistributed_minibatch.py: -------------------------------------------------------------------------------- 1 | from AC2test import * 2 | 3 | class TestACNonDistributedMinibatch(): 4 | def init_args(self): 5 | self.args = {'layer_sizes': [2, 21, 21, 21, 21, 1], 6 | 'run_functions_eagerly': False, 7 | 'epoch_adam': 20, 8 | 'epoch_lbfgs': 20, 9 | 'dist_training': False, 10 | 'dict_adaptive': {"residual": [False], 11 | "BCs": [True, False]}, 12 | 'N_x': 100, 13 | 'N_t': 50, 14 | 'N_f': 5000, 15 | 'batch_sz': 200, 16 | } 17 | 18 | def test_lbfgs_eager1(self): 19 | self.init_args() 20 | self.args['lbfgs_eager'] = True 21 | self.args['isAdaptive']= False 22 | try: 23 | main(self.args) 24 | assert True 25 | except Exception as inst: 26 | assert False 27 | 28 | def test_lbfgs_eager2(self): 29 | self.init_args() 30 | self.args['lbfgs_eager'] = False 31 | self.args['isAdaptive'] = False 32 | try: 33 | main(self.args) 34 | assert True 35 | except Exception as inst: 36 | assert False 37 | 38 | def test_adaptive1(self): 39 | self.init_args() 40 | self.args['lbfgs_eager'] = True 41 | self.args['isAdaptive'] = True 42 | self.args['dict_adaptive']= {"residual": [True], 43 | "BCs": [False, False]} 44 | try: 45 | main(self.args) 46 | assert True 47 | except Exception as inst: 48 | assert False 49 | 50 | def test_adaptive2(self): 51 | self.init_args() 52 | self.args['lbfgs_eager'] = True 53 | self.args['isAdaptive'] = True 54 | self.args['dict_adaptive']= {"residual": [True], 55 | "BCs": [True, True]} 56 | try: 57 | main(self.args) 58 | assert True 59 | except Exception as inst: 60 | if "TensorDiffEq is currently not accepting Adapative Periodic Boundaries Conditions" in inst.args[0]: 61 | assert True 62 | else: 63 | assert False 64 | 65 | def test_adaptive3(self): 66 | self.init_args() 67 | self.args['lbfgs_eager'] = True 68 | self.args['isAdaptive'] = True 69 | self.args['dict_adaptive']= {"residual": [True], 70 | "BCs": [True, False]} 71 | try: 72 | main(self.args) 73 | assert True 74 | except Exception as inst: 75 | assert False 76 | 77 | def test_adaptive4(self): 78 | self.init_args() 79 |
self.args['lbfgs_eager'] = True 80 | self.args['isAdaptive'] = True 81 | self.args['dict_adaptive']= {"residual": [True], 82 | "BCs": [False, True]} 83 | try: 84 | main(self.args) 85 | assert True 86 | except Exception as inst: 87 | if "TensorDiffEq is currently not accepting Adapative Periodic Boundaries Conditions" in inst.args[0]: 88 | assert True 89 | else: 90 | assert False 91 | 92 | def test_adaptive5(self): 93 | self.init_args() 94 | self.args['lbfgs_eager'] = True 95 | self.args['isAdaptive'] = True 96 | self.args['dict_adaptive']= {"residual": [False], 97 | "BCs": [False, False]} 98 | try: 99 | main(self.args) 100 | assert True 101 | except Exception as inst: 102 | assert False 103 | 104 | def test_adaptive6(self): 105 | self.init_args() 106 | self.args['lbfgs_eager'] = True 107 | self.args['isAdaptive'] = True 108 | self.args['dict_adaptive']= {"residual": [False], 109 | "BCs": [True, True]} 110 | try: 111 | main(self.args) 112 | assert True 113 | except Exception as inst: 114 | if "TensorDiffEq is currently not accepting Adapative Periodic Boundaries Conditions" in inst.args[0]: 115 | assert True 116 | else: 117 | assert False 118 | 119 | def test_adaptive7(self): 120 | self.init_args() 121 | self.args['lbfgs_eager'] = True 122 | self.args['isAdaptive'] = True 123 | self.args['dict_adaptive']= {"residual": [False], 124 | "BCs": [True, False]} 125 | try: 126 | main(self.args) 127 | assert True 128 | except Exception as inst: 129 | assert False 130 | 131 | def test_adaptive8(self): 132 | self.init_args() 133 | self.args['lbfgs_eager'] = True 134 | self.args['isAdaptive'] = True 135 | self.args['dict_adaptive']= {"residual": [False], 136 | "BCs": [False, True]} 137 | try: 138 | main(self.args) 139 | assert True 140 | except Exception as inst: 141 | if "TensorDiffEq is currently not accepting Adapative Periodic Boundaries Conditions" in inst.args[0]: 142 | assert True 143 | else: 144 | assert False 145 | -------------------------------------------------------------------------------- /test/test_Burgers_distributed.py: -------------------------------------------------------------------------------- 1 | from Burgers2test import * 2 | 3 | class TestBurgersDistributed(): 4 | def init_args(self): 5 | self.args = {'layer_sizes': [2, 21, 21, 21, 21, 1], 6 | 'run_functions_eagerly': True, 7 | 'epoch_adam': 20, 8 | 'epoch_lbfgs': 20, 9 | 'lbfgs_eager': False, 10 | 'isAdaptive': True, 11 | 'dist_training': True, 12 | 'dict_adaptive': {"residual": [False], 13 | "BCs": [True, False, False]}, 14 | 'N_x': 100, 15 | 'N_t': 50, 16 | 'N_f': 5000, 17 | 'batch_sz': None, 18 | } 19 | 20 | def test_lbfgs_eager1(self): 21 | self.init_args() 22 | self.args['lbfgs_eager'] = True 23 | self.args['isAdaptive']= False 24 | try: 25 | main(self.args) 26 | assert True 27 | except Exception as inst: 28 | assert False 29 | 30 | def test_lbfgs_eager2(self): 31 | self.init_args() 32 | self.args['lbfgs_eager'] = False 33 | self.args['isAdaptive'] = False 34 | try: 35 | main(self.args) 36 | assert True 37 | except Exception as inst: 38 | assert False 39 | 40 | def test_adaptive1(self): 41 | self.init_args() 42 | self.args['lbfgs_eager'] = True 43 | self.args['isAdaptive'] = True 44 | self.args['dict_adaptive']= {"residual": [True], 45 | "BCs": [False, False, False]} 46 | try: 47 | main(self.args) 48 | assert True 49 | except Exception as inst: 50 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 51 | assert True 52 | else: 53 | assert False 54 | 55 | def test_adaptive2(self): 56 
| self.init_args() 57 | self.args['lbfgs_eager'] = True 58 | self.args['isAdaptive'] = True 59 | self.args['dict_adaptive']= {"residual": [True], 60 | "BCs": [True, True, True]} 61 | try: 62 | main(self.args) 63 | assert True 64 | except Exception as inst: 65 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 66 | assert True 67 | else: 68 | assert False 69 | 70 | def test_adaptive3(self): 71 | self.init_args() 72 | self.args['lbfgs_eager'] = True 73 | self.args['isAdaptive'] = True 74 | self.args['dict_adaptive']= {"residual": [True], 75 | "BCs": [True, False, False]} 76 | try: 77 | main(self.args) 78 | assert True 79 | except Exception as inst: 80 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 81 | assert True 82 | else: 83 | assert False 84 | 85 | def test_adaptive4(self): 86 | self.init_args() 87 | self.args['lbfgs_eager'] = True 88 | self.args['isAdaptive'] = True 89 | self.args['dict_adaptive']= {"residual": [True], 90 | "BCs": [False, True, True]} 91 | try: 92 | main(self.args) 93 | assert True 94 | except Exception as inst: 95 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 96 | assert True 97 | else: 98 | assert False 99 | 100 | def test_adaptive5(self): 101 | self.init_args() 102 | self.args['lbfgs_eager'] = True 103 | self.args['isAdaptive'] = True 104 | self.args['dict_adaptive']= {"residual": [False], 105 | "BCs": [False, False, False]} 106 | try: 107 | main(self.args) 108 | assert True 109 | except Exception as inst: 110 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 111 | assert True 112 | else: 113 | assert False 114 | 115 | def test_adaptive6(self): 116 | self.init_args() 117 | self.args['lbfgs_eager'] = True 118 | self.args['isAdaptive'] = True 119 | self.args['dict_adaptive']= {"residual": [False], 120 | "BCs": [True, True, True]} 121 | try: 122 | main(self.args) 123 | assert True 124 | except Exception as inst: 125 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 126 | assert True 127 | else: 128 | assert False 129 | 130 | def test_adaptive7(self): 131 | self.init_args() 132 | self.args['lbfgs_eager'] = True 133 | self.args['isAdaptive'] = True 134 | self.args['dict_adaptive']= {"residual": [False], 135 | "BCs": [True, False, False]} 136 | try: 137 | main(self.args) 138 | assert True 139 | except Exception as inst: 140 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 141 | assert True 142 | else: 143 | assert False 144 | 145 | def test_adaptive8(self): 146 | self.init_args() 147 | self.args['lbfgs_eager'] = True 148 | self.args['isAdaptive'] = True 149 | self.args['dict_adaptive']= {"residual": [False], 150 | "BCs": [False, True, True]} 151 | try: 152 | main(self.args) 153 | assert True 154 | except Exception as inst: 155 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 156 | assert True 157 | else: 158 | assert False 159 | -------------------------------------------------------------------------------- /test/test_Burgers_distributed_minibatch.py: -------------------------------------------------------------------------------- 1 | from Burgers2test import * 2 | 3 | class TestBurgersDistributedMinibatch(): 4 | def init_args(self): 5 | self.args = {'layer_sizes': [2, 21, 21, 21, 21, 1], 6 | 'run_functions_eagerly': True, 7 | 'epoch_adam': 20, 8 | 'epoch_lbfgs': 20, 9 | 'lbfgs_eager': 
False, 10 | 'isAdaptive': True, 11 | 'dist_training': True, 12 | 'dict_adaptive': {"residual": [False], 13 | "BCs": [True, False, False]}, 14 | 'N_x': 100, 15 | 'N_t': 50, 16 | 'N_f': 5000, 17 | 'batch_sz': 200, 18 | } 19 | 20 | def test_lbfgs_eager1(self): 21 | self.init_args() 22 | self.args['lbfgs_eager'] = True 23 | self.args['isAdaptive']= False 24 | try: 25 | main(self.args) 26 | assert True 27 | except Exception as inst: 28 | if "Currently we dont support distributed minibatching training" in inst.args[0]: 29 | assert True 30 | else: 31 | assert False 32 | 33 | def test_lbfgs_eager2(self): 34 | self.init_args() 35 | self.args['lbfgs_eager'] = False 36 | self.args['isAdaptive'] = False 37 | try: 38 | main(self.args) 39 | assert True 40 | except Exception as inst: 41 | if "Currently we dont support distributed minibatching training" in inst.args[0]: 42 | assert True 43 | else: 44 | assert False 45 | 46 | def test_adaptive1(self): 47 | self.init_args() 48 | self.args['lbfgs_eager'] = True 49 | self.args['isAdaptive'] = True 50 | self.args['dict_adaptive']= {"residual": [True], 51 | "BCs": [False, False, False]} 52 | try: 53 | main(self.args) 54 | assert True 55 | except Exception as inst: 56 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 57 | assert True 58 | else: 59 | assert False 60 | 61 | def test_adaptive2(self): 62 | self.init_args() 63 | self.args['lbfgs_eager'] = True 64 | self.args['isAdaptive'] = True 65 | self.args['dict_adaptive']= {"residual": [True], 66 | "BCs": [True, True, True]} 67 | try: 68 | main(self.args) 69 | assert True 70 | except Exception as inst: 71 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 72 | assert True 73 | else: 74 | assert False 75 | 76 | def test_adaptive3(self): 77 | self.init_args() 78 | self.args['lbfgs_eager'] = True 79 | self.args['isAdaptive'] = True 80 | self.args['dict_adaptive']= {"residual": [True], 81 | "BCs": [True, False, False]} 82 | try: 83 | main(self.args) 84 | assert True 85 | except Exception as inst: 86 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 87 | assert True 88 | else: 89 | assert False 90 | 91 | def test_adaptive4(self): 92 | self.init_args() 93 | self.args['lbfgs_eager'] = True 94 | self.args['isAdaptive'] = True 95 | self.args['dict_adaptive']= {"residual": [True], 96 | "BCs": [False, True, True]} 97 | try: 98 | main(self.args) 99 | assert True 100 | except Exception as inst: 101 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 102 | assert True 103 | else: 104 | assert False 105 | 106 | def test_adaptive5(self): 107 | self.init_args() 108 | self.args['lbfgs_eager'] = True 109 | self.args['isAdaptive'] = True 110 | self.args['dict_adaptive']= {"residual": [False], 111 | "BCs": [False, False, False]} 112 | try: 113 | main(self.args) 114 | assert True 115 | except Exception as inst: 116 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 117 | assert True 118 | else: 119 | assert False 120 | 121 | def test_adaptive6(self): 122 | self.init_args() 123 | self.args['lbfgs_eager'] = True 124 | self.args['isAdaptive'] = True 125 | self.args['dict_adaptive']= {"residual": [False], 126 | "BCs": [True, True, True]} 127 | try: 128 | main(self.args) 129 | assert True 130 | except Exception as inst: 131 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 132 | assert True 133 | else: 134 
| assert False 135 | 136 | def test_adaptive7(self): 137 | self.init_args() 138 | self.args['lbfgs_eager'] = True 139 | self.args['isAdaptive'] = True 140 | self.args['dict_adaptive']= {"residual": [False], 141 | "BCs": [True, False, False]} 142 | try: 143 | main(self.args) 144 | assert True 145 | except Exception as inst: 146 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 147 | assert True 148 | else: 149 | assert False 150 | 151 | def test_adaptive8(self): 152 | self.init_args() 153 | self.args['lbfgs_eager'] = True 154 | self.args['isAdaptive'] = True 155 | self.args['dict_adaptive']= {"residual": [False], 156 | "BCs": [False, True, True]} 157 | try: 158 | main(self.args) 159 | assert True 160 | except Exception as inst: 161 | if "Currently we dont support distributed training for adaptive PINNs" in inst.args[0]: 162 | assert True 163 | else: 164 | assert False 165 | -------------------------------------------------------------------------------- /test/test_Burgers_nonDistributed.py: -------------------------------------------------------------------------------- 1 | from Burgers2test import * 2 | 3 | class TestBurgersNonDistributed(): 4 | def init_args(self): 5 | self.args = {'layer_sizes': [2, 21, 21, 21, 21, 1], 6 | 'run_functions_eagerly': True, 7 | 'epoch_adam': 20, 8 | 'epoch_lbfgs': 20, 9 | 'lbfgs_eager': False, 10 | 'isAdaptive': True, 11 | 'dist_training': False, 12 | 'dict_adaptive': {"residual": [False], 13 | "BCs": [True, False, False]}, 14 | 'N_x': 100, 15 | 'N_t': 50, 16 | 'N_f': 5000, 17 | 'batch_sz': None, 18 | } 19 | 20 | def test_lbfgs_eager1(self): 21 | self.init_args() 22 | self.args['lbfgs_eager'] = True 23 | self.args['isAdaptive']= False 24 | try: 25 | main(self.args) 26 | assert True 27 | except Exception as inst: 28 | assert False 29 | 30 | def test_lbfgs_eager2(self): 31 | self.init_args() 32 | self.args['lbfgs_eager'] = False 33 | self.args['isAdaptive'] = False 34 | try: 35 | main(self.args) 36 | assert True 37 | except Exception as inst: 38 | assert False 39 | 40 | def test_adaptive1(self): 41 | self.init_args() 42 | self.args['lbfgs_eager'] = True 43 | self.args['isAdaptive'] = True 44 | self.args['dict_adaptive']= {"residual": [True], 45 | "BCs": [False, False, False]} 46 | try: 47 | main(self.args) 48 | assert True 49 | except Exception as inst: 50 | assert False 51 | 52 | def test_adaptive2(self): 53 | self.init_args() 54 | self.args['lbfgs_eager'] = True 55 | self.args['isAdaptive'] = True 56 | self.args['dict_adaptive']= {"residual": [True], 57 | "BCs": [True, True, True]} 58 | try: 59 | main(self.args) 60 | assert True 61 | except Exception as inst: 62 | assert False 63 | 64 | def test_adaptive3(self): 65 | self.init_args() 66 | self.args['lbfgs_eager'] = True 67 | self.args['isAdaptive'] = True 68 | self.args['dict_adaptive']= {"residual": [True], 69 | "BCs": [True, False, False]} 70 | try: 71 | main(self.args) 72 | assert True 73 | except Exception as inst: 74 | assert False 75 | 76 | def test_adaptive4(self): 77 | self.init_args() 78 | self.args['lbfgs_eager'] = True 79 | self.args['isAdaptive'] = True 80 | self.args['dict_adaptive']= {"residual": [True], 81 | "BCs": [False, True, True]} 82 | try: 83 | main(self.args) 84 | assert True 85 | except Exception as inst: 86 | assert False 87 | 88 | def test_adaptive5(self): 89 | self.init_args() 90 | self.args['lbfgs_eager'] = True 91 | self.args['isAdaptive'] = True 92 | self.args['dict_adaptive']= {"residual": [False], 93 | "BCs": [False, False, False]} 94 | 
try: 95 | main(self.args) 96 | assert True 97 | except Exception as inst: 98 | assert False 99 | 100 | def test_adaptive6(self): 101 | self.init_args() 102 | self.args['lbfgs_eager'] = True 103 | self.args['isAdaptive'] = True 104 | self.args['dict_adaptive']= {"residual": [False], 105 | "BCs": [True, True, True]} 106 | try: 107 | main(self.args) 108 | assert True 109 | except Exception as inst: 110 | assert False 111 | 112 | def test_adaptive7(self): 113 | self.init_args() 114 | self.args['lbfgs_eager'] = True 115 | self.args['isAdaptive'] = True 116 | self.args['dict_adaptive']= {"residual": [False], 117 | "BCs": [True, False, False]} 118 | try: 119 | main(self.args) 120 | assert True 121 | except Exception as inst: 122 | assert False 123 | 124 | def test_adaptive8(self): 125 | self.init_args() 126 | self.args['lbfgs_eager'] = True 127 | self.args['isAdaptive'] = True 128 | self.args['dict_adaptive']= {"residual": [False], 129 | "BCs": [False, True, True]} 130 | try: 131 | main(self.args) 132 | assert True 133 | except Exception as inst: 134 | assert False 135 | -------------------------------------------------------------------------------- /test/test_Burgers_nonDistributed_minibatch.py: -------------------------------------------------------------------------------- 1 | from Burgers2test import * 2 | 3 | class TestBurgersNonDistributedMinibatch(): 4 | def init_args(self): 5 | self.args = {'layer_sizes': [2, 21, 21, 21, 21, 1], 6 | 'run_functions_eagerly': True, 7 | 'epoch_adam': 20, 8 | 'epoch_lbfgs': 20, 9 | 'lbfgs_eager': False, 10 | 'isAdaptive': True, 11 | 'dist_training': False, 12 | 'dict_adaptive': {"residual": [False], 13 | "BCs": [True, False, False]}, 14 | 'N_x': 100, 15 | 'N_t': 50, 16 | 'N_f': 5000, 17 | 'batch_sz': 200, 18 | } 19 | 20 | def test_lbfgs_eager1(self): 21 | self.init_args() 22 | self.args['lbfgs_eager'] = True 23 | self.args['isAdaptive']= False 24 | try: 25 | main(self.args) 26 | assert True 27 | except Exception as inst: 28 | assert False 29 | 30 | def test_lbfgs_eager2(self): 31 | self.init_args() 32 | self.args['lbfgs_eager'] = False 33 | self.args['isAdaptive'] = False 34 | try: 35 | main(self.args) 36 | assert True 37 | except Exception as inst: 38 | assert False 39 | 40 | def test_adaptive1(self): 41 | self.init_args() 42 | self.args['lbfgs_eager'] = True 43 | self.args['isAdaptive'] = True 44 | self.args['dict_adaptive']= {"residual": [True], 45 | "BCs": [False, False, False]} 46 | try: 47 | main(self.args) 48 | assert True 49 | except Exception as inst: 50 | assert False 51 | 52 | def test_adaptive2(self): 53 | self.init_args() 54 | self.args['lbfgs_eager'] = True 55 | self.args['isAdaptive'] = True 56 | self.args['dict_adaptive']= {"residual": [True], 57 | "BCs": [True, True, True]} 58 | try: 59 | main(self.args) 60 | assert True 61 | except Exception as inst: 62 | assert False 63 | 64 | def test_adaptive3(self): 65 | self.init_args() 66 | self.args['lbfgs_eager'] = True 67 | self.args['isAdaptive'] = True 68 | self.args['dict_adaptive']= {"residual": [True], 69 | "BCs": [True, False, False]} 70 | try: 71 | main(self.args) 72 | assert True 73 | except Exception as inst: 74 | assert False 75 | 76 | def test_adaptive4(self): 77 | self.init_args() 78 | self.args['lbfgs_eager'] = True 79 | self.args['isAdaptive'] = True 80 | self.args['dict_adaptive']= {"residual": [True], 81 | "BCs": [False, True, True]} 82 | try: 83 | main(self.args) 84 | assert True 85 | except Exception as inst: 86 | assert False 87 | 88 | def test_adaptive5(self): 89 | 
self.init_args() 90 | self.args['lbfgs_eager'] = True 91 | self.args['isAdaptive'] = True 92 | self.args['dict_adaptive']= {"residual": [False], 93 | "BCs": [False, False, False]} 94 | try: 95 | main(self.args) 96 | assert True 97 | except Exception as inst: 98 | assert False 99 | 100 | def test_adaptive6(self): 101 | self.init_args() 102 | self.args['lbfgs_eager'] = True 103 | self.args['isAdaptive'] = True 104 | self.args['dict_adaptive']= {"residual": [False], 105 | "BCs": [True, True, True]} 106 | try: 107 | main(self.args) 108 | assert True 109 | except Exception as inst: 110 | assert False 111 | 112 | def test_adaptive7(self): 113 | self.init_args() 114 | self.args['lbfgs_eager'] = True 115 | self.args['isAdaptive'] = True 116 | self.args['dict_adaptive']= {"residual": [False], 117 | "BCs": [True, False, False]} 118 | try: 119 | main(self.args) 120 | assert True 121 | except Exception as inst: 122 | assert False 123 | 124 | def test_adaptive8(self): 125 | self.init_args() 126 | self.args['lbfgs_eager'] = True 127 | self.args['isAdaptive'] = True 128 | self.args['dict_adaptive']= {"residual": [False], 129 | "BCs": [False, True, True]} 130 | try: 131 | main(self.args) 132 | assert True 133 | except Exception as inst: 134 | assert False 135 | --------------------------------------------------------------------------------
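For reference, the following minimal standalone driver sketches the pattern that every test class above exercises: build an args dictionary, toggle the adaptive and eager flags, and hand it to main(). It assumes it is run from the test/ directory (so that Burgers2test resolves on the import path); the values shown are illustrative, taken from the test defaults, not canonical settings.

# Minimal sketch of the pattern used by the test classes above
# (assumed to be run from the test/ directory).
from Burgers2test import main

args = {
    'layer_sizes': [2, 21, 21, 21, 21, 1],  # (x, t) in -> 4 hidden layers -> u out
    'run_functions_eagerly': True,
    'epoch_adam': 20,        # forwarded to model.fit(tf_iter=...)
    'epoch_lbfgs': 20,       # forwarded to model.fit(newton_iter=...)
    'lbfgs_eager': True,     # forwarded to model.fit(newton_eager=...)
    'isAdaptive': True,
    'dist_training': False,  # adaptive weights + distributed training is unsupported
    # One flag per residual equation, then one per entry of the BCs list
    # [init, upper_x, lower_x] defined in Burgers2test.py:
    'dict_adaptive': {"residual": [True],
                      "BCs": [True, False, False]},
    'N_x': 100,              # initial-condition points
    'N_t': 50,               # points per Dirichlet boundary
    'N_f': 5000,             # collocation points for the residual
    'batch_sz': None,        # set an int to minibatch (non-distributed only)
}

main(args)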