├── .github └── workflows │ ├── codeql.yaml │ ├── macos-arm.yaml │ ├── macos-x86.yaml │ ├── release.yaml │ ├── ubuntu.yaml │ └── windows.yaml ├── .gitignore ├── .pre-commit-config.yaml ├── .pylintrc ├── .readthedocs.yaml ├── .vscode └── settings.json ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── docs ├── Makefile ├── make.bat ├── requirements.txt └── source │ ├── _static │ ├── tad-dftd4-favicon.svg │ └── tad-dftd4.svg │ ├── conf.py │ ├── disp.rst │ ├── index.rst │ ├── installation.rst │ └── modules │ ├── cutoff.rst │ ├── damping │ ├── atm.rst │ ├── index.rst │ ├── parameters.rst │ └── rational.rst │ ├── data │ ├── hardness.rst │ ├── index.rst │ ├── r4r2.rst │ ├── radii.rst │ ├── wfpair.rst │ └── zeff.rst │ ├── defaults.rst │ ├── index.rst │ ├── model │ ├── base.rst │ ├── d4.rst │ ├── d4s.rst │ └── index.rst │ └── typing │ ├── builtin.rst │ ├── index.rst │ └── pytorch.rst ├── environment.yaml ├── examples ├── batch.py ├── d4s.py └── single.py ├── pyproject.toml ├── setup.cfg ├── setup.py ├── src └── tad_dftd4 │ ├── __init__.py │ ├── __version__.py │ ├── cutoff.py │ ├── damping │ ├── __init__.py │ ├── atm.py │ ├── parameters │ │ ├── __init__.py │ │ ├── parameters.toml │ │ └── read.py │ └── rational.py │ ├── data │ ├── __init__.py │ ├── hardness.py │ ├── r4r2.py │ ├── radii.py │ ├── wfpair.py │ └── zeff.py │ ├── defaults.py │ ├── disp.py │ ├── model │ ├── __init__.py │ ├── base.py │ ├── d4.py │ ├── d4s.py │ └── utils.py │ ├── ncoord │ └── __init__.py │ ├── py.typed │ ├── reference │ ├── __init__.py │ ├── charge_eeq.py │ ├── charge_gfn2.py │ └── params.py │ └── typing │ ├── __init__.py │ ├── builtin.py │ └── pytorch.py ├── test ├── __init__.py ├── conftest.py ├── test_cutoff │ ├── __init__.py │ ├── test_general.py │ └── test_types.py ├── test_disp │ ├── __init__.py │ ├── samples.py │ ├── test_atm.py │ ├── test_full.py │ ├── test_general.py │ ├── test_properties.py │ └── test_twobody.py ├── test_grad │ ├── __init__.py │ ├── samples_grad.py │ ├── samples_hessian.py │ 
├── test_hessian.py │ ├── test_nan.py │ ├── test_param.py │ └── test_pos.py ├── test_model │ ├── __init__.py │ ├── samples.py │ ├── test_c6.py │ ├── test_general.py │ ├── test_models.py │ ├── test_params.py │ └── test_weights.py └── test_param │ ├── __init__.py │ ├── test_fail.py │ └── test_read.py └── tox.ini /.github/workflows/codeql.yaml: -------------------------------------------------------------------------------- 1 | # For most projects, this workflow file will not need changing; you simply need 2 | # to commit it to your repository. 3 | # 4 | # You may wish to alter this file to override the set of languages analyzed, 5 | # or to provide custom queries or build logic. 6 | # 7 | # ******** NOTE ******** 8 | # We have attempted to detect the languages in your repository. Please check 9 | # the `language` matrix defined below to confirm you have the correct set of 10 | # supported CodeQL languages. 11 | # 12 | name: "CodeQL" 13 | 14 | on: 15 | push: 16 | branches: ["main"] 17 | pull_request: 18 | branches: ["main"] 19 | schedule: 20 | - cron: "31 23 * * 0" 21 | 22 | jobs: 23 | analyze: 24 | name: Analyze 25 | # Runner size impacts CodeQL analysis time. To learn more, please see: 26 | # - https://gh.io/recommended-hardware-resources-for-running-codeql 27 | # - https://gh.io/supported-runners-and-hardware-resources 28 | # - https://gh.io/using-larger-runners 29 | # Consider using larger runners for possible analysis time improvements. 
30 | runs-on: ubuntu-latest 31 | timeout-minutes: 360 32 | permissions: 33 | actions: read 34 | contents: read 35 | security-events: write 36 | 37 | strategy: 38 | fail-fast: false 39 | matrix: 40 | language: ["python"] 41 | # CodeQL supports [ 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift' ] 42 | # Use only 'java-kotlin' to analyze code written in Java, Kotlin or both 43 | # Use only 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both 44 | # Learn more about CodeQL language support at https://aka.ms/codeql-docs/language-support 45 | 46 | steps: 47 | - name: Checkout repository 48 | uses: actions/checkout@v4 49 | with: 50 | persist-credentials: false 51 | 52 | # Initializes the CodeQL tools for scanning. 53 | - name: Initialize CodeQL 54 | uses: github/codeql-action/init@v3 55 | with: 56 | languages: ${{ matrix.language }} 57 | # If you wish to specify custom queries, you can do so here or in a config file. 58 | # By default, queries listed here will override any specified in a config file. 59 | # Prefix the list here with "+" to use these queries and those in the config file. 60 | # For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs 61 | # queries: security-extended,security-and-quality 62 | queries: +security-and-quality 63 | 64 | # Autobuild attempts to build any compiled languages (C/C++, C#, Go, Java, or Swift). 65 | # If this step fails, then you should remove it and run the build manually (see below) 66 | - name: Autobuild 67 | uses: github/codeql-action/autobuild@v3 68 | 69 | # ℹ️ Command-line programs to run using the OS shell. 
70 | #   📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun 71 | 72 | #   If the Autobuild fails above, remove it and uncomment the following three lines. 73 | #   Modify them (or add more) to build your code; refer to the example below for guidance. 74 | 75 | # - run: | 76 | #   echo "Run, Build Application using script" 77 | #   ./location_of_script_within_repo/buildscript.sh 78 | 79 | - name: Perform CodeQL Analysis 80 | uses: github/codeql-action/analyze@v3 81 | with: 82 | category: "/language:${{matrix.language}}" 83 | -------------------------------------------------------------------------------- /.github/workflows/macos-arm.yaml: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | #     http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | name: Tests (macOS arm) 18 | 19 | on: 20 | push: 21 | branches: 22 | - main 23 | - master 24 | paths-ignore: 25 | - "doc*/**" 26 | - "./*.ya?ml" 27 | - "**/*.md" 28 | - "**/*.rst" 29 | 30 | pull_request: 31 | paths-ignore: 32 | - "doc*/**" 33 | - "./*.ya?ml" 34 | - "**/*.md" 35 | - "**/*.rst" 36 | 37 | workflow_dispatch: 38 | 39 | jobs: 40 | main: 41 | strategy: 42 | fail-fast: false 43 | matrix: 44 | os: [macos-14, macos-15] 45 | # Python 3.8/3.9 is not on macos-latest (macos-14-arm64) 46 | # https://github.com/actions/setup-python/issues/696 47 | python-version: ["3.10", "3.11", "3.12"] 48 | # only test oldest and newest version of torch 49 | torch-version: ["1.11.0", "2.5.1"] 50 | exclude: 51 | # Check latest versions here: https://download.pytorch.org/whl/torch/ 52 | # 53 | # PyTorch now fully supports Python=<3.11 54 | # see: https://github.com/pytorch/pytorch/issues/86566 55 | # 56 | # PyTorch does now support Python 3.12 (macOS only 2.2) 57 | # see: https://github.com/pytorch/pytorch/issues/110436 58 | - python-version: "3.12" 59 | torch-version: "1.11.0" 60 | # PyTorch<1.13.0 does only support Python=<3.10 61 | # On macOS and Windows, 1.13.x is also not supported for Python>=3.10 62 | - python-version: "3.11" 63 | torch-version: "1.11.0" 64 | - python-version: "3.11" 65 | torch-version: "1.12.1" 66 | - python-version: "3.11" 67 | torch-version: "1.13.1" 68 | 69 | permissions: 70 | contents: read 71 | 72 | runs-on: ${{ matrix.os }} 73 | 74 | defaults: 75 | run: 76 | shell: bash {0} 77 | 78 | steps: 79 | - name: Checkout code 80 | uses: actions/checkout@v3 81 | with: 82 | persist-credentials: false 83 | 84 | - name: Set up Python ${{ matrix.python-version }} 85 | uses: actions/setup-python@v5 86 | with: 87 | python-version: ${{ matrix.python-version }} 88 | 89 | - name: Install dependencies 90 | run: | 91 | python3 -m pip install --upgrade pip 92 | python3 -m pip install tox 93 | 94 | - name: Determine TOXENV 95 | run: echo "TOXENV=py$(echo ${{ 
matrix.python-version }} | tr -d '.')-torch$(echo ${{ matrix.torch-version }} | tr -d '.')" >> $GITHUB_ENV 96 | 97 | - name: Print TOXENV 98 | run: echo "TOXENV is set to '${TOXENV}'." 99 | env: 100 | TOXENV: ${{ env.TOXENV }} 101 | 102 | - name: Unittests with tox 103 | run: tox -e $TOXENV 104 | env: 105 | TOXENV: ${{ env.TOXENV }} 106 | -------------------------------------------------------------------------------- /.github/workflows/macos-x86.yaml: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | name: Tests (macOS x86) 18 | 19 | on: 20 | push: 21 | branches: 22 | - main 23 | - master 24 | paths-ignore: 25 | - "doc*/**" 26 | - "./*.ya?ml" 27 | - "**/*.md" 28 | - "**/*.rst" 29 | 30 | pull_request: 31 | paths-ignore: 32 | - "doc*/**" 33 | - "./*.ya?ml" 34 | - "**/*.md" 35 | - "**/*.rst" 36 | 37 | workflow_dispatch: 38 | 39 | jobs: 40 | main: 41 | strategy: 42 | fail-fast: false 43 | matrix: 44 | os: [macos-13] 45 | python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] 46 | # only test oldest and newest version of torch 47 | torch-version: ["1.11.0", "2.2.2"] 48 | exclude: 49 | # Check latest versions here: https://download.pytorch.org/whl/torch/ 50 | # 51 | # PyTorch issues: 52 | # 3.11: https://github.com/pytorch/pytorch/issues/86566 53 | # 3.12: https://github.com/pytorch/pytorch/issues/110436 54 | # 3.13: https://github.com/pytorch/pytorch/issues/130249 55 | # 56 | # Wheels for macOS x86_64 are deprecated since 2.3.0 57 | # 58 | # Starting with macOS 14, runners are based on ARM. 59 | # The macOS 12 runner image is removed on December 3rd. 
60 | # 61 | # PyTorch<2.2.0 does only support Python<3.12 (all platforms) 62 | - python-version: "3.12" 63 | torch-version: "1.11.0" 64 | - python-version: "3.12" 65 | torch-version: "1.12.1" 66 | - python-version: "3.12" 67 | torch-version: "1.13.1" 68 | - python-version: "3.12" 69 | torch-version: "2.0.1" 70 | - python-version: "3.12" 71 | torch-version: "2.1.2" 72 | # PyTorch<2.0.0 does only support Python<3.11 (macOS and Windows) 73 | - python-version: "3.11" 74 | torch-version: "1.11.0" 75 | - python-version: "3.11" 76 | torch-version: "1.12.1" 77 | - python-version: "3.11" 78 | torch-version: "1.13.1" 79 | 80 | permissions: 81 | contents: read 82 | 83 | runs-on: ${{ matrix.os }} 84 | 85 | defaults: 86 | run: 87 | shell: bash {0} 88 | 89 | steps: 90 | - name: Checkout code 91 | uses: actions/checkout@v3 92 | with: 93 | persist-credentials: false 94 | 95 | - name: Set up Python ${{ matrix.python-version }} 96 | uses: actions/setup-python@v5 97 | with: 98 | python-version: ${{ matrix.python-version }} 99 | 100 | - name: Install dependencies 101 | run: | 102 | python3 -m pip install --upgrade pip 103 | python3 -m pip install tox 104 | 105 | - name: Determine TOXENV 106 | run: echo "TOXENV=py$(echo ${{ matrix.python-version }} | tr -d '.')-torch$(echo ${{ matrix.torch-version }} | tr -d '.')" >> $GITHUB_ENV 107 | 108 | - name: Print TOXENV 109 | run: echo "TOXENV is set to '${TOXENV}'." 110 | env: 111 | TOXENV: ${{ env.TOXENV }} 112 | 113 | - name: Unittests with tox 114 | run: tox -e $TOXENV 115 | env: 116 | TOXENV: ${{ env.TOXENV }} 117 | -------------------------------------------------------------------------------- /.github/workflows/release.yaml: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 
2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | name: Build 18 | 19 | on: 20 | push: 21 | branches: 22 | - main 23 | - master 24 | tags: 25 | - "v*" 26 | paths-ignore: 27 | - "doc*/**" 28 | - "./*.ya?ml" 29 | - "**/*.md" 30 | - "**/*.rst" 31 | 32 | pull_request: 33 | paths-ignore: 34 | - "doc*/**" 35 | - "./*.ya?ml" 36 | - "**/*.md" 37 | - "**/*.rst" 38 | 39 | workflow_dispatch: 40 | 41 | jobs: 42 | sdist: 43 | permissions: 44 | contents: read 45 | 46 | runs-on: ubuntu-latest 47 | 48 | steps: 49 | - name: Checkout code 50 | uses: actions/checkout@v4 51 | with: 52 | persist-credentials: false 53 | 54 | - name: Build source distribution (sdist) 55 | run: pipx run build --sdist 56 | 57 | - name: Upload source distribution as artifact 58 | uses: actions/upload-artifact@v4 59 | with: 60 | name: ${{ github.event.repository.name }}-sdist 61 | path: dist 62 | 63 | wheel: 64 | permissions: 65 | contents: read 66 | 67 | runs-on: ubuntu-latest 68 | 69 | steps: 70 | - name: Checkout code 71 | uses: actions/checkout@v4 72 | with: 73 | persist-credentials: false 74 | 75 | - name: Build wheel (bdist) 76 | run: pipx run build --wheel 77 | 78 | - name: Upload wheel as artifact 79 | uses: actions/upload-artifact@v4 80 | with: 81 | name: ${{ github.event.repository.name }}-wheel 82 | path: dist 83 | 84 | install_wheel: 85 | needs: [wheel] 86 | 87 | permissions: 88 
| contents: read 89 | 90 | runs-on: ubuntu-latest 91 | 92 | steps: 93 | - name: Checkout code 94 | uses: actions/checkout@v4 95 | with: 96 | persist-credentials: false 97 | 98 | - name: Download build artifacts 99 | uses: actions/download-artifact@v4 100 | with: 101 | path: dist 102 | merge-multiple: true 103 | 104 | - name: Show downloaded artifacts 105 | run: ls -lcahFR --color=auto dist 106 | 107 | - name: Install wheel 108 | run: | 109 | pip install torch --index-url https://download.pytorch.org/whl/cpu 110 | pip install dist/*.whl 111 | 112 | - name: Determine package name 113 | run: | 114 | name=$(echo "${REPO_NAME}" | tr '-' '_') 115 | echo "PKG_NAME=$name" >> "$GITHUB_ENV" 116 | echo "PKG_NAME is set to '${name}'." 117 | env: 118 | REPO_NAME: ${{ github.event.repository.name }} 119 | 120 | - name: Test import 121 | run: python -c "import ${PKG_NAME}; print(${PKG_NAME}.__version__)" 122 | env: 123 | PKG_NAME: ${{ env.PKG_NAME }} 124 | 125 | upload_test_pypi: 126 | needs: [sdist, wheel] 127 | 128 | runs-on: ubuntu-latest 129 | 130 | environment: release 131 | 132 | permissions: 133 | contents: read 134 | id-token: write 135 | 136 | if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') 137 | steps: 138 | - name: Download build artifacts 139 | uses: actions/download-artifact@v4 140 | with: 141 | path: dist 142 | merge-multiple: true 143 | 144 | - name: Publish to Test PyPI 145 | uses: pypa/gh-action-pypi-publish@release/v1 146 | with: 147 | repository-url: https://test.pypi.org/legacy/ 148 | 149 | upload_pypi: 150 | needs: [sdist, wheel, upload_test_pypi] 151 | 152 | runs-on: ubuntu-latest 153 | 154 | environment: release 155 | 156 | permissions: 157 | contents: read 158 | id-token: write 159 | 160 | if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') 161 | steps: 162 | - name: Download build artifacts 163 | uses: actions/download-artifact@v4 164 | with: 165 | path: dist 166 | merge-multiple: true 167 | 168 | - name: 
Publish to PyPI 169 | uses: pypa/gh-action-pypi-publish@release/v1 170 | -------------------------------------------------------------------------------- /.github/workflows/ubuntu.yaml: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | name: Tests (Ubuntu) 18 | 19 | on: 20 | push: 21 | branches: 22 | - main 23 | - master 24 | paths-ignore: 25 | - "doc*/**" 26 | - "./*.ya?ml" 27 | - "**/*.md" 28 | - "**/*.rst" 29 | 30 | pull_request: 31 | paths-ignore: 32 | - "doc*/**" 33 | - "./*.ya?ml" 34 | - "**/*.md" 35 | - "**/*.rst" 36 | 37 | workflow_dispatch: 38 | 39 | jobs: 40 | main: 41 | strategy: 42 | fail-fast: false 43 | matrix: 44 | os: [ubuntu-latest] 45 | python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] 46 | torch-version: 47 | [ 48 | "1.11.0", 49 | "1.12.1", 50 | "1.13.1", 51 | "2.0.1", 52 | "2.1.2", 53 | "2.2.2", 54 | "2.3.1", 55 | "2.4.1", 56 | "2.5.1", 57 | ] 58 | exclude: 59 | # Check latest versions here: https://download.pytorch.org/whl/torch/ 60 | # 61 | # PyTorch issues: 62 | # 3.11: https://github.com/pytorch/pytorch/issues/86566 63 | # 3.12: https://github.com/pytorch/pytorch/issues/110436 64 | # 3.13: https://github.com/pytorch/pytorch/issues/130249 65 | # 66 | # PyTorch<2.2.0 does only support Python<3.12 (all 
platforms) 67 | - python-version: "3.12" 68 | torch-version: "1.11.0" 69 | - python-version: "3.12" 70 | torch-version: "1.12.1" 71 | - python-version: "3.12" 72 | torch-version: "1.13.1" 73 | - python-version: "3.12" 74 | torch-version: "2.0.1" 75 | - python-version: "3.12" 76 | torch-version: "2.1.2" 77 | # PyTorch<1.13.0 does only support Python<3.11 (Linux) 78 | - python-version: "3.11" 79 | torch-version: "1.11.0" 80 | - python-version: "3.11" 81 | torch-version: "1.12.1" 82 | # PyTorch>=2.5.0 does not support Python<3.9 83 | - python-version: "3.8" 84 | torch-version: "2.5.1" 85 | 86 | runs-on: ${{ matrix.os }} 87 | 88 | permissions: 89 | contents: read 90 | 91 | defaults: 92 | run: 93 | shell: bash {0} 94 | 95 | steps: 96 | - name: Checkout code 97 | uses: actions/checkout@v3 98 | with: 99 | persist-credentials: false 100 | 101 | - name: Set up Python ${{ matrix.python-version }} 102 | uses: actions/setup-python@v5 103 | with: 104 | python-version: ${{ matrix.python-version }} 105 | 106 | - name: Install dependencies 107 | run: | 108 | python3 -m pip install --upgrade pip 109 | python3 -m pip install tox 110 | 111 | - name: Determine TOXENV 112 | run: echo "TOXENV=py$(echo ${{ matrix.python-version }} | tr -d '.')-torch$(echo ${{ matrix.torch-version }} | tr -d '.')" >> $GITHUB_ENV 113 | 114 | - name: Print TOXENV 115 | run: echo "TOXENV is set to '${TOXENV}'." 
116 | env: 117 | TOXENV: ${{ env.TOXENV }} 118 | 119 | - name: Unittests with tox 120 | run: tox -e $TOXENV 121 | env: 122 | TOXENV: ${{ env.TOXENV }} 123 | 124 | - name: Upload coverage to Codecov 125 | uses: codecov/codecov-action@7f8b4b4bde536c465e797be725718b88c5d95e0e # 5.1.1 126 | if: > 127 | matrix.python-version == '3.11' && 128 | matrix.torch-version == '2.2.2' && 129 | matrix.os == 'ubuntu-latest' 130 | with: 131 | files: ./coverage.xml # optional 132 | -------------------------------------------------------------------------------- /.github/workflows/windows.yaml: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | name: Tests (Windows) 18 | 19 | on: 20 | push: 21 | branches: 22 | - main 23 | - master 24 | paths-ignore: 25 | - "doc*/**" 26 | - "./*.ya?ml" 27 | - "**/*.md" 28 | - "**/*.rst" 29 | 30 | pull_request: 31 | paths-ignore: 32 | - "doc*/**" 33 | - "./*.ya?ml" 34 | - "**/*.md" 35 | - "**/*.rst" 36 | 37 | workflow_dispatch: 38 | 39 | jobs: 40 | main: 41 | strategy: 42 | fail-fast: false 43 | matrix: 44 | os: [windows-latest] 45 | python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] 46 | # only test oldest and newest version of torch 47 | torch-version: ["1.11.0", "2.5.1"] 48 | exclude: 49 | # Check latest versions here: https://download.pytorch.org/whl/torch/ 50 | # 51 | # PyTorch issues: 52 | # 3.11: https://github.com/pytorch/pytorch/issues/86566 53 | # 3.12: https://github.com/pytorch/pytorch/issues/110436 54 | # 3.13: https://github.com/pytorch/pytorch/issues/130249 55 | # 56 | # PyTorch<2.2.0 does only support Python<3.12 (all platforms) 57 | - python-version: "3.12" 58 | torch-version: "1.11.0" 59 | - python-version: "3.12" 60 | torch-version: "1.12.1" 61 | - python-version: "3.12" 62 | torch-version: "1.13.1" 63 | - python-version: "3.12" 64 | torch-version: "2.0.1" 65 | - python-version: "3.12" 66 | torch-version: "2.1.2" 67 | # PyTorch<2.0.0 does only support Python<3.11 (macOS and Windows) 68 | - python-version: "3.11" 69 | torch-version: "1.11.0" 70 | - python-version: "3.11" 71 | torch-version: "1.12.1" 72 | - python-version: "3.11" 73 | torch-version: "1.13.1" 74 | # PyTorch>=2.5.0 does not support Python<3.9 75 | - python-version: "3.8" 76 | torch-version: "2.5.1" 77 | 78 | permissions: 79 | contents: read 80 | 81 | runs-on: ${{ matrix.os }} 82 | 83 | defaults: 84 | run: 85 | shell: bash {0} 86 | 87 | steps: 88 | - name: Checkout code 89 | uses: actions/checkout@v3 90 | with: 91 | persist-credentials: false 92 | 93 | - name: Set up Python ${{ matrix.python-version }} 94 | uses: actions/setup-python@v5 95 | with: 96 | python-version: ${{ 
matrix.python-version }} 97 | 98 | - name: Install dependencies 99 | run: | 100 | python3 -m pip install --upgrade pip 101 | python3 -m pip install tox 102 | 103 | - name: Determine TOXENV 104 | run: echo "TOXENV=py$(echo ${{ matrix.python-version }} | tr -d '.')-torch$(echo ${{ matrix.torch-version }} | tr -d '.')" >> $GITHUB_ENV 105 | 106 | - name: Print TOXENV 107 | run: echo "TOXENV is set to '${TOXENV}'." 108 | env: 109 | TOXENV: ${{ env.TOXENV }} 110 | 111 | - name: Unittests with tox 112 | run: tox -e $TOXENV 113 | env: 114 | TOXENV: ${{ env.TOXENV }} 115 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | 18 | # Byte-compiled / optimized / DLL files 19 | __pycache__/ 20 | *.py[cod] 21 | *$py.class 22 | 23 | # C extensions 24 | *.so 25 | 26 | # Distribution / packaging 27 | .Python 28 | build/ 29 | develop-eggs/ 30 | dist/ 31 | downloads/ 32 | eggs/ 33 | .eggs/ 34 | lib/ 35 | lib64/ 36 | parts/ 37 | sdist/ 38 | var/ 39 | wheels/ 40 | share/python-wheels/ 41 | *.egg-info/ 42 | .installed.cfg 43 | *.egg 44 | MANIFEST 45 | 46 | # PyInstaller 47 | # Usually these files are written by a python script from a template 48 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 49 | *.manifest 50 | *.spec 51 | 52 | # Installer logs 53 | pip-log.txt 54 | pip-delete-this-directory.txt 55 | 56 | # Unit test / coverage reports 57 | htmlcov/ 58 | .tox/ 59 | .nox/ 60 | .coverage 61 | .coverage.* 62 | .cache 63 | nosetests.xml 64 | coverage.xml 65 | *.cover 66 | *.py,cover 67 | .hypothesis/ 68 | .pytest_cache/ 69 | cover/ 70 | 71 | # Translations 72 | *.mo 73 | *.pot 74 | 75 | # Django stuff: 76 | *.log 77 | local_settings.py 78 | db.sqlite3 79 | db.sqlite3-journal 80 | 81 | # Flask stuff: 82 | instance/ 83 | .webassets-cache 84 | 85 | # Scrapy stuff: 86 | .scrapy 87 | 88 | # Sphinx documentation 89 | docs/_build/ 90 | 91 | # PyBuilder 92 | .pybuilder/ 93 | target/ 94 | 95 | # Jupyter Notebook 96 | .ipynb_checkpoints 97 | 98 | # IPython 99 | profile_default/ 100 | ipython_config.py 101 | 102 | # pyenv 103 | # For a library or package, you might want to ignore these files since the code is 104 | # intended to run in multiple environments; otherwise, check them in: 105 | # .python-version 106 | 107 | # pipenv 108 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 109 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 110 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 111 | # install all needed dependencies. 
112 | #Pipfile.lock 113 | 114 | # poetry 115 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 116 | # This is especially recommended for binary packages to ensure reproducibility, and is more 117 | # commonly ignored for libraries. 118 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 119 | #poetry.lock 120 | 121 | # pdm 122 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 123 | #pdm.lock 124 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 125 | # in version control. 126 | # https://pdm.fming.dev/#use-with-ide 127 | .pdm.toml 128 | 129 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 130 | __pypackages__/ 131 | 132 | # Celery stuff 133 | celerybeat-schedule 134 | celerybeat.pid 135 | 136 | # SageMath parsed files 137 | *.sage.py 138 | 139 | # Environments 140 | .env 141 | .venv 142 | env/ 143 | venv/ 144 | ENV/ 145 | env.bak/ 146 | venv.bak/ 147 | 148 | # Spyder project settings 149 | .spyderproject 150 | .spyproject 151 | 152 | # Rope project settings 153 | .ropeproject 154 | 155 | # mkdocs documentation 156 | /site 157 | 158 | # mypy 159 | .mypy_cache/ 160 | .dmypy.json 161 | dmypy.json 162 | 163 | # Pyre type checker 164 | .pyre/ 165 | 166 | # pytype static type analyzer 167 | .pytype/ 168 | 169 | # Cython debug symbols 170 | cython_debug/ 171 | 172 | # PyCharm 173 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 174 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 175 | # and can be added to the global gitignore or merged into this file. For a more nuclear 176 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
177 | #.idea/ 178 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | repos: 18 | - repo: https://github.com/pre-commit/pre-commit-hooks 19 | rev: v5.0.0 20 | hooks: 21 | - id: trailing-whitespace 22 | - id: end-of-file-fixer 23 | - id: check-shebang-scripts-are-executable 24 | - id: check-toml 25 | - id: check-yaml 26 | - id: check-added-large-files 27 | - id: debug-statements 28 | language_version: python3 29 | 30 | - repo: https://github.com/asottile/setup-cfg-fmt 31 | rev: v2.8.0 32 | hooks: 33 | - id: setup-cfg-fmt 34 | args: 35 | [ 36 | --include-version-classifiers, 37 | --min-py-version, 38 | "3.8", 39 | --max-py-version, 40 | "3.12", 41 | ] 42 | 43 | - repo: https://github.com/asottile/pyupgrade 44 | rev: v3.19.1 45 | hooks: 46 | - id: pyupgrade 47 | args: [--py37-plus, --keep-runtime-typing] 48 | 49 | - repo: https://github.com/pycqa/isort 50 | rev: 6.0.1 51 | hooks: 52 | - id: isort 53 | name: isort (python) 54 | args: ["--profile", "black", "--line-length", "80", "--filter-files"] 55 | 56 | - repo: https://github.com/psf/black 57 | rev: 25.1.0 58 | hooks: 59 | - id: black 60 | args: ["--line-length", "80"] 61 | 62 
| - repo: https://github.com/woodruffw/zizmor-pre-commit 63 | rev: v1.5.2 64 | hooks: 65 | - id: zizmor 66 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | 18 | # .readthedocs.yaml 19 | # Read the Docs configuration file 20 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 21 | 22 | # Required 23 | version: 2 24 | 25 | # Set the version of Python and other tools you might need 26 | build: 27 | os: ubuntu-22.04 28 | tools: 29 | python: "3.10" 30 | 31 | # Build documentation in the docs/ directory with Sphinx 32 | sphinx: 33 | configuration: docs/source/conf.py 34 | 35 | # We recommend specifying your dependencies to enable reproducible builds: 36 | # https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 37 | python: 38 | install: 39 | - requirements: docs/requirements.txt 40 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "[python]": { 3 | "editor.defaultFormatter": "ms-python.black-formatter", 4 | "editor.detectIndentation": false, 5 | "editor.formatOnSave": true, 6 | "editor.formatOnPaste": false, // not supported by black 7 | "editor.insertSpaces": true, 8 | "editor.tabSize": 4 9 | }, 10 | "pylint.args": ["--indent-string=' '"], 11 | "python.analysis.diagnosticSeverityOverrides": { 12 | "reportPrivateImportUsage": "information" 13 | }, 14 | "python.defaultInterpreterPath": "${env:CONDA_PREFIX}/envs/torch/bin/python", 15 | "python.testing.pytestArgs": [], 16 | "python.testing.unittestEnabled": false, 17 | "python.testing.pytestEnabled": true 18 | } 19 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to tad-dftd4 2 | 3 | First off, thank you for considering contributing to `tad-dftd4`. 4 | Please take a moment to review these guidelines to make the contribution process simple and effective for all involved. 
5 | 6 | Respecting these guidelines helps communicate that you respect the time of the developers who manage and develop this open source project. 7 | In return, they should return this respect by addressing your problem, evaluating changes, and helping you handle your pull requests. 8 | 9 | ## Reporting a Bug 10 | 11 | A bug is a _demonstrable problem_ caused by the code in this repository. 12 | Good bug reports are extremely valuable for us - thank you! 13 | 14 | Before opening a bug report: 15 | 16 | 1. Check if the issue has already been reported. 17 | 2. Check if it still is an issue or has already been fixed? 18 | Try to reproduce it with the latest version from the `main` branch. 19 | 3. Isolate the problem and create a reduced test case. 20 | 21 | A good bug report should not leave others needing to chase you up for more information. 22 | So please try to be as detailed as possible in your report, answer at least these questions: 23 | 24 | 1. Which version of `tad-dftd4` are you using? The current version is always 25 | subject to change, so be more specific. 26 | 2. What is your environment (your laptop, the cluster of the university)? 27 | 3. What steps will reproduce the issue? 28 | We have to reproduce the issue, so we need all the input files. 29 | 4. What would be the expected outcome? 30 | 5. What did you see instead? 31 | 32 | All these details will help people to fix any potential bugs. 33 | 34 | ## Suggesting a New Feature 35 | 36 | Feature requests are welcome. But take a moment to find out if your idea fits the scope and goals of the project. 37 | It is up to you to provide a strong argument to convince the project's developers of the benefits of this feature. 38 | Please provide as much detail and context as possible. 39 | 40 | ## Implementing a New Feature 41 | 42 | Contributions are welcome via Github pull requests. 43 | 44 | - Each pull request should implement _one_ feature or fix _one_ bug. 
45 | If you want to add or fix more than one thing, submit more than one 46 | pull request. 47 | - Do not commit changes to files that are irrelevant to your feature or 48 | bugfix (_e.g._ `.gitignore`). 49 | - Be willing to accept criticism and work on improving your code. 50 | - Make sure the code compiles and the tests run successfully on more than 51 | your local machine (_e.g._ on cluster of your university). 52 | 53 | ### For New Contributors 54 | 55 | If you never created a pull request before, welcome :tada:. 56 | You can learn how from [this great tutorial](https://app.egghead.io/courses/how-to-contribute-to-an-open-source-project-on-github) 57 | 58 | Don't know where to start? 59 | You can start by looking through the [open issues](https://github.com/dftd4/tad-dftd4/issues). 60 | Feel free to just comment on an issue you are interested in working on and ask for pointers on how to begin. 61 | 62 | ## Sign Your Work 63 | 64 | The sign-off is a simple line at the end of the explanation for a commit. 65 | All commits need to be signed. Your signature certifies that you wrote the patch or otherwise have the right to contribute the material. 66 | The rules are pretty simple, if you can certify the below (from [developercertificate.org](https://developercertificate.org/)): 67 | 68 | ``` 69 | Developer Certificate of Origin 70 | Version 1.1 71 | 72 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 73 | 1 Letterman Drive 74 | Suite D4700 75 | San Francisco, CA, 94129 76 | 77 | Everyone is permitted to copy and distribute verbatim copies of this 78 | license document, but changing it is not allowed. 
79 | 80 | Developer's Certificate of Origin 1.1 81 | 82 | By making a contribution to this project, I certify that: 83 | 84 | (a) The contribution was created in whole or in part by me and I 85 | have the right to submit it under the open source license 86 | indicated in the file; or 87 | 88 | (b) The contribution is based upon previous work that, to the best 89 | of my knowledge, is covered under an appropriate open source 90 | license and I have the right under that license to submit that 91 | work with modifications, whether created in whole or in part 92 | by me, under the same open source license (unless I am 93 | permitted to submit under a different license), as indicated 94 | in the file; or 95 | 96 | (c) The contribution was provided directly to me by some other 97 | person who certified (a), (b) or (c) and I have not modified 98 | it. 99 | 100 | (d) I understand and agree that this project and the contribution 101 | are public and that a record of the contribution (including all 102 | personal information I submit with it, including my sign-off) is 103 | maintained indefinitely and may be redistributed consistent with 104 | this project or the open source license(s) involved. 105 | ``` 106 | 107 | Then you just add a line to every git commit message: 108 | 109 | Signed-off-by: Jane Smith 110 | 111 | Use your real name (sorry, no pseudonyms or anonymous contributions.) 112 | 113 | If you set your `user.name` and `user.email` git configs, you can sign your 114 | commit automatically with `git commit -s`. 115 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 
6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SPHINXPROJ = tad_dftd4 9 | SOURCEDIR = source 10 | BUILDDIR = build 11 | 12 | # Put it first so that "make" without argument is like "make help". 13 | help: 14 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 15 | 16 | .PHONY: help Makefile 17 | 18 | # Catch-all target: route all unknown targets to Sphinx using the new 19 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 20 | %: Makefile 21 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 22 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SPHINXPROJ=tad_dftd4 11 | set SOURCEDIR=source 12 | set BUILDDIR=build 13 | 14 | %SPHINXBUILD% >NUL 2>NUL 15 | if errorlevel 9009 ( 16 | echo. 17 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 18 | echo.installed, then set the SPHINXBUILD environment variable to point 19 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 20 | echo.may add the Sphinx directory to PATH. 21 | echo. 
22 | echo.If you don't have Sphinx installed, grab it from 23 | echo.https://www.sphinx-doc.org/ 24 | exit /b 1 25 | ) 26 | 27 | if "%1" == "" goto help 28 | 29 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 30 | goto end 31 | 32 | :help 33 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 34 | 35 | :end 36 | popd 37 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | sphinx-book-theme 3 | sphinx-copybutton 4 | sphinx-design 5 | numpy 6 | torch 7 | tad-mctc 8 | tad-multicharge 9 | jinja2<3.1 10 | -------------------------------------------------------------------------------- /docs/source/_static/tad-dftd4-favicon.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 16 | 37 | 39 | 44 | 48 | 52 | 57 | 61 | 62 | 63 | 64 | 65 | -------------------------------------------------------------------------------- /docs/source/_static/tad-dftd4.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 16 | 39 | 41 | 46 | 50 | 54 | 58 | 62 | 63 | 67 | 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Config file for docs. 19 | """ 20 | import os.path as op 21 | import sys 22 | 23 | sys.path.insert(0, op.join(op.dirname(__file__), "../../", "src")) 24 | 25 | import tad_dftd4 26 | 27 | project = "Torch autodiff DFT-D4" 28 | author = "Marvin Friede" 29 | copyright = f"2022 {author}" 30 | 31 | extensions = [ 32 | "sphinx_design", 33 | "sphinx_copybutton", 34 | "sphinx_design", 35 | "sphinx.ext.autosummary", 36 | "sphinx.ext.autodoc", 37 | "sphinx.ext.intersphinx", 38 | "sphinx.ext.viewcode", 39 | "sphinx.ext.napoleon", 40 | ] 41 | 42 | html_theme = "sphinx_book_theme" 43 | html_title = project 44 | html_logo = "_static/tad-dftd4.svg" 45 | html_favicon = "_static/tad-dftd4-favicon.svg" 46 | 47 | html_theme_options = { 48 | "repository_url": "https://github.com/dftd4/tad-dftd4", 49 | "repository_branch": "main", 50 | "use_repository_button": True, 51 | "use_edit_page_button": True, 52 | "use_download_button": False, 53 | "path_to_docs": "doc", 54 | "show_navbar_depth": 3, 55 | "logo_only": False, 56 | } 57 | 58 | html_sidebars = {} # type: ignore[var-annotated] 59 | 60 | html_css_files = ["css/custom.css"] 61 | html_static_path = ["_static"] 62 | templates_path = ["_templates"] 63 | 64 | autosummary_generate = True 65 | autosummary_imported_members = True 66 | 67 | autodoc_typehints = "description" 68 | autodoc_member_order = "groupwise" 69 | autoclass_content = "both" 70 | 71 | intersphinx_mapping = { 72 | "numpy": ("https://numpy.org/doc/stable/", None), 73 | "pandas": ("https://pandas.pydata.org/docs/", 
None), 74 | "python": ("https://docs.python.org/3", None), 75 | "tad_dftd3": ("https://tad-dftd3.readthedocs.io/en/latest/", None), 76 | "tad_mctc": ("https://tad-mctc.readthedocs.io/en/latest/", None), 77 | "tad_multicharge": ( 78 | "https://tad-multicharge.readthedocs.io/en/latest/", 79 | None, 80 | ), 81 | "torch": ("https://pytorch.org/docs/stable/", None), 82 | } 83 | 84 | # Configuration for sphinx-copybutton 85 | copybutton_prompt_text = ">>> |... " 86 | copybutton_prompt_is_regexp = True 87 | 88 | napoleon_google_docstring = False 89 | napoleon_use_param = False 90 | napoleon_use_ivar = True 91 | napoleon_use_rtype = True 92 | 93 | # The main toctree document. 94 | main_doc = "index" 95 | master_doc = "index" 96 | 97 | # The language for content autogenerated by Sphinx. Refer to documentation 98 | # for a list of supported languages. 99 | # 100 | # This is also used if you do content translation via gettext catalogs. 101 | # Usually you set "language" from the command line for these cases. 102 | language = "en" 103 | 104 | # List of patterns, relative to source directory, that match files and 105 | # directories to ignore when looking for source files. 106 | # This pattern also affects html_static_path and html_extra_path. 107 | exclude_patterns = [ 108 | # Sometimes sphinx reads its own outputs as inputs! 109 | "build/html", 110 | "_build/html", 111 | "build/jupyter_execute", 112 | "_build/jupyter_execute", 113 | "notebooks/README.md", 114 | "README.md", 115 | "notebooks/*.md", 116 | ] 117 | -------------------------------------------------------------------------------- /docs/source/disp.rst: -------------------------------------------------------------------------------- 1 | .. 
automodule:: tad_dftd4.disp 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/source/installation.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ------------ 3 | 4 | pip 5 | ~~~ 6 | 7 | .. image:: https://img.shields.io/pypi/v/tad-dftd4 8 | :target: https://pypi.org/project/tad-dftd4/ 9 | :alt: PyPI 10 | 11 | *tad-dftd4* can easily be installed with ``pip``. 12 | 13 | .. code:: 14 | 15 | pip install tad-dftd4 16 | 17 | conda 18 | ~~~~~ 19 | 20 | .. image:: https://img.shields.io/conda/vn/conda-forge/tad-dftd4.svg 21 | :target: https://anaconda.org/conda-forge/tad-dftd4 22 | :alt: Conda Version 23 | 24 | *tad-dftd4* is also available from ``conda``. 25 | 26 | .. code:: 27 | 28 | conda install tad-dftd4 29 | 30 | From source 31 | ~~~~~~~~~~~ 32 | 33 | This project is hosted on GitHub at `dftd4/tad-dftd4 `__. 34 | Obtain the source by cloning the repository with 35 | 36 | .. code:: 37 | 38 | git clone https://github.com/dftd4/tad-dftd4 39 | cd tad-dftd4 40 | 41 | We recommend using a `conda `__ environment to install the package. 42 | You can setup the environment manager using a `mambaforge `__ installer. 43 | Install the required dependencies from the conda-forge channel. 44 | 45 | .. code:: 46 | 47 | mamba env create -n torch -f environment.yaml 48 | mamba activate torch 49 | 50 | The following dependencies are required 51 | 52 | - `numpy `__ 53 | - `tad-mctc `__ 54 | - `tad-multicharge `__ 55 | - `torch `__ 56 | - `pytest `__ (tests only) 57 | 58 | 59 | Development 60 | ~~~~~~~~~~~ 61 | 62 | For development, additionally install the following tools in your environment. 63 | 64 | .. code:: 65 | 66 | mamba install black covdefaults mypy pre-commit pylint pytest pytest-cov pytest-xdist tox 67 | pip install pytest-random-order 68 | 69 | With pip, add the option ``-e`` and the development dependencies for installing in development mode. 70 | 71 | .. 
code:: 72 | 73 | pip install -e .[dev] 74 | 75 | The pre-commit hooks are initialized by running the following command in the root of the repository. 76 | 77 | .. code:: 78 | 79 | pre-commit install 80 | 81 | For testing all Python environments, simply run `tox`. 82 | 83 | .. code:: 84 | 85 | tox 86 | 87 | Note that this randomizes the order of tests but skips "large" tests. To modify this behavior, `tox` has to skip the optional _posargs_. 88 | 89 | .. code:: 90 | 91 | tox -- test 92 | -------------------------------------------------------------------------------- /docs/source/modules/cutoff.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: tad_dftd4.cutoff 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/source/modules/damping/atm.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: tad_dftd4.damping.atm 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/source/modules/damping/index.rst: -------------------------------------------------------------------------------- 1 | .. _damping: 2 | 3 | .. automodule:: tad_dftd4.damping 4 | 5 | .. toctree:: 6 | 7 | rational 8 | atm 9 | parameters 10 | -------------------------------------------------------------------------------- /docs/source/modules/damping/parameters.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: tad_dftd4.damping.parameters 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/source/modules/damping/rational.rst: -------------------------------------------------------------------------------- 1 | .. 
automodule:: tad_dftd4.damping.rational 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/source/modules/data/hardness.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: tad_dftd4.data.hardness 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/source/modules/data/index.rst: -------------------------------------------------------------------------------- 1 | .. _data: 2 | 3 | .. automodule:: tad_dftd4.data 4 | 5 | .. toctree:: 6 | 7 | hardness 8 | r4r2 9 | radii 10 | wfpair 11 | zeff 12 | -------------------------------------------------------------------------------- /docs/source/modules/data/r4r2.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: tad_dftd4.data.r4r2 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/source/modules/data/radii.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: tad_dftd4.data.radii 2 | -------------------------------------------------------------------------------- /docs/source/modules/data/wfpair.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: tad_dftd4.data.wfpair 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/source/modules/data/zeff.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: tad_dftd4.data.zeff 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/source/modules/defaults.rst: -------------------------------------------------------------------------------- 1 | .. 
automodule:: tad_dftd4.defaults 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/source/modules/index.rst: -------------------------------------------------------------------------------- 1 | .. _module: 2 | 3 | Module reference 4 | ================ 5 | 6 | The following modules are contained with `tad-dftd4`. 7 | 8 | .. toctree:: 9 | 10 | cutoff 11 | damping/index 12 | data/index 13 | defaults 14 | model/index 15 | typing/index 16 | -------------------------------------------------------------------------------- /docs/source/modules/model/base.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: tad_dftd4.model.base 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/source/modules/model/d4.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: tad_dftd4.model.d4 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/source/modules/model/d4s.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: tad_dftd4.model.d4s 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/source/modules/model/index.rst: -------------------------------------------------------------------------------- 1 | .. _model: 2 | 3 | .. automodule:: tad_dftd4.model 4 | 5 | .. toctree:: 6 | 7 | base 8 | d4 9 | d4s 10 | -------------------------------------------------------------------------------- /docs/source/modules/typing/builtin.rst: -------------------------------------------------------------------------------- 1 | .. 
automodule:: tad_dftd4.typing.builtin 2 | :members: 3 | -------------------------------------------------------------------------------- /docs/source/modules/typing/index.rst: -------------------------------------------------------------------------------- 1 | .. _typing: 2 | 3 | .. automodule:: tad_dftd4.typing 4 | 5 | .. toctree:: 6 | 7 | builtin 8 | pytorch 9 | -------------------------------------------------------------------------------- /docs/source/modules/typing/pytorch.rst: -------------------------------------------------------------------------------- 1 | .. automodule:: tad_dftd4.typing.pytorch 2 | :members: 3 | -------------------------------------------------------------------------------- /environment.yaml: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | name: torch 18 | channels: 19 | - defaults 20 | - conda-forge 21 | - pytorch 22 | dependencies: 23 | - numpy<2 24 | - python>=3.8,<3.13 25 | - pytorch>=1.11.0,<3 26 | - tad-mctc 27 | - tad-multicharge 28 | - tomli 29 | -------------------------------------------------------------------------------- /examples/batch.py: -------------------------------------------------------------------------------- 1 | # SPDX-Identifier: CC0-1.0 2 | import tad_mctc as mctc 3 | import torch 4 | 5 | import tad_dftd4 as d4 6 | 7 | # S22 system 4: formamide dimer 8 | numbers = mctc.batch.pack( 9 | ( 10 | mctc.convert.symbol_to_number("C C N N H H H H H H O O".split()), 11 | mctc.convert.symbol_to_number("C O N H H H".split()), 12 | ) 13 | ) 14 | 15 | # coordinates in Bohr 16 | positions = mctc.batch.pack( 17 | ( 18 | torch.tensor( 19 | [ 20 | [-3.81469488143921, +0.09993441402912, 0.00000000000000], 21 | [+3.81469488143921, -0.09993441402912, 0.00000000000000], 22 | [-2.66030049324036, -2.15898251533508, 0.00000000000000], 23 | [+2.66030049324036, +2.15898251533508, 0.00000000000000], 24 | [-0.73178529739380, -2.28237795829773, 0.00000000000000], 25 | [-5.89039325714111, -0.02589114569128, 0.00000000000000], 26 | [-3.71254944801331, -3.73605775833130, 0.00000000000000], 27 | [+3.71254944801331, +3.73605775833130, 0.00000000000000], 28 | [+0.73178529739380, +2.28237795829773, 0.00000000000000], 29 | [+5.89039325714111, +0.02589114569128, 0.00000000000000], 30 | [-2.74426102638245, +2.16115570068359, 0.00000000000000], 31 | [+2.74426102638245, -2.16115570068359, 0.00000000000000], 32 | ] 33 | ), 34 | torch.tensor( 35 | [ 36 | [-0.55569743203406, +1.09030425468557, 0.00000000000000], 37 | [+0.51473634678469, +3.15152550263611, 0.00000000000000], 38 | [+0.59869690244446, -1.16861263789477, 0.00000000000000], 39 | [-0.45355203669134, -2.74568780438064, 0.00000000000000], 40 | [+2.52721209544999, -1.29200800956867, 0.00000000000000], 41 | [-2.63139587595376, +0.96447869452240, 
0.00000000000000], 42 | ] 43 | ), 44 | ) 45 | ) 46 | 47 | # total charge of both system 48 | charge = torch.tensor([0.0, 0.0]) 49 | 50 | # TPSSh-D4-ATM parameters 51 | param = { 52 | "s6": positions.new_tensor(1.0), 53 | "s8": positions.new_tensor(1.85897750), 54 | "s9": positions.new_tensor(1.0), 55 | "a1": positions.new_tensor(0.44286966), 56 | "a2": positions.new_tensor(4.60230534), 57 | } 58 | 59 | # calculate dispersion energy in Hartree 60 | energy = torch.sum(d4.dftd4(numbers, positions, charge, param), -1) 61 | torch.set_printoptions(precision=10) 62 | print(energy) 63 | # tensor([-0.0088341432, -0.0027013607]) 64 | print(energy[0] - 2 * energy[1]) 65 | # tensor(-0.0034314217) 66 | -------------------------------------------------------------------------------- /examples/d4s.py: -------------------------------------------------------------------------------- 1 | # SPDX-Identifier: CC0-1.0 2 | import tad_mctc as mctc 3 | import torch 4 | 5 | import tad_dftd4 as d4 6 | 7 | numbers = mctc.convert.symbol_to_number( 8 | symbols="C C C C N C S H H H H H".split() 9 | ) 10 | 11 | # coordinates in Bohr 12 | positions = torch.tensor( 13 | [ 14 | [-2.56745685564671, -0.02509985979910, 0.00000000000000], 15 | [-1.39177582455797, +2.27696188880014, 0.00000000000000], 16 | [+1.27784995624894, +2.45107479759386, 0.00000000000000], 17 | [+2.62801937615793, +0.25927727028120, 0.00000000000000], 18 | [+1.41097033661123, -1.99890996077412, 0.00000000000000], 19 | [-1.17186102298849, -2.34220576284180, 0.00000000000000], 20 | [-2.39505990368378, -5.22635838332362, 0.00000000000000], 21 | [+2.41961980455457, -3.62158019253045, 0.00000000000000], 22 | [-2.51744374846065, +3.98181713686746, 0.00000000000000], 23 | [+2.24269048384775, +4.24389473203647, 0.00000000000000], 24 | [+4.66488984573956, +0.17907568006409, 0.00000000000000], 25 | [-4.60044244782237, -0.17794734637413, 0.00000000000000], 26 | ] 27 | ) 28 | 29 | # total charge of the system 30 | charge = torch.tensor(0.0) 
31 | 32 | # Create the D4S model 33 | model = d4.model.D4SModel(numbers) 34 | 35 | param = d4.get_params("tpssh") 36 | energy = d4.dftd4(numbers, positions, charge, param, model=model) 37 | torch.set_printoptions(precision=10) 38 | print(energy) 39 | # tensor([-0.0020843975, -0.0019013016, -0.0018165035, -0.0018363572, 40 | # -0.0021877293, -0.0019495023, -0.0022923108, -0.0004326892, 41 | # -0.0004439871, -0.0004362087, -0.0004454589, -0.0005344027]) 42 | -------------------------------------------------------------------------------- /examples/single.py: -------------------------------------------------------------------------------- 1 | # SPDX-Identifier: CC0-1.0 2 | import tad_mctc as mctc 3 | import torch 4 | 5 | import tad_dftd4 as d4 6 | 7 | numbers = mctc.convert.symbol_to_number( 8 | symbols="C C C C N C S H H H H H".split() 9 | ) 10 | 11 | # coordinates in Bohr 12 | positions = torch.tensor( 13 | [ 14 | [-2.56745685564671, -0.02509985979910, 0.00000000000000], 15 | [-1.39177582455797, +2.27696188880014, 0.00000000000000], 16 | [+1.27784995624894, +2.45107479759386, 0.00000000000000], 17 | [+2.62801937615793, +0.25927727028120, 0.00000000000000], 18 | [+1.41097033661123, -1.99890996077412, 0.00000000000000], 19 | [-1.17186102298849, -2.34220576284180, 0.00000000000000], 20 | [-2.39505990368378, -5.22635838332362, 0.00000000000000], 21 | [+2.41961980455457, -3.62158019253045, 0.00000000000000], 22 | [-2.51744374846065, +3.98181713686746, 0.00000000000000], 23 | [+2.24269048384775, +4.24389473203647, 0.00000000000000], 24 | [+4.66488984573956, +0.17907568006409, 0.00000000000000], 25 | [-4.60044244782237, -0.17794734637413, 0.00000000000000], 26 | ] 27 | ) 28 | 29 | # total charge of the system 30 | charge = torch.tensor(0.0) 31 | 32 | # TPSSh-D4-ATM parameters 33 | param = { 34 | "s6": positions.new_tensor(1.0), 35 | "s8": positions.new_tensor(1.85897750), 36 | "s9": positions.new_tensor(1.0), 37 | "a1": positions.new_tensor(0.44286966), 38 | "a2": 
positions.new_tensor(4.60230534), 39 | } 40 | 41 | # parameters can also be obtained using the functional name: 42 | # param = d4.get_params("tpssh") 43 | 44 | energy = d4.dftd4(numbers, positions, charge, param) 45 | torch.set_printoptions(precision=10) 46 | print(energy) 47 | # tensor([-0.0020841344, -0.0018971195, -0.0018107513, -0.0018305695, 48 | # -0.0021737693, -0.0019484236, -0.0022788253, -0.0004080658, 49 | # -0.0004261866, -0.0004199839, -0.0004280768, -0.0005108935]) 50 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | [build-system] 18 | requires = ["setuptools"] 19 | build-backend = "setuptools.build_meta" 20 | 21 | 22 | [tool.pytest.ini_options] 23 | addopts = "--doctest-modules" 24 | testpaths = ["test"] 25 | pythonpath = ["src"] 26 | markers = [ 27 | "grad: Marks tests which perform 'gradcheck' evaluations, this can be slow.", 28 | "large: Marks tests for large molecules, this can be slow.", 29 | ] 30 | 31 | 32 | [tool.mypy] 33 | check_untyped_defs = true 34 | disallow_any_generics = true 35 | disallow_incomplete_defs = true 36 | disallow_untyped_defs = true 37 | warn_redundant_casts = true 38 | warn_unreachable = true 39 | warn_unused_ignores = true 40 | exclude = ''' 41 | (?x) 42 | ^test/conftest.py$ 43 | ''' 44 | 45 | 46 | [tool.coverage.run] 47 | plugins = ["covdefaults"] 48 | source = ["./src"] 49 | 50 | [tool.coverage.report] 51 | fail_under = 90 52 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = tad_dftd4 3 | version = attr: tad_dftd4.__version__.__version__ 4 | description = Torch autodiff DFT-D4 implementation 5 | long_description = file: README.md 6 | long_description_content_type = text/markdown 7 | author = "Marvin Friede" 8 | license = Apache-2.0 9 | license_files = LICENSE 10 | classifiers = 11 | Intended Audience :: Developers 12 | Intended Audience :: Science/Research 13 | Natural Language :: English 14 | Operating System :: MacOS 15 | Operating System :: Microsoft :: Windows 16 | Operating System :: POSIX 17 | Operating System :: POSIX :: Linux 18 | Programming Language :: Python :: 3 19 | Programming Language :: Python :: 3 :: Only 20 | Programming Language :: Python :: 3.8 21 | Programming Language :: Python :: 3.9 22 | Programming Language :: Python :: 3.10 23 | Programming Language :: Python :: 3.11 24 | Programming Language :: Python :: 3.12 25 | Programming Language :: Python :: Implementation :: 
CPython 26 | Topic :: Scientific/Engineering :: Chemistry 27 | Typing :: Typed 28 | 29 | [options] 30 | packages = find: 31 | install_requires = 32 | numpy<2 33 | tad-mctc 34 | tad-multicharge 35 | tomli 36 | torch>=1.11,<3 37 | typing-extensions 38 | python_requires = >=3.8, <3.13 39 | package_dir = 40 | =src 41 | 42 | [options.packages.find] 43 | where = src 44 | 45 | [options.extras_require] 46 | dev = 47 | black 48 | covdefaults 49 | mypy 50 | pre-commit 51 | pylint 52 | pytest 53 | pytest-cov 54 | pytest-random-order 55 | pytest-xdist 56 | tox 57 | tox = 58 | covdefaults 59 | pytest 60 | pytest-cov 61 | pytest-random-order 62 | pytest-xdist 63 | 64 | [options.package_data] 65 | tad_dftd4 = 66 | py.typed 67 | damping/parameters/parameters.toml 68 | **/*.npy 69 | **/*.npz 70 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | from setuptools import setup 18 | 19 | if __name__ == "__main__": 20 | setup() 21 | -------------------------------------------------------------------------------- /src/tad_dftd4/__init__.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 
2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Torch autodiff DFT-D4 19 | ===================== 20 | 21 | Implementation of the DFT-D4 dispersion model in PyTorch. 22 | This module allows to process a single structure or a batch of structures for 23 | the calculation of atom-resolved dispersion energies. 24 | 25 | .. note:: 26 | 27 | This project is still in early development and the API is subject to change. 28 | Contributions are welcome, please checkout our 29 | `contributing guidelines `_. 30 | 31 | Example 32 | ------- 33 | >>> import torch 34 | >>> import tad_dftd4 as d4 35 | >>> import tad_mctc as mctc 36 | >>> 37 | >>> # S22 system 4: formamide dimer 38 | >>> numbers = mctc.batch.pack(( 39 | ... mctc.convert.symbol_to_number("C C N N H H H H H H O O".split()), 40 | ... mctc.convert.symbol_to_number("C O N H H H".split()), 41 | ... )) 42 | >>> 43 | >>> # coordinates in Bohr 44 | >>> positions = mctc.batch.pack(( 45 | ... torch.tensor([ 46 | ... [-3.81469488143921, +0.09993441402912, 0.00000000000000], 47 | ... [+3.81469488143921, -0.09993441402912, 0.00000000000000], 48 | ... [-2.66030049324036, -2.15898251533508, 0.00000000000000], 49 | ... [+2.66030049324036, +2.15898251533508, 0.00000000000000], 50 | ... [-0.73178529739380, -2.28237795829773, 0.00000000000000], 51 | ... 
[-5.89039325714111, -0.02589114569128, 0.00000000000000], 52 | ... [-3.71254944801331, -3.73605775833130, 0.00000000000000], 53 | ... [+3.71254944801331, +3.73605775833130, 0.00000000000000], 54 | ... [+0.73178529739380, +2.28237795829773, 0.00000000000000], 55 | ... [+5.89039325714111, +0.02589114569128, 0.00000000000000], 56 | ... [-2.74426102638245, +2.16115570068359, 0.00000000000000], 57 | ... [+2.74426102638245, -2.16115570068359, 0.00000000000000], 58 | ... ]), 59 | ... torch.tensor([ 60 | ... [-0.55569743203406, +1.09030425468557, 0.00000000000000], 61 | ... [+0.51473634678469, +3.15152550263611, 0.00000000000000], 62 | ... [+0.59869690244446, -1.16861263789477, 0.00000000000000], 63 | ... [-0.45355203669134, -2.74568780438064, 0.00000000000000], 64 | ... [+2.52721209544999, -1.29200800956867, 0.00000000000000], 65 | ... [-2.63139587595376, +0.96447869452240, 0.00000000000000], 66 | ... ]), 67 | ... )) 68 | >>> 69 | >>> # total charge of both systems 70 | >>> charge = torch.tensor([0.0, 0.0]) 71 | >>> 72 | >>> # TPSSh-D4-ATM parameters 73 | >>> param = { 74 | ... "s6": torch.tensor(1.0), 75 | ... "s8": torch.tensor(1.85897750), 76 | ... "s9": torch.tensor(1.0), 77 | ... "a1": torch.tensor(0.44286966), 78 | ... "a2": torch.tensor(4.60230534), 79 | ... } 80 | >>> 81 | >>> # calculate dispersion energy in Hartree 82 | >>> energy = torch.sum(d4.dftd4(numbers, positions, charge, param), -1) 83 | >>> torch.set_printoptions(precision=10) 84 | >>> print(energy) 85 | tensor([-0.0088341432, -0.0027013607]) 86 | >>> print(energy[0] - 2*energy[1]) 87 | tensor(-0.0034314217) 88 | """ 89 | import torch 90 | 91 | from . 
import cutoff, damping, data, disp, model, ncoord, typing 92 | from .__version__ import __version__ 93 | from .damping import get_params 94 | from .disp import dftd4 95 | from .model import D4Model, D4SModel 96 | 97 | __all__ = [ 98 | "__version__", 99 | "cutoff", 100 | "damping", 101 | "data", 102 | "dftd4", 103 | "disp", 104 | "get_params", 105 | "model", 106 | "ncoord", 107 | "typing", 108 | "D4Model", 109 | "D4SModel", 110 | ] 111 | -------------------------------------------------------------------------------- /src/tad_dftd4/__version__.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Module containing the version string. 19 | """ 20 | __version__ = "0.6.1" 21 | -------------------------------------------------------------------------------- /src/tad_dftd4/cutoff.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
class Cutoff(TensorLike):
    """
    Collection of real-space cutoffs.

    Holds the two-body and three-body dispersion cutoffs as well as the
    coordination number cutoffs (plain and EEQ) as tensors, converting
    plain numbers to tensors on the requested device/dtype.
    """

    disp2: Tensor
    """
    Two-body interaction cutoff.

    :default: `60.0`
    """

    disp3: Tensor
    """
    Three-body interaction cutoff.

    :default: `40.0`
    """

    cn: Tensor
    """
    Coordination number cutoff.

    :default: `30.0`
    """

    cn_eeq: Tensor
    """
    Coordination number cutoff within EEQ.

    :default: `25.0`
    """

    __slots__ = ("disp2", "disp3", "cn", "cn_eeq")

    def __init__(
        self,
        disp2: int | float | Tensor = defaults.D4_DISP2_CUTOFF,
        disp3: int | float | Tensor = defaults.D4_DISP3_CUTOFF,
        cn: int | float | Tensor = defaults.D4_CN_CUTOFF,
        cn_eeq: int | float | Tensor = defaults.D4_CN_EEQ_CUTOFF,
        device: torch.device | None = None,
        dtype: torch.dtype | None = None,
    ) -> None:
        super().__init__(device, dtype)

        def to_tensor(value: int | float | Tensor) -> Tensor:
            # Wrap plain numbers in a tensor on the requested device/dtype;
            # anything else (i.e. an existing tensor) is passed through.
            if isinstance(value, (int, float)):
                return torch.tensor(value, device=device, dtype=dtype)
            return value

        self.disp2 = to_tensor(disp2)
        self.disp3 = to_tensor(disp3)
        self.cn = to_tensor(cn)
        self.cn_eeq = to_tensor(cn_eeq)
17 | """ 18 | Damping schemes 19 | =============== 20 | 21 | Available damping schemes for two- and three-body dispersion terms. 22 | """ 23 | from .atm import * 24 | from .parameters import * 25 | from .rational import * 26 | -------------------------------------------------------------------------------- /src/tad_dftd4/damping/atm.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | r""" 18 | Damping: Axilrod-Teller-Muto (ATM) dispersion term 19 | ================================================== 20 | 21 | This module provides the dispersion energy evaluation for the three-body 22 | Axilrod-Teller-Muto dispersion term. 23 | 24 | .. math:: 25 | 26 | E_\text{disp}^{(3), \text{ATM}} &= 27 | \sum_\text{ABC} E^{\text{ABC}} f_\text{damp}\left(\overline{R}_\text{ABC}\right) \\ 28 | E^{\text{ABC}} &= 29 | \dfrac{C^{\text{ABC}}_9 30 | \left(3 \cos\theta_\text{A} \cos\theta_\text{B} \cos\theta_\text{C} + 1 \right)} 31 | {\left(r_\text{AB} r_\text{BC} r_\text{AC} \right)^3} \\ 32 | f_\text{damp} &= 33 | \dfrac{1}{1+ 6 \left(\overline{R}_\text{ABC}\right)^{-16}} 34 | """ 35 | from __future__ import annotations 36 | 37 | import torch 38 | from tad_mctc import storch 39 | from tad_mctc.batch import real_pairs, real_triples 40 | 41 | from .. 
def get_atm_dispersion(
    numbers: Tensor,
    positions: Tensor,
    cutoff: Tensor,
    c6: Tensor,
    s9: Tensor = torch.tensor(defaults.S9),
    a1: Tensor = torch.tensor(defaults.A1),
    a2: Tensor = torch.tensor(defaults.A2),
    alp: Tensor = torch.tensor(defaults.ALP),
) -> Tensor:
    """
    Axilrod-Teller-Muto dispersion term.

    Parameters
    ----------
    numbers : Tensor
        Atomic numbers for all atoms in the system of shape ``(..., nat)``.
    positions : Tensor
        Cartesian coordinates of all atoms (shape: ``(..., nat, 3)``).
    cutoff : Tensor
        Real-space cutoff.
    c6 : Tensor
        Atomic C6 dispersion coefficients.
    s9 : Tensor, optional
        Scaling for dispersion coefficients. Defaults to ``1.0``.
    a1 : Tensor, optional
        Scaling for the C8 / C6 ratio in the critical radius within the
        Becke-Johnson damping function.
    a2 : Tensor, optional
        Offset parameter for the critical radius within the Becke-Johnson
        damping function.
    alp : Tensor, optional
        Exponent of zero damping function. Defaults to ``14.0``.

    Returns
    -------
    Tensor
        Atom-resolved ATM dispersion energy.
    """
    dd: DD = {"device": positions.device, "dtype": positions.dtype}

    s9 = s9.to(**dd)
    alp = alp.to(**dd)

    cutoff2 = cutoff * cutoff

    mask_pairs = real_pairs(numbers, mask_diagonal=True)
    mask_triples = real_triples(numbers, mask_diagonal=True, mask_self=True)

    # filler values for masks
    eps = torch.tensor(torch.finfo(positions.dtype).eps, **dd)
    zero = torch.tensor(0.0, **dd)
    one = torch.tensor(1.0, **dd)

    # C9_ABC = s9 * sqrt(|C6_AB * C6_AC * C6_BC|)
    c9 = s9 * storch.sqrt(
        torch.abs(c6.unsqueeze(-1) * c6.unsqueeze(-2) * c6.unsqueeze(-3)),
    )

    rad = data.R4R2.to(**dd)[numbers]
    radii = rad.unsqueeze(-1) * rad.unsqueeze(-2)
    temp = a1 * storch.sqrt(3.0 * radii) + a2

    r0ij = temp.unsqueeze(-1)
    r0ik = temp.unsqueeze(-2)
    r0jk = temp.unsqueeze(-3)
    r0 = r0ij * r0ik * r0jk

    # actually faster than other alternatives
    # very slow: (pos.unsqueeze(-2) - pos.unsqueeze(-3)).pow(2).sum(-1)
    distances = torch.pow(
        torch.where(
            mask_pairs,
            storch.cdist(positions, positions, p=2),
            eps,
        ),
        2.0,
    )

    r2ij = distances.unsqueeze(-1)
    r2ik = distances.unsqueeze(-2)
    r2jk = distances.unsqueeze(-3)
    r2 = r2ij * r2ik * r2jk
    r1 = torch.sqrt(r2)
    # add epsilon to avoid zero division later
    r3 = torch.where(mask_triples, r1 * r2, eps)
    r5 = torch.where(mask_triples, r2 * r3, eps)

    # dividing by tiny numbers leads to huge numbers, which result in NaN's
    # upon exponentiation in the subsequent step
    base = r0 / torch.where(mask_triples, r1, one)

    # to fix the previous mask, we mask again (not strictly necessary because
    # `ang` is also masked and we later multiply with `ang`)
    fdamp = torch.where(
        mask_triples,
        1.0 / (1.0 + 6.0 * base ** (alp / 3.0)),
        zero,
    )

    s = torch.where(
        mask_triples,
        (r2ij + r2jk - r2ik) * (r2ij - r2jk + r2ik) * (-r2ij + r2jk + r2ik),
        zero,
    )

    # BUGFIX: the cutoff mask previously tested `r2jk <= cutoff2` twice and
    # never checked `r2ik`, so triples with a long i-k edge slipped through
    # the real-space cutoff. All three pair distances must be within cutoff.
    ang = torch.where(
        mask_triples
        * (r2ij <= cutoff2)
        * (r2ik <= cutoff2)
        * (r2jk <= cutoff2),
        0.375 * s / r5 + 1.0 / r3,
        zero,
    )

    energy = ang * fdamp * c9
    return torch.sum(energy, dim=(-2, -1)) / 6.0
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Damping Parameters 19 | ================== 20 | 21 | Read damping parameters from toml file. The TOML file is coped from the DFT-D4 22 | Fortran GitHub repository. 23 | (https://github.com/dftd4/dftd4/blob/main/assets/parameters.toml) 24 | """ 25 | 26 | from __future__ import annotations 27 | 28 | from pathlib import Path 29 | from typing import Literal 30 | 31 | import torch 32 | 33 | from ...typing import Tensor, overload 34 | 35 | __all__ = ["get_params", "get_params_default"] 36 | 37 | 38 | @overload 39 | def get_params( 40 | func: str, 41 | variant: Literal["bj-eeq-atm"] = "bj-eeq-atm", 42 | with_reference: Literal[False] = False, 43 | device: torch.device | None = None, 44 | dtype: torch.dtype | None = None, 45 | ) -> dict[str, Tensor]: ... 46 | 47 | 48 | @overload 49 | def get_params( 50 | func: str, 51 | variant: Literal["bj-eeq-atm"] = "bj-eeq-atm", 52 | with_reference: Literal[True] = True, 53 | device: torch.device | None = None, 54 | dtype: torch.dtype | None = None, 55 | ) -> dict[str, Tensor | str]: ... 56 | 57 | 58 | def get_params( 59 | func: str, 60 | variant: Literal["bj-eeq-atm"] = "bj-eeq-atm", 61 | with_reference: bool = False, 62 | device: torch.device | None = None, 63 | dtype: torch.dtype | None = None, 64 | ) -> dict[str, Tensor] | dict[str, Tensor | str]: 65 | """ 66 | Obtain damping parameters for a given functional. 67 | 68 | Parameters 69 | ---------- 70 | func : str 71 | Functional name, case-insensitive. 72 | variant : Literal["bj-eeq-atm"] 73 | D4 variant. 
Only ``'bj-eeq-atm'`` (default D4 model) is supported. 74 | device : torch.device | None, optional 75 | Pytorch device for calculations. Defaults to ``None``. 76 | dtype : torch.dtype | None, optional 77 | Pytorch dtype for calculations. Defaults to ``None``. 78 | 79 | Returns 80 | ------- 81 | dict[str, Tensor] 82 | Damping parameters for the given functional. 83 | 84 | Raises 85 | ------ 86 | KeyError 87 | If functional or D4 variant is not found in damping parameters file. 88 | """ 89 | # pylint: disable=import-outside-toplevel 90 | import tomli as toml 91 | 92 | table: dict[str, dict[str, dict[str, dict[str, dict[str, float | str]]]]] 93 | with open(Path(__file__).parent / "parameters.toml", mode="rb") as f: 94 | table = toml.load(f) 95 | 96 | func_section = table["parameter"] 97 | if func not in func_section: 98 | raise KeyError( 99 | f"Functional '{func.casefold()}' not found in damping parameters." 100 | ) 101 | 102 | variant_section = func_section[func]["d4"] 103 | if variant not in variant_section: 104 | raise KeyError( 105 | f"Variant '{variant}' not found in damping parameters for '{func}'." 106 | ) 107 | 108 | par_section = variant_section[variant] 109 | 110 | d: dict[str, Tensor | str] = {} 111 | for k, v in par_section.items(): 112 | if k == "doi": 113 | if with_reference is False: 114 | continue 115 | d[k] = str(v) 116 | else: 117 | d[k] = torch.tensor(v, device=device, dtype=dtype) 118 | 119 | return d 120 | 121 | 122 | def get_params_default( 123 | variant: Literal[ 124 | "bj-eeq-atm", "d4.bj-eeq-two", "d4.bj-eeq-mbd" 125 | ] = "bj-eeq-atm", 126 | device: torch.device | None = None, 127 | dtype: torch.dtype | None = None, 128 | ) -> dict[str, Tensor]: 129 | """ 130 | Obtain default damping parameters and method info. 131 | 132 | Parameters 133 | ---------- 134 | device : torch.device | None, optional 135 | Pytorch device for calculations. Defaults to `None`. 136 | dtype : torch.dtype | None, optional 137 | Pytorch dtype for calculations. 
Defaults to `None`. 138 | 139 | Returns 140 | ------- 141 | dict[str, Tensor] 142 | Damping parameters for the given functional. 143 | """ 144 | # pylint: disable=import-outside-toplevel 145 | import tomli as toml 146 | 147 | table: dict[str, dict[str, dict[str, dict[str, dict[str, float | str]]]]] 148 | with open(Path(__file__).parent / "parameters.toml", mode="rb") as f: 149 | table = toml.load(f) 150 | 151 | d = {} 152 | for k, v in table["default"]["parameter"]["d4"][variant].items(): 153 | if isinstance(v, float): 154 | d[k] = torch.tensor(v, device=device, dtype=dtype) 155 | 156 | return d 157 | -------------------------------------------------------------------------------- /src/tad_dftd4/damping/rational.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | r""" 18 | Damping: Rational (Becke-Johnson) damping function 19 | ================================================== 20 | 21 | This module defines the rational damping function, also known as Becke-Johnson 22 | damping. 23 | 24 | .. 
def rational_damping(
    order: int,
    distances: Tensor,
    qq: Tensor,
    param: dict[str, Tensor],
) -> Tensor:
    """
    Rational (Becke-Johnson) damped dispersion interaction between pairs.

    Parameters
    ----------
    order : int
        Order of the dispersion interaction, e.g.
        6 for dipole-dipole, 8 for dipole-quadrupole and so on.
    distances : Tensor
        Pairwise distances between atoms in the system.
    qq : Tensor
        Quotient of C8 and C6 dispersion coefficients.
    param : dict[str, Tensor]
        DFT-D4 damping parameters.

    Returns
    -------
    Tensor
        Values of the damping function.
    """
    dd: DD = {"device": distances.device, "dtype": distances.dtype}

    # fall back to the library defaults if the parameters are not supplied
    a1 = param.get("a1", torch.tensor(defaults.A1, **dd))
    a2 = param.get("a2", torch.tensor(defaults.A2, **dd))

    # critical radius from the C8/C6 ratio, shifted and scaled by a1/a2
    radius = a1 * torch.sqrt(qq) + a2

    return 1.0 / (distances**order + radius**order)
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Atomic data 19 | =========== 20 | 21 | Atomic data required within `tad_dftd4`: 22 | - chemical hardness 23 | - Pauling electronegativities 24 | - expectation values 25 | - covalent radii 26 | - effective nuclear charge 27 | 28 | Some atomic data is imported from the `tad_mctc` library or indirectly used within the `tad_mctc` library. 29 | """ 30 | from .hardness import * 31 | from .r4r2 import * 32 | from .radii import * 33 | from .zeff import * 34 | -------------------------------------------------------------------------------- /src/tad_dftd4/data/hardness.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | """ 18 | Data: Chemical hardnesses 19 | ========================= 20 | 21 | Element-specific chemical hardnesses for the charge scaling function used 22 | to extrapolate the C6 coefficients in DFT-D4. 23 | """ 24 | import torch 25 | 26 | __all__ = ["GAM"] 27 | 28 | 29 | GAM = torch.tensor( 30 | [ 31 | 0.00000000, # None 32 | 0.47259288, # H 33 | 0.92203391, # He 34 | 0.17452888, # Li (2nd) 35 | 0.25700733, # Be 36 | 0.33949086, # B 37 | 0.42195412, # C 38 | 0.50438193, # N 39 | 0.58691863, # O 40 | 0.66931351, # F 41 | 0.75191607, # Ne 42 | 0.17964105, # Na (3rd) 43 | 0.22157276, # Mg 44 | 0.26348578, # Al 45 | 0.30539645, # Si 46 | 0.34734014, # P 47 | 0.38924725, # S 48 | 0.43115670, # Cl 49 | 0.47308269, # Ar 50 | 0.17105469, # K (4th) 51 | 0.20276244, # Ca 52 | 0.21007322, # Sc 53 | 0.21739647, # Ti 54 | 0.22471039, # V 55 | 0.23201501, # Cr 56 | 0.23933969, # Mn 57 | 0.24665638, # Fe 58 | 0.25398255, # Co 59 | 0.26128863, # Ni 60 | 0.26859476, # Cu 61 | 0.27592565, # Zn 62 | 0.30762999, # Ga 63 | 0.33931580, # Ge 64 | 0.37235985, # As 65 | 0.40273549, # Se 66 | 0.43445776, # Br 67 | 0.46611708, # Kr 68 | 0.15585079, # Rb (5th) 69 | 0.18649324, # Sr 70 | 0.19356210, # Y 71 | 0.20063311, # Zr 72 | 0.20770522, # Nb 73 | 0.21477254, # Mo 74 | 0.22184614, # Tc 75 | 0.22891872, # Ru 76 | 0.23598621, # Rh 77 | 0.24305612, # Pd 78 | 0.25013018, # Ag 79 | 0.25719937, # Cd 80 | 0.28784780, # In 81 | 0.31848673, # Sn 82 | 0.34912431, # Sb 83 | 0.37976593, # Te 84 | 0.41040808, # I 85 | 0.44105777, # Xe 86 | 0.05019332, # Cs (6th) 87 | 0.06762570, # Ba 88 | 0.08504445, # La 89 | 0.10247736, # Ce 90 | 0.11991105, # Pr 91 | 0.13732772, # Nd 92 | 0.15476297, # Pm 93 | 0.17218265, # Sm 94 | 0.18961288, # Eu 95 | 0.20704760, # Gd 96 | 0.22446752, # Tb 97 | 0.24189645, # Dy 98 | 0.25932503, # Ho 99 | 0.27676094, # Er 100 | 0.29418231, # Tm 101 | 0.31159587, # Yb 102 | 0.32902274, # Lu 103 | 0.34592298, # Hf 104 | 0.36388048, # Ta 105 | 0.38130586, # W 106 | 0.39877476, # 
Re 107 | 0.41614298, # Os 108 | 0.43364510, # Ir 109 | 0.45104014, # Pt 110 | 0.46848986, # Au 111 | 0.48584550, # Hg 112 | 0.12526730, # Tl 113 | 0.14268677, # Pb 114 | 0.16011615, # Bi 115 | 0.17755889, # Po 116 | 0.19497557, # At 117 | 0.21240778, # Rn 118 | 0.07263525, # Fr (7th) 119 | 0.09422158, # Ra 120 | 0.09920295, # Ac 121 | 0.10418621, # Th 122 | 0.14235633, # Pa 123 | 0.16394294, # U 124 | 0.18551941, # Np 125 | 0.22370139, # Pu 126 | 0.25110000, # Am 127 | 0.25030000, # Cm 128 | 0.28840000, # Bk 129 | 0.31000000, # Cf 130 | 0.33160000, # Es 131 | 0.35320000, # Fm 132 | 0.36820000, # Md 133 | 0.39630000, # No 134 | 0.40140000, # Lr 135 | 0.00000000, # Rf 136 | 0.00000000, # Db 137 | 0.00000000, # Sg 138 | 0.00000000, # Bh 139 | 0.00000000, # Hs 140 | 0.00000000, # Mt 141 | 0.00000000, # Ds 142 | 0.00000000, # Rg 143 | 0.00000000, # Cn 144 | 0.00000000, # Nh 145 | 0.00000000, # Fl 146 | 0.00000000, # Lv 147 | 0.00000000, # Mc 148 | 0.00000000, # Ts 149 | 0.00000000, # Og 150 | ] 151 | ) 152 | """Element-specific chemical hardnesses.""" 153 | -------------------------------------------------------------------------------- /src/tad_dftd4/data/r4r2.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | """ 18 | Data: Expectation values 19 | ======================== 20 | 21 | PBE0/def2-QZVP atomic values calculated by S. Grimme in Gaussian (2010). 22 | Rare gases recalculated by J. Mewes with PBE0/aug-cc-pVQZ in Dirac (2018). 23 | Also new super heavies Cn, Nh, Fl, Lv, Og and Am-Rg calculated at 24 | 4c-PBE/Dyall-AE4Z level (Dirac 2022). 25 | """ 26 | import torch 27 | 28 | __all__ = ["R4R2"] 29 | 30 | 31 | # fmt: off 32 | r4_over_r2 = torch.tensor([ 33 | 0.0000, # None 34 | 8.0589, 3.4698, # H,He 35 | 29.0974,14.8517,11.8799, 7.8715, 5.5588, 4.7566, 3.8025, 3.1036, # Li-Ne 36 | 26.1552,17.2304,17.7210,12.7442, 9.5361, 8.1652, 6.7463, 5.6004, # Na-Ar 37 | 29.2012,22.3934, # K,Ca 38 | 19.0598,16.8590,15.4023,12.5589,13.4788, # Sc- 39 | 12.2309,11.2809,10.5569,10.1428, 9.4907, # -Zn 40 | 13.4606,10.8544, 8.9386, 8.1350, 7.1251, 6.1971, # Ga-Kr 41 | 30.0162,24.4103, # Rb,Sr 42 | 20.3537,17.4780,13.5528,11.8451,11.0355, # Y- 43 | 10.1997, 9.5414, 9.0061, 8.6417, 8.9975, # -Cd 44 | 14.0834,11.8333,10.0179, 9.3844, 8.4110, 7.5152, # In-Xe 45 | 32.7622,27.5708, # Cs,Ba 46 | 23.1671,21.6003,20.9615,20.4562,20.1010,19.7475,19.4828, # La-Eu 47 | 15.6013,19.2362,17.4717,17.8321,17.4237,17.1954,17.1631, # Gd-Yb 48 | 14.5716,15.8758,13.8989,12.4834,11.4421, # Lu- 49 | 10.2671, 8.3549, 7.8496, 7.3278, 7.4820, # -Hg 50 | 13.5124,11.6554,10.0959, 9.7340, 8.8584, 8.0125, # Tl-Rn 51 | 29.8135,26.3157, # Fr,Ra 52 | 19.1885,15.8542,16.1305,15.6161,15.1226,16.1576,14.6510, # Ac-Am 53 | 14.7178,13.9108,13.5623,13.2326,12.9189,12.6133,12.3142, # Cm-No 54 | 14.8326,12.3771,10.6378, 9.3638, 8.2297, # Lr- 55 | 7.5667, 6.9456, 6.3946, 5.9159, 5.4929, # -Cn 56 | 6.7286, 6.5144,10.9169,10.3600, 9.4723, 8.6641, # Nh-Og 57 | ]) 58 | """Actually calculated r⁴ over r² expectation values.""" 59 | # fmt: on 60 | 61 | 62 | R4R2 = torch.sqrt( 63 | 0.5 * (r4_over_r2 * torch.sqrt(torch.arange(r4_over_r2.shape[0]))) 64 | ) 65 | """r⁴ over r² expectation values.""" 66 | 
-------------------------------------------------------------------------------- /src/tad_dftd4/data/radii.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Data: Radii 19 | =========== 20 | 21 | Covalent radii (imported from *tad-mctc*). 22 | """ 23 | from tad_mctc.data import COV_D3 24 | 25 | __all__ = ["COV_D3"] 26 | -------------------------------------------------------------------------------- /src/tad_dftd4/data/zeff.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | """ 18 | Data: Charges 19 | ============= 20 | 21 | Effective charges (imported from *tad-mctc*). 22 | """ 23 | from tad_mctc.data.zeff import ZEFF 24 | 25 | __all__ = ["ZEFF"] 26 | -------------------------------------------------------------------------------- /src/tad_dftd4/defaults.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Defaults 19 | ======== 20 | 21 | This module defines the default values for all parameters within `tad_dftd4`. 
22 | """ 23 | 24 | # DFT-D4 25 | 26 | D4_CN_CUTOFF = 30.0 27 | """Coordination number cutoff (30.0).""" 28 | 29 | D4_CN_EEQ_CUTOFF = 25.0 30 | """Coordination number cutoff within EEQ (25.0).""" 31 | 32 | D4_CN_EEQ_MAX = 8.0 33 | """Maximum coordination number (8.0).""" 34 | 35 | D4_DISP2_CUTOFF = 60.0 36 | """Two-body interaction cutoff (60.0).""" 37 | 38 | D4_DISP3_CUTOFF = 40.0 39 | """Three-body interaction cutoff (40.0).""" 40 | 41 | D4_KCN = 7.5 42 | """Steepness of counting function (7.5).""" 43 | 44 | D4_K4 = 4.10451 45 | """Parameter for electronegativity scaling.""" 46 | 47 | D4_K5 = 19.08857 48 | """Parameter for electronegativity scaling.""" 49 | 50 | D4_K6 = 2 * 11.28174**2 # 254.56 51 | """Parameter for electronegativity scaling.""" 52 | 53 | # DFT-D4 damping parameters 54 | 55 | A1 = 0.4 56 | """Scaling for the C8 / C6 ratio in the critical radius (0.4).""" 57 | 58 | A2 = 5.0 59 | """Offset parameter for the critical radius (5.0).""" 60 | 61 | S6 = 1.0 62 | """Default scaling of dipole-dipole term (1.0 to retain correct limit).""" 63 | 64 | S8 = 1.0 65 | """Default scaling of dipole-quadrupole term (1.0).""" 66 | 67 | S9 = 1.0 68 | """Default scaling of three-body term (1.0).""" 69 | 70 | S10 = 0.0 71 | """Default scaling of quadrupole-quadrupole term (0.0).""" 72 | 73 | RS9 = 4.0 / 3.0 74 | """Scaling for van-der-Waals radii in damping function (4.0/3.0).""" 75 | 76 | ALP = 16.0 77 | """Exponent of zero damping function (16.0).""" 78 | -------------------------------------------------------------------------------- /src/tad_dftd4/model/__init__.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Dispersion Models 19 | ================= 20 | 21 | Collection of dispersion models. Currently, the D4 and D4S models are available. 22 | """ 23 | 24 | from .d4 import * 25 | from .d4s import * 26 | -------------------------------------------------------------------------------- /src/tad_dftd4/model/utils.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Model: Utils 19 | ============ 20 | 21 | Utility for dispersion model construction. 
22 | """ 23 | from __future__ import annotations 24 | 25 | import torch 26 | from tad_mctc.math import einsum 27 | 28 | from ..typing import Tensor 29 | 30 | __all__ = ["trapzd", "trapzd_noref", "is_exceptional"] 31 | 32 | 33 | def trapzd(pol1: Tensor, pol2: Tensor | None = None) -> Tensor: 34 | """ 35 | Numerical Casimir--Polder integration. 36 | 37 | Parameters 38 | ---------- 39 | pol1 : Tensor 40 | Polarizabilities of shape ``(..., nat, nref, 23)``. 41 | pol2 : Tensor | None, optional 42 | Polarizabilities of shape ``(..., nat, nref, 23)``. Defaults to 43 | ``None``, in which case ``pol2`` is set to ``pol1``. 44 | 45 | Returns 46 | ------- 47 | Tensor 48 | C6 coefficients of shape ``(..., nat, nat, nref, nref)``. 49 | """ 50 | thopi = 3.0 / 3.141592653589793238462643383279502884197 51 | 52 | weights = torch.tensor( 53 | [ 54 | 2.4999500000000000e-002, 55 | 4.9999500000000000e-002, 56 | 7.5000000000000010e-002, 57 | 0.1000000000000000, 58 | 0.1000000000000000, 59 | 0.1000000000000000, 60 | 0.1000000000000000, 61 | 0.1000000000000000, 62 | 0.1000000000000000, 63 | 0.1000000000000000, 64 | 0.1000000000000000, 65 | 0.1500000000000000, 66 | 0.2000000000000000, 67 | 0.2000000000000000, 68 | 0.2000000000000000, 69 | 0.2000000000000000, 70 | 0.3500000000000000, 71 | 0.5000000000000000, 72 | 0.7500000000000000, 73 | 1.0000000000000000, 74 | 1.7500000000000000, 75 | 2.5000000000000000, 76 | 1.2500000000000000, 77 | ], 78 | device=pol1.device, 79 | dtype=pol1.dtype, 80 | ) 81 | 82 | # NOTE: In the old version, a memory inefficient intermediate tensor was 83 | # created. The new version uses `einsum` to avoid this. 
84 | # 85 | # (..., 1, nat, 1, nref, 23) * (..., nat, 1, nref, 1, 23) = 86 | # (..., nat, nat, nref, nref, 23) -> (..., nat, nat, nref, nref) 87 | # a = alpha.unsqueeze(-4).unsqueeze(-3) * alpha.unsqueeze(-3).unsqueeze(-2) 88 | # 89 | # rc6 = thopi * torch.sum(weights * a, dim=-1) 90 | 91 | return thopi * einsum( 92 | "w,...iaw,...jbw->...ijab", 93 | *(weights, pol1, pol1 if pol2 is None else pol2), 94 | ) 95 | 96 | 97 | def trapzd_noref(pol1: Tensor, pol2: Tensor | None = None) -> Tensor: 98 | """ 99 | Numerical Casimir--Polder integration. 100 | 101 | This version takes polarizabilities of shape ``(..., nat, 23)``, i.e., 102 | the reference dimension has already been summed over. 103 | 104 | Parameters 105 | ---------- 106 | pol1 : Tensor 107 | Polarizabilities of shape ``(..., nat, 23)``. 108 | pol2 : Tensor | None, optional 109 | Polarizabilities of shape ``(..., nat, 23)``. Defaults to 110 | ``None``, in which case ``pol2`` is set to ``pol1``. 111 | 112 | Returns 113 | ------- 114 | Tensor 115 | C6 coefficients of shape ``(..., nat, nat)``. 
116 | """ 117 | thopi = 3.0 / 3.141592653589793238462643383279502884197 118 | 119 | weights = torch.tensor( 120 | [ 121 | 2.4999500000000000e-002, 122 | 4.9999500000000000e-002, 123 | 7.5000000000000010e-002, 124 | 0.1000000000000000, 125 | 0.1000000000000000, 126 | 0.1000000000000000, 127 | 0.1000000000000000, 128 | 0.1000000000000000, 129 | 0.1000000000000000, 130 | 0.1000000000000000, 131 | 0.1000000000000000, 132 | 0.1500000000000000, 133 | 0.2000000000000000, 134 | 0.2000000000000000, 135 | 0.2000000000000000, 136 | 0.2000000000000000, 137 | 0.3500000000000000, 138 | 0.5000000000000000, 139 | 0.7500000000000000, 140 | 1.0000000000000000, 141 | 1.7500000000000000, 142 | 2.5000000000000000, 143 | 1.2500000000000000, 144 | ], 145 | device=pol1.device, 146 | dtype=pol1.dtype, 147 | ) 148 | 149 | return thopi * einsum( 150 | "w,...iw,...jw->...ij", 151 | *(weights, pol1, pol1 if pol2 is None else pol2), 152 | ) 153 | 154 | 155 | def is_exceptional(x: Tensor, dtype: torch.dtype) -> Tensor: 156 | """ 157 | Check if a tensor is exceptional (``NaN`` or too large). 158 | 159 | Parameters 160 | ---------- 161 | x : Tensor 162 | Tensor to check. 163 | dtype : torch.dtype 164 | Data type of the tensor. 165 | 166 | Returns 167 | ------- 168 | Tensor 169 | Boolean tensor indicating exceptional values. 170 | """ 171 | return torch.isnan(x) | (x > torch.finfo(dtype).max) 172 | -------------------------------------------------------------------------------- /src/tad_dftd4/ncoord/__init__.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Coordination Number 19 | =================== 20 | 21 | Functions for calculating the D4 coordination numbers. 22 | Only exported for convenience. 23 | """ 24 | 25 | from tad_mctc.ncoord.count import erf_count 26 | from tad_mctc.ncoord.d4 import cn_d4 27 | 28 | __all__ = ["erf_count", "cn_d4"] 29 | -------------------------------------------------------------------------------- /src/tad_dftd4/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dftd4/tad-dftd4/24a05e1c8b961f21c1c052d2d9d6e7dc7ffef9bf/src/tad_dftd4/py.typed -------------------------------------------------------------------------------- /src/tad_dftd4/reference/__init__.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | """ 18 | Reference Parameters 19 | ==================== 20 | 21 | Parameters of reference systems. 22 | """ 23 | 24 | from .params import * 25 | -------------------------------------------------------------------------------- /src/tad_dftd4/typing/__init__.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Type annotations 19 | ================ 20 | 21 | All type annotations for this project. 22 | """ 23 | from .builtin import * 24 | from .pytorch import * 25 | -------------------------------------------------------------------------------- /src/tad_dftd4/typing/builtin.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Type annotations: Built-ins 19 | =========================== 20 | 21 | Built-in type annotations are imported from the *tad-mctc* library, which 22 | handles some version checking. 23 | """ 24 | from tad_mctc.typing import ( 25 | Any, 26 | Callable, 27 | Literal, 28 | NoReturn, 29 | TypedDict, 30 | overload, 31 | ) 32 | 33 | __all__ = ["Any", "Callable", "Literal", "NoReturn", "TypedDict", "overload"] 34 | -------------------------------------------------------------------------------- /src/tad_dftd4/typing/pytorch.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Type annotations: PyTorch 19 | ========================= 20 | 21 | PyTorch-related type annotations for this project. 
22 | """ 23 | from tad_mctc.typing import ( 24 | DD, 25 | CountingFunction, 26 | DampingFunction, 27 | Molecule, 28 | Tensor, 29 | TensorLike, 30 | TensorOrTensors, 31 | get_default_device, 32 | get_default_dtype, 33 | ) 34 | 35 | __all__ = [ 36 | "DD", 37 | "CountingFunction", 38 | "DampingFunction", 39 | "Molecule", 40 | "Tensor", 41 | "TensorLike", 42 | "TensorOrTensors", 43 | "get_default_device", 44 | "get_default_dtype", 45 | ] 46 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | -------------------------------------------------------------------------------- /test/conftest.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Setup for pytest. 19 | """ 20 | from __future__ import annotations 21 | 22 | import numpy as np 23 | import pytest 24 | import torch 25 | 26 | # avoid randomness and non-deterministic algorithms 27 | np.random.seed(0) 28 | torch.manual_seed(0) 29 | torch.use_deterministic_algorithms(True) 30 | 31 | torch.set_printoptions(precision=10) 32 | 33 | FAST_MODE: bool = True 34 | """Flag for fast gradient tests.""" 35 | 36 | DEVICE: torch.device | None = None 37 | """Name of Device.""" 38 | 39 | 40 | def pytest_addoption(parser: pytest.Parser) -> None: 41 | """Set up additional command line options.""" 42 | 43 | parser.addoption( 44 | "--cuda", 45 | action="store_true", 46 | help="Use GPU as default device.", 47 | ) 48 | 49 | parser.addoption( 50 | "--detect-anomaly", 51 | "--da", 52 | action="store_true", 53 | help="Enable PyTorch's debug mode for gradient tests.", 54 | ) 55 | 56 | parser.addoption( 57 | "--jit", 58 | action="store_true", 59 | help="Enable JIT during tests (default = False).", 60 | ) 61 | 62 | parser.addoption( 63 | "--fast", 64 | action="store_true", 65 | help="Use `fast_mode` for gradient checks (default = True).", 66 | ) 67 | 68 | parser.addoption( 69 | "--slow", 70 | action="store_true", 71 | help="Do *not* use `fast_mode` for gradient checks (default = False).", 72 | ) 73 | 74 | parser.addoption( 75 | "--tpo-linewidth", 76 | action="store", 77 | default=400, 78 | type=int, 79 | help=( 80 | "The number of characters per line for the purpose of inserting " 81 | "line breaks (default = 80). 
Thresholded matrices will ignore " 82 | "this parameter." 83 | ), 84 | ) 85 | 86 | parser.addoption( 87 | "--tpo-precision", 88 | action="store", 89 | default=6, 90 | type=int, 91 | help=( 92 | "Number of digits of precision for floating point output " 93 | "(default = 4)." 94 | ), 95 | ) 96 | 97 | parser.addoption( 98 | "--tpo-threshold", 99 | action="store", 100 | default=1000, 101 | type=int, 102 | help=( 103 | "Total number of array elements which trigger summarization " 104 | "rather than full `repr` (default = 1000)." 105 | ), 106 | ) 107 | 108 | 109 | def pytest_configure(config: pytest.Config) -> None: 110 | """Pytest configuration hook.""" 111 | global DEVICE, FAST_MODE 112 | 113 | if config.getoption("--detect-anomaly"): 114 | torch.autograd.anomaly_mode.set_detect_anomaly(True) 115 | 116 | if config.getoption("--jit"): 117 | torch.jit._state.enable() # type:ignore # pylint: disable=protected-access 118 | else: 119 | torch.jit._state.disable() # type:ignore # pylint: disable=protected-access 120 | 121 | if config.getoption("--fast"): 122 | FAST_MODE = True 123 | if config.getoption("--slow"): 124 | FAST_MODE = False 125 | 126 | if config.getoption("--cuda"): 127 | if not torch.cuda.is_available(): 128 | raise RuntimeError("No cuda devices available.") 129 | 130 | if FAST_MODE is True: 131 | FAST_MODE = False 132 | 133 | from warnings import warn 134 | 135 | warn( 136 | "Fast mode for gradient checks not compatible with GPU " 137 | "execution. Switching to slow mode. 
Use the '--slow' flag " 138 | "for GPU tests ('--cuda') to avoid this warning.\n" 139 | "(Issue: https://github.com/pytorch/pytorch/issues/114536)" 140 | ) 141 | 142 | DEVICE = torch.device("cuda:0") 143 | torch.use_deterministic_algorithms(False) 144 | 145 | # `torch.set_default_tensor_type` is deprecated since 2.1.0 and version 146 | # 2.0.0 introduces `torch.set_default_device` 147 | if torch.__version__ < (2, 0, 0): # type: ignore 148 | torch.set_default_tensor_type("torch.cuda.FloatTensor") # type: ignore 149 | else: 150 | torch.set_default_device(DEVICE) # type: ignore[attr-defined] 151 | else: 152 | torch.use_deterministic_algorithms(True) 153 | DEVICE = None 154 | 155 | if config.getoption("--tpo-linewidth"): 156 | torch.set_printoptions(linewidth=config.getoption("--tpo-linewidth")) 157 | 158 | if config.getoption("--tpo-precision"): 159 | torch.set_printoptions(precision=config.getoption("--tpo-precision")) 160 | 161 | if config.getoption("--tpo-threshold"): 162 | torch.set_printoptions(threshold=config.getoption("--tpo-threshold")) 163 | 164 | # register an additional marker 165 | config.addinivalue_line("markers", "cuda: mark test that require CUDA.") 166 | 167 | 168 | def pytest_runtest_setup(item: pytest.Function) -> None: 169 | """Custom marker for tests requiring CUDA.""" 170 | 171 | for _ in item.iter_markers(name="cuda"): 172 | if not torch.cuda.is_available(): 173 | pytest.skip( 174 | "Torch not compiled with CUDA or no CUDA device available." 175 | ) 176 | -------------------------------------------------------------------------------- /test/test_cutoff/__init__.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | -------------------------------------------------------------------------------- /test/test_cutoff/test_general.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Test the correct handling of types in the `Cutoff` class. 
19 | """ 20 | from __future__ import annotations 21 | 22 | import pytest 23 | import torch 24 | from tad_mctc.convert import str_to_device 25 | 26 | from tad_dftd4.cutoff import Cutoff 27 | 28 | 29 | @pytest.mark.parametrize("dtype", [torch.float16, torch.float32, torch.float64]) 30 | def test_change_type(dtype: torch.dtype) -> None: 31 | cutoff = Cutoff().type(dtype) 32 | assert cutoff.dtype == dtype 33 | assert cutoff.disp2.dtype == dtype 34 | assert cutoff.disp3.dtype == dtype 35 | assert cutoff.cn.dtype == dtype 36 | assert cutoff.cn_eeq.dtype == dtype 37 | 38 | 39 | def test_change_type_fail() -> None: 40 | cutoff = Cutoff() 41 | 42 | # trying to use setter 43 | with pytest.raises(AttributeError): 44 | cutoff.dtype = torch.float64 45 | 46 | # passing disallowed dtype 47 | with pytest.raises(ValueError): 48 | cutoff.type(torch.bool) 49 | 50 | 51 | @pytest.mark.cuda 52 | @pytest.mark.parametrize("device_str", ["cpu", "cuda"]) 53 | def test_change_device(device_str: str) -> None: 54 | device = str_to_device(device_str) 55 | cutoff = Cutoff().to(device) 56 | assert cutoff.device == device 57 | assert cutoff.disp2.device == device 58 | assert cutoff.disp3.device == device 59 | assert cutoff.cn.device == device 60 | assert cutoff.cn_eeq.device == device 61 | 62 | 63 | def test_change_device_fail() -> None: 64 | cutoff = Cutoff() 65 | 66 | # trying to use setter 67 | with pytest.raises(AttributeError): 68 | cutoff.device = torch.device("cpu") 69 | -------------------------------------------------------------------------------- /test/test_cutoff/test_types.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test the correct handling of types in the `Cutoff` class.
"""
from __future__ import annotations

import pytest
import torch

from tad_dftd4 import defaults
from tad_dftd4.cutoff import Cutoff


def test_defaults() -> None:
    # a plain `Cutoff()` must pick up the package-wide default cutoffs
    cutoff = Cutoff()
    assert pytest.approx(defaults.D4_DISP2_CUTOFF) == cutoff.disp2.cpu()
    assert pytest.approx(defaults.D4_DISP3_CUTOFF) == cutoff.disp3.cpu()
    assert pytest.approx(defaults.D4_CN_CUTOFF) == cutoff.cn.cpu()
    assert pytest.approx(defaults.D4_CN_EEQ_CUTOFF) == cutoff.cn_eeq.cpu()


def test_tensor() -> None:
    # tensor input is stored as-is; unspecified cutoffs become tensors too
    tmp = torch.tensor([1.0])
    cutoff = Cutoff(disp2=tmp)

    assert isinstance(cutoff.disp2, Tensor)
    assert isinstance(cutoff.disp3, Tensor)
    assert isinstance(cutoff.cn, Tensor)
    assert isinstance(cutoff.cn_eeq, Tensor)

    assert pytest.approx(tmp.cpu()) == cutoff.disp2.cpu()


@pytest.mark.parametrize("vals", [(1, 2, -3, 4), (1.0, 2.0, 3.0, -4.0)])
def test_int_float(vals: tuple[int | float, ...]) -> None:
    # int/float inputs (including negative values) are converted to tensors
    # while preserving the given values
    disp2, disp3, cn, cn_eeq = vals
    cutoff = Cutoff(disp2, disp3, cn, cn_eeq)

    assert isinstance(cutoff.disp2, Tensor)
    assert isinstance(cutoff.disp3, Tensor)
    assert isinstance(cutoff.cn, Tensor)
    assert isinstance(cutoff.cn_eeq, Tensor)

    assert pytest.approx(vals[0]) == cutoff.disp2.cpu()
    assert pytest.approx(vals[1]) == cutoff.disp3.cpu()
    assert pytest.approx(vals[2]) == cutoff.cn.cpu()
    assert pytest.approx(vals[3]) == cutoff.cn_eeq.cpu()
--------------------------------------------------------------------------------
/test/test_disp/__init__.py:
--------------------------------------------------------------------------------
1 | # This file is part of tad-dftd4.
2 | #
3 | # SPDX-Identifier: Apache-2.0
4 | # Copyright (C) 2024 Grimme Group
5 | #
6 | # Licensed under the Apache License, Version 2.0 (the "License");
7 | # you may not use this file except in compliance with the License.
8 | # You may obtain a copy of the License at
9 | #
10 | # http://www.apache.org/licenses/LICENSE-2.0
11 | #
12 | # Unless required by applicable law or agreed to in writing, software
13 | # distributed under the License is distributed on an "AS IS" BASIS,
14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | # See the License for the specific language governing permissions and
16 | # limitations under the License.
17 |
--------------------------------------------------------------------------------
/test/test_disp/test_atm.py:
--------------------------------------------------------------------------------
1 | # This file is part of tad-dftd4.
2 | #
3 | # SPDX-Identifier: Apache-2.0
4 | # Copyright (C) 2024 Grimme Group
5 | #
6 | # Licensed under the Apache License, Version 2.0 (the "License");
7 | # you may not use this file except in compliance with the License.
8 | # You may obtain a copy of the License at
9 | #
10 | # http://www.apache.org/licenses/LICENSE-2.0
11 | #
12 | # Unless required by applicable law or agreed to in writing, software
13 | # distributed under the License is distributed on an "AS IS" BASIS,
14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | # See the License for the specific language governing permissions and
16 | # limitations under the License.
17 | """
18 | Test calculation of two-body and three-body dispersion terms.
19 | """ 20 | import pytest 21 | import torch 22 | from tad_mctc.batch import pack 23 | from tad_mctc.ncoord import cn_d4 24 | 25 | from tad_dftd4.disp import dftd4, dispersion3 26 | from tad_dftd4.model import D4Model 27 | from tad_dftd4.typing import DD 28 | 29 | from ..conftest import DEVICE 30 | from .samples import samples 31 | 32 | sample_list = ["LiH", "SiH4", "MB16_43_01", "MB16_43_02", "AmF3", "actinides"] 33 | 34 | 35 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 36 | @pytest.mark.parametrize("name", sample_list) 37 | def test_single(name: str, dtype: torch.dtype) -> None: 38 | single(name, dtype) 39 | 40 | 41 | @pytest.mark.large 42 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 43 | @pytest.mark.parametrize("name", ["vancoh2"]) 44 | def test_single_large(name: str, dtype: torch.dtype) -> None: 45 | single(name, dtype) 46 | 47 | 48 | def single(name: str, dtype: torch.dtype) -> None: 49 | dd: DD = {"device": DEVICE, "dtype": dtype} 50 | tol = torch.finfo(dtype).eps ** 0.5 * 10 51 | 52 | sample = samples[name] 53 | numbers = sample["numbers"].to(DEVICE) 54 | positions = sample["positions"].to(**dd) 55 | ref = sample["disp3"].to(**dd) 56 | 57 | # TPSSh-D4-ATM parameters 58 | param = { 59 | "s6": torch.tensor(1.00000000, **dd), 60 | "s8": torch.tensor(1.85897750, **dd), 61 | "s9": torch.tensor(1.00000000, **dd), 62 | "s10": torch.tensor(0.0000000, **dd), 63 | "alp": torch.tensor(16.000000, **dd), 64 | "a1": torch.tensor(0.44286966, **dd), 65 | "a2": torch.tensor(4.60230534, **dd), 66 | } 67 | 68 | model = D4Model(numbers, **dd) 69 | cn = cn_d4(numbers, positions) 70 | weights = model.weight_references(cn, q=None) 71 | c6 = model.get_atomic_c6(weights) 72 | cutoff = torch.tensor(40.0, **dd) 73 | 74 | energy = dispersion3(numbers, positions, param, c6, cutoff=cutoff) 75 | 76 | assert energy.dtype == dtype 77 | assert pytest.approx(ref.cpu().cpu(), abs=tol) == energy.cpu() 78 | 79 | 80 | @pytest.mark.parametrize("dtype", 
[torch.float, torch.double]) 81 | @pytest.mark.parametrize("name", sample_list) 82 | def test_s6_s8_zero(name: str, dtype: torch.dtype) -> None: 83 | dd: DD = {"device": DEVICE, "dtype": dtype} 84 | tol = torch.finfo(dtype).eps ** 0.5 * 10 85 | 86 | sample = samples[name] 87 | numbers = sample["numbers"].to(DEVICE) 88 | positions = sample["positions"].to(**dd) 89 | charge = torch.tensor(0.0, **dd) 90 | ref = sample["disp3"].to(**dd) 91 | 92 | # TPSSh-D4-ATM parameters 93 | param = { 94 | "s6": torch.tensor(0.00000000, **dd), 95 | "s8": torch.tensor(0.00000000, **dd), 96 | "s9": torch.tensor(1.00000000, **dd), 97 | "s10": torch.tensor(0.0000000, **dd), 98 | "alp": torch.tensor(16.000000, **dd), 99 | "a1": torch.tensor(0.44286966, **dd), 100 | "a2": torch.tensor(4.60230534, **dd), 101 | } 102 | 103 | energy = dftd4(numbers, positions, charge, param) 104 | 105 | assert energy.dtype == dtype 106 | assert pytest.approx(ref.cpu().cpu(), abs=tol) == energy.cpu() 107 | 108 | 109 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 110 | @pytest.mark.parametrize("name1", ["LiH"]) 111 | @pytest.mark.parametrize("name2", sample_list) 112 | def test_batch(name1: str, name2: str, dtype: torch.dtype) -> None: 113 | batch(name1, name2, dtype) 114 | 115 | 116 | @pytest.mark.large 117 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 118 | @pytest.mark.parametrize("name1", ["LiH"]) 119 | @pytest.mark.parametrize("name2", ["vancoh2"]) 120 | def test_batch_large(name1: str, name2: str, dtype: torch.dtype) -> None: 121 | batch(name1, name2, dtype) 122 | 123 | 124 | def batch(name1: str, name2: str, dtype: torch.dtype) -> None: 125 | dd: DD = {"device": DEVICE, "dtype": dtype} 126 | tol = torch.finfo(dtype).eps ** 0.5 * 10 127 | 128 | sample1, sample2 = samples[name1], samples[name2] 129 | numbers = pack( 130 | [ 131 | sample1["numbers"].to(DEVICE), 132 | sample2["numbers"].to(DEVICE), 133 | ] 134 | ) 135 | positions = pack( 136 | [ 137 | 
sample1["positions"].to(**dd), 138 | sample2["positions"].to(**dd), 139 | ] 140 | ) 141 | ref = pack( 142 | [ 143 | sample1["disp3"].to(**dd), 144 | sample2["disp3"].to(**dd), 145 | ] 146 | ) 147 | 148 | # TPSSh-D4-ATM parameters 149 | param = { 150 | "s6": torch.tensor(1.00000000, **dd), 151 | "s8": torch.tensor(1.85897750, **dd), 152 | "s9": torch.tensor(1.00000000, **dd), 153 | "s10": torch.tensor(0.0000000, **dd), 154 | "alp": torch.tensor(16.000000, **dd), 155 | "a1": torch.tensor(0.44286966, **dd), 156 | "a2": torch.tensor(4.60230534, **dd), 157 | } 158 | 159 | model = D4Model(numbers, **dd) 160 | cn = cn_d4(numbers, positions) 161 | weights = model.weight_references(cn, q=None) 162 | c6 = model.get_atomic_c6(weights) 163 | 164 | energy = dispersion3(numbers, positions, param, c6) 165 | 166 | assert energy.dtype == dtype 167 | assert pytest.approx(ref.cpu(), abs=tol) == energy.cpu() 168 | -------------------------------------------------------------------------------- /test/test_disp/test_full.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Test calculation of two-body and three-body dispersion terms. 
19 | """ 20 | import pytest 21 | import torch 22 | from tad_mctc.batch import pack 23 | 24 | from tad_dftd4 import data 25 | from tad_dftd4.cutoff import Cutoff 26 | from tad_dftd4.disp import dftd4 27 | from tad_dftd4.model import D4Model 28 | from tad_dftd4.typing import DD 29 | 30 | from ..conftest import DEVICE 31 | from .samples import samples 32 | 33 | sample_list = ["LiH", "SiH4", "MB16_43_01", "MB16_43_02", "AmF3", "actinides"] 34 | 35 | 36 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 37 | @pytest.mark.parametrize("name", sample_list) 38 | def test_single(name: str, dtype: torch.dtype) -> None: 39 | single(name, dtype) 40 | 41 | 42 | @pytest.mark.large 43 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 44 | @pytest.mark.parametrize("name", ["vancoh2"]) 45 | def test_single_large(name: str, dtype: torch.dtype) -> None: 46 | single(name, dtype) 47 | 48 | 49 | def single(name: str, dtype: torch.dtype) -> None: 50 | dd: DD = {"device": DEVICE, "dtype": dtype} 51 | tol = torch.finfo(dtype).eps ** 0.5 * 10 52 | 53 | sample = samples[name] 54 | numbers = sample["numbers"].to(DEVICE) 55 | positions = sample["positions"].to(**dd) 56 | q = sample["q"].to(**dd) 57 | charge = torch.tensor(0.0, **dd) 58 | ref = sample["disp"].to(**dd) 59 | 60 | # TPSSh-D4-ATM parameters 61 | param = { 62 | "s6": torch.tensor(1.00000000, **dd), 63 | "s8": torch.tensor(1.85897750, **dd), 64 | "s9": torch.tensor(1.00000000, **dd), 65 | "s10": torch.tensor(0.0000000, **dd), 66 | "alp": torch.tensor(16.000000, **dd), 67 | "a1": torch.tensor(0.44286966, **dd), 68 | "a2": torch.tensor(4.60230534, **dd), 69 | } 70 | 71 | model = D4Model(numbers, **dd) 72 | rcov = data.COV_D3.to(**dd)[numbers] 73 | r4r2 = data.R4R2.to(**dd)[numbers] 74 | cutoff = Cutoff(**dd) 75 | 76 | energy = dftd4( 77 | numbers, 78 | positions, 79 | charge, 80 | param, 81 | model=model, 82 | rcov=rcov, 83 | r4r2=r4r2, 84 | q=q, 85 | cutoff=cutoff, 86 | ) 87 | 88 | assert energy.dtype == 
dtype 89 | assert pytest.approx(ref.cpu(), abs=tol) == energy.cpu() 90 | 91 | 92 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 93 | @pytest.mark.parametrize("name1", ["LiH"]) 94 | @pytest.mark.parametrize("name2", sample_list) 95 | def test_batch(name1: str, name2: str, dtype: torch.dtype) -> None: 96 | batch(name1, name2, dtype) 97 | 98 | 99 | @pytest.mark.large 100 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 101 | @pytest.mark.parametrize("name1", ["LiH"]) 102 | @pytest.mark.parametrize("name2", ["vancoh2"]) 103 | def test_batch_large(name1: str, name2: str, dtype: torch.dtype) -> None: 104 | batch(name1, name2, dtype) 105 | 106 | 107 | def batch(name1: str, name2: str, dtype: torch.dtype) -> None: 108 | dd: DD = {"device": DEVICE, "dtype": dtype} 109 | tol = torch.finfo(dtype).eps ** 0.5 * 10 110 | 111 | sample1, sample2 = samples[name1], samples[name2] 112 | numbers = pack( 113 | [ 114 | sample1["numbers"].to(DEVICE), 115 | sample2["numbers"].to(DEVICE), 116 | ] 117 | ) 118 | positions = pack( 119 | [ 120 | sample1["positions"].to(**dd), 121 | sample2["positions"].to(**dd), 122 | ] 123 | ) 124 | 125 | charge = positions.new_zeros(numbers.shape[0]) 126 | ref = pack( 127 | [ 128 | sample1["disp"].to(**dd), 129 | sample2["disp"].to(**dd), 130 | ] 131 | ) 132 | 133 | # TPSSh-D4-ATM parameters 134 | param = { 135 | "s6": torch.tensor(1.00000000, **dd), 136 | "s8": torch.tensor(1.85897750, **dd), 137 | "s9": torch.tensor(1.00000000, **dd), 138 | "s10": torch.tensor(0.0000000, **dd), 139 | "alp": torch.tensor(16.000000, **dd), 140 | "a1": torch.tensor(0.44286966, **dd), 141 | "a2": torch.tensor(4.60230534, **dd), 142 | } 143 | 144 | energy = dftd4(numbers, positions, charge, param) 145 | assert energy.dtype == dtype 146 | assert pytest.approx(ref.cpu(), abs=tol) == energy.cpu() 147 | -------------------------------------------------------------------------------- /test/test_disp/test_general.py: 
-------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Check shape of tensors. 19 | """ 20 | from __future__ import annotations 21 | 22 | import pytest 23 | import torch 24 | 25 | from tad_dftd4.disp import dftd4 26 | 27 | 28 | def test_fail() -> None: 29 | numbers = torch.tensor([1, 1]) 30 | positions = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) 31 | charge = torch.tensor(0.0) 32 | param = {"s6": torch.tensor(1.0)} 33 | 34 | # rcov wrong shape 35 | with pytest.raises(ValueError): 36 | rcov = torch.tensor([1.0]) 37 | dftd4(numbers, positions, charge, param, rcov=rcov) 38 | 39 | # expectation valus (r4r2) wrong shape 40 | with pytest.raises(ValueError): 41 | r4r2 = torch.tensor([1.0]) 42 | dftd4(numbers, positions, charge, param, r4r2=r4r2) 43 | 44 | # atomic partial charges wrong shape 45 | with pytest.raises(ValueError): 46 | q = torch.tensor([1.0]) 47 | dftd4(numbers, positions, charge, param, q=q) 48 | 49 | # wrong numbers (give charges, otherwise test fails in EEQ, not in disp) 50 | with pytest.raises(ValueError): 51 | q = torch.tensor([0.5, -0.5]) 52 | nums = torch.tensor([1]) 53 | dftd4(nums, positions, charge, param, q=q) 54 | -------------------------------------------------------------------------------- 
/test/test_disp/test_properties.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Test calculation of dispersion model properties. 19 | """ 20 | import pytest 21 | import torch 22 | from tad_mctc.batch import pack 23 | from tad_mctc.math import einsum 24 | from tad_mctc.ncoord import cn_d4 25 | 26 | from tad_dftd4.cutoff import Cutoff 27 | from tad_dftd4.disp import get_properties 28 | from tad_dftd4.model import D4Model 29 | from tad_dftd4.model.utils import trapzd, trapzd_noref 30 | from tad_dftd4.typing import DD 31 | 32 | from ..conftest import DEVICE 33 | from .samples import samples 34 | 35 | sample_list = ["LiH", "SiH4", "MB16_43_01", "MB16_43_02", "AmF3", "actinides"] 36 | 37 | 38 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 39 | @pytest.mark.parametrize("name", sample_list) 40 | def test_single(name: str, dtype: torch.dtype) -> None: 41 | single(name, dtype) 42 | 43 | 44 | @pytest.mark.large 45 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 46 | @pytest.mark.parametrize("name", ["vancoh2"]) 47 | def test_single_large(name: str, dtype: torch.dtype) -> None: 48 | single(name, dtype) 49 | 50 | 51 | def single(name: str, dtype: torch.dtype) -> None: 52 | dd: DD = {"device": 
DEVICE, "dtype": dtype} 53 | tol = torch.finfo(dtype).eps ** 0.5 * 10 54 | 55 | sample = samples[name] 56 | numbers = sample["numbers"].to(DEVICE) 57 | positions = sample["positions"].to(**dd) 58 | 59 | qref = sample["q"].to(**dd) 60 | model = D4Model(numbers, **dd) 61 | cn = cn_d4(numbers, positions) 62 | weights = model.weight_references(cn, qref) 63 | c6ref = model.get_atomic_c6(weights).sum((-2, -1)) 64 | 65 | _cn, _, _c6, _ = get_properties(numbers, positions) 66 | 67 | assert pytest.approx(cn, rel=tol) == _cn 68 | assert pytest.approx(c6ref.cpu(), rel=tol) == _c6.sum((-2, -1)).cpu() 69 | 70 | # Manually calculate C6 values 71 | 72 | aiw = model._get_alpha() # pylint: disable=protected-access 73 | 74 | alpha1 = einsum("...nr,...nra->...nra", weights, aiw) 75 | c61 = trapzd(alpha1, alpha1).sum((-4, -3, -2, -1)) 76 | 77 | alpha2 = einsum("...nr,...nra->...na", weights, aiw) 78 | c62 = trapzd_noref(alpha2, alpha2).sum((-2, -1)) 79 | 80 | assert c6ref.shape == c61.shape 81 | assert c6ref.shape == c62.shape 82 | assert pytest.approx(c6ref.cpu(), rel=tol) == c61.cpu() 83 | assert pytest.approx(c6ref.cpu(), rel=tol) == c62.cpu() 84 | 85 | 86 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 87 | @pytest.mark.parametrize("name1", ["LiH"]) 88 | @pytest.mark.parametrize("name2", sample_list) 89 | def test_batch(name1: str, name2: str, dtype: torch.dtype) -> None: 90 | batch(name1, name2, dtype) 91 | 92 | 93 | @pytest.mark.large 94 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 95 | @pytest.mark.parametrize("name1", ["LiH"]) 96 | @pytest.mark.parametrize("name2", ["vancoh2"]) 97 | def test_batch_large(name1: str, name2: str, dtype: torch.dtype) -> None: 98 | batch(name1, name2, dtype) 99 | 100 | 101 | def batch(name1: str, name2: str, dtype: torch.dtype) -> None: 102 | dd: DD = {"device": DEVICE, "dtype": dtype} 103 | tol = torch.finfo(dtype).eps ** 0.5 * 10 104 | 105 | sample1, sample2 = samples[name1], samples[name2] 106 | numbers = 
pack( 107 | [ 108 | sample1["numbers"].to(DEVICE), 109 | sample2["numbers"].to(DEVICE), 110 | ] 111 | ) 112 | positions = pack( 113 | [ 114 | sample1["positions"].to(**dd), 115 | sample2["positions"].to(**dd), 116 | ] 117 | ) 118 | charge = torch.tensor([0.0, 0.0], **dd) 119 | qref = pack( 120 | [ 121 | sample1["q"].to(**dd), 122 | sample2["q"].to(**dd), 123 | ] 124 | ) 125 | 126 | model = D4Model(numbers, **dd) 127 | cutoff = Cutoff(**dd) 128 | cn = cn_d4(numbers, positions) 129 | weights = model.weight_references(cn, qref) 130 | c6ref = model.get_atomic_c6(weights).sum((-2, -1)) 131 | 132 | _cn, _, _c6, _ = get_properties(numbers, positions, charge, cutoff=cutoff) 133 | 134 | assert pytest.approx(cn, rel=tol) == _cn 135 | assert pytest.approx(c6ref.cpu(), rel=tol) == _c6.sum((-2, -1)).cpu() 136 | 137 | # Manually calculate C6 values 138 | 139 | aiw = model._get_alpha() # pylint: disable=protected-access 140 | 141 | alpha1 = einsum("...nr,...nra->...nra", weights, aiw) 142 | c61 = trapzd(alpha1, alpha1).sum((-4, -3, -2, -1)) 143 | 144 | alpha2 = einsum("...nr,...nra->...na", weights, aiw) 145 | c62 = trapzd_noref(alpha2, alpha2).sum((-2, -1)) 146 | 147 | assert c6ref.shape == c61.shape 148 | assert c6ref.shape == c62.shape 149 | assert pytest.approx(c6ref.cpu(), rel=tol) == c61.cpu() 150 | assert pytest.approx(c6ref.cpu(), rel=tol) == c62.cpu() 151 | -------------------------------------------------------------------------------- /test/test_disp/test_twobody.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Test calculation of two-body and three-body dispersion terms. 19 | """ 20 | from __future__ import annotations 21 | 22 | import pytest 23 | import torch 24 | from tad_mctc.batch import pack 25 | from tad_mctc.ncoord import cn_d4 26 | from tad_multicharge import get_eeq_charges 27 | 28 | from tad_dftd4 import data 29 | from tad_dftd4.disp import dftd4, dispersion2 30 | from tad_dftd4.model import D4Model, D4SModel 31 | from tad_dftd4.typing import DD 32 | 33 | from ..conftest import DEVICE 34 | from .samples import samples 35 | 36 | sample_list = ["LiH", "SiH4", "MB16_43_01", "MB16_43_02", "AmF3", "actinides"] 37 | 38 | 39 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 40 | @pytest.mark.parametrize("name", sample_list) 41 | @pytest.mark.parametrize("model", ["d4", "d4s"]) 42 | def test_single(name: str, dtype: torch.dtype, model: str) -> None: 43 | # Skip test for double precision for actinides and AmF3 44 | if dtype == torch.double and name in ("actinides", "AmF3"): 45 | return 46 | 47 | single(name, dtype, model) 48 | 49 | 50 | @pytest.mark.parametrize("dtype", [torch.double]) 51 | @pytest.mark.parametrize("name", ["actinides", "AmF3"]) 52 | @pytest.mark.parametrize("model", ["d4", "d4s"]) 53 | def test_single_tol(name: str, dtype: torch.dtype, model: str) -> None: 54 | single(name, dtype, model, tol=1e-4) 55 | 56 | 57 | @pytest.mark.large 58 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 59 | @pytest.mark.parametrize("name", ["vancoh2"]) 60 | 
@pytest.mark.parametrize("model", ["d4", "d4s"]) 61 | def test_single_large(name: str, dtype: torch.dtype, model: str) -> None: 62 | single(name, dtype, model) 63 | 64 | 65 | def single( 66 | name: str, dtype: torch.dtype, model_str: str, tol: float | None = None 67 | ) -> None: 68 | dd: DD = {"device": DEVICE, "dtype": dtype} 69 | tol = torch.finfo(dtype).eps ** 0.5 * 10 if tol is None else tol 70 | 71 | sample = samples[name] 72 | numbers = sample["numbers"].to(DEVICE) 73 | positions = sample["positions"].to(**dd) 74 | q = sample["q"].to(**dd) 75 | 76 | # TPSSh-D4-ATM parameters 77 | param = { 78 | "s6": torch.tensor(1.00000000, **dd), 79 | "s8": torch.tensor(1.85897750, **dd), 80 | "s9": torch.tensor(1.00000000, **dd), 81 | "s10": torch.tensor(0.0000000, **dd), 82 | "alp": torch.tensor(16.000000, **dd), 83 | "a1": torch.tensor(0.44286966, **dd), 84 | "a2": torch.tensor(4.60230534, **dd), 85 | } 86 | 87 | if model_str == "d4": 88 | model = D4Model(numbers, **dd) 89 | ref = sample["disp2"].to(**dd) 90 | elif model_str == "d4s": 91 | model = D4SModel(numbers, **dd) 92 | ref = sample["disp2_d4s"].to(**dd) 93 | else: 94 | raise ValueError(f"Invalid model: {model_str}") 95 | 96 | r4r2 = data.R4R2.to(**dd)[numbers] 97 | cn = cn_d4(numbers, positions) 98 | weights = model.weight_references(cn, q) 99 | c6 = model.get_atomic_c6(weights) 100 | 101 | energy = dispersion2(numbers, positions, param, c6, r4r2) 102 | 103 | assert energy.shape == ref.shape 104 | assert energy.dtype == dtype 105 | assert pytest.approx(ref.cpu(), abs=tol) == energy.cpu() 106 | 107 | 108 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 109 | @pytest.mark.parametrize("name", sample_list) 110 | def test_single_matrix(name: str, dtype: torch.dtype) -> None: 111 | dd: DD = {"device": DEVICE, "dtype": dtype} 112 | tol = torch.finfo(dtype).eps ** 0.5 * 10 113 | 114 | sample = samples[name] 115 | numbers = sample["numbers"].to(DEVICE) 116 | positions = sample["positions"].to(**dd) 117 | 
118 | # TPSSh-D4-ATM parameters 119 | param = { 120 | "s8": torch.tensor(1.85897750, **dd), 121 | "s10": torch.tensor(1.0000000, **dd), # quadrupole-quadrupole 122 | "a1": torch.tensor(0.44286966, **dd), 123 | "a2": torch.tensor(4.60230534, **dd), 124 | } 125 | 126 | r4r2 = data.R4R2.to(**dd)[numbers] 127 | model = D4Model(numbers, **dd) 128 | cn = cn_d4(numbers, positions) 129 | weights = model.weight_references(cn) 130 | c6 = model.get_atomic_c6(weights) 131 | 132 | e_sca = dispersion2(numbers, positions, param, c6, r4r2, as_matrix=False) 133 | assert e_sca.dtype == dtype 134 | 135 | e_mat = dispersion2(numbers, positions, param, c6, r4r2, as_matrix=True) 136 | assert e_mat.dtype == dtype 137 | 138 | assert pytest.approx(e_sca.cpu(), abs=tol) == 0.5 * e_mat.sum(-1).cpu() 139 | 140 | 141 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 142 | @pytest.mark.parametrize("name", sample_list) 143 | def test_single_s9_zero(name: str, dtype: torch.dtype) -> None: 144 | dd: DD = {"device": DEVICE, "dtype": dtype} 145 | tol = torch.finfo(dtype).eps ** 0.5 * 10 146 | 147 | sample = samples[name] 148 | numbers = sample["numbers"].to(DEVICE) 149 | positions = sample["positions"].to(**dd) 150 | charge = torch.tensor(0.0, **dd) 151 | q = get_eeq_charges(numbers, positions, charge) 152 | ref = sample["disp2"].to(**dd) 153 | 154 | # TPSSh-D4-ATM parameters 155 | param = { 156 | "s8": torch.tensor(1.85897750, **dd), 157 | "s9": torch.tensor(0.00000000, **dd), # skip ATM 158 | "a1": torch.tensor(0.44286966, **dd), 159 | "a2": torch.tensor(4.60230534, **dd), 160 | } 161 | 162 | energy = dftd4(numbers, positions, charge, param, q=q) 163 | 164 | assert energy.dtype == dtype 165 | assert pytest.approx(ref.cpu(), abs=tol) == energy.cpu() 166 | 167 | 168 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 169 | @pytest.mark.parametrize("name", ["SiH4"]) 170 | def test_single_s10_one(name: str, dtype: torch.dtype) -> None: 171 | dd: DD = {"device": DEVICE, 
"dtype": dtype} 172 | tol = torch.finfo(dtype).eps ** 0.5 * 10 173 | 174 | sample = samples[name] 175 | numbers = sample["numbers"].to(DEVICE) 176 | positions = sample["positions"].to(**dd) 177 | charge = torch.tensor(0.0, **dd) 178 | ref = torch.tensor( 179 | [ 180 | -8.8928018057670788e-04, 181 | -3.3765541880036940e-04, 182 | -3.3765541880036940e-04, 183 | -3.3765541880036940e-04, 184 | -3.3765541880036940e-04, 185 | ], 186 | **dd, 187 | ) 188 | 189 | # TPSSh-D4-ATM parameters 190 | param = { 191 | "s8": torch.tensor(1.85897750, **dd), 192 | "s9": torch.tensor(0.00000000, **dd), # skip ATM 193 | "s10": torch.tensor(1.0000000, **dd), # quadrupole-quadrupole 194 | "a1": torch.tensor(0.44286966, **dd), 195 | "a2": torch.tensor(4.60230534, **dd), 196 | } 197 | 198 | energy = dftd4(numbers, positions, charge, param) 199 | 200 | assert energy.dtype == dtype 201 | assert pytest.approx(ref.cpu(), abs=tol) == energy.cpu() 202 | 203 | 204 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 205 | @pytest.mark.parametrize("name1", ["LiH"]) 206 | @pytest.mark.parametrize("name2", sample_list) 207 | def test_batch(name1: str, name2: str, dtype: torch.dtype) -> None: 208 | batch(name1, name2, dtype) 209 | 210 | 211 | @pytest.mark.large 212 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 213 | @pytest.mark.parametrize("name1", ["LiH"]) 214 | @pytest.mark.parametrize("name2", ["vancoh2"]) 215 | def test_batch_large(name1: str, name2: str, dtype: torch.dtype) -> None: 216 | batch(name1, name2, dtype) 217 | 218 | 219 | def batch(name1: str, name2: str, dtype: torch.dtype) -> None: 220 | dd: DD = {"device": DEVICE, "dtype": dtype} 221 | tol = torch.finfo(dtype).eps ** 0.5 * 10 222 | 223 | sample1, sample2 = samples[name1], samples[name2] 224 | numbers = pack( 225 | [ 226 | sample1["numbers"].to(DEVICE), 227 | sample2["numbers"].to(DEVICE), 228 | ] 229 | ) 230 | positions = pack( 231 | [ 232 | sample1["positions"].to(**dd), 233 | 
sample2["positions"].to(**dd), 234 | ] 235 | ) 236 | q = pack( 237 | [ 238 | sample1["q"].to(**dd), 239 | sample2["q"].to(**dd), 240 | ] 241 | ) 242 | 243 | ref = pack( 244 | [ 245 | sample1["disp2"].to(**dd), 246 | sample2["disp2"].to(**dd), 247 | ] 248 | ) 249 | 250 | # TPSSh-D4-ATM parameters 251 | param = { 252 | "s6": torch.tensor(1.00000000, **dd), 253 | "s8": torch.tensor(1.85897750, **dd), 254 | "s9": torch.tensor(1.00000000, **dd), 255 | "s10": torch.tensor(0.0000000, **dd), 256 | "alp": torch.tensor(16.000000, **dd), 257 | "a1": torch.tensor(0.44286966, **dd), 258 | "a2": torch.tensor(4.60230534, **dd), 259 | } 260 | 261 | r4r2 = data.R4R2.to(**dd)[numbers] 262 | model = D4Model(numbers, **dd) 263 | cn = cn_d4(numbers, positions) 264 | weights = model.weight_references(cn, q) 265 | c6 = model.get_atomic_c6(weights) 266 | 267 | energy = dispersion2(numbers, positions, param, c6, r4r2) 268 | 269 | assert energy.dtype == dtype 270 | assert pytest.approx(ref.cpu(), abs=tol) == energy.cpu() 271 | -------------------------------------------------------------------------------- /test/test_grad/__init__.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | -------------------------------------------------------------------------------- /test/test_grad/test_hessian.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Testing dispersion Hessian (autodiff). 19 | 20 | The reference values are calculated with the dftd4 standalone (Fortran) program, 21 | version 3.6.0. However, some minor modifications are required to obtained a 22 | compatible array ordering from Fortran. In Fortran, the shape of the Hessian is 23 | `(3, mol%nat, 3, mol%nat)`, which we change to `(mol%nat, 3, mol%nat, 3)`. 24 | Correspondingly, the calculation in `get_dispersion_hessian` must also be 25 | adapted: We replace `hessian(:, :, ix, iat) = (gl - gr) / (2 * step)` by 26 | `hessian(:, :, iat, ix) = (transpose(gl) - transpose(gr)) / (2 * step)`. The 27 | Hessian can then simply be printed via `write(*, '(SP,es23.16e2,",")') hessian` 28 | and the Python resorting is handled by the reshape function. 
29 | """ 30 | from __future__ import annotations 31 | 32 | import pytest 33 | import torch 34 | from tad_mctc.autograd import hessian 35 | from tad_mctc.batch import pack 36 | from tad_mctc.convert import reshape_fortran 37 | 38 | from tad_dftd4 import dftd4 39 | from tad_dftd4.typing import DD, Tensor 40 | 41 | from ..conftest import DEVICE 42 | from .samples_hessian import samples 43 | 44 | sample_list = ["LiH", "SiH4", "PbH4-BiH3", "MB16_43_01"] 45 | 46 | tol = 1e-7 47 | 48 | 49 | def test_fail() -> None: 50 | sample = samples["LiH"] 51 | numbers = sample["numbers"] 52 | positions = sample["positions"] 53 | param = {"a1": numbers} 54 | 55 | # differentiable variable is not a tensor 56 | with pytest.raises(ValueError): 57 | hessian(dftd4, (numbers, positions, param), argnums=2) 58 | 59 | 60 | def test_zeros() -> None: 61 | d = torch.randn(2, 3, requires_grad=True) 62 | zeros = torch.zeros([*d.shape, *d.shape]) 63 | 64 | def dummy(x: Tensor) -> Tensor: 65 | return torch.zeros_like(x) 66 | 67 | hess = hessian(dummy, (d,), argnums=0) 68 | assert pytest.approx(zeros.cpu()) == hess.detach().cpu() 69 | 70 | 71 | @pytest.mark.parametrize("dtype", [torch.double]) 72 | @pytest.mark.parametrize("name", sample_list) 73 | def test_single(dtype: torch.dtype, name: str) -> None: 74 | dd: DD = {"device": DEVICE, "dtype": dtype} 75 | 76 | sample = samples[name] 77 | numbers = sample["numbers"].to(DEVICE) 78 | positions = sample["positions"].to(**dd) 79 | charge = torch.tensor(0.0, **dd) 80 | 81 | # TPSS0-ATM parameters 82 | param = { 83 | "s6": torch.tensor(1.00000000, **dd), 84 | "s8": torch.tensor(1.62438102, **dd), 85 | "s9": torch.tensor(1.00000000, **dd), 86 | "a1": torch.tensor(0.40329022, **dd), 87 | "a2": torch.tensor(4.80537871, **dd), 88 | } 89 | 90 | ref = reshape_fortran( 91 | sample["hessian"].to(**dd), 92 | torch.Size(2 * (numbers.shape[-1], 3)), 93 | ) 94 | 95 | # variable to be differentiated 96 | positions.requires_grad_(True) 97 | 98 | hess = hessian(dftd4, 
(numbers, positions, charge, param), argnums=1) 99 | positions.detach_() 100 | 101 | assert pytest.approx(ref.cpu(), abs=tol, rel=tol) == hess.detach().cpu() 102 | 103 | 104 | # TODO: Figure out batched Hessian computation 105 | @pytest.mark.parametrize("dtype", [torch.double]) 106 | @pytest.mark.parametrize("name1", ["LiH"]) 107 | @pytest.mark.parametrize("name2", sample_list) 108 | def skip_test_batch(dtype: torch.dtype, name1: str, name2: str) -> None: 109 | dd: DD = {"device": DEVICE, "dtype": dtype} 110 | 111 | sample1, sample2 = samples[name1], samples[name2] 112 | numbers = pack( 113 | [ 114 | sample1["numbers"].to(DEVICE), 115 | sample2["numbers"].to(DEVICE), 116 | ] 117 | ) 118 | positions = pack( 119 | [ 120 | sample1["positions"].to(**dd), 121 | sample2["positions"].to(**dd), 122 | ] 123 | ) 124 | charge = torch.tensor([0.0, 0.0], **dd) 125 | 126 | # TPSS0-ATM parameters 127 | param = { 128 | "s6": torch.tensor(1.00000000, **dd), 129 | "s8": torch.tensor(1.62438102, **dd), 130 | "s9": torch.tensor(1.00000000, **dd), 131 | "a1": torch.tensor(0.40329022, **dd), 132 | "a2": torch.tensor(4.80537871, **dd), 133 | } 134 | 135 | ref = pack( 136 | [ 137 | reshape_fortran( 138 | sample1["hessian"].to(**dd), 139 | torch.Size(2 * (sample1["numbers"].shape[-1], 3)), 140 | ), 141 | reshape_fortran( 142 | sample2["hessian"].to(**dd), 143 | torch.Size(2 * (sample2["numbers"].shape[-1], 3)), 144 | ), 145 | ] 146 | ) 147 | 148 | # variable to be differentiated 149 | positions.requires_grad_(True) 150 | 151 | hess = hessian(dftd4, (numbers, positions, charge, param), argnums=1) 152 | positions.detach_() 153 | 154 | assert pytest.approx(ref.cpu(), abs=tol, rel=tol) == hess.detach().cpu() 155 | -------------------------------------------------------------------------------- /test/test_grad/test_nan.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 
2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Testing dispersion gradient (autodiff). 19 | """ 20 | from __future__ import annotations 21 | 22 | import pytest 23 | import torch 24 | from tad_mctc.batch import pack 25 | from tad_mctc.data.molecules import mols as samples 26 | 27 | from tad_dftd4 import dftd4 28 | from tad_dftd4.typing import DD 29 | 30 | from ..conftest import DEVICE 31 | 32 | tol = 1e-7 33 | 34 | 35 | # sample, which previously failed with NaN's in tad-dftd3 36 | numbers = torch.tensor([6, 6, 6, 6, 6, 6, 6, 6, 1, 1, 1, 1, 1, 7, 8, 8, 8]) 37 | positions = torch.tensor( 38 | [ 39 | [-1.0981, +0.1496, +0.1346], 40 | [-0.4155, +1.2768, +0.3967], 41 | [+0.9426, +0.7848, +0.1307], 42 | [+2.1708, +1.3814, -0.0347], 43 | [+3.3234, +0.5924, -0.1535], 44 | [+3.1564, -0.8110, -0.0285], 45 | [+1.8929, -1.4673, +0.0373], 46 | [+0.8498, -0.5613, +0.0109], 47 | [-0.7751, +2.2970, +0.5540], 48 | [+2.3079, +2.4725, -0.1905], 49 | [+4.3031, +0.9815, -0.4599], 50 | [+4.0011, -1.4666, -0.0514], 51 | [+1.8340, -2.5476, -0.1587], 52 | [-2.5629, -0.0306, -0.1458], 53 | [-3.0792, +1.0280, -0.3225], 54 | [-3.0526, -1.1594, +0.1038], 55 | [-0.4839, -0.9612, -0.0048], 56 | ], 57 | ) 58 | charge = torch.tensor(0.0) 59 | 60 | param = { 61 | "s6": torch.tensor(1.00000000), 62 | "s8": torch.tensor(0.78981345), 63 | "s9": torch.tensor(1.00000000), 
64 | "a1": torch.tensor(0.49484001), 65 | "a2": torch.tensor(5.73083694), 66 | } 67 | 68 | 69 | @pytest.mark.grad 70 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 71 | def test_single(dtype: torch.dtype) -> None: 72 | dd: DD = {"device": DEVICE, "dtype": dtype} 73 | 74 | nums = numbers.to(device=DEVICE) 75 | pos = positions.to(**dd) 76 | chrg = charge.to(**dd) 77 | par = {k: v.to(**dd) for k, v in param.items()} 78 | 79 | pos.requires_grad_(True) 80 | 81 | energy = dftd4(nums, pos, chrg, par) 82 | assert not torch.isnan(energy).any(), "Energy contains NaN values" 83 | 84 | energy.sum().backward() 85 | 86 | assert pos.grad is not None 87 | grad_backward = pos.grad.clone() 88 | 89 | # also zero out gradients when using `.backward()` 90 | pos.detach_() 91 | pos.grad.data.zero_() 92 | 93 | assert not torch.isnan(grad_backward).any(), "Gradient contains NaN values" 94 | 95 | 96 | @pytest.mark.grad 97 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 98 | @pytest.mark.parametrize("name", ["LiH", "SiH4"]) 99 | def test_batch(dtype: torch.dtype, name: str) -> None: 100 | dd: DD = {"device": DEVICE, "dtype": dtype} 101 | 102 | nums = pack( 103 | ( 104 | numbers.to(device=DEVICE), 105 | samples[name]["numbers"].to(device=DEVICE), 106 | ) 107 | ) 108 | pos = pack( 109 | ( 110 | positions.to(**dd), 111 | samples[name]["positions"].to(**dd), 112 | ) 113 | ) 114 | chrg = torch.tensor([0.0, 0.0], **dd) 115 | par = {k: v.to(**dd) for k, v in param.items()} 116 | 117 | pos.requires_grad_(True) 118 | 119 | energy = dftd4(nums, pos, chrg, par) 120 | assert not torch.isnan(energy).any(), "Energy contains NaN values" 121 | 122 | energy.sum().backward() 123 | 124 | assert pos.grad is not None 125 | grad_backward = pos.grad.clone() 126 | 127 | # also zero out gradients when using `.backward()` 128 | pos.detach_() 129 | pos.grad.data.zero_() 130 | 131 | assert not torch.isnan(grad_backward).any(), "Gradient contains NaN values" 132 | 
-------------------------------------------------------------------------------- /test/test_grad/test_param.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Testing dispersion gradient (autodiff). 19 | """ 20 | from __future__ import annotations 21 | 22 | import pytest 23 | import torch 24 | from tad_mctc.autograd import dgradcheck, dgradgradcheck 25 | from tad_mctc.batch import pack 26 | from tad_mctc.data.molecules import mols as samples 27 | 28 | from tad_dftd4 import dftd4 29 | from tad_dftd4.typing import DD, Callable, Tensor 30 | 31 | from ..conftest import DEVICE 32 | 33 | sample_list = ["LiH", "AmF3", "SiH4"] 34 | 35 | tol = 1e-7 36 | 37 | 38 | def gradchecker(dtype: torch.dtype, name: str) -> tuple[ 39 | Callable[[Tensor, Tensor, Tensor, Tensor], Tensor], # autograd function 40 | tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor], 41 | ]: 42 | dd: DD = {"device": DEVICE, "dtype": dtype} 43 | 44 | sample = samples[name] 45 | numbers = sample["numbers"].to(device=DEVICE) 46 | positions = sample["positions"].to(**dd) 47 | charge = torch.tensor(0.0, **dd) 48 | 49 | # TPSS0-D4-ATM parameters, variables to be differentiated 50 | param = ( 51 | torch.tensor(1.00000000, **dd, requires_grad=True), 52 | 
torch.tensor(0.78981345, **dd, requires_grad=True), 53 | torch.tensor(1.00000000, **dd, requires_grad=True), 54 | torch.tensor(0.00000000, **dd, requires_grad=True), # s10 55 | torch.tensor(0.49484001, **dd, requires_grad=True), 56 | torch.tensor(5.73083694, **dd, requires_grad=True), 57 | torch.tensor(16.0000000, **dd, requires_grad=True), 58 | ) 59 | label = ("s6", "s8", "s9", "s10", "a1", "a2", "alp") 60 | 61 | def func(*inputs: Tensor) -> Tensor: 62 | input_param = {label[i]: input for i, input in enumerate(inputs)} 63 | return dftd4(numbers, positions, charge, input_param) 64 | 65 | return func, param 66 | 67 | 68 | @pytest.mark.grad 69 | @pytest.mark.parametrize("dtype", [torch.double]) 70 | @pytest.mark.parametrize("name", sample_list) 71 | def test_gradcheck(dtype: torch.dtype, name: str) -> None: 72 | """ 73 | Check a single analytical gradient of parameters against numerical 74 | gradient from `torch.autograd.gradcheck`. 75 | """ 76 | func, diffvars = gradchecker(dtype, name) 77 | assert dgradcheck(func, diffvars, atol=tol) 78 | 79 | 80 | @pytest.mark.grad 81 | @pytest.mark.parametrize("dtype", [torch.double]) 82 | @pytest.mark.parametrize("name", sample_list) 83 | def test_gradgradcheck(dtype: torch.dtype, name: str) -> None: 84 | """ 85 | Check a single analytical gradient of parameters against numerical 86 | gradient from `torch.autograd.gradgradcheck`. 87 | """ 88 | func, diffvars = gradchecker(dtype, name) 89 | assert dgradgradcheck(func, diffvars, atol=tol) 90 | 91 | 92 | @pytest.mark.grad 93 | @pytest.mark.parametrize("dtype", [torch.double]) 94 | @pytest.mark.parametrize("name", ["MB16_43_01"]) 95 | def test_gradgradcheck_slow(dtype: torch.dtype, name: str) -> None: 96 | """ 97 | These fail with `fast_mode=True`. 
98 | """ 99 | func, diffvars = gradchecker(dtype, name) 100 | assert dgradgradcheck(func, diffvars, atol=1e-5, rtol=1e-5, fast_mode=False) 101 | 102 | 103 | def gradchecker_batch(dtype: torch.dtype, name1: str, name2: str) -> tuple[ 104 | Callable[[Tensor, Tensor, Tensor, Tensor], Tensor], # autograd function 105 | tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor], 106 | ]: 107 | dd: DD = {"device": DEVICE, "dtype": dtype} 108 | 109 | sample1, sample2 = samples[name1], samples[name2] 110 | numbers = pack( 111 | [ 112 | sample1["numbers"].to(device=DEVICE), 113 | sample2["numbers"].to(device=DEVICE), 114 | ] 115 | ) 116 | positions = pack( 117 | [ 118 | sample1["positions"].to(**dd), 119 | sample2["positions"].to(**dd), 120 | ] 121 | ) 122 | charge = torch.tensor([0.0, 0.0], **dd) 123 | 124 | # TPSS0-D4-ATM parameters, variables to be differentiated 125 | param = ( 126 | torch.tensor(1.00000000, **dd, requires_grad=True), 127 | torch.tensor(0.78981345, **dd, requires_grad=True), 128 | torch.tensor(1.00000000, **dd, requires_grad=True), 129 | torch.tensor(0.00000000, **dd, requires_grad=True), # s10 130 | torch.tensor(0.49484001, **dd, requires_grad=True), 131 | torch.tensor(5.73083694, **dd, requires_grad=True), 132 | torch.tensor(16.0000000, **dd, requires_grad=True), 133 | ) 134 | label = ("s6", "s8", "s9", "s10", "a1", "a2", "alp") 135 | 136 | def func(*inputs: Tensor) -> Tensor: 137 | input_param = {label[i]: input for i, input in enumerate(inputs)} 138 | return dftd4(numbers, positions, charge, input_param) 139 | 140 | return func, param 141 | 142 | 143 | @pytest.mark.grad 144 | @pytest.mark.parametrize("dtype", [torch.double]) 145 | @pytest.mark.parametrize("name1", ["LiH"]) 146 | @pytest.mark.parametrize("name2", sample_list) 147 | def test_gradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None: 148 | """ 149 | Check a single analytical gradient of parameters against numerical 150 | gradient from `torch.autograd.gradcheck`. 
151 | """ 152 | func, diffvars = gradchecker_batch(dtype, name1, name2) 153 | assert dgradcheck(func, diffvars, atol=tol) 154 | 155 | 156 | @pytest.mark.grad 157 | @pytest.mark.parametrize("dtype", [torch.double]) 158 | @pytest.mark.parametrize("name1", ["LiH"]) 159 | @pytest.mark.parametrize("name2", ["LiH", "SiH4"]) 160 | def test_gradgradcheck_batch( 161 | dtype: torch.dtype, name1: str, name2: str 162 | ) -> None: 163 | """ 164 | Check a single analytical gradient of parameters against numerical 165 | gradient from `torch.autograd.gradgradcheck`. 166 | """ 167 | func, diffvars = gradchecker_batch(dtype, name1, name2) 168 | assert dgradgradcheck(func, diffvars, atol=tol) 169 | 170 | 171 | @pytest.mark.grad 172 | @pytest.mark.parametrize("dtype", [torch.double]) 173 | @pytest.mark.parametrize("name1", ["LiH"]) 174 | @pytest.mark.parametrize("name2", ["AmF3", "MB16_43_01"]) 175 | def test_gradgradcheck_batch_slow( 176 | dtype: torch.dtype, name1: str, name2: str 177 | ) -> None: 178 | """ 179 | These fail with `fast_mode=True` (and sometimes randomly on GA runners). 180 | """ 181 | func, diffvars = gradchecker_batch(dtype, name1, name2) 182 | assert dgradgradcheck(func, diffvars, atol=1e-4, rtol=1e-4, fast_mode=False) 183 | -------------------------------------------------------------------------------- /test/test_grad/test_pos.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Testing dispersion gradient (autodiff). 19 | """ 20 | from __future__ import annotations 21 | 22 | import pytest 23 | import torch 24 | from tad_mctc.autograd import dgradcheck, dgradgradcheck 25 | from tad_mctc.batch import pack 26 | 27 | from tad_dftd4 import dftd4 28 | from tad_dftd4.typing import DD, Callable, Tensor 29 | 30 | from ..conftest import DEVICE 31 | from .samples_grad import samples 32 | 33 | sample_list = ["LiH", "SiH4", "PbH4-BiH3", "MB16_43_01"] 34 | 35 | tol = 1e-7 36 | 37 | 38 | def gradchecker(dtype: torch.dtype, name: str) -> tuple[ 39 | Callable[[Tensor], Tensor], # autograd function 40 | Tensor, # differentiable variables 41 | ]: 42 | dd: DD = {"device": DEVICE, "dtype": dtype} 43 | 44 | sample = samples[name] 45 | numbers = sample["numbers"].to(DEVICE) 46 | positions = sample["positions"].to(**dd) 47 | charge = torch.tensor(0.0, **dd) 48 | 49 | # TPSS0-ATM parameters 50 | param = { 51 | "s6": torch.tensor(1.00000000, **dd), 52 | "s8": torch.tensor(1.62438102, **dd), 53 | "s9": torch.tensor(1.00000000, **dd), 54 | "a1": torch.tensor(0.40329022, **dd), 55 | "a2": torch.tensor(4.80537871, **dd), 56 | } 57 | 58 | # variable to be differentiated 59 | positions.requires_grad_(True) 60 | 61 | def func(pos: Tensor) -> Tensor: 62 | return dftd4(numbers, pos, charge, param) 63 | 64 | return func, positions 65 | 66 | 67 | @pytest.mark.grad 68 | @pytest.mark.parametrize("dtype", [torch.double]) 69 | @pytest.mark.parametrize("name", sample_list) 70 | def test_gradcheck(dtype: torch.dtype, name: 
str) -> None: 71 | """ 72 | Check a single analytical gradient of parameters against numerical 73 | gradient from `torch.autograd.gradcheck`. 74 | """ 75 | func, diffvars = gradchecker(dtype, name) 76 | assert dgradcheck(func, diffvars, atol=tol) 77 | 78 | 79 | @pytest.mark.grad 80 | @pytest.mark.parametrize("dtype", [torch.double]) 81 | @pytest.mark.parametrize("name", sample_list) 82 | def test_gradgradcheck(dtype: torch.dtype, name: str) -> None: 83 | """ 84 | Check a single analytical gradient of parameters against numerical 85 | gradient from `torch.autograd.gradgradcheck`. 86 | """ 87 | func, diffvars = gradchecker(dtype, name) 88 | assert dgradgradcheck(func, diffvars, atol=tol) 89 | 90 | 91 | def gradchecker_batch(dtype: torch.dtype, name1: str, name2: str) -> tuple[ 92 | Callable[[Tensor], Tensor], # autograd function 93 | Tensor, # differentiable variables 94 | ]: 95 | dd: DD = {"device": DEVICE, "dtype": dtype} 96 | 97 | sample1, sample2 = samples[name1], samples[name2] 98 | numbers = pack( 99 | [ 100 | sample1["numbers"].to(DEVICE), 101 | sample2["numbers"].to(DEVICE), 102 | ] 103 | ) 104 | positions = pack( 105 | [ 106 | sample1["positions"].to(**dd), 107 | sample2["positions"].to(**dd), 108 | ] 109 | ) 110 | charge = torch.tensor([0.0, 0.0], **dd) 111 | 112 | # TPSS0-ATM parameters 113 | param = { 114 | "s6": torch.tensor(1.00000000, **dd), 115 | "s8": torch.tensor(1.62438102, **dd), 116 | "s9": torch.tensor(1.00000000, **dd), 117 | "a1": torch.tensor(0.40329022, **dd), 118 | "a2": torch.tensor(4.80537871, **dd), 119 | } 120 | 121 | # variable to be differentiated 122 | positions.requires_grad_(True) 123 | 124 | def func(pos: Tensor) -> Tensor: 125 | return dftd4(numbers, pos, charge, param) 126 | 127 | return func, positions 128 | 129 | 130 | @pytest.mark.grad 131 | @pytest.mark.parametrize("dtype", [torch.double]) 132 | @pytest.mark.parametrize("name1", ["LiH"]) 133 | @pytest.mark.parametrize("name2", sample_list) 134 | def 
test_gradcheck_batch(dtype: torch.dtype, name1: str, name2: str) -> None: 135 | """ 136 | Check a single analytical gradient of parameters against numerical 137 | gradient from `torch.autograd.gradcheck`. 138 | """ 139 | func, diffvars = gradchecker_batch(dtype, name1, name2) 140 | assert dgradcheck(func, diffvars, atol=tol) 141 | 142 | 143 | @pytest.mark.grad 144 | @pytest.mark.parametrize("dtype", [torch.double]) 145 | @pytest.mark.parametrize("name1", ["LiH"]) 146 | @pytest.mark.parametrize("name2", sample_list) 147 | def test_gradgradcheck_batch( 148 | dtype: torch.dtype, name1: str, name2: str 149 | ) -> None: 150 | """ 151 | Check a single analytical gradient of parameters against numerical 152 | gradient from `torch.autograd.gradgradcheck`. 153 | """ 154 | func, diffvars = gradchecker_batch(dtype, name1, name2) 155 | assert dgradgradcheck(func, diffvars, atol=tol) 156 | 157 | 158 | @pytest.mark.grad 159 | @pytest.mark.parametrize("dtype", [torch.double]) 160 | @pytest.mark.parametrize("name", sample_list) 161 | def test_autograd(dtype: torch.dtype, name: str) -> None: 162 | """Compare with reference values from tblite.""" 163 | dd: DD = {"device": DEVICE, "dtype": dtype} 164 | 165 | sample = samples[name] 166 | numbers = sample["numbers"].to(DEVICE) 167 | positions = sample["positions"].to(**dd) 168 | charge = torch.tensor(0.0, **dd) 169 | 170 | # TPSS0-ATM parameters 171 | param = { 172 | "s6": torch.tensor(1.00000000, **dd), 173 | "s8": torch.tensor(1.62438102, **dd), 174 | "s9": torch.tensor(1.00000000, **dd), 175 | "a1": torch.tensor(0.40329022, **dd), 176 | "a2": torch.tensor(4.80537871, **dd), 177 | } 178 | 179 | ref = sample["grad"].to(**dd) 180 | 181 | # variable to be differentiated 182 | positions.requires_grad_(True) 183 | 184 | # automatic gradient 185 | energy = torch.sum(dftd4(numbers, positions, charge, param)) 186 | (grad,) = torch.autograd.grad(energy, positions) 187 | 188 | positions.detach_() 189 | 190 | assert pytest.approx(ref.cpu(), 
abs=tol) == grad.cpu() 191 | 192 | 193 | @pytest.mark.grad 194 | @pytest.mark.parametrize("dtype", [torch.double]) 195 | @pytest.mark.parametrize("name", sample_list) 196 | def test_backward(dtype: torch.dtype, name: str) -> None: 197 | """Compare with reference values from tblite.""" 198 | dd: DD = {"device": DEVICE, "dtype": dtype} 199 | 200 | sample = samples[name] 201 | numbers = sample["numbers"].to(DEVICE) 202 | positions = sample["positions"].to(**dd) 203 | charge = torch.tensor(0.0, **dd) 204 | 205 | # TPSS0-ATM parameters 206 | param = { 207 | "s6": torch.tensor(1.00000000, **dd), 208 | "s8": torch.tensor(1.62438102, **dd), 209 | "s9": torch.tensor(1.00000000, **dd), 210 | "a1": torch.tensor(0.40329022, **dd), 211 | "a2": torch.tensor(4.80537871, **dd), 212 | } 213 | 214 | ref = sample["grad"].to(**dd) 215 | 216 | # variable to be differentiated 217 | positions.requires_grad_(True) 218 | 219 | # automatic gradient 220 | energy = torch.sum(dftd4(numbers, positions, charge, param)) 221 | energy.backward() 222 | 223 | assert positions.grad is not None 224 | grad_backward = positions.grad.clone() 225 | 226 | # also zero out gradients when using `.backward()` 227 | positions.detach_() 228 | positions.grad.data.zero_() 229 | 230 | assert pytest.approx(ref.cpu(), abs=tol) == grad_backward.cpu() 231 | -------------------------------------------------------------------------------- /test/test_model/__init__.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 
8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | -------------------------------------------------------------------------------- /test/test_model/test_c6.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Test calculation of DFT-D4 model. 
19 | """ 20 | 21 | import pytest 22 | import torch 23 | import torch.nn.functional as F 24 | from tad_mctc.batch import pack 25 | 26 | from tad_dftd4.model import D4Model, D4SModel 27 | from tad_dftd4.typing import DD 28 | 29 | from ..conftest import DEVICE 30 | from .samples import samples 31 | 32 | # only these references use `cn=True` and `q=True` for `gw` 33 | sample_list = ["LiH", "SiH4", "MB16_43_03"] 34 | 35 | 36 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 37 | @pytest.mark.parametrize("name", sample_list) 38 | def test_single(name: str, dtype: torch.dtype) -> None: 39 | dd: DD = {"device": DEVICE, "dtype": dtype} 40 | 41 | tol = 1e-4 if dtype == torch.float else 1e-5 42 | sample = samples[name] 43 | numbers = sample["numbers"].to(DEVICE) 44 | ref = sample["c6"].to(**dd) 45 | 46 | d4 = D4Model(numbers, **dd) 47 | 48 | # pad reference tensor to always be of shape `(natoms, 7)` 49 | src = sample["gw"].to(**dd) 50 | gw = F.pad( 51 | input=src, 52 | pad=(0, 0, 0, 7 - src.size(0)), 53 | mode="constant", 54 | value=0, 55 | ).mT 56 | 57 | c6 = d4.get_atomic_c6(gw) 58 | assert pytest.approx(ref.cpu(), rel=tol) == c6.cpu() 59 | 60 | 61 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 62 | @pytest.mark.parametrize("name", sample_list) 63 | def test_single_d4s(name: str, dtype: torch.dtype) -> None: 64 | dd: DD = {"device": DEVICE, "dtype": dtype} 65 | 66 | tol = 1e-4 if dtype == torch.float else 1e-5 67 | sample = samples[name] 68 | numbers = sample["numbers"].to(DEVICE) 69 | ref = sample["c6_d4s"].to(**dd) 70 | 71 | d4 = D4SModel(numbers, **dd) 72 | 73 | # pad reference tensor to always be of shape `(natoms, 7)` 74 | src = sample["gw_d4s"].to(**dd) 75 | gw = F.pad( 76 | input=src, 77 | pad=(0, 0, 0, 0, 0, 7 - src.size(0)), 78 | mode="constant", 79 | value=0, 80 | ).permute(2, 1, 0) 81 | 82 | c6 = d4.get_atomic_c6(gw) 83 | assert pytest.approx(ref.cpu(), rel=tol) == c6.cpu() 84 | 85 | 86 | @pytest.mark.parametrize("dtype", 
[torch.float, torch.double]) 87 | @pytest.mark.parametrize("name1", ["LiH"]) 88 | @pytest.mark.parametrize("name2", sample_list) 89 | def test_batch(name1: str, name2: str, dtype: torch.dtype) -> None: 90 | dd: DD = {"device": DEVICE, "dtype": dtype} 91 | 92 | tol = 1e-4 if dtype == torch.float else 1e-5 93 | sample1, sample2 = samples[name1], samples[name2] 94 | numbers = pack( 95 | [ 96 | sample1["numbers"].to(DEVICE), 97 | sample2["numbers"].to(DEVICE), 98 | ] 99 | ) 100 | refs = pack( 101 | [ 102 | sample1["c6"].to(**dd), 103 | sample2["c6"].to(**dd), 104 | ] 105 | ) 106 | 107 | d4 = D4Model(numbers, **dd) 108 | 109 | # pad reference tensor to always be of shape `(natoms, 7)` 110 | src1 = sample1["gw"].to(**dd) 111 | src2 = sample2["gw"].to(**dd) 112 | 113 | gw = pack( 114 | [ 115 | F.pad( 116 | input=src1, 117 | pad=(0, 0, 0, 7 - src1.size(0)), 118 | mode="constant", 119 | value=0, 120 | ).mT, 121 | src2.mT, 122 | ] 123 | ) 124 | 125 | c6 = d4.get_atomic_c6(gw) 126 | assert pytest.approx(refs.cpu(), rel=tol) == c6.cpu() 127 | -------------------------------------------------------------------------------- /test/test_model/test_general.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | """ 18 | Test the correct handling of types in the `D4Model` class. 19 | """ 20 | from __future__ import annotations 21 | 22 | import pytest 23 | import torch 24 | from tad_mctc.convert import str_to_device 25 | 26 | from tad_dftd4.model import D4Model, D4SModel 27 | 28 | 29 | @pytest.mark.parametrize("dtype", [torch.float16, torch.float32, torch.float64]) 30 | def test_change_type(dtype: torch.dtype) -> None: 31 | numbers = torch.tensor([14, 1, 1, 1, 1]) 32 | model = D4Model(numbers).type(dtype) 33 | assert model.dtype == dtype 34 | 35 | 36 | def test_change_type_fail() -> None: 37 | numbers = torch.tensor([14, 1, 1, 1, 1]) 38 | model = D4Model(numbers) 39 | 40 | # trying to use setter 41 | with pytest.raises(AttributeError): 42 | model.dtype = torch.float64 43 | 44 | # passing disallowed dtype 45 | with pytest.raises(ValueError): 46 | model.type(torch.bool) 47 | 48 | 49 | @pytest.mark.cuda 50 | @pytest.mark.parametrize("device_str", ["cpu", "cuda"]) 51 | def test_change_device(device_str: str) -> None: 52 | device = str_to_device(device_str) 53 | numbers = torch.tensor([14, 1, 1, 1, 1]) 54 | model = D4Model(numbers).to(device) 55 | assert model.device == device 56 | 57 | 58 | def test_change_device_fail() -> None: 59 | numbers = torch.tensor([14, 1, 1, 1, 1]) 60 | model = D4Model(numbers) 61 | 62 | # trying to use setter 63 | with pytest.raises(AttributeError): 64 | model.device = torch.device("cpu") 65 | 66 | 67 | # raise error when creating the model in `_set_refalpha_eeq` 68 | @pytest.mark.parametrize("model", ["d4", "d4s"]) 69 | def test_ref_charges_fail(model: str) -> None: 70 | numbers = torch.tensor([14, 1, 1, 1, 1]) 71 | 72 | with pytest.raises(ValueError): 73 | if model == "d4": 74 | D4Model(numbers, ref_charges="wrong") # type: ignore 75 | elif model == "d4s": 76 | D4SModel(numbers, ref_charges="wrong") # type: ignore 77 | else: 78 | raise ValueError(f"Unknown model: {model}") 79 | 80 | 81 | # raise error in `weight_references` when trying to 
change the ref_charges 82 | @pytest.mark.parametrize("model", ["d4", "d4s"]) 83 | def test_ref_charges_fail_2(model: str) -> None: 84 | numbers = torch.tensor([14, 1, 1, 1, 1]) 85 | 86 | if model == "d4": 87 | d4 = D4Model(numbers, ref_charges="eeq") 88 | elif model == "d4s": 89 | d4 = D4SModel(numbers, ref_charges="eeq") 90 | else: 91 | raise ValueError(f"Unknown model: {model}") 92 | 93 | d4.ref_charges = "wrong" # type: ignore 94 | with pytest.raises(ValueError): 95 | d4.weight_references() 96 | -------------------------------------------------------------------------------- /test/test_model/test_models.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Test calculation of DFT-D4 model. 
19 | """ 20 | 21 | import pytest 22 | import torch 23 | from tad_mctc.batch import pack 24 | from tad_mctc.math import einsum 25 | from tad_mctc.ncoord import cn_d4 26 | from tad_mctc.typing import Tensor 27 | 28 | from tad_dftd4.model import D4Model, D4SModel 29 | from tad_dftd4.model.d4s import D4SDebug 30 | from tad_dftd4.model.utils import trapzd_noref 31 | from tad_dftd4.typing import DD 32 | 33 | from ..conftest import DEVICE 34 | from .samples import samples 35 | 36 | # only these references use `cn=True` and `q=True` for `gw` 37 | sample_list = ["LiH", "SiH4", "MB16_43_03"] 38 | 39 | 40 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 41 | @pytest.mark.parametrize("name", sample_list) 42 | @pytest.mark.parametrize("model", ["d4", "d4s"]) 43 | def test_single(name: str, dtype: torch.dtype, model: str) -> None: 44 | dd: DD = {"device": DEVICE, "dtype": dtype} 45 | tol = 1e-5 46 | 47 | sample = samples[name] 48 | numbers = sample["numbers"].to(DEVICE) 49 | positions = sample["positions"].to(**dd) 50 | q = sample["q"].to(**dd) 51 | 52 | if model == "d4": 53 | d4 = D4Model(numbers, **dd) 54 | ref = sample["c6"].to(**dd) 55 | elif model == "d4s": 56 | d4 = D4SModel(numbers, **dd) 57 | ref = sample["c6_d4s"].to(**dd) 58 | else: 59 | raise ValueError(f"Unknown model: {model}") 60 | 61 | cn = cn_d4(numbers, positions) 62 | gw = d4.weight_references(cn=cn, q=q) 63 | c6 = d4.get_atomic_c6(gw) 64 | assert pytest.approx(ref.cpu(), abs=tol, rel=tol) == c6.cpu() 65 | 66 | # Calculate from weighted pols (only sums equivalent) 67 | if model == "d4": 68 | w = einsum("...nr,...nrw->...nw", gw, d4._get_alpha()) 69 | _c6 = trapzd_noref(w).sum() 70 | elif model == "d4s": 71 | w = einsum("...jia,...iaw->...jiw", gw, d4._get_alpha()) 72 | _c6 = _trapzd(w).sum() 73 | else: 74 | raise ValueError(f"Unknown model: {model}") 75 | 76 | assert pytest.approx(c6.sum().cpu(), rel=tol) == _c6.cpu() 77 | assert pytest.approx(ref.sum().cpu(), rel=tol) == _c6.cpu() 78 | 79 | 80 
| @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 81 | @pytest.mark.parametrize("name1", ["LiH"]) 82 | @pytest.mark.parametrize("name2", sample_list) 83 | @pytest.mark.parametrize("model", ["d4", "d4s"]) 84 | def test_batch(name1: str, name2: str, dtype: torch.dtype, model: str) -> None: 85 | dd: DD = {"device": DEVICE, "dtype": dtype} 86 | tol = 1e-5 87 | 88 | sample1, sample2 = samples[name1], samples[name2] 89 | numbers = pack( 90 | [ 91 | sample1["numbers"].to(DEVICE), 92 | sample2["numbers"].to(DEVICE), 93 | ] 94 | ) 95 | positions = pack( 96 | [ 97 | sample1["positions"].to(**dd), 98 | sample2["positions"].to(**dd), 99 | ] 100 | ) 101 | q = pack( 102 | [ 103 | sample1["q"].to(**dd), 104 | sample2["q"].to(**dd), 105 | ] 106 | ) 107 | 108 | if model == "d4": 109 | d4 = D4Model(numbers, **dd) 110 | refs = pack( 111 | [ 112 | sample1["c6"].to(**dd), 113 | sample2["c6"].to(**dd), 114 | ] 115 | ) 116 | elif model == "d4s": 117 | d4 = D4SModel(numbers, **dd) 118 | refs = pack( 119 | [ 120 | sample1["c6_d4s"].to(**dd), 121 | sample2["c6_d4s"].to(**dd), 122 | ] 123 | ) 124 | else: 125 | raise ValueError(f"Unknown model: {model}") 126 | 127 | cn = cn_d4(numbers, positions) 128 | gw = d4.weight_references(cn=cn, q=q) 129 | c6 = d4.get_atomic_c6(gw) 130 | assert pytest.approx(refs.cpu(), abs=tol, rel=tol) == c6.cpu() 131 | 132 | # Calculate from weighted pols (only sums equivalent) 133 | if model == "d4": 134 | w = einsum("...nr,...nrw->...nw", gw, d4._get_alpha()) 135 | _c6 = trapzd_noref(w).sum((-2, -1)) 136 | elif model == "d4s": 137 | w = einsum("...jia,...iaw->...jiw", gw, d4._get_alpha()) 138 | _c6 = _trapzd(w).sum((-2, -1)) 139 | else: 140 | raise ValueError(f"Unknown model: {model}") 141 | 142 | assert pytest.approx(c6.sum((-2, -1)).cpu(), rel=tol) == _c6.cpu() 143 | assert pytest.approx(refs.sum((-2, -1)).cpu(), rel=tol) == _c6.cpu() 144 | 145 | 146 | @pytest.mark.parametrize("model", ["d4", "d4s"]) 147 | def test_ref_charges_d4(model: str) -> 
None: 148 | numbers = torch.tensor([14, 1, 1, 1, 1]) 149 | 150 | if model == "d4": 151 | model_eeq = D4Model(numbers, ref_charges="eeq") 152 | model_gfn2 = D4Model(numbers, ref_charges="gfn2") 153 | elif model == "d4s": 154 | model_eeq = D4SModel(numbers, ref_charges="eeq") 155 | model_gfn2 = D4SModel(numbers, ref_charges="gfn2") 156 | else: 157 | raise ValueError(f"Unknown model: {model}") 158 | 159 | weights_eeq = model_eeq.weight_references() 160 | weights_gfn2 = model_gfn2.weight_references() 161 | 162 | assert pytest.approx(weights_eeq.cpu(), abs=1e-1) == weights_gfn2.cpu() 163 | 164 | 165 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 166 | @pytest.mark.parametrize("name", sample_list) 167 | @pytest.mark.parametrize("model", ["d4", "d4s"]) 168 | def test_weighted_pol(name: str, dtype: torch.dtype, model: str) -> None: 169 | dd: DD = {"device": DEVICE, "dtype": dtype} 170 | 171 | sample = samples[name] 172 | numbers = sample["numbers"].to(DEVICE) 173 | positions = sample["positions"].to(**dd) 174 | q = sample["q"].to(**dd) 175 | 176 | if model == "d4": 177 | d4 = D4Model(numbers, **dd) 178 | ref = sample["c6"].to(**dd) 179 | elif model == "d4s": 180 | d4 = D4SModel(numbers, **dd) 181 | ref = sample["c6_d4s"].to(**dd) 182 | else: 183 | raise ValueError(f"Unknown model: {model}") 184 | 185 | cn = cn_d4(numbers, positions) 186 | gw = d4.weight_references(cn=cn, q=q) 187 | aw = d4.get_weighted_pols(gw) 188 | c6 = trapzd_noref(aw) 189 | 190 | # Molecular C6 is always smaller than sqrt(C6_ii * C6_jj). 
191 | # (Cauchy-Schwarz inequality) 192 | diff = c6.sum() - ref.sum() 193 | assert diff < 0.0 194 | 195 | 196 | def test_d4sdebug() -> None: 197 | numbers = torch.tensor([14, 1, 1, 1, 1]) 198 | m_d4 = D4Model(numbers) 199 | m_debug = D4SDebug(numbers) 200 | 201 | weights_d4 = m_d4.weight_references() 202 | weights_debug = m_debug.weight_references() 203 | assert weights_d4.shape == weights_debug.shape[1:] 204 | 205 | c6_d4 = m_d4.get_atomic_c6(weights_d4) 206 | c6_debug = m_debug.get_atomic_c6(weights_debug) 207 | assert c6_d4.shape == c6_debug.shape == (5, 5) 208 | assert pytest.approx(c6_d4.cpu()) == c6_debug.cpu() 209 | 210 | 211 | def _trapzd(pol: Tensor) -> Tensor: 212 | thopi = 3.0 / 3.141592653589793238462643383279502884197 213 | 214 | weights = torch.tensor( 215 | [ 216 | 2.4999500000000000e-002, 217 | 4.9999500000000000e-002, 218 | 7.5000000000000010e-002, 219 | 0.1000000000000000, 220 | 0.1000000000000000, 221 | 0.1000000000000000, 222 | 0.1000000000000000, 223 | 0.1000000000000000, 224 | 0.1000000000000000, 225 | 0.1000000000000000, 226 | 0.1000000000000000, 227 | 0.1500000000000000, 228 | 0.2000000000000000, 229 | 0.2000000000000000, 230 | 0.2000000000000000, 231 | 0.2000000000000000, 232 | 0.3500000000000000, 233 | 0.5000000000000000, 234 | 0.7500000000000000, 235 | 1.0000000000000000, 236 | 1.7500000000000000, 237 | 2.5000000000000000, 238 | 1.2500000000000000, 239 | ], 240 | device=pol.device, 241 | dtype=pol.dtype, 242 | ) 243 | 244 | return thopi * einsum("w,...ijw,...jiw->...ij", *(weights, pol, pol)) 245 | -------------------------------------------------------------------------------- /test/test_model/test_params.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 
2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Sanity check for parameters since they are created from the Fortran parameters 19 | with a script. 20 | """ 21 | from __future__ import annotations 22 | 23 | import torch 24 | 25 | from tad_dftd4 import data, reference 26 | from tad_dftd4.reference import charge_eeq, charge_gfn2 27 | 28 | 29 | def test_params_shape() -> None: 30 | maxel = 104 # 103 elements + dummy 31 | assert reference.refc.shape == torch.Size((maxel, 7)) 32 | assert reference.refascale.shape == torch.Size((maxel, 7)) 33 | assert reference.refcovcn.shape == torch.Size((maxel, 7)) 34 | assert reference.refsys.shape == torch.Size((maxel, 7)) 35 | assert reference.refalpha.shape == torch.Size((maxel, 7, 23)) 36 | 37 | assert charge_eeq.clsq.shape == torch.Size((maxel, 7)) 38 | assert charge_eeq.clsh.shape == torch.Size((maxel, 7)) 39 | 40 | # GFN2 charges only up to Rn 41 | assert charge_gfn2.refq.shape == torch.Size((87, 7)) 42 | assert charge_gfn2.refh.shape == torch.Size((87, 7)) 43 | 44 | 45 | def test_data_shape() -> None: 46 | assert data.GAM.shape == torch.Size((119,)) 47 | assert data.R4R2.shape == torch.Size((119,)) 48 | -------------------------------------------------------------------------------- /test/test_model/test_weights.py: -------------------------------------------------------------------------------- 1 
| # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Test calculation of DFT-D4 model. 19 | 20 | For an explanation of the unusual loose tolerances, see `test_charges.py`. 21 | """ 22 | import pytest 23 | import torch 24 | import torch.nn.functional as F 25 | from tad_mctc._version import __tversion__ 26 | from tad_mctc.autograd import jacrev 27 | from tad_mctc.batch import pack 28 | from tad_mctc.ncoord import cn_d4 29 | 30 | from tad_dftd4.model import D4Model, D4SModel 31 | from tad_dftd4.typing import DD 32 | 33 | from ..conftest import DEVICE 34 | from .samples import samples 35 | 36 | 37 | def single( 38 | name: str, 39 | dtype: torch.dtype, 40 | with_cn: bool, 41 | with_q: bool, 42 | ) -> None: 43 | dd: DD = {"device": DEVICE, "dtype": dtype} 44 | tol = 1e-4 if dtype == torch.float32 else 1e-6 45 | 46 | sample = samples[name] 47 | numbers = sample["numbers"].to(DEVICE) 48 | positions = sample["positions"].to(**dd) 49 | 50 | d4 = D4Model(numbers, **dd) 51 | 52 | if with_cn is True: 53 | cn = cn_d4(numbers, positions) 54 | else: 55 | cn = None # positions.new_zeros(numbers.shape) 56 | 57 | if with_q is True: 58 | q = sample["q"].to(**dd) 59 | else: 60 | q = None # positions.new_zeros(numbers.shape) 61 | 62 | gwvec = d4.weight_references(cn, q) 63 | 64 | # pad reference tensor to always be of shape 
`(natoms, 7)` 65 | src = sample["gw"].to(**dd) 66 | ref = F.pad( 67 | input=src, 68 | pad=(0, 0, 0, 7 - src.size(0)), 69 | mode="constant", 70 | value=0, 71 | ).mT 72 | 73 | assert gwvec.dtype == ref.dtype 74 | assert gwvec.shape == ref.shape 75 | assert pytest.approx(gwvec.cpu(), abs=tol) == ref.cpu() 76 | 77 | 78 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 79 | def test_mb16_43_01(dtype: torch.dtype) -> None: 80 | single("MB16_43_01", dtype, with_cn=True, with_q=False) 81 | 82 | 83 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 84 | def test_mb16_43_02(dtype: torch.dtype) -> None: 85 | single("MB16_43_02", dtype, with_cn=False, with_q=True) 86 | 87 | 88 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 89 | def test_mb16_43_03(dtype: torch.dtype) -> None: 90 | single("MB16_43_03", dtype, with_cn=True, with_q=True) 91 | 92 | 93 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 94 | def test_sih4(dtype: torch.dtype) -> None: 95 | single("SiH4", dtype, with_cn=True, with_q=True) 96 | 97 | 98 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 99 | def test_lih(dtype: torch.dtype) -> None: 100 | single("LiH", dtype, with_cn=True, with_q=True) 101 | 102 | 103 | @pytest.mark.parametrize("dtype", [torch.float, torch.double]) 104 | @pytest.mark.parametrize("name1", ["LiH"]) 105 | @pytest.mark.parametrize("name2", ["LiH", "SiH4", "MB16_43_03"]) 106 | def test_batch(name1: str, name2: str, dtype: torch.dtype) -> None: 107 | dd: DD = {"device": DEVICE, "dtype": dtype} 108 | tol = 1e-4 if dtype == torch.float32 else 1e-6 109 | 110 | sample1, sample2 = samples[name1], samples[name2] 111 | numbers = pack( 112 | [ 113 | sample1["numbers"].to(DEVICE), 114 | sample2["numbers"].to(DEVICE), 115 | ] 116 | ) 117 | positions = pack( 118 | [ 119 | sample1["positions"].to(**dd), 120 | sample2["positions"].to(**dd), 121 | ] 122 | ) 123 | q = pack( 124 | [ 125 | sample1["q"].to(**dd), 126 | 
sample2["q"].to(**dd), 127 | ] 128 | ) 129 | 130 | d4 = D4Model(numbers, **dd) 131 | 132 | cn = cn_d4(numbers, positions) 133 | gwvec = d4.weight_references(cn, q) 134 | 135 | # pad reference tensor to always be of shape `(natoms, 7)` 136 | src1 = sample1["gw"].to(**dd) 137 | src2 = sample2["gw"].to(**dd) 138 | 139 | ref = pack( 140 | [ 141 | F.pad( 142 | input=src1, 143 | pad=(0, 0, 0, 7 - src1.size(0)), 144 | mode="constant", 145 | value=0, 146 | ).mT, 147 | src2.mT, 148 | ] 149 | ) 150 | 151 | assert gwvec.dtype == ref.dtype 152 | assert gwvec.shape == ref.shape 153 | assert pytest.approx(gwvec.cpu(), abs=tol) == ref.cpu() 154 | 155 | 156 | @pytest.mark.skipif(__tversion__ < (2, 0, 0), reason="Requires torch>=2.0.0") 157 | @pytest.mark.parametrize("name", ["LiH", "SiH4", "MB16_43_03"]) 158 | @pytest.mark.parametrize("model", ["d4", "d4s"]) 159 | def test_grad_q(name: str, model: str) -> None: 160 | dd: DD = {"device": DEVICE, "dtype": torch.float64} 161 | 162 | sample = samples[name] 163 | numbers = sample["numbers"].to(DEVICE) 164 | positions = sample["positions"].to(**dd) 165 | 166 | q = sample["q"].to(**dd) 167 | q_grad = q.detach().clone().requires_grad_(True) 168 | 169 | if model == "d4": 170 | d4 = D4Model(numbers, **dd) 171 | elif model == "d4s": 172 | d4 = D4SModel(numbers, **dd) 173 | else: 174 | raise ValueError(f"Invalid model: {model}") 175 | 176 | cn = cn_d4(numbers, positions) 177 | 178 | # analytical gradient 179 | _, dgwdq_ana = d4.weight_references(cn, q, with_dgwdq=True) 180 | 181 | # autodiff gradient 182 | dgwdq_auto = jacrev(d4.weight_references, 1)(cn, q_grad) 183 | assert isinstance(dgwdq_auto, torch.Tensor) 184 | dgwdq_auto = dgwdq_auto.sum(-1).detach() 185 | 186 | assert pytest.approx(dgwdq_auto.cpu(), abs=1e-6) == dgwdq_ana.cpu() 187 | 188 | 189 | @pytest.mark.skipif(__tversion__ < (2, 0, 0), reason="Requires torch>=2.0.0") 190 | @pytest.mark.parametrize("name1", ["LiH"]) 191 | @pytest.mark.parametrize("name2", ["LiH", "SiH4", 
"MB16_43_03"]) 192 | @pytest.mark.parametrize("model", ["d4", "d4s"]) 193 | def test_grad_q_batch(name1: str, name2: str, model: str) -> None: 194 | dd: DD = {"device": DEVICE, "dtype": torch.float64} 195 | 196 | sample1, sample2 = samples[name1], samples[name2] 197 | numbers = pack( 198 | [ 199 | sample1["numbers"].to(DEVICE), 200 | sample2["numbers"].to(DEVICE), 201 | ] 202 | ) 203 | positions = pack( 204 | [ 205 | sample1["positions"].to(**dd), 206 | sample2["positions"].to(**dd), 207 | ] 208 | ) 209 | 210 | q = pack( 211 | [ 212 | sample1["q"].to(**dd), 213 | sample2["q"].to(**dd), 214 | ] 215 | ) 216 | 217 | q_grad = q.detach().clone().requires_grad_(True) 218 | 219 | if model == "d4": 220 | d4 = D4Model(numbers, **dd) 221 | elif model == "d4s": 222 | d4 = D4SModel(numbers, **dd) 223 | else: 224 | raise ValueError(f"Invalid model: {model}") 225 | 226 | cn = cn_d4(numbers, positions) 227 | 228 | # analytical gradient 229 | _, dgwdq_ana = d4.weight_references(cn, q, with_dgwdq=True) 230 | 231 | # autodiff gradient 232 | dgwdq_auto = jacrev(d4.weight_references, 1)(cn, q_grad) 233 | assert isinstance(dgwdq_auto, torch.Tensor) 234 | dgwdq_auto = dgwdq_auto.sum((-1, -2)).detach() 235 | 236 | assert dgwdq_auto.shape == dgwdq_ana.shape 237 | assert pytest.approx(dgwdq_auto.cpu(), abs=1e-6) == dgwdq_ana.cpu() 238 | 239 | 240 | @pytest.mark.skipif(__tversion__ < (2, 0, 0), reason="Requires torch>=2.0.0") 241 | @pytest.mark.parametrize("name", ["LiH", "SiH4", "MB16_43_03"]) 242 | def test_grad_cn(name: str) -> None: 243 | dd: DD = {"device": DEVICE, "dtype": torch.float64} 244 | 245 | sample = samples[name] 246 | numbers = sample["numbers"].to(DEVICE) 247 | 248 | pos = sample["positions"].to(**dd) 249 | pos_grad = pos.detach().clone().requires_grad_(True) 250 | 251 | d4 = D4Model(numbers, **dd) 252 | 253 | # analytical gradient 254 | cn = cn_d4(numbers, pos) 255 | _, dgwdq_ana = d4.weight_references(cn, with_dgwdcn=True) 256 | 257 | # autodiff gradient 258 | cn_grad = 
cn_d4(numbers, pos_grad) 259 | dgwdcn_auto = jacrev(d4.weight_references, 0)(cn_grad) 260 | assert isinstance(dgwdcn_auto, torch.Tensor) 261 | dgwdcn_auto = dgwdcn_auto.sum(-1).detach() 262 | 263 | assert pytest.approx(dgwdcn_auto.cpu(), abs=1e-6) == -dgwdq_ana.cpu() 264 | 265 | 266 | @pytest.mark.skipif(__tversion__ < (2, 0, 0), reason="Requires torch>=2.0.0") 267 | @pytest.mark.parametrize("name", ["LiH", "SiH4", "MB16_43_03"]) 268 | def test_grad_both(name: str) -> None: 269 | dd: DD = {"device": DEVICE, "dtype": torch.float64} 270 | 271 | sample = samples[name] 272 | numbers = sample["numbers"].to(DEVICE) 273 | 274 | pos = sample["positions"].to(**dd) 275 | pos_grad = pos.detach().clone().requires_grad_(True) 276 | 277 | q = sample["q"].to(**dd) 278 | q_grad = q.detach().clone().requires_grad_(True) 279 | 280 | d4 = D4Model(numbers, **dd) 281 | 282 | # analytical gradient 283 | cn = cn_d4(numbers, pos) 284 | _, dgwdq_ana, dgwdcn_ana = d4.weight_references( 285 | cn, q, with_dgwdcn=True, with_dgwdq=True 286 | ) 287 | 288 | # autodiff gradient 289 | cn_grad = cn_d4(numbers, pos_grad) 290 | dgwdq_auto, dgwdcn_auto = jacrev( 291 | d4.weight_references, 292 | (0, 1), # type: ignore 293 | )(cn_grad, q_grad) 294 | 295 | assert isinstance(dgwdcn_auto, torch.Tensor) 296 | dgwdcn_auto = dgwdcn_auto.sum(-1).detach() 297 | 298 | assert pytest.approx(dgwdcn_auto.cpu(), abs=1e-6) == dgwdcn_ana.cpu() 299 | 300 | assert isinstance(dgwdq_auto, torch.Tensor) 301 | dgwdq_auto = dgwdq_auto.sum(-1).detach() 302 | 303 | assert pytest.approx(dgwdq_auto.cpu(), abs=1e-6) == -dgwdq_ana.cpu() 304 | -------------------------------------------------------------------------------- /test/test_param/__init__.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 
2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | -------------------------------------------------------------------------------- /test/test_param/test_fail.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Test reading parameters from TOML file. 
19 | """ 20 | from __future__ import annotations 21 | 22 | import pytest 23 | 24 | from tad_dftd4.damping import get_params 25 | 26 | 27 | def test_unknown_func() -> None: 28 | with pytest.raises(KeyError): 29 | get_params("unknown") 30 | 31 | 32 | def test_unknown_variant() -> None: 33 | with pytest.raises(KeyError): 34 | get_params("pbe", variant="unknown") # type: ignore 35 | -------------------------------------------------------------------------------- /test/test_param/test_read.py: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | """ 18 | Test reading parameters from TOML file. 
19 | """ 20 | from __future__ import annotations 21 | 22 | import pytest 23 | 24 | from tad_dftd4.damping.parameters import get_params, get_params_default 25 | 26 | 27 | def test_default() -> None: 28 | params = get_params_default() 29 | assert isinstance(params, dict) 30 | assert "s6" in params 31 | 32 | 33 | @pytest.mark.parametrize("func", ["pbe", "b3lyp", "revpbe"]) 34 | def test_func(func: str) -> None: 35 | params = get_params(func) 36 | assert isinstance(params, dict) 37 | assert "a1" in params 38 | assert "a2" in params 39 | 40 | 41 | def test_with_doi() -> None: 42 | params = get_params("pbe", with_reference=True) 43 | assert isinstance(params, dict) 44 | assert "doi" in params 45 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # This file is part of tad-dftd4. 2 | # 3 | # SPDX-Identifier: Apache-2.0 4 | # Copyright (C) 2024 Grimme Group 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | [tox] 18 | min_version = 4.0 19 | isolated_build = True 20 | envlist = 21 | py38-torch{1110,1121,1131,201,212,222,231,240,241}, 22 | py39-torch{1110,1121,1131,201,212,222,231,240,241,250,251,260}, 23 | py310-torch{1110,1121,1131,201,212,222,231,240,241,250,251,260}, 24 | py311-torch{1131,201,212,222,231,240,241,250,251,260} 25 | py312-torch{222,231,240,241,250,251,260} 26 | 27 | [testenv] 28 | setenv = 29 | PIP_EXTRA_INDEX_URL = {env:PIP_EXTRA_INDEX_URL:https://download.pytorch.org/whl/cpu} 30 | 31 | deps = 32 | torch1110: torch==1.11.0 33 | torch1120: torch==1.12.0 34 | torch1121: torch==1.12.1 35 | torch1130: torch==1.13.0 36 | torch1131: torch==1.13.1 37 | torch200: torch==2.0.0 38 | torch201: torch==2.0.1 39 | torch210: torch==2.1.0 40 | torch211: torch==2.1.1 41 | torch212: torch==2.1.2 42 | torch220: torch==2.2.0 43 | torch221: torch==2.2.1 44 | torch222: torch==2.2.2 45 | torch230: torch==2.3.0 46 | torch231: torch==2.3.1 47 | torch240: torch==2.4.0 48 | torch241: torch==2.4.1 49 | torch250: torch==2.5.0 50 | torch251: torch==2.5.1 51 | torch260: torch==2.6.0 52 | .[tox] 53 | 54 | commands = 55 | pytest -vv {posargs: \ 56 | -n logical \ 57 | --random-order-bucket=global \ 58 | --cov=tad_dftd4 \ 59 | --cov-report=term-missing \ 60 | --cov-report=xml:coverage.xml \ 61 | test} 62 | --------------------------------------------------------------------------------