├── torch_dftd
│   ├── nn
│   │   ├── __init__.py
│   │   ├── params
│   │   │   ├── __init__.py
│   │   │   ├── dftd3_params.npz
│   │   │   └── dftd2_params.py
│   │   ├── dftd2_module.py
│   │   ├── dftd3_module.py
│   │   └── base_dftd_module.py
│   ├── functions
│   │   ├── __init__.py
│   │   ├── smoothing.py
│   │   ├── distance.py
│   │   ├── dftd2.py
│   │   ├── edge_extraction.py
│   │   ├── triplets_kernel.py
│   │   ├── triplets.py
│   │   └── dftd3.py
│   ├── testing
│   │   ├── __init__.py
│   │   └── damping.py
│   ├── _version.py
│   ├── __init__.py
│   ├── torch_dftd3_calculator.py
│   └── dftd3_xc_params.py
├── setup.cfg
├── .flexci
│   ├── config.pbtxt
│   ├── build_and_push.sh
│   └── pytest_script.sh
├── tests
│   ├── test_init.py
│   ├── nn_tests
│   │   ├── test_dftd2_module.py
│   │   └── test_dftd3_module.py
│   ├── functions_tests
│   │   ├── test_edge_extraction.py
│   │   └── test_triplets.py
│   ├── test_torch_dftd3_calculator_benchmark.py
│   ├── test_torch_dftd3_calculator_batch.py
│   ├── test_torch_dftd3_calculator_zero_adjacency.py
│   └── test_torch_dftd3_calculator.py
├── examples
│   ├── quick_start.py
│   └── check_speed.py
├── pyproject.toml
├── setup.py
├── LICENSE
├── docker
│   └── Dockerfile
├── .gitignore
└── README.md
/torch_dftd/nn/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/torch_dftd/functions/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/torch_dftd/nn/params/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/torch_dftd/testing/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/torch_dftd/_version.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.3.0"
2 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [tool:pytest]
2 | markers =
3 | slow: mark test as slow.
4 |
--------------------------------------------------------------------------------
/torch_dftd/__init__.py:
--------------------------------------------------------------------------------
1 | from torch_dftd import _version # NOQA
2 |
3 | __version__ = _version.__version__
4 |
--------------------------------------------------------------------------------
/torch_dftd/nn/params/dftd3_params.npz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/shinh/torch-dftd/master/torch_dftd/nn/params/dftd3_params.npz
--------------------------------------------------------------------------------
/.flexci/config.pbtxt:
--------------------------------------------------------------------------------
1 | configs {
2 | # Project name.
3 | key: "torch-dftd.pytest"
4 | value {
5 | requirement {
6 | cpu: 6
7 | memory: 36
8 | disk: 10
9 | gpu: 1
10 | }
11 | time_limit {
12 | seconds: 1800
13 | }
14 | command:
15 | "bash -x .flexci/pytest_script.sh"
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/tests/test_init.py:
--------------------------------------------------------------------------------
1 | import pkg_resources
2 | import pytest
3 | import torch_dftd
4 |
5 |
6 | def test_version():
7 | expect = pkg_resources.get_distribution("torch_dftd").version
8 | actual = torch_dftd.__version__
9 | assert expect == actual
10 |
11 |
12 | if __name__ == "__main__":
13 | pytest.main([__file__, "-v", "-s"])
14 |
--------------------------------------------------------------------------------
/examples/quick_start.py:
--------------------------------------------------------------------------------
1 | from ase.build import molecule
2 | from torch_dftd.torch_dftd3_calculator import TorchDFTD3Calculator
3 |
4 | if __name__ == "__main__":
5 | atoms = molecule("CH3CH2OCH3")
6 | # device="cuda:0" for fast GPU computation.
7 | calc = TorchDFTD3Calculator(atoms=atoms, device="cpu", damping="bj")
8 |
9 | energy = atoms.get_potential_energy()
10 | forces = atoms.get_forces()
11 |
12 | print(f"energy {energy} eV")
13 | print(f"forces {forces}")
14 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.pysen]
2 | version = "0.9"
3 |
4 | [tool.pysen.lint]
5 | enable_black = true
6 | enable_flake8 = false
7 | enable_isort = true
8 | enable_mypy = false
9 | mypy_preset = "strict"
10 | line_length = 99
11 | py_version = "py37"
12 | [[tool.pysen.lint.mypy_targets]]
13 | paths = ["."]
14 |
15 | [tool.black] # automatically generated by pysen
16 | line-length = 99
17 | target-version = ["py37"]
18 |
19 | [tool.isort] # automatically generated by pysen
20 | default_section = "THIRDPARTY"
21 | ensure_newline_before_comments = true
22 | force_grid_wrap = 0
23 | force_single_line = false
24 | include_trailing_comma = true
25 | line_length = 99
26 | multi_line_output = 3
27 | use_parentheses = true
28 |
29 |
--------------------------------------------------------------------------------
/torch_dftd/functions/smoothing.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from torch import Tensor
3 |
4 |
5 | def poly_smoothing(r: Tensor, cutoff: float) -> Tensor:
6 | """Computes a smooth step from 1 to 0 starting at 1 bohr before the cutoff
7 |
8 | Args:
9 | r (Tensor): (n_edges,)
10 | cutoff (float): ()
11 |
12 | Returns:
13 | r (Tensor): Smoothed `r`
14 | """
15 | cuton = cutoff - 1
16 | x = (cutoff - r) / (cutoff - cuton)
17 | x2 = x ** 2
18 | x3 = x2 * x
19 | x4 = x3 * x
20 | x5 = x4 * x
21 | return torch.where(
22 | r <= cuton,
23 | torch.ones_like(x),
24 | torch.where(r >= cutoff, torch.zeros_like(x), 6 * x5 - 15 * x4 + 10 * x3),
25 | )
26 |
--------------------------------------------------------------------------------
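A minimal usage sketch for `poly_smoothing` above (not part of the repository): the polynomial bridges from 1 at `cutoff - 1` to 0 at `cutoff`, so with `cutoff=50.0` the value at `r=49.5` is exactly 0.5.

```python
import torch
from torch_dftd.functions.smoothing import poly_smoothing

# Values at or below cuton (= cutoff - 1) stay at 1, values at or beyond cutoff drop to 0,
# and the 6x^5 - 15x^4 + 10x^3 polynomial interpolates smoothly in between.
r = torch.tensor([48.0, 49.0, 49.5, 50.0, 51.0])
print(poly_smoothing(r, cutoff=50.0))  # -> [1.0, 1.0, 0.5, 0.0, 0.0]
```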
/tests/nn_tests/test_dftd2_module.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import torch
3 | from torch_dftd.nn.dftd2_module import DFTD2Module
4 |
5 |
6 | def test_dftd2_module_init():
7 | params = dict(s6=1.2, alp=20.0, rs6=1.1, s18=0.0) # rs18=None
8 | module = DFTD2Module(params)
9 | assert module.c6ab.shape == (87, 87)
10 | assert module.r0ab.shape == (87, 87)
11 |
12 |
13 | def test_dftd2_module_calc():
14 | params = dict(s6=1.2, alp=20.0, rs6=1.1, s18=0.0) # rs18=None
15 | dtype = torch.float32
16 | module = DFTD2Module(params, bidirectional=False, dtype=dtype)
17 | Z = torch.tensor([1, 2, 3], dtype=torch.int64)
18 | pos = torch.tensor([[1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [1.0, 2.0, 0.0]], dtype=dtype)
19 | edge_index = torch.tensor([[0, 0, 1], [1, 2, 2]], dtype=torch.int64)
20 | results = module.calc_energy(Z, pos, edge_index)
21 | assert results[0]["energy"] == pytest.approx(-0.0808654052663793)
22 |
23 |
24 | if __name__ == "__main__":
25 | pytest.main([__file__, "-v", "-s"])
26 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import Dict, List
3 |
4 | from setuptools import find_packages, setup # NOQA
5 |
6 | setup_requires: List[str] = []
7 | install_requires: List[str] = [
8 | "ase>=3.18, <4.0.0", # Note that we require ase==3.21.1 for pytest.
9 | "pymatgen>=2020.1.28",
10 | ]
11 | extras_require: Dict[str, List[str]] = {
12 | "develop": ["pysen[lint]==0.9.1"],
13 | }
14 |
15 |
16 | __version__: str
17 | here = os.path.abspath(os.path.dirname(__file__))
18 | # Get __version__ variable
19 | exec(open(os.path.join(here, "torch_dftd", "_version.py")).read())
20 |
21 | package_data = {"torch_dftd": ["nn/params/dftd3_params.npz"]}
22 |
23 | setup(
24 | name="torch-dftd",
25 | version=__version__, # NOQA
26 | description="pytorch implementation of dftd2 & dftd3",
27 | packages=find_packages(),
28 | setup_requires=setup_requires,
29 | install_requires=install_requires,
30 | extras_require=extras_require,
31 | include_package_data=True,
32 | package_data=package_data,
33 | )
34 |
--------------------------------------------------------------------------------
/tests/nn_tests/test_dftd3_module.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import torch
3 | from torch_dftd.nn.dftd3_module import DFTD3Module
4 |
5 |
6 | def test_dftd3_module_init():
7 | params = dict(s6=1.0, alp=14.0, rs6=0.486434, s18=0.672820, rs18=3.656466)
8 | module = DFTD3Module(params)
9 | assert module.c6ab.shape == (95, 95, 5, 5, 3)
10 | assert module.r0ab.shape == (95, 95)
11 | assert module.rcov.shape == (95,)
12 | assert module.r2r4.shape == (95,)
13 |
14 |
15 | def test_dftd3_module_calc():
16 | params = dict(s6=1.0, alp=14.0, rs6=0.486434, s18=0.672820, rs18=3.656466)
17 | dtype = torch.float32
18 | module = DFTD3Module(params, bidirectional=False, dtype=dtype)
19 | Z = torch.tensor([1, 2, 3], dtype=torch.int64)
20 | pos = torch.tensor([[1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [1.0, 2.0, 0.0]], dtype=dtype)
21 | edge_index = torch.tensor([[0, 0, 1], [1, 2, 2]], dtype=torch.int64)
22 | results = module.calc_energy(Z, pos, edge_index)
23 | assert results[0]["energy"] == pytest.approx(-0.6810069680213928)
24 |
25 |
26 | if __name__ == "__main__":
27 | pytest.main([__file__, "-v", "-s"])
28 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Preferred Networks, Inc.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/.flexci/build_and_push.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash -uex
2 |
3 | IMAGE_BASE="${1:-}"
4 | IMAGE_PUSH=1
5 | if [ "${IMAGE_BASE}" = "" ]; then
6 | IMAGE_BASE="torch-dftd"
7 | IMAGE_PUSH=0
8 | fi
9 |
10 | TEST_PIP_PACKAGES="
11 | flake8 pytest pytest-cov pytest-xdist pytest-benchmark
12 | "
13 |
14 | SRC_ROOT="$(cd "$(dirname "${BASH_SOURCE}")/.."; pwd)"
15 |
16 | docker_build_and_push() {
17 |
18 | IMAGE_TAG="${1}"; shift
19 | IMAGE_NAME="${IMAGE_BASE}:${IMAGE_TAG}"
20 |
21 | pushd "$(dirname ${0})"
22 | docker build -f ${SRC_ROOT}/docker/Dockerfile -t "${IMAGE_NAME}" "$@" .
23 | popd
24 |
25 | if [ "${IMAGE_PUSH}" = "0" ]; then
26 | echo "Skipping docker push."
27 | else
28 | docker push "${IMAGE_NAME}"
29 | fi
30 | }
31 |
32 | WAIT_PIDS=""
33 |
34 | # PyTorch 1.5 + Python 3.6
35 | docker_build_and_push torch15 \
36 | --build-arg base_image="nvidia/cuda:10.2-cudnn7-devel-ubuntu18.04" \
37 | --build-arg python_version="3.6.12" \
38 | --build-arg pip_packages="torch==1.5.* torchvision==0.6.* ${TEST_PIP_PACKAGES}" &
39 | WAIT_PIDS="$! ${WAIT_PIDS}"
40 |
41 | # Wait until the build complete.
42 | for P in ${WAIT_PIDS}; do
43 | wait ${P}
44 | done
45 |
--------------------------------------------------------------------------------
/torch_dftd/functions/distance.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | import torch
4 | from torch import Tensor
5 |
6 |
7 | def calc_distances(
8 | pos: Tensor,
9 | edge_index: Tensor,
10 | cell: Optional[Tensor] = None,
11 | shift_pos: Optional[Tensor] = None,
12 | eps=1e-20,
13 | ) -> Tensor:
14 | """Distance calculation function.
15 |
16 | Args:
17 | pos (Tensor): (n_atoms, 3) atom positions.
18 | edge_index (Tensor): (2, n_edges) edge_index for graph.
19 | cell (Tensor): cell size, None for non periodic system.
20 | shift_pos (Tensor): (n_edges, 3) position shift vectors of edges owing to the periodic boundary. It should be given in the same length unit as `pos`.
21 | eps (float): Small float value to avoid NaN in backward when the distance is 0.
22 |
23 | Returns:
24 | Dij (Tensor): (n_edges, ) distances of edges
25 |
26 | """
27 |
28 | idx_i, idx_j = edge_index
29 | # calculate interatomic distances
30 | Ri = pos[idx_i]
31 | Rj = pos[idx_j]
32 | if cell is not None:
33 | Rj += shift_pos
34 | # eps is to avoid Nan in backward when Dij = 0 with sqrt.
35 | Dij = torch.sqrt(torch.sum((Ri - Rj) ** 2, dim=-1) + eps)
36 | return Dij
37 |
--------------------------------------------------------------------------------
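A quick illustrative check of `calc_distances` (not in the repository): with `cell=None` the shift vectors are ignored and plain Euclidean distances are returned.

```python
import torch
from torch_dftd.functions.distance import calc_distances

# Three collinear atoms spaced 1 angstrom apart; edges 0-1, 0-2, 1-2.
pos = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [2.0, 0.0, 0.0]])
edge_index = torch.tensor([[0, 0, 1], [1, 2, 2]], dtype=torch.int64)
print(calc_distances(pos, edge_index))  # ~[1.0, 2.0, 1.0]; eps keeps the sqrt differentiable at 0
```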
/.flexci/pytest_script.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eu
3 |
4 | #IMAGE=pytorch/pytorch:1.5.1-cuda10.1-cudnn7-devel
5 | IMAGE=asia.gcr.io/pfn-public-ci/torch-dftd-ci:torch15
6 |
7 |
8 | main() {
9 | SRC_ROOT="$(cd "$(dirname "${BASH_SOURCE}")/.."; pwd)"
10 |
11 | prepare_docker &
12 | wait
13 |
14 | # Build and push docker images for unit tests.
15 | # bash -x -c "${SRC_ROOT}/.flexci/build_and_push.sh" \
16 | # "${IMAGE}"
17 |
18 | # 1st pytest: when xdist is enabled with `-n $(nproc)`, benchmark is not executed.
19 | # 2nd pytest: only execute pytest-benchmark.
20 | docker run --runtime=nvidia --rm --volume="$(pwd)":/workspace -w /workspace \
21 | ${IMAGE} \
22 | bash -x -c "pip install flake8 pytest pytest-cov pytest-xdist pytest-benchmark && \
23 | pip install cupy-cuda102 pytorch-pfn-extras!=0.5.0 && \
24 | pip install -e .[develop] && \
25 | pysen run lint && \
26 | pytest --cov=torch_dftd -n $(nproc) -m 'not slow' tests &&
27 | pytest --benchmark-only tests"
28 | }
29 |
30 |
31 | # prepare_docker makes docker use tmpfs to speed up.
32 | # CAVEAT: Do not use docker during this is running.
33 | prepare_docker() {
34 | service docker stop
35 | mount -t tmpfs -o size=100% tmpfs /var/lib/docker
36 | service docker start
37 | gcloud auth configure-docker
38 | }
39 |
40 |
41 | main "$@"
42 |
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | # FROM nvidia/cuda:10.2-cudnn7-devel-ubuntu18.04
2 | ARG base_image
3 | FROM ${base_image}
4 |
5 | # Install pyenv requirements.
6 | # https://github.com/pyenv/pyenv/wiki/Common-build-problems#requirements
7 | RUN export DEBIAN_FRONTEND=noninteractive && \
8 | apt-get -y update && \
9 | apt-get -y install \
10 | build-essential libssl-dev zlib1g-dev libbz2-dev \
11 | libreadline-dev libsqlite3-dev wget curl llvm libncurses5-dev libncursesw5-dev \
12 | xz-utils tk-dev libffi-dev liblzma-dev git gfortran && \
13 | rm -rf /var/lib/apt/lists/* /var/cache/apt/archives/*
14 |
15 | # Env setting
16 | ENV LC_ALL=C.UTF-8
17 | ENV LANG=C.UTF-8
18 |
19 | # Install pyenv.
20 | RUN git clone https://github.com/pyenv/pyenv.git /opt/pyenv
21 | ENV PYENV_ROOT=/opt/pyenv
22 | ENV PATH ${PYENV_ROOT}/shims:${PYENV_ROOT}/bin:${PATH}
23 |
24 | # Install Python.
25 | ARG python_version
26 | RUN pyenv install ${python_version} && \
27 | pyenv global ${python_version}
28 |
29 | # Install test dependencies.
30 | ARG pip_packages
31 | RUN pip install -U pip && \
32 | pip install ${pip_packages} && \
33 | pip list
34 |
35 | # Install DFTD3
36 | RUN cd /tmp && \
37 | wget https://www.chemie.uni-bonn.de/pctc/mulliken-center/software/dft-d3/dftd3.tgz && \
38 | tar zxvf dftd3.tgz && \
39 | make && \
40 | mv dftd3 /usr/local/bin && \
41 | rm dftd3.tgz
42 |
--------------------------------------------------------------------------------
/tests/functions_tests/test_edge_extraction.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import torch
3 | from torch_dftd.functions.edge_extraction import calc_neighbor_by_ase, calc_neighbor_by_pymatgen
4 |
5 |
6 | def test_calc_neighbor_equivalent():
7 | n_nodes = 5
8 | pbc = torch.tensor([True, True, True])
9 | cell = torch.randn((3, 3))
10 | rel_pos = torch.rand((n_nodes, 3)) # relative position inside cell
11 | pos = torch.matmul(rel_pos, cell)
12 | cutoff = torch.rand(1).item() * 5.0
13 |
14 | edge_index1, S1 = calc_neighbor_by_ase(pos, cell, pbc, cutoff)
15 | edge_index2, S2 = calc_neighbor_by_pymatgen(pos, cell, pbc, cutoff)
16 |
17 | n_edges = edge_index1.shape[1]
18 | assert (
19 | edge_index1.shape == edge_index2.shape
20 | ), f"{edge_index1.shape} != {edge_index2.shape}, edge shape does not match!"
21 | assert S1.shape == S2.shape, f"{S1.shape} != {S2.shape}, Shift tensor shape does not match!"
22 | edge_shift_list1 = []
23 | edge_shift_list2 = []
24 | S1_int = S1.type(torch.long)
25 | S2_int = S2.type(torch.long)
26 | for i in range(n_edges):
27 | edge_shift_list1.append(
28 | (
29 | edge_index1[0, i].item(),
30 | edge_index1[1, i].item(),
31 | S1_int[i, 0].item(),
32 | S1_int[i, 1].item(),
33 | S1_int[i, 2].item(),
34 | )
35 | )
36 | edge_shift_list2.append(
37 | (
38 | edge_index2[0, i].item(),
39 | edge_index2[1, i].item(),
40 | S2_int[i, 0].item(),
41 | S2_int[i, 1].item(),
42 | S2_int[i, 2].item(),
43 | )
44 | )
45 | assert set(edge_shift_list1) == set(edge_shift_list2)
46 |
47 |
48 | if __name__ == "__main__":
49 | pytest.main([__file__, "-v", "-s"])
50 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | # PyCharm
132 | .idea/
133 | tests/ase_dftd3.out
134 | tests/ase_dftd3.xyz
135 |
--------------------------------------------------------------------------------
/tests/test_torch_dftd3_calculator_benchmark.py:
--------------------------------------------------------------------------------
1 | import tempfile
2 | from typing import List
3 |
4 | import numpy as np
5 | import pytest
6 | from ase import Atoms
7 | from ase.build import bulk, fcc111, molecule
8 | from ase.calculators.dftd3 import DFTD3
9 | from ase.units import Bohr
10 | from torch_dftd.torch_dftd3_calculator import TorchDFTD3Calculator
11 |
12 |
13 | @pytest.fixture(
14 | params=[
15 | pytest.param("mol", id="mol"),
16 | pytest.param("slab", id="slab"),
17 | pytest.param("large", marks=[pytest.mark.slow], id="large"),
18 | ]
19 | )
20 | def atoms(request) -> Atoms:
21 | """Initialization"""
22 | mol = molecule("CH3CH2OCH3")
23 |
24 | slab = fcc111("Au", size=(2, 1, 3), vacuum=80.0)
25 | slab.pbc = np.array([True, True, True])
26 |
27 | large_bulk = bulk("Pt", "fcc") * (4, 4, 4)
28 |
29 | atoms_dict = {"mol": mol, "slab": slab, "large": large_bulk}
30 | return atoms_dict[request.param]
31 |
32 |
33 | def calc_energy(calculator, atoms):
34 | calculator.reset()
35 | atoms.calc = calculator
36 | e1 = atoms.get_potential_energy()
37 | return True
38 |
39 |
40 | def calc_force_stress(calculator, atoms):
41 | calculator.reset()
42 | atoms.calc = calculator
43 | f1 = atoms.get_forces()
44 | if np.all(atoms.pbc == np.array([True, True, True])):
45 | s1 = atoms.get_stress()
46 | return True
47 |
48 |
49 | def test_dftd3_calculator_benchmark(atoms, benchmark):
50 | damping = "bj"
51 | xc = "pbe"
52 | old = False
53 | cutoff = 95 * Bohr
54 | with tempfile.TemporaryDirectory() as tmpdirname:
55 | dftd3_calc = DFTD3(
56 | damping=damping, xc=xc, grad=True, old=old, cutoff=cutoff, directory=tmpdirname
57 | )
58 | benchmark.pedantic(
59 | calc_force_stress,
60 | kwargs=dict(calculator=dftd3_calc, atoms=atoms),
61 | rounds=3,
62 | iterations=5,
63 | )
64 |
65 |
66 | @pytest.mark.parametrize("device", ["cpu", "cuda:0"])
67 | def test_torch_dftd3_calculator_benchmark(atoms, device, benchmark):
68 | damping = "bj"
69 | xc = "pbe"
70 | old = False
71 | cutoff = 95 * Bohr
72 | dftd3_calc = TorchDFTD3Calculator(
73 | damping=damping,
74 | xc=xc,
75 | grad=True,
76 | old=old,
77 | cutoff=cutoff,
78 | device=device,
79 | )
80 | # Dry run once
81 | calc_force_stress(calculator=dftd3_calc, atoms=atoms)
82 |
83 | benchmark.pedantic(
84 | calc_force_stress, kwargs=dict(calculator=dftd3_calc, atoms=atoms), rounds=3, iterations=5
85 | )
86 |
87 |
88 | if __name__ == "__main__":
89 | pytest.main([__file__, "-v", "-s"])
90 |
--------------------------------------------------------------------------------
/torch_dftd/functions/dftd2.py:
--------------------------------------------------------------------------------
1 | """pytorch implementation of Grimme's D2 method""" # NOQA
2 | from typing import Dict, Optional
3 |
4 | import torch
5 | from torch import Tensor
6 | from torch_dftd.functions.smoothing import poly_smoothing
7 |
8 |
9 | def edisp_d2(
10 | Z: Tensor,
11 | r: Tensor,
12 | edge_index: Tensor,
13 | r0ab: Tensor,
14 | c6ab: Tensor,
15 | params: Dict[str, float],
16 | damping: str = "zero",
17 | bidirectional: bool = False,
18 | cutoff: Optional[float] = None,
19 | batch: Optional[Tensor] = None,
20 | batch_edge: Optional[Tensor] = None,
21 | cutoff_smoothing: str = "none",
22 | ):
23 | """compute D2 dispersion energy in Hartree
24 |
25 | Args:
26 | Z (Tensor): (n_atoms,) atomic numbers
27 | r (Tensor): (n_edges,) distance in **bohr**
28 | edge_index (Tensor): (2, n_edges)
29 | r0ab (Tensor): (n_atom_types, n_atom_types) Pre-computed R0AB parameter.
30 | c6ab (Tensor): (n_atom_types, n_atom_types) Pre-computed C6AB parameter.
31 | params (dict): xc-dependent parameters. alp6, s6, rs6.
32 | damping (str): damping method, only "zero" is supported.
33 | bidirectional (bool): calculated `edge_index` is bidirectional or not.
34 | cutoff (float or None): cutoff distance in **bohr**
35 | batch (Tensor or None): (n_atoms,)
36 | batch_edge (Tensor or None): (n_edges,)
37 | cutoff_smoothing (str): cutoff smoothing makes gradient smooth at `cutoff` distance
38 |
39 | Returns:
40 | energy: (n_graphs,) Energy in Hartree unit.
41 | """
42 | # compute all necessary powers of the distance
43 | # square of distances
44 | r2 = r ** 2
45 | r6 = r2 ** 3
46 |
47 | idx_i, idx_j = edge_index
48 | # compute all necessary quantities
49 | Zi = Z[idx_i] # (n_edges,)
50 | Zj = Z[idx_j]
51 |
52 | if damping != "zero":
53 | raise ValueError(
54 | f"Only zero-damping can be used with the D2 dispersion correction method!"
55 | )
56 | alp6 = params["alp"]
57 | s6 = params["s6"]
58 | rs6 = params["rs6"]
59 |
60 | r0ab = r0ab.to(r.device)
61 | c6ab = c6ab.to(r.device)
62 | c6 = c6ab[Zi, Zj] # (n_edges,)
63 | damp6 = 1.0 / (1.0 + torch.exp(-alp6 * (r / (rs6 * r0ab[Zi, Zj]) - 1.0)))
64 | e6 = damp6 / r6
65 | e6 = -0.5 * s6 * c6 * e6 # (n_edges,)
66 |
67 | if cutoff is not None and cutoff_smoothing == "poly":
68 | e6 *= poly_smoothing(r, cutoff)
69 |
70 | if batch_edge is None:
71 | # (1,)
72 | g = e6.sum()[None]
73 | else:
74 | # (n_graphs,)
75 | if batch.size()[0] == 0:
76 | n_graphs = 1
77 | else:
78 | n_graphs = int(batch[-1]) + 1
79 | g = e6.new_zeros((n_graphs,))
80 | g.scatter_add_(0, batch_edge, e6)
81 |
82 | if not bidirectional:
83 | g *= 2.0
84 | return g # (n_graphs,)
85 |
--------------------------------------------------------------------------------
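A minimal sketch of calling `edisp_d2` directly (not in the repository). The parameter values below are illustrative placeholders rather than a recommended xc parameterization; note that `r` must already be in bohr and the result is returned in Hartree.

```python
import torch
from torch_dftd.functions.dftd2 import edisp_d2
from torch_dftd.nn.params.dftd2_params import get_dftd2_params

r0ab, c6ab = get_dftd2_params()
Z = torch.tensor([1, 8, 1])                              # H, O, H
edge_index = torch.tensor([[0, 1, 0], [1, 2, 2]])        # edges 0-1, 1-2, 0-2
r = torch.tensor([1.8, 1.8, 2.9], dtype=torch.float64)   # distances in bohr
params = dict(s6=0.75, rs6=1.1, alp=20.0)                # placeholder values for illustration
energy = edisp_d2(Z, r, edge_index, r0ab=r0ab, c6ab=c6ab, params=params)
print(energy)  # shape (1,): dispersion energy in Hartree
```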
/torch_dftd/nn/dftd2_module.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, Optional
2 |
3 | import torch
4 | from ase.units import Bohr
5 | from torch import Tensor
6 | from torch_dftd.functions.dftd2 import edisp_d2
7 | from torch_dftd.functions.dftd3 import d3_autoang, d3_autoev
8 | from torch_dftd.functions.distance import calc_distances
9 | from torch_dftd.nn.base_dftd_module import BaseDFTDModule
10 | from torch_dftd.nn.params.dftd2_params import get_dftd2_params
11 |
12 |
13 | class DFTD2Module(BaseDFTDModule):
14 | """DFTD2Module
15 |
16 | Args:
17 | params (dict): xc-dependent parameters. alp6, s6, rs6.
18 | cutoff (float): cutoff distance in angstrom. Default value is 95bohr := 50 angstrom.
19 | dtype (dtype): internal calculation is done in this precision.
20 | bidirectional (bool): calculated `edge_index` is bidirectional or not.
21 | """
22 |
23 | def __init__(
24 | self,
25 | params: Dict[str, float],
26 | cutoff: float = 95.0 * Bohr,
27 | dtype=torch.float32,
28 | bidirectional: bool = False,
29 | cutoff_smoothing: str = "none",
30 | ):
31 | super(DFTD2Module, self).__init__()
32 |
33 | self.params = params
34 | self.cutoff = cutoff
35 | self.dtype = dtype
36 | self.bidirectional = bidirectional
37 | self.cutoff_smoothing = cutoff_smoothing
38 | r0ab, c6ab = get_dftd2_params()
39 | # atom pair coefficient (87, 87)
40 | self.register_buffer("c6ab", c6ab)
41 | # atom pair distance (87, 87)
42 | self.register_buffer("r0ab", r0ab)
43 |
44 | def calc_energy_batch(
45 | self,
46 | Z: Tensor,
47 | pos: Tensor,
48 | edge_index: Tensor,
49 | cell: Optional[Tensor] = None,
50 | pbc: Optional[Tensor] = None,
51 | shift_pos: Optional[Tensor] = None,
52 | batch: Optional[Tensor] = None,
53 | batch_edge: Optional[Tensor] = None,
54 | damping: str = "zero",
55 | ) -> Tensor:
56 | """Forward computation to calculate atomic wise dispersion energy"""
57 | shift_pos = pos.new_zeros((edge_index.size()[1], 3)) if shift_pos is None else shift_pos
58 | pos_bohr = pos / d3_autoang # angstrom -> bohr
59 | if cell is None:
60 | cell_bohr: Optional[Tensor] = None
61 | else:
62 | cell_bohr = cell / d3_autoang # angstrom -> bohr
63 | shift_bohr = shift_pos / d3_autoang # angstrom -> bohr
64 | r = calc_distances(pos_bohr, edge_index, cell_bohr, shift_bohr)
65 |
66 | # E_disp (n_graphs,): Energy in eV unit
67 | E_disp = d3_autoev * edisp_d2(
68 | Z,
69 | r,
70 | edge_index,
71 | c6ab=self.c6ab, # type:ignore
72 | r0ab=self.r0ab, # type:ignore
73 | params=self.params,
74 | damping=damping,
75 | bidirectional=self.bidirectional,
76 | cutoff=self.cutoff / Bohr,
77 | batch=batch,
78 | batch_edge=batch_edge,
79 | cutoff_smoothing=self.cutoff_smoothing,
80 | )
81 | return E_disp
82 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # torch-dftd
2 | pytorch implementation of dftd2 [1] & dftd3 [2, 3]
3 |
4 | - Blog: [Open sourcing pytorch implementation of DFTD3](https://tech.preferred.jp/en/blog/oss-pytorch-dftd3/)
5 |
6 | ## Install
7 |
8 | ```bash
9 | # Install from pypi
10 | pip install torch-dftd
11 |
12 | # Install from source (for developers)
13 | git clone https://github.com/pfnet-research/torch-dftd
14 | pip install -e .
15 | ```
16 |
17 | ## Quick start
18 |
19 | ```python
20 | from ase.build import molecule
21 | from torch_dftd.torch_dftd3_calculator import TorchDFTD3Calculator
22 |
23 | atoms = molecule("CH3CH2OCH3")
24 | # device="cuda:0" for fast GPU computation.
25 | calc = TorchDFTD3Calculator(atoms=atoms, device="cpu", damping="bj")
26 |
27 | energy = atoms.get_potential_energy()
28 | forces = atoms.get_forces()
29 |
30 | print(f"energy {energy} eV")
31 | print(f"forces {forces}")
32 | ```
33 |
34 | ## Dependency
35 |
36 | The library is tested under the following environment.
37 | - python: 3.6
38 | - CUDA: 10.2
39 | ```bash
40 | torch==1.5.1
41 | ase==3.21.1
42 | # Below is only for 3-body term
43 | cupy-cuda102==8.6.0
44 | pytorch-pfn-extras==0.3.2
45 | ```
46 |
47 | ## Development tips
48 | ### Formatting & Linting
49 | [pysen](https://github.com/pfnet/pysen) is used to format the python code of this repository.
50 | You can simply run the commands below to get your code formatted :)
51 | ```bash
52 | # Format the code
53 | $ pysen run format
54 | # Check the code format
55 | $ pysen run lint
56 | ```
57 |
58 | ### CUDA Kernel function implementation with cupy
59 | [cupy](https://github.com/cupy/cupy) lets users implement CUDA kernels directly in python code,
60 | and such kernels can be easily linked with pytorch tensor calculations.
61 | An element-wise kernel is implemented and used in some functions of this repository to accelerate computation on GPU (see the interop sketch following this README).
62 |
63 | See [document](https://docs.cupy.dev/en/stable/user_guide/kernel.html) for details about user defined kernel.
64 |
65 | ## Citation
66 |
67 | Please always cite the original paper of DFT-D2 [1] or DFT-D3 [2, 3].
68 | Also, please cite the paper [4] if you use this software in your publication.
69 |
70 | DFT-D2:
71 | [1] S. Grimme, J. Comput. Chem, 27 (2006), 1787-1799.
72 | DOI: [10.1002/jcc.20495](https://doi.org/10.1002/jcc.20495)
73 |
74 | DFT-D3:
75 | [2] S. Grimme, J. Antony, S. Ehrlich and H. Krieg, J. Chem. Phys, 132 (2010), 154104.
76 | DOI: [10.1063/1.3382344](https://doi.org/10.1063/1.3382344)
77 |
78 | If BJ-damping is used in DFT-D3:
79 | [3] S. Grimme, S. Ehrlich and L. Goerigk, J. Comput. Chem, 32 (2011), 1456-1465.
80 | DOI: [10.1002/jcc.21759](https://doi.org/10.1002/jcc.21759)
81 |
82 | [4] [PFP: Universal Neural Network Potential for Material Discovery](https://arxiv.org/abs/2106.14583)
83 |
84 | ```text
85 | @misc{takamoto2021pfp,
86 | title={PFP: Universal Neural Network Potential for Material Discovery},
87 | author={So Takamoto and Chikashi Shinagawa and Daisuke Motoki and Kosuke Nakago and Wenwen Li and Iori Kurata and Taku Watanabe and Yoshihiro Yayama and Hiroki Iriguchi and Yusuke Asano and Tasuku Onodera and Takafumi Ishii and Takao Kudo and Hideki Ono and Ryohto Sawada and Ryuichiro Ishitani and Marc Ong and Taiki Yamaguchi and Toshiki Kataoka and Akihide Hayashi and Takeshi Ibuka},
88 | year={2021},
89 | eprint={2106.14583},
90 | archivePrefix={arXiv},
91 | primaryClass={cond-mat.mtrl-sci}
92 | }
93 | ```
94 |
--------------------------------------------------------------------------------
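Supplementing the cupy section of the README above, here is a minimal torch/cupy interop sketch (not part of the repository). It uses the same DLPack conversion pattern as `torch_dftd/functions/triplets_kernel.py` and assumes cupy and a CUDA device are available.

```python
import cupy as cp
import torch
from torch.utils.dlpack import from_dlpack, to_dlpack

# A tiny element-wise kernel that squares its input, defined directly in Python.
square_kernel = cp.ElementwiseKernel("float32 x", "float32 y", "y = x * x", "square_kernel")

x = torch.arange(4, dtype=torch.float32, device="cuda:0")
x_cp = cp.fromDlpack(to_dlpack(x))               # zero-copy torch -> cupy
y = from_dlpack(square_kernel(x_cp).toDlpack())  # run the kernel, convert back to torch
print(y)  # tensor([0., 1., 4., 9.], device='cuda:0')
```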
/torch_dftd/nn/params/dftd2_params.py:
--------------------------------------------------------------------------------
1 | from typing import Tuple
2 |
3 | import torch
4 | from torch import Tensor
5 |
6 | # for converting distance from bohr to angstrom
7 | d3_autoang = 0.52917726
8 | # J/mol nm^6 - > au
9 | c6conv = 1.0e-3 / 2625.4999 / (0.052917726 ** 6)
10 |
11 | r0 = torch.tensor(
12 | [
13 | 0.0,
14 | 0.91,
15 | 0.92,
16 | 0.75,
17 | 1.28,
18 | 1.35,
19 | 1.32,
20 | 1.27,
21 | 1.22,
22 | 1.17,
23 | 1.13,
24 | 1.04,
25 | 1.24,
26 | 1.49,
27 | 1.56,
28 | 1.55,
29 | 1.53,
30 | 1.49,
31 | 1.45,
32 | 1.35,
33 | 1.34,
34 | 1.42,
35 | 1.42,
36 | 1.42,
37 | 1.42,
38 | 1.42,
39 | 1.42,
40 | 1.42,
41 | 1.42,
42 | 1.42,
43 | 1.42,
44 | 1.50,
45 | 1.57,
46 | 1.60,
47 | 1.61,
48 | 1.59,
49 | 1.57,
50 | 1.48,
51 | 1.46,
52 | 1.49,
53 | 1.49,
54 | 1.49,
55 | 1.49,
56 | 1.49,
57 | 1.49,
58 | 1.49,
59 | 1.49,
60 | 1.49,
61 | 1.49,
62 | 1.52,
63 | 1.64,
64 | 1.71,
65 | 1.72,
66 | 1.72,
67 | 1.71,
68 | 1.638,
69 | 1.602,
70 | 1.564,
71 | 1.594,
72 | 1.594,
73 | 1.594,
74 | 1.594,
75 | 1.594,
76 | 1.594,
77 | 1.594,
78 | 1.594,
79 | 1.594,
80 | 1.594,
81 | 1.594,
82 | 1.594,
83 | 1.594,
84 | 1.594,
85 | 1.625,
86 | 1.611,
87 | 1.611,
88 | 1.611,
89 | 1.611,
90 | 1.611,
91 | 1.611,
92 | 1.611,
93 | 1.598,
94 | 1.805,
95 | 1.767,
96 | 1.725,
97 | 1.823,
98 | 1.810,
99 | 1.749,
100 | ],
101 | dtype=torch.float64,
102 | )
103 | c6 = torch.tensor(
104 | [
105 | 0.0,
106 | 0.14,
107 | 0.08,
108 | 1.61,
109 | 1.61,
110 | 3.13,
111 | 1.75,
112 | 1.23,
113 | 0.70,
114 | 0.75,
115 | 0.63,
116 | 5.71,
117 | 5.71,
118 | 10.79,
119 | 9.23,
120 | 7.84,
121 | 5.57,
122 | 5.07,
123 | 4.61,
124 | 10.8,
125 | 10.8,
126 | 10.8,
127 | 10.8,
128 | 10.8,
129 | 10.8,
130 | 10.8,
131 | 10.8,
132 | 10.8,
133 | 10.8,
134 | 10.8,
135 | 10.8,
136 | 16.99,
137 | 17.10,
138 | 16.37,
139 | 12.64,
140 | 12.47,
141 | 12.01,
142 | 24.67,
143 | 24.67,
144 | 24.67,
145 | 24.67,
146 | 24.67,
147 | 24.67,
148 | 24.67,
149 | 24.67,
150 | 24.67,
151 | 24.67,
152 | 24.67,
153 | 24.67,
154 | 37.32,
155 | 38.71,
156 | 38.44,
157 | 31.74,
158 | 31.50,
159 | 29.99,
160 | 315.275,
161 | 226.994,
162 | 176.252,
163 | 140.68,
164 | 140.68,
165 | 140.68,
166 | 140.68,
167 | 140.68,
168 | 140.68,
169 | 140.68,
170 | 140.68,
171 | 140.68,
172 | 140.68,
173 | 140.68,
174 | 140.68,
175 | 140.68,
176 | 140.68,
177 | 105.112,
178 | 81.24,
179 | 81.24,
180 | 81.24,
181 | 81.24,
182 | 81.24,
183 | 81.24,
184 | 81.24,
185 | 57.364,
186 | 57.254,
187 | 63.162,
188 | 63.540,
189 | 55.283,
190 | 57.171,
191 | 56.64,
192 | ],
193 | dtype=torch.float64,
194 | )
195 |
196 |
197 | def get_dftd2_params() -> Tuple[Tensor, Tensor]:
198 | r0ab = (r0[:, None] + r0[None, :]) / d3_autoang
199 | c6ab = torch.sqrt(c6[:, None] * c6[None, :]) * c6conv
200 | return r0ab, c6ab
201 |
--------------------------------------------------------------------------------
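`get_dftd2_params` builds the pair tables from the per-element lists above: radii are summed and converted from angstrom to bohr, while C6 coefficients use a geometric mean together with the J mol^-1 nm^6 -> a.u. conversion factor. A small sanity check (illustrative only, not in the repository):

```python
import torch
from torch_dftd.nn.params.dftd2_params import c6, c6conv, d3_autoang, get_dftd2_params, r0

r0ab, c6ab = get_dftd2_params()
i, j = 1, 8  # H and O
assert torch.isclose(r0ab[i, j], (r0[i] + r0[j]) / d3_autoang)
assert torch.isclose(c6ab[i, j], torch.sqrt(c6[i] * c6[j]) * c6conv)
print(r0ab.shape, c6ab.shape)  # torch.Size([87, 87]) torch.Size([87, 87])
```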
/torch_dftd/nn/dftd3_module.py:
--------------------------------------------------------------------------------
1 | import os
2 | from pathlib import Path
3 | from typing import Dict, Optional
4 |
5 | import numpy as np
6 | import torch
7 | from ase.units import Bohr
8 | from torch import Tensor
9 | from torch_dftd.functions.dftd3 import d3_autoang, d3_autoev, edisp
10 | from torch_dftd.functions.distance import calc_distances
11 | from torch_dftd.nn.base_dftd_module import BaseDFTDModule
12 |
13 |
14 | class DFTD3Module(BaseDFTDModule):
15 | """DFTD3Module
16 |
17 | Args:
18 | params (dict): xc-dependent parameters. alp, s6, rs6, s18, rs18.
19 | cutoff (float): cutoff distance in angstrom. Default value is 95bohr := 50 angstrom.
20 | cnthr (float): coordination number cutoff distance in angstrom.
21 | Default value is 40bohr := 21 angstrom.
22 | abc (bool): ATM 3-body interaction
23 | dtype (dtype): internal calculation is done in this precision.
24 | bidirectional (bool): calculated `edge_index` is bidirectional or not.
25 | """
26 |
27 | def __init__(
28 | self,
29 | params: Dict[str, float],
30 | cutoff: float = 95.0 * Bohr,
31 | cnthr: float = 40.0 * Bohr,
32 | abc: bool = False,
33 | dtype=torch.float32,
34 | bidirectional: bool = False,
35 | cutoff_smoothing: str = "none",
36 | ):
37 | super(DFTD3Module, self).__init__()
38 |
39 | # relative filepath to package folder
40 | d3_filepath = str(Path(os.path.abspath(__file__)).parent / "params" / "dftd3_params.npz")
41 | d3_params = np.load(d3_filepath)
42 | c6ab = torch.tensor(d3_params["c6ab"], dtype=dtype)
43 | r0ab = torch.tensor(d3_params["r0ab"], dtype=dtype)
44 | rcov = torch.tensor(d3_params["rcov"], dtype=dtype)
45 | r2r4 = torch.tensor(d3_params["r2r4"], dtype=dtype)
46 | # (95, 95, 5, 5, 3) c0, c1, c2 for coordination number dependent c6ab term.
47 | self.register_buffer("c6ab", c6ab)
48 | self.register_buffer("r0ab", r0ab) # atom pair distance (95, 95)
49 | self.register_buffer("rcov", rcov) # atom covalent distance (95)
50 | self.register_buffer("r2r4", r2r4) # (95,)
51 |
52 | if cnthr > cutoff:
53 | print(
54 | f"WARNING: cnthr {cnthr} is larger than cutoff {cutoff}. "
55 | f"cutoff distance is used for cnthr"
56 | )
57 | cnthr = cutoff
58 | self.params = params
59 | self.cutoff = cutoff
60 | self.cnthr = cnthr
61 | self.abc = abc
62 | self.dtype = dtype
63 | self.bidirectional = bidirectional
64 | self.cutoff_smoothing = cutoff_smoothing
65 |
66 | def calc_energy_batch(
67 | self,
68 | Z: Tensor,
69 | pos: Tensor,
70 | edge_index: Tensor,
71 | cell: Optional[Tensor] = None,
72 | pbc: Optional[Tensor] = None,
73 | shift_pos: Optional[Tensor] = None,
74 | batch: Optional[Tensor] = None,
75 | batch_edge: Optional[Tensor] = None,
76 | damping: str = "zero",
77 | ) -> Tensor:
78 | """Forward computation to calculate atomic wise dispersion energy"""
79 | shift_pos = pos.new_zeros((edge_index.size()[1], 3)) if shift_pos is None else shift_pos
80 | pos_bohr = pos / d3_autoang # angstrom -> bohr
81 | if cell is None:
82 | cell_bohr: Optional[Tensor] = None
83 | else:
84 | cell_bohr = cell / d3_autoang # angstrom -> bohr
85 | shift_bohr = shift_pos / d3_autoang # angstrom -> bohr
86 | r = calc_distances(pos_bohr, edge_index, cell_bohr, shift_bohr)
87 | # E_disp (n_graphs,): Energy in eV unit
88 | E_disp = d3_autoev * edisp(
89 | Z,
90 | r,
91 | edge_index,
92 | c6ab=self.c6ab, # type:ignore
93 | r0ab=self.r0ab, # type:ignore
94 | rcov=self.rcov, # type:ignore
95 | r2r4=self.r2r4, # type:ignore
96 | params=self.params,
97 | cutoff=self.cutoff / Bohr,
98 | cnthr=self.cnthr / Bohr,
99 | batch=batch,
100 | batch_edge=batch_edge,
101 | shift_pos=shift_bohr,
102 | damping=damping,
103 | cutoff_smoothing=self.cutoff_smoothing,
104 | bidirectional=self.bidirectional,
105 | abc=self.abc,
106 | pos=pos_bohr,
107 | cell=cell_bohr,
108 | )
109 | return E_disp
110 |
--------------------------------------------------------------------------------
/torch_dftd/functions/edge_extraction.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, Tuple
2 |
3 | import numpy as np
4 | import torch
5 | from ase.neighborlist import primitive_neighbor_list
6 | from ase.units import Bohr
7 | from pymatgen.core import Structure
8 | from torch import Tensor
9 |
10 |
11 | def calc_neighbor_by_ase(
12 | pos: Tensor, cell: Tensor, pbc: Tensor, cutoff: float
13 | ) -> Tuple[Tensor, Tensor]:
14 | idx_i, idx_j, S = primitive_neighbor_list(
15 | "ijS",
16 | pbc.detach().cpu().numpy(),
17 | cell.detach().cpu().numpy(),
18 | pos.detach().cpu().numpy(),
19 | cutoff,
20 | )
21 | edge_index = torch.tensor(np.stack([idx_i, idx_j], axis=0), device=pos.device)
22 | # convert int64 -> pos.dtype (float)
23 | S = torch.tensor(S, dtype=pos.dtype, device=pos.device)
24 | return edge_index, S
25 |
26 |
27 | def calc_neighbor_by_pymatgen(
28 | pos: Tensor, cell: Tensor, pbc: Tensor, cutoff: float
29 | ) -> Tuple[Tensor, Tensor]:
30 | """calculate neighbor nodes in pbc condition.
31 |
32 | Implementation referred from https://github.com/Open-Catalyst-Project/ocp/blob/a5634ee4f0dc4a874752ab8d3117492ce83261ac/ocpmodels/preprocessing/atoms_to_graphs.py#L76
33 | under MIT license.
34 |
35 | Args:
36 | pos (Tensor):
37 | cell (Tensor):
38 | pbc (Tensor): periodic boundary condition.
39 | cutoff (float): cutoff distance to find neighbor
40 |
41 | Returns:
42 | edge_index (Tensor): (2, n_edges) indices of edge, src -> dst.
43 | S (Tensor): (n_edges, 3) shift tensor
44 | """ # NOQA
45 | if not torch.all(pbc):
46 | raise NotImplementedError(f"pbc {pbc} must be True for all axes!")
47 |
48 | positions = pos.detach().cpu().numpy().copy()
49 | lattice = cell.detach().cpu().numpy().copy()
50 | n_atoms = positions.shape[0]
51 | symbols = np.ones(n_atoms) # Dummy symbols to create `Structure`...
52 |
53 | struct = Structure(lattice, symbols, positions, coords_are_cartesian=True)
54 | c_index, n_index, offsets, n_distance = struct.get_neighbor_list(
55 | r=cutoff,
56 | numerical_tol=1e-8,
57 | exclude_self=True,
58 | )
59 | edge_index = torch.tensor(
60 | np.stack([c_index, n_index], axis=0), dtype=torch.long, device=pos.device
61 | )
62 | S = torch.tensor(offsets, dtype=pos.dtype, device=pos.device)
63 |
64 | return edge_index, S
65 |
66 |
67 | def calc_edge_index(
68 | pos: Tensor,
69 | cell: Optional[Tensor] = None,
70 | pbc: Optional[Tensor] = None,
71 | cutoff: float = 95.0 * Bohr,
72 | bidirectional: bool = False,
73 | ) -> Tuple[Tensor, Tensor]:
74 | """Calculate atom pair as `edge_index`, and shift vector `S`.
75 |
76 | Args:
77 | pos (Tensor): atom positions in angstrom
78 | cell (Tensor): cell size in angstrom, None for non periodic system.
79 | pbc (Tensor): pbc condition, None for non periodic system.
80 | cutoff (float): cutoff distance in angstrom
81 | bidirectional (bool): calculated `edge_index` is bidirectional or not.
82 |
83 | Returns:
84 | edge_index (Tensor): (2, n_edges)
85 | S (Tensor): (n_edges, 3) dtype is same with `pos`
86 | """
87 | if pbc is None or torch.all(~pbc):
88 | assert cell is None
89 | # Calculate distance brute force way
90 | distances = torch.sum((pos.unsqueeze(0) - pos.unsqueeze(1)).pow_(2), dim=2)
91 | right_ind, left_ind = torch.where(distances < cutoff ** 2)
92 | if bidirectional:
93 | edge_index = torch.stack(
94 | (left_ind[left_ind != right_ind], right_ind[left_ind != right_ind])
95 | )
96 | else:
97 | edge_index = torch.stack(
98 | (left_ind[left_ind < right_ind], right_ind[left_ind < right_ind])
99 | )
100 | n_edges = edge_index.shape[1]
101 | S = pos.new_zeros((n_edges, 3))
102 | else:
103 | if not bidirectional:
104 | raise NotImplementedError("bidirectional=False is not supported")
105 | if pos.shape[0] == 0:
106 | edge_index = torch.zeros([2, 0], dtype=torch.long, device=pos.device)
107 | S = torch.zeros_like(pos)
108 | else:
109 | try:
110 | edge_index, S = calc_neighbor_by_pymatgen(pos, cell, pbc, cutoff)
111 | except NotImplementedError:
112 | # This is slower.
113 | edge_index, S = calc_neighbor_by_ase(pos, cell, pbc, cutoff)
114 |
115 | return edge_index, S
116 |
--------------------------------------------------------------------------------
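A minimal sketch of `calc_edge_index` for the non-periodic branch (not in the repository): with `cell=None` and `pbc=None` the pair list is built by the brute-force distance check, and the shift tensor is all zeros.

```python
import torch
from torch_dftd.functions.edge_extraction import calc_edge_index

pos = torch.tensor([[0.0, 0.0, 0.0], [1.5, 0.0, 0.0], [10.0, 0.0, 0.0]])
edge_index, S = calc_edge_index(pos, cutoff=5.0, bidirectional=True)
print(edge_index)  # only the 0-1 pair survives the 5 angstrom cutoff (both directions)
print(S)           # (n_edges, 3) zeros in the non-periodic case
```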
/torch_dftd/testing/damping.py:
--------------------------------------------------------------------------------
1 | # --- DFTD2/DFTD3 possible damping & xc combinations ---
2 |
3 | # damping, xc, old
4 | damping_xc_combination_list = [
5 | ("bjm", "b2-plyp", False),
6 | ("bjm", "b3-lyp", False),
7 | ("bjm", "b97-d", False),
8 | ("bjm", "b-lyp", False),
9 | ("bjm", "b-p", False),
10 | ("bjm", "pbe", False),
11 | ("bjm", "pbe0", False),
12 | ("bjm", "lc-wpbe", False),
13 | ("zerom", "b2-plyp", False),
14 | ("zerom", "b3-lyp", False),
15 | ("zerom", "b97-d", False),
16 | ("zerom", "b-lyp", False),
17 | ("zerom", "b-p", False),
18 | ("zerom", "pbe", False),
19 | ("zerom", "pbe0", False),
20 | ("zerom", "lc-wpbe", False),
21 | ("bj", "b-p", False),
22 | ("bj", "b-lyp", False),
23 | ("bj", "revpbe", False),
24 | ("bj", "rpbe", False),
25 | ("bj", "b97-d", False),
26 | ("bj", "pbe", False),
27 | ("bj", "rpw86-pbe", False),
28 | ("bj", "b3-lyp", False),
29 | ("bj", "tpss", False),
30 | ("bj", "hf", False),
31 | ("bj", "tpss0", False),
32 | ("bj", "pbe0", False),
33 | ("bj", "hse06", False),
34 | ("bj", "revpbe38", False),
35 | ("bj", "pw6b95", False),
36 | ("bj", "b2-plyp", False),
37 | ("bj", "dsd-blyp", False),
38 | ("bj", "dsd-blyp-fc", False),
39 | ("bj", "bop", False),
40 | ("bj", "mpwlyp", False),
41 | ("bj", "o-lyp", False),
42 | ("bj", "pbesol", False),
43 | ("bj", "bpbe", False),
44 | ("bj", "opbe", False),
45 | ("bj", "ssb", False),
46 | ("bj", "revssb", False),
47 | ("bj", "otpss", False),
48 | ("bj", "b3pw91", False),
49 | ("bj", "bh-lyp", False),
50 | ("bj", "revpbe0", False),
51 | ("bj", "tpssh", False),
52 | ("bj", "mpw1b95", False),
53 | ("bj", "pwb6k", False),
54 | ("bj", "b1b95", False),
55 | ("bj", "bmk", False),
56 | ("bj", "cam-b3lyp", False),
57 | ("bj", "lc-wpbe", False),
58 | ("bj", "b2gp-plyp", False),
59 | ("bj", "ptpss", False),
60 | ("bj", "pwpb95", False),
61 | ("bj", "hf/mixed", False),
62 | ("bj", "hf/sv", False),
63 | ("bj", "hf/minis", False),
64 | ("bj", "b3-lyp/6-31gd", False),
65 | ("bj", "hcth120", False),
66 | ("bj", "dftb3", False),
67 | ("bj", "pw1pw", False),
68 | ("bj", "pwgga", False),
69 | ("bj", "hsesol", False),
70 | ("bj", "hf3c", False),
71 | ("bj", "hf3cv", False),
72 | ("bj", "pbeh3c", False),
73 | ("bj", "pbeh-3c", False),
74 | ("zero", "slater-dirac-exchange", False),
75 | ("zero", "b-lyp", False),
76 | ("zero", "b-p", False),
77 | ("zero", "b97-d", False),
78 | ("zero", "revpbe", False),
79 | ("zero", "pbe", False),
80 | ("zero", "pbesol", False),
81 | ("zero", "rpw86-pbe", False),
82 | ("zero", "rpbe", False),
83 | ("zero", "tpss", False),
84 | ("zero", "b3-lyp", False),
85 | ("zero", "pbe0", False),
86 | ("zero", "hse06", False),
87 | ("zero", "revpbe38", False),
88 | ("zero", "pw6b95", False),
89 | ("zero", "tpss0", False),
90 | ("zero", "b2-plyp", False),
91 | ("zero", "pwpb95", False),
92 | ("zero", "b2gp-plyp", False),
93 | ("zero", "ptpss", False),
94 | ("zero", "hf", False),
95 | ("zero", "mpwlyp", False),
96 | ("zero", "bpbe", False),
97 | ("zero", "bh-lyp", False),
98 | ("zero", "tpssh", False),
99 | ("zero", "pwb6k", False),
100 | ("zero", "b1b95", False),
101 | ("zero", "bop", False),
102 | ("zero", "o-lyp", False),
103 | ("zero", "o-pbe", False),
104 | ("zero", "ssb", False),
105 | ("zero", "revssb", False),
106 | ("zero", "otpss", False),
107 | ("zero", "b3pw91", False),
108 | ("zero", "revpbe0", False),
109 | ("zero", "pbe38", False),
110 | ("zero", "mpw1b95", False),
111 | ("zero", "mpwb1k", False),
112 | ("zero", "bmk", False),
113 | ("zero", "cam-b3lyp", False),
114 | ("zero", "lc-wpbe", False),
115 | ("zero", "m05", False),
116 | ("zero", "m052x", False),
117 | ("zero", "m06l", False),
118 | ("zero", "m06", False),
119 | ("zero", "m062x", False),
120 | ("zero", "m06hf", False),
121 | ("zero", "hcth120", False),
122 | # skip tz=True of damping="zero".
123 | # "b-lyp"
124 | # "b-p"
125 | # "b97-d"
126 | # "revpbe"
127 | # "pbe"
128 | # "tpss"
129 | # "b3-lyp"
130 | # "pbe0"
131 | # "pw6b95"
132 | # "tpss0"
133 | # "b2-plyp"
134 | ("zero", "b-lyp", True),
135 | ("zero", "b-p", True),
136 | ("zero", "b97-d", True),
137 | ("zero", "revpbe", True),
138 | ("zero", "pbe", True),
139 | ("zero", "tpss", True),
140 | ("zero", "b3-lyp", True),
141 | ("zero", "pbe0", True),
142 | ("zero", "pw6b95", True),
143 | ("zero", "tpss0", True),
144 | ("zero", "b2-plyp", True),
145 | ("zero", "b2gp-plyp", True),
146 | ("zero", "dsd-blyp", True),
147 | ]
148 |
149 | # damping, old
150 | damping_method_list = [
151 | ("zero", False),
152 | ("bj", False),
153 | ("zerom", False),
154 | ("bjm", False),
155 | ("zero", True),
156 | ]
157 |
--------------------------------------------------------------------------------
/tests/functions_tests/test_triplets.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import torch
3 | from torch_dftd.functions.triplets import calc_triplets
4 |
5 |
6 | def test_calc_triplets():
7 | # TODO: Currently returned value has different order due to torch.argsort,
8 | # and expected value is not set correctly for GPU.
9 | # device = "cuda:0"
10 | device = "cpu"
11 | edge_index = torch.tensor(
12 | [[0, 0, 0, 1, 1, 1, 4, 3, 1, 2, 4, 3], [4, 3, 1, 2, 4, 3, 0, 0, 0, 1, 1, 1]],
13 | dtype=torch.long,
14 | device=device,
15 | )
16 | shift_pos = torch.zeros((edge_index.shape[1], 3), dtype=torch.float32, device=device)
17 | shift_pos[:, 0] = torch.tensor(
18 | [1, 2, 3, 4, 5, 6, -1, -2, -3, -4, -5, -6], dtype=torch.float32, device=device
19 | )
20 | # print("shift", shift.shape)
21 | triplet_node_index, multiplicity, edge_jk, batch_triplets = calc_triplets(
22 | edge_index, shift_pos
23 | )
24 | # print("triplet_node_index", triplet_node_index.shape, triplet_node_index)
25 | # print("multiplicity", multiplicity.shape, multiplicity)
26 | # print("triplet_shift", triplet_shift.shape, triplet_shift)
27 | # print("triplet_shift[:, :, 0]", triplet_shift.shape, triplet_shift[:, :, 0])
28 |
29 | # 6 triplets exist.
30 | n_triplets = 6
31 | # idx_i, idx_j, idx_k = triplet_node_index
32 | assert triplet_node_index.shape == (n_triplets, 3)
33 | assert torch.all(
34 | triplet_node_index.cpu()
35 | == torch.tensor(
36 | [[0, 3, 4], [0, 1, 4], [0, 1, 3], [1, 2, 4], [1, 2, 3], [1, 3, 4]], dtype=torch.long
37 | )
38 | )
39 | assert multiplicity.shape == (n_triplets,)
40 | assert torch.all(multiplicity.cpu() == torch.ones((n_triplets,), dtype=torch.float32))
41 |
42 | assert torch.allclose(
43 | edge_jk.cpu(),
44 | torch.tensor([[7, 6], [8, 6], [8, 7], [9, 10], [9, 11], [11, 10]], dtype=torch.long),
45 | )
46 | # shift for edge `i->j`, `i->k`, `j->k`.
47 | triplet_shift = torch.stack(
48 | [
49 | -shift_pos[edge_jk[:, 0]],
50 | -shift_pos[edge_jk[:, 1]],
51 | shift_pos[edge_jk[:, 0]] - shift_pos[edge_jk[:, 1]],
52 | ],
53 | dim=1,
54 | )
55 | assert torch.allclose(
56 | triplet_shift.cpu()[:, :, 0],
57 | torch.tensor(
58 | [
59 | [2.0, 1.0, -1.0],
60 | [3.0, 1.0, -2.0],
61 | [3.0, 2.0, -1.0],
62 | [4.0, 5.0, 1.0],
63 | [4.0, 6.0, 2.0],
64 | [6.0, 5.0, -1.0],
65 | ],
66 | dtype=torch.float32,
67 | ),
68 | )
69 | assert torch.all(batch_triplets.cpu() == torch.zeros((n_triplets,), dtype=torch.long))
70 |
71 |
72 | def test_calc_triplets_noshift():
73 | # device = "cuda:0"
74 | device = "cpu"
75 | edge_index = torch.tensor(
76 | [[0, 1, 1, 3, 1, 2, 3, 0], [1, 2, 3, 0, 0, 1, 1, 3]], dtype=torch.long, device=device
77 | )
78 | triplet_node_index, multiplicity, edge_jk, batch_triplets = calc_triplets(
79 | edge_index, dtype=torch.float64
80 | )
81 | # print("triplet_node_index", triplet_node_index.shape, triplet_node_index)
82 | # print("multiplicity", multiplicity.shape, multiplicity)
83 | # print("triplet_shift", triplet_shift.shape, triplet_shift)
84 | # print("batch_triplets", batch_triplets.shape, batch_triplets)
85 |
86 | # 2 triplets exist
87 | n_triplets = 2
88 | assert triplet_node_index.shape == (n_triplets, 3)
89 | assert torch.all(
90 | triplet_node_index.cpu() == torch.tensor([[0, 1, 3], [1, 2, 3]], dtype=torch.long)
91 | )
92 | assert multiplicity.shape == (n_triplets,)
93 | assert multiplicity.dtype == torch.float64
94 | assert torch.all(multiplicity.cpu() == torch.ones((n_triplets,), dtype=torch.float64))
95 | assert torch.all(edge_jk.cpu() == torch.tensor([[1, 0], [2, 3]], dtype=torch.long))
96 | assert torch.all(batch_triplets.cpu() == torch.zeros((n_triplets,), dtype=torch.long))
97 |
98 |
99 | @pytest.mark.parametrize(
100 | "edge_index",
101 | [torch.zeros((2, 0), dtype=torch.long), torch.tensor([[0, 0], [1, 2]], dtype=torch.long)],
102 | )
103 | def test_calc_triplets_no_triplets(edge_index):
104 | # edge_index = edge_index.to("cuda:0")
105 | # No triplet exist in this graph. Case1: No edge, Case 2 No triplets in this edge.
106 | triplet_node_index, multiplicity, edge_jk, batch_triplets = calc_triplets(edge_index)
107 | # print("triplet_node_index", triplet_node_index.shape, triplet_node_index)
108 | # print("multiplicity", multiplicity.shape, multiplicity)
109 | # print("triplet_shift", triplet_shift.shape, triplet_shift)
110 | # print("batch_triplets", batch_triplets.shape, batch_triplets)
111 |
112 | # 0 triplets exist.
113 | assert triplet_node_index.shape == (0, 3)
114 | assert multiplicity.shape == (0,)
115 | assert edge_jk.shape == (0, 2)
116 | assert batch_triplets.shape == (0,)
117 |
118 |
119 | if __name__ == "__main__":
120 | pytest.main([__file__, "-v", "-s"])
121 |
--------------------------------------------------------------------------------
/examples/check_speed.py:
--------------------------------------------------------------------------------
1 | import os
2 | from time import perf_counter
3 |
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | import pandas as pd
7 | import plotly.express as px
8 | import torch
9 | from ase import Atoms
10 | from ase.calculators.dftd3 import DFTD3
11 | from ase.cluster.cubic import FaceCenteredCubic
12 | from ase.io import write
13 | from torch_dftd.torch_dftd3_calculator import TorchDFTD3Calculator
14 |
15 |
16 | def compare_forces(atoms: Atoms, calc1, calc2):
17 | print(f"atoms # {len(atoms.numbers)}")
18 | calc1.reset()
19 | atoms.calc = calc1
20 | start = perf_counter()
21 | try:
22 | F1 = atoms.get_forces()
23 | t1 = perf_counter() - start
24 | except Exception:
25 | print("Calculation failed")
26 | F1 = np.array([np.nan])
27 | t1 = np.nan
28 |
29 | print(f"F1 {F1.shape} took {t1} sec")
30 |
31 | calc2.reset()
32 | atoms.calc = calc2
33 | start = perf_counter()
34 | F2 = atoms.get_forces()
35 | t2 = perf_counter() - start
36 | print(f"F2 {F2.shape} took {t2} sec")
37 | # print(F2)
38 | print(f"diff {np.max(np.abs(F1 - F2))}, calc1/calc2 -> {t1 / t2} times faster")
39 | return t1, t2, F1, F2
40 |
41 |
42 | def create_fcc_cluster_atoms(layers):
43 | surfaces = [(1, 0, 0), (1, 1, 0), (1, 1, 1)]
44 | # layers = [4, 4, 4]
45 | lc = 3.61000
46 | cluster = FaceCenteredCubic("Cu", surfaces, layers, latticeconstant=lc)
47 | return Atoms(cluster.symbols, cluster.positions, cell=cluster.cell)
48 |
49 |
50 | if __name__ == "__main__":
51 | os.makedirs(str("results"), exist_ok=True)
52 |
53 | damping = "bj"
54 | xc = "pbe"
55 | device = "cuda:0"
56 | old = False
57 | print("Initializing calculators...")
58 | print(f"xc = {xc}, damping = {damping}, old = {old}")
59 | torch_dftd3_calc = TorchDFTD3Calculator(
60 | damping=damping, xc=xc, device=device, dtype=torch.float64, old=old, bidirectional=True
61 | )
62 | dftd3_calc = DFTD3(damping=damping, xc=xc, grad=True, old=old, directory=".")
63 |
64 | F1_F2_list = []
65 | t1_list = []
66 | t2_list = []
67 | name_list = []
68 |
69 | # Dry-run once.
70 | atoms = create_fcc_cluster_atoms([3, 3, 3])
71 | t1, t2, F1, F2 = compare_forces(atoms, dftd3_calc, torch_dftd3_calc)
72 |
73 | n_repeat = 10
74 | for i in [3, 5, 7, 9]:
75 | print(f"Calculate Cu cluster with size ({i}, {i}, {i})")
76 | atoms = create_fcc_cluster_atoms([i, i, i])
77 |
78 | _t1_list = []
79 | _t2_list = []
80 | for j in range(n_repeat):
81 | t1, t2, F1, F2 = compare_forces(atoms, dftd3_calc, torch_dftd3_calc)
82 | _t1_list.append(t1)
83 | _t2_list.append(t2)
84 |
85 | if np.sum(np.isnan(F1)) == 0:
86 | F1_F2_list.append([F1, F2]) # Only add successful results
87 | t1_list.append(np.mean(_t1_list) * 1000) # take average in ms order
88 | t2_list.append(np.mean(_t2_list) * 1000) # take average in ms order
89 | name_list.append(f"cluster{i}{i}{i}: {atoms.get_number_of_atoms()} atoms")
90 |
91 | write(f"results/cluster{i}{i}{i}_v1.png", atoms)
92 | write(f"results/cluster{i}{i}{i}_v2.png", atoms, rotation="225z, -60x")
93 |
94 | # --- Check time ---
95 | df = pd.DataFrame(
96 | {
97 | "name": name_list,
98 | "DFTD3": t1_list,
99 | "TorchDFTD3": t2_list,
100 | }
101 | )
102 | melt_df = pd.melt(df, id_vars="name", value_vars=["DFTD3", "TorchDFTD3"])
103 | melt_df["value1"] = melt_df["value"].round(0)
104 | melt_df = melt_df.rename(
105 | {"name": "Atoms", "variable": "Calculator", "value": "time (ms)", "value1": "time_round"},
106 | axis=1,
107 | )
108 | fig = px.bar(
109 | melt_df,
110 | x="Atoms",
111 | y="time (ms)",
112 | color="Calculator",
113 | barmode="group",
114 |         title="Execution time comparison",
115 | text="time_round",
116 | height=600,
117 | width=1200,
118 | orientation="v",
119 | )
120 | # fig.show()
121 | fig.write_image("results/exe_time.png")
122 |
123 | print("Saved to exe_time.png")
124 | print("Execution time list:")
125 | print(t1_list)
126 | print(t2_list)
127 |
128 | # --- Check calculated result is same ---
129 | # (n_total_atoms, 3)
130 | F1 = np.concatenate([f1 for f1, f2 in F1_F2_list], axis=0)
131 | F2 = np.concatenate([f2 for f1, f2 in F1_F2_list], axis=0)
132 | mae = np.mean(np.abs(F1 - F2))
133 | max_ae = np.max(np.abs(F1 - F2))
134 |
135 | F1_F2 = np.array([F1, F2])
136 | fig, ax = plt.subplots()
137 |     F_max = np.max(F1_F2)
138 |     F_min = np.min(F1_F2)
139 |
140 |     ax.plot([F_min, F_max], [F_min, F_max])
141 | for i in range(3):
142 | # Fx, Fy, Fz scatter plot
143 | ax.scatter(F1[:, i], F2[:, i], label=["x", "y", "z"][i], marker="x")
144 | ax.set_xlabel("ase DFTD3")
145 | ax.set_ylabel("pytorch DFTD3")
146 |     ax.set_title(f"DFTD3 force difference MAE: {mae:.3} eV/angstrom")
147 | ax.legend()
148 | fig.savefig("results/F1-F2.png")
149 |
150 | print("Saved to F1-F2.png")
151 | print("MAE", mae)
152 | print("Max AE", max_ae)
153 |
--------------------------------------------------------------------------------
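A minimal timing sketch derived from check_speed.py above, exercising only TorchDFTD3Calculator (the full script additionally needs the reference `dftd3` binary, pandas, plotly and matplotlib). It assumes a CUDA device is available; switch to device="cpu" or reduce the layer count as needed:

    from time import perf_counter

    import torch
    from ase import Atoms
    from ase.cluster.cubic import FaceCenteredCubic
    from torch_dftd.torch_dftd3_calculator import TorchDFTD3Calculator

    surfaces = [(1, 0, 0), (1, 1, 0), (1, 1, 1)]
    cluster = FaceCenteredCubic("Cu", surfaces, [5, 5, 5], latticeconstant=3.61)
    atoms = Atoms(cluster.symbols, cluster.positions, cell=cluster.cell)
    atoms.calc = TorchDFTD3Calculator(damping="bj", xc="pbe", device="cuda:0", dtype=torch.float64)
    start = perf_counter()
    forces = atoms.get_forces()
    print(f"{len(atoms)} atoms: forces computed in {perf_counter() - start:.3f} s")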
/torch_dftd/functions/triplets_kernel.py:
--------------------------------------------------------------------------------
1 | from typing import Tuple
2 |
3 | import torch
4 | from torch import Tensor
5 | from torch.utils.dlpack import from_dlpack, to_dlpack
6 |
7 | try:
8 | import cupy as cp
9 |
10 | _cupy_available = True
11 | except ImportError:
12 | import numpy as cp # Dummy for mypy annotation.
13 |
14 | _cupy_available = False
15 |
16 | try:
17 | import pytorch_pfn_extras as ppe
18 |
19 | _ppe_available = True
20 | except ImportError:
21 | _ppe_available = False
22 |
23 |
24 | if _ppe_available and _cupy_available:
25 | ppe.cuda.use_torch_mempool_in_cupy()
26 |
27 |
28 | def _torch2cupy(tensor: Tensor) -> cp.ndarray:
29 | return cp.fromDlpack(to_dlpack(tensor))
30 |
31 |
32 | def _cupy2torch(array: cp.ndarray) -> Tensor:
33 | return from_dlpack(array.toDlpack())
34 |
35 |
36 | if _cupy_available:
37 | _calc_triplets_core_gpu_kernel = cp.ElementwiseKernel(
38 | "raw int64 counts, raw int64 unique, raw int64 dst, raw int64 edge_indices, raw int64 batch_edge, raw int64 counts_cumsum",
39 | "raw int64 triplet_node_index, raw T multiplicity, raw int64 edge_jk, raw int64 batch_triplets",
40 | """
41 | long long n_unique = unique.size();
42 | long long a = 0;
43 |     // a, b, c correspond to i, j, k in the CPU implementation (_calc_triplets_core).
44 | long long current_counts = 0;
45 | long long _i = 0;
46 | for (a = 0; a < n_unique; a++) {
47 | current_counts += counts[a] * (counts[a] - 1) / 2;
48 | if (i < current_counts) {
49 | _i = i - (current_counts - counts[a] * (counts[a] - 1) / 2);
50 | break;
51 | }
52 | }
53 |
54 | long long _src = unique[a];
55 | long long _n_edges = counts[a];
56 | long long _offset = counts_cumsum[a];
57 | long long _batch_index = batch_edge[_offset];
58 |
59 | long long b, c;
60 | for (b = 1; b < _n_edges; b++) {
61 | if (_i < (2 * _n_edges - b - 1) * b / 2) {
62 | b -= 1;
63 | c = _i - (2 * _n_edges - b - 1) * b / 2 + b + 1;
64 | break;
65 | }
66 | }
67 | long long _dst0 = dst[_offset + b];
68 | long long _dst1 = dst[_offset + c];
69 | if (_dst0 > _dst1) {
70 | // Swap _dst0 & _dst1, b & c.
71 | long long tmp = _dst0;
72 | _dst0 = _dst1;
73 | _dst1 = tmp;
74 | tmp = b;
75 | b = c;
76 | c = tmp;
77 | }
78 |
79 | // --- triplet_node_index ---
80 | triplet_node_index[3 * i] = _src; // idx_i
81 | triplet_node_index[3 * i + 1] = _dst0; // idx_j
82 | triplet_node_index[3 * i + 2] = _dst1; // idx_k
83 |
84 | // --- multiplicity ---
85 | if (_dst0 == _dst1) {
86 | if (_src == _dst0) {
87 | // Case 0: _src == _dst0 == _dst1
88 | multiplicity[i] = 3.0;
89 | } else {
90 | // Case 1: _src < _dst0 == _dst1
91 | multiplicity[i] = 1.0;
92 | }
93 | } else {
94 | if (_src == _dst0) {
95 | // Case 2: _src == _dst0 < _dst1
96 | multiplicity[i] = 2.0;
97 | } else {
98 |             // Case 3: _src < _dst0 < _dst1
99 | multiplicity[i] = 1.0;
100 | }
101 | }
102 |
103 | // --- edge_jk ---
104 | edge_jk[2 * i] = edge_indices[_offset + b];
105 | edge_jk[2 * i + 1] = edge_indices[_offset + c];
106 |
107 | // --- batch_triplets ---
108 | batch_triplets[i] = _batch_index;
109 | """,
110 | "_calc_triplets_core_gpu_kernel",
111 | )
112 | else:
113 | _calc_triplets_core_gpu_kernel = None
114 |
115 |
116 | def _calc_triplets_core_gpu(
117 | counts: Tensor,
118 | unique: Tensor,
119 | dst: Tensor,
120 | edge_indices: Tensor,
121 | batch_edge: Tensor,
122 | counts_cumsum: Tensor,
123 | dtype: torch.dtype = torch.float32,
124 | ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
125 | if not _ppe_available:
126 | raise ImportError("Please install pytorch_pfn_extras to use `_calc_triplets_core_gpu`!")
127 | if not _cupy_available:
128 | raise ImportError("Please install cupy to use `_calc_triplets_core_gpu`!")
129 | device = unique.device
130 | n_triplets = int(torch.sum(counts * (counts - 1) / 2).item())
131 |
132 | # (n_triplet_edges, 3)
133 | triplet_node_index = torch.zeros((n_triplets, 3), dtype=torch.long, device=device)
134 | # (n_triplet_edges)
135 | multiplicity = torch.zeros((n_triplets,), dtype=dtype, device=device)
136 | # (n_triplet_edges, 2=(j, k))
137 | edge_jk = torch.zeros((n_triplets, 2), dtype=torch.long, device=device)
138 | # (n_triplet_edges)
139 | batch_triplets = torch.zeros((n_triplets,), dtype=torch.long, device=device)
140 | if n_triplets == 0:
141 | return triplet_node_index, multiplicity, edge_jk, batch_triplets
142 |
143 | _calc_triplets_core_gpu_kernel(
144 | _torch2cupy(counts),
145 | _torch2cupy(unique),
146 | _torch2cupy(dst),
147 | _torch2cupy(edge_indices),
148 | _torch2cupy(batch_edge),
149 | _torch2cupy(counts_cumsum),
150 | # n_triplets,
151 | _torch2cupy(triplet_node_index),
152 | _torch2cupy(multiplicity),
153 | _torch2cupy(edge_jk),
154 | _torch2cupy(batch_triplets),
155 | size=n_triplets,
156 | )
157 |     # The torch tensor buffers were already modified in-place by the cupy kernel above.
158 | return triplet_node_index, multiplicity, edge_jk, batch_triplets
159 |
--------------------------------------------------------------------------------
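The kernel above writes directly into torch-allocated buffers: `_torch2cupy` / `_cupy2torch` exchange memory between torch and cupy through DLPack without copying. A minimal sketch of that bridge (assumes cupy and a CUDA device are available):

    import cupy as cp
    import torch
    from torch.utils.dlpack import to_dlpack

    t = torch.arange(4, dtype=torch.float32, device="cuda:0")
    a = cp.fromDlpack(to_dlpack(t))  # cupy view sharing the same device memory as `t`
    a *= 2                           # in-place update on the cupy side ...
    print(t)                         # ... is visible from torch: tensor([0., 2., 4., 6.], device='cuda:0')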
/tests/test_torch_dftd3_calculator_batch.py:
--------------------------------------------------------------------------------
1 | """
2 | Batch calculation tests: batched results are compared against per-structure TorchDFTD3Calculator results.
3 | """
4 | from copy import deepcopy
5 | from typing import List
6 |
7 | import numpy as np
8 | import pytest
9 | import torch
10 | from ase import Atoms
11 | from ase.build import bulk, fcc111, molecule
12 | from torch_dftd.testing.damping import damping_method_list
13 | from torch_dftd.torch_dftd3_calculator import TorchDFTD3Calculator
14 |
15 |
16 | @pytest.fixture(
17 | params=[
18 | pytest.param("case1", id="mol+slab"),
19 | pytest.param("case2", id="mol+slab(wo_pbc)"),
20 | pytest.param("case3", id="null"),
21 | pytest.param("case4", marks=[pytest.mark.slow], id="large"),
22 | ]
23 | )
24 | def atoms_list(request) -> List[Atoms]:
25 | """Initialization"""
26 | mol = molecule("CH3CH2OCH3")
27 |
28 | slab = fcc111("Au", size=(2, 1, 3), vacuum=80.0)
29 | slab.pbc = np.array([True, True, True])
30 |
31 | slab_wo_pbc = slab.copy()
32 | slab_wo_pbc.pbc = np.array([False, False, False])
33 |
34 | null = Atoms()
35 |
36 | large_bulk = bulk("Pt", "fcc") * (8, 8, 8)
37 |
38 | atoms_dict = {
39 | "case1": [mol, slab],
40 | "case2": [mol, slab_wo_pbc],
41 | "case3": [null],
42 | "case4": [large_bulk],
43 | }
44 |
45 | return atoms_dict[request.param]
46 |
47 |
48 | def _assert_energy_equal_batch(calc1, atoms_list: List[Atoms]):
49 | expected_results_list = []
50 | for atoms in atoms_list:
51 | calc1.reset()
52 | atoms.calc = calc1
53 | calc1.calculate(atoms, properties=["energy"])
54 | expected_results_list.append(deepcopy(calc1.results))
55 |
56 | results_list = calc1.batch_calculate(atoms_list, properties=["energy"])
57 | for exp, actual in zip(expected_results_list, results_list):
58 | assert np.allclose(exp["energy"], actual["energy"], atol=1e-4, rtol=1e-4)
59 |
60 |
61 | def _test_calc_energy(damping, xc, old, atoms_list, device="cpu", dtype=torch.float64):
62 | cutoff = 25.0 # Make test faster
63 | torch_dftd3_calc = TorchDFTD3Calculator(
64 | damping=damping, xc=xc, device=device, dtype=dtype, old=old, cutoff=cutoff
65 | )
66 | _assert_energy_equal_batch(torch_dftd3_calc, atoms_list)
67 |
68 |
69 | def _assert_energy_force_stress_equal_batch(calc1, atoms_list: List[Atoms]):
70 | expected_results_list = []
71 | for atoms in atoms_list:
72 | calc1.reset()
73 | atoms.calc = calc1
74 | calc1.calculate(atoms, properties=["energy", "forces", "stress"])
75 | expected_results_list.append(deepcopy(calc1.results))
76 |
77 | results_list = calc1.batch_calculate(atoms_list, properties=["energy", "forces", "stress"])
78 | for exp, actual in zip(expected_results_list, results_list):
79 | assert np.allclose(exp["energy"], actual["energy"], atol=1e-4, rtol=1e-4)
80 | assert np.allclose(exp["forces"], actual["forces"], atol=1e-5, rtol=1e-5)
81 |         if "stress" in exp:  # exp is a results dict, so check key membership.
82 | assert np.allclose(exp["stress"], actual["stress"], atol=1e-5, rtol=1e-5)
83 |
84 |
85 | def _test_calc_energy_force_stress(
86 | damping,
87 | xc,
88 | old,
89 | atoms_list,
90 | device="cpu",
91 | dtype=torch.float64,
92 | bidirectional=True,
93 | abc=False,
94 | cnthr=15.0,
95 | ):
96 | cutoff = 22.0 # Make test faster
97 | torch_dftd3_calc = TorchDFTD3Calculator(
98 | damping=damping,
99 | xc=xc,
100 | device=device,
101 | dtype=dtype,
102 | old=old,
103 | cutoff=cutoff,
104 | cnthr=cnthr,
105 | abc=abc,
106 | bidirectional=bidirectional,
107 | )
108 | _assert_energy_force_stress_equal_batch(torch_dftd3_calc, atoms_list)
109 |
110 |
111 | @pytest.mark.parametrize("damping,old", damping_method_list)
112 | @pytest.mark.parametrize("device", ["cpu", "cuda:0"])
113 | @pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
114 | def test_calc_energy_device_batch(damping, old, atoms_list, device, dtype):
115 |     """Test2-1: check device and dtype dependency, varying only the damping method."""
116 | xc = "pbe"
117 | _test_calc_energy(damping, xc, old, atoms_list, device=device, dtype=dtype)
118 |
119 |
120 | @pytest.mark.parametrize("damping,old", damping_method_list)
121 | @pytest.mark.parametrize("device", ["cpu", "cuda:0"])
122 | @pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
123 | def test_calc_energy_force_stress_device_batch(damping, old, atoms_list, device, dtype):
124 |     """Test2-2: check device and dtype dependency, varying only the damping method."""
125 | xc = "pbe"
126 | _test_calc_energy_force_stress(damping, xc, old, atoms_list, device=device, dtype=dtype)
127 |
128 |
129 | @pytest.mark.parametrize("damping,old", damping_method_list)
130 | @pytest.mark.parametrize("device", ["cpu", "cuda:0"])
131 | @pytest.mark.parametrize("bidirectional", [True, False])
132 | @pytest.mark.parametrize("dtype", [torch.float64])
133 | def test_calc_energy_force_stress_device_batch_abc(
134 | damping, old, atoms_list, device, bidirectional, dtype
135 | ):
136 |     """Test2-3: check device and dtype dependency for the abc (three-body) term, varying only the damping method."""
137 | xc = "pbe"
138 | abc = True
139 |     if any(np.all(atoms.pbc) for atoms in atoms_list) and not bidirectional:
140 | # TODO: bidirectional=False is not implemented for pbc now.
141 | with pytest.raises(NotImplementedError):
142 | _test_calc_energy_force_stress(
143 | damping,
144 | xc,
145 | old,
146 | atoms_list,
147 | device=device,
148 | dtype=dtype,
149 | bidirectional=bidirectional,
150 | abc=abc,
151 | cnthr=7.0,
152 | )
153 | else:
154 | _test_calc_energy_force_stress(
155 | damping,
156 | xc,
157 | old,
158 | atoms_list,
159 | device=device,
160 | dtype=dtype,
161 | bidirectional=bidirectional,
162 | abc=abc,
163 | cnthr=7.0,
164 | )
165 |
166 |
167 | if __name__ == "__main__":
168 | pytest.main([__file__, "-v", "-s"])
169 |
--------------------------------------------------------------------------------
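A hedged usage sketch of the batched API exercised above: `TorchDFTD3Calculator.batch_calculate` takes a list of Atoms and returns one result dict per structure ("energy", plus "forces" / "stress" when requested):

    import numpy as np
    import torch
    from ase.build import fcc111, molecule
    from torch_dftd.torch_dftd3_calculator import TorchDFTD3Calculator

    calc = TorchDFTD3Calculator(damping="bj", xc="pbe", device="cpu", dtype=torch.float64)
    slab = fcc111("Au", size=(2, 1, 3), vacuum=80.0)
    slab.pbc = np.array([True, True, True])
    atoms_list = [molecule("CH3CH2OCH3"), slab]
    results = calc.batch_calculate(atoms_list, properties=["energy", "forces"])
    for atoms, res in zip(atoms_list, results):
        print(len(atoms), "atoms:", res["energy"], res["forces"].shape)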
/torch_dftd/functions/triplets.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, Tuple
2 |
3 | import torch
4 | from torch import Tensor
5 | from torch_dftd.functions.triplets_kernel import _calc_triplets_core_gpu
6 |
7 |
8 | def calc_triplets(
9 | edge_index: Tensor,
10 | shift_pos: Optional[Tensor] = None,
11 | dtype=torch.float32,
12 | batch_edge: Optional[Tensor] = None,
13 | ) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
14 | """Calculate triplet edge index.
15 |
16 | Args:
17 | edge_index (Tensor): (2, n_edges) edge_index for graph. It must be bidirectional edge.
18 | shift_pos (Tensor or None): (n_edges, 3) used to calculate unique atoms when pbc=True.
19 | dtype: dtype for `multiplicity`
20 | batch_edge (Tensor or None): Specify batch indices for `edge_index`.
21 |
22 | Returns:
23 |         triplet_node_index (Tensor): (n_triplets, 3) node indices `i`, `j`, `k` of each triplet.
24 |             i.e.: idx_i, idx_j, idx_k = triplet_node_index[:, 0], triplet_node_index[:, 1], triplet_node_index[:, 2]
25 |         multiplicity (Tensor): (n_triplets,) how many times the same triplet is counted.
26 |             It is always 1 in the non-pbc case, but can be 2 or 3 with pbc. dtype is specified by the argument.
27 |         edge_jk (Tensor): (n_triplets, 2=(j, k)) indices of the i-j and i-k edges.
28 |             i.e.: edge_j, edge_k = edge_jk[:, 0], edge_jk[:, 1]
29 |         batch_triplets (Tensor): (n_triplets,) batch index for each triplet.
30 | """
31 | dst, src = edge_index
32 | is_larger = dst >= src
33 | dst = dst[is_larger]
34 | src = src[is_larger]
35 | # sort `src`
36 | sort_inds = torch.argsort(src)
37 | src = src[sort_inds]
38 | dst = dst[sort_inds]
39 |
40 | if shift_pos is None:
41 | edge_indices = torch.arange(src.shape[0], dtype=torch.long, device=edge_index.device)
42 | else:
43 | edge_indices = torch.arange(shift_pos.shape[0], dtype=torch.long, device=edge_index.device)
44 | edge_indices = edge_indices[is_larger][sort_inds]
45 |
46 | if batch_edge is None:
47 | batch_edge = torch.zeros(src.shape[0], dtype=torch.long, device=edge_index.device)
48 | else:
49 | batch_edge = batch_edge[is_larger][sort_inds]
50 |
51 | unique, counts = torch.unique_consecutive(src, return_counts=True)
52 | counts_cumsum = torch.cumsum(counts, dim=0)
53 | counts_cumsum = torch.cat(
54 | [torch.zeros((1,), device=counts.device, dtype=torch.long), counts_cumsum], dim=0
55 | )
56 |
57 | if str(unique.device) == "cpu":
58 | return _calc_triplets_core(
59 | counts, unique, dst, edge_indices, batch_edge, counts_cumsum, dtype=dtype
60 | )
61 | else:
62 | return _calc_triplets_core_gpu(
63 | counts, unique, dst, edge_indices, batch_edge, counts_cumsum, dtype=dtype
64 | )
65 |
66 |
67 | def _calc_triplets_core(counts, unique, dst, edge_indices, batch_edge, counts_cumsum, dtype):
68 | device = unique.device
69 | n_triplets = torch.sum(counts * (counts - 1) / 2)
70 | if n_triplets == 0:
71 | # (n_triplet_edges, 3)
72 | triplet_node_index = torch.zeros((0, 3), dtype=torch.long, device=device)
73 | # (n_triplet_edges)
74 | multiplicity = torch.zeros((0,), dtype=dtype, device=device)
75 | # (n_triplet_edges, 2=(j, k))
76 | edge_jk = torch.zeros((0, 2), dtype=torch.long, device=device)
77 | # (n_triplet_edges)
78 | batch_triplets = torch.zeros((0,), dtype=torch.long, device=device)
79 | return triplet_node_index, multiplicity, edge_jk, batch_triplets
80 |
81 | triplet_node_index_list = [] # (n_triplet_edges, 3)
82 | edge_jk_list = [] # (n_triplet_edges, 2) represents j and k indices
83 | multiplicity_list = [] # (n_triplet_edges) represents multiplicity
84 | batch_triplets_list = [] # (n_triplet_edges) represents batch index for triplets
85 | for i in range(len(unique)):
86 | _src = unique[i].item()
87 | _n_edges = counts[i].item()
88 | _dst = dst[counts_cumsum[i] : counts_cumsum[i + 1]]
89 | _offset = counts_cumsum[i].item()
90 | _batch_index = batch_edge[counts_cumsum[i]].item()
91 | for j in range(_n_edges - 1):
92 | for k in range(j + 1, _n_edges):
93 |                 _dst0 = _dst[j].item()  # _dst0 may be swapped with _dst1 below, so reset both here.
94 | _dst1 = _dst[k].item()
95 | batch_triplets_list.append(_batch_index)
96 | # --- triplet_node_index_list & shift_list in sorted way... ---
97 | # sort order to be _src <= _dst0 <= _dst1, and i <= _j <= _k
98 | if _dst0 <= _dst1:
99 | _j, _k = j, k
100 | else:
101 | _dst0, _dst1 = _dst1, _dst0
102 | _j, _k = k, j
103 |
104 | triplet_node_index_list.append([_src, _dst0, _dst1])
105 | edge_jk_list.append(
106 | [
107 | _offset + _j,
108 | _offset + _k,
109 | ]
110 | )
111 | # --- multiplicity ---
112 | if _dst0 == _dst1:
113 | if _src == _dst0:
114 | # Case 0: _src == _dst0 == _dst1
115 | multiplicity_list.append(3.0)
116 | else:
117 | # Case 1: _src < _dst0 == _dst1
118 | multiplicity_list.append(1.0)
119 | else:
120 | if _src == _dst0:
121 | # Case 2: _src == _dst0 < _dst1
122 | multiplicity_list.append(2.0)
123 | else:
124 |                         assert _src < _dst0
125 |                         assert _src < _dst1
126 |                         # Case 3: _src < _dst0 < _dst1
127 | multiplicity_list.append(1.0)
128 |
129 | # (n_triplet_edges, 3)
130 | triplet_node_index = torch.as_tensor(triplet_node_index_list, device=device)
131 | # (n_triplet_edges)
132 | multiplicity = torch.as_tensor(multiplicity_list, dtype=dtype, device=device)
133 | # (n_triplet_edges, 2=(j, k))
134 | edge_jk = edge_indices[torch.tensor(edge_jk_list, dtype=torch.long, device=device)]
135 |     # (n_triplet_edges)
136 | batch_triplets = torch.as_tensor(batch_triplets_list, dtype=torch.long, device=device)
137 | return triplet_node_index, multiplicity, edge_jk, batch_triplets
138 |
--------------------------------------------------------------------------------
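A small usage sketch for `calc_triplets` (CPU path): given a bidirectional edge_index, it returns per-triplet node indices, multiplicities, the indices of the i-j / i-k edges and batch indices.

    import torch
    from torch_dftd.functions.triplets import calc_triplets

    # Bidirectional edges of a triangle graph 0-1, 0-2, 1-2.
    edge_index = torch.tensor([[0, 1, 0, 2, 1, 2],
                               [1, 0, 2, 0, 2, 1]], dtype=torch.long)
    triplet_node_index, multiplicity, edge_jk, batch_triplets = calc_triplets(edge_index)
    print(triplet_node_index)  # tensor([[0, 1, 2]]): one triplet with i=0, j=1, k=2
    print(multiplicity)        # tensor([1.])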
/tests/test_torch_dftd3_calculator_zero_adjacency.py:
--------------------------------------------------------------------------------
1 | """
2 | Tests for zero-adjacency systems (no neighbor pairs within the cutoff); the external DFTD3 program is not required.
3 | """
4 | import tempfile
5 | from typing import List
6 |
7 | import numpy as np
8 | import pytest
9 | import torch
10 | from ase import Atoms
11 | from ase.build import fcc111, molecule
12 | from ase.calculators.emt import EMT
13 | from torch_dftd.testing.damping import damping_method_list, damping_xc_combination_list
14 | from torch_dftd.torch_dftd3_calculator import TorchDFTD3Calculator
15 |
16 |
17 | def _create_atoms() -> List[Atoms]:
18 | """Initialization"""
19 | H = molecule("H")
20 | H_pbc = molecule("H", vacuum=100.0, pbc=True)
21 | null = Atoms()
22 | null_pbc = Atoms(cell=np.eye(3) * 100, pbc=True)
23 | return [H, H_pbc, null, null_pbc]
24 |
25 |
26 | def _assert_energy_equal(calc, atoms: Atoms):
27 | calc.reset()
28 | atoms.calc = calc
29 | e1 = atoms.get_potential_energy()
30 |
31 | e2 = 0.0
32 | assert np.allclose(e1, e2, atol=1e-4, rtol=1e-4)
33 |
34 |
35 | def _test_calc_energy(damping, xc, old, atoms, device="cpu", dtype=torch.float64, abc=False):
36 | cutoff = 25.0 # Make test faster
37 | torch_dftd3_calc = TorchDFTD3Calculator(
38 | damping=damping, xc=xc, device=device, dtype=dtype, old=old, cutoff=cutoff, abc=abc
39 | )
40 | _assert_energy_equal(torch_dftd3_calc, atoms)
41 |
42 |
43 | def _assert_energy_force_stress_equal(calc, atoms: Atoms):
44 | calc.reset()
45 | atoms.calc = calc
46 | f1 = atoms.get_forces()
47 | e1 = atoms.get_potential_energy()
48 |
49 | if calc.dft is not None:
50 | calc2 = calc.dft
51 | calc2.reset()
52 | atoms.calc = calc2
53 | e2 = atoms.get_potential_energy()
54 | f2 = atoms.get_forces()
55 | else:
56 | f2 = np.zeros_like(atoms.get_positions())
57 | e2 = 0.0
58 | assert np.allclose(e1, e2, atol=1e-4, rtol=1e-4), (e1, e2)
59 | assert np.allclose(f1, f2, atol=1e-5, rtol=1e-5)
60 | if np.all(atoms.pbc == np.array([True, True, True])):
61 | s1 = atoms.get_stress()
62 | s2 = np.zeros([6])
63 | assert np.allclose(s1, s2, atol=1e-5, rtol=1e-5)
64 |
65 |
66 | def _test_calc_energy_force_stress(
67 | damping, xc, old, atoms, device="cpu", dtype=torch.float64, abc=False, cnthr=15.0
68 | ):
69 | cutoff = 22.0 # Make test faster
70 | torch_dftd3_calc = TorchDFTD3Calculator(
71 | damping=damping,
72 | xc=xc,
73 | device=device,
74 | dtype=dtype,
75 | old=old,
76 | cutoff=cutoff,
77 | cnthr=cnthr,
78 | abc=abc,
79 | )
80 | _assert_energy_force_stress_equal(torch_dftd3_calc, atoms)
81 |
82 |
83 | @pytest.mark.parametrize("damping,xc,old", damping_xc_combination_list)
84 | @pytest.mark.parametrize("atoms", _create_atoms())
85 | def test_calc_energy(damping, xc, old, atoms):
86 | """Test1-1: check damping,xc,old combination works for energy"""
87 | _test_calc_energy(damping, xc, old, atoms, device="cpu")
88 |
89 |
90 | @pytest.mark.parametrize("damping,xc,old", damping_xc_combination_list)
91 | @pytest.mark.parametrize("atoms", _create_atoms())
92 | def test_calc_energy_force_stress(damping, xc, old, atoms):
93 | """Test1-2: check damping,xc,old combination works for energy, force & stress"""
94 | _test_calc_energy_force_stress(damping, xc, old, atoms, device="cpu")
95 |
96 |
97 | @pytest.mark.parametrize("damping,old", damping_method_list)
98 | @pytest.mark.parametrize("atoms", _create_atoms())
99 | @pytest.mark.parametrize("device", ["cpu", "cuda:0"])
100 | @pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
101 | def test_calc_energy_device(damping, old, atoms, device, dtype):
102 |     """Test2-1: check device and dtype dependency, varying only the damping method."""
103 | xc = "pbe"
104 | _test_calc_energy(damping, xc, old, atoms, device=device, dtype=dtype)
105 |
106 |
107 | @pytest.mark.parametrize("damping,old", damping_method_list)
108 | @pytest.mark.parametrize("atoms", _create_atoms())
109 | @pytest.mark.parametrize("device", ["cpu", "cuda:0"])
110 | @pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
111 | def test_calc_energy_force_stress_device(damping, old, atoms, device, dtype):
112 |     """Test2-2: check device and dtype dependency, varying only the damping method."""
113 | xc = "pbe"
114 | _test_calc_energy_force_stress(damping, xc, old, atoms, device=device, dtype=dtype)
115 |
116 |
117 | @pytest.mark.parametrize("atoms", _create_atoms())
118 | @pytest.mark.parametrize("damping,old", damping_method_list)
119 | def test_calc_energy_force_stress_bidirectional(atoms, damping, old):
120 | """Test with bidirectional=False"""
121 | device = "cpu"
122 | xc = "pbe"
123 | torch_dftd3_calc = TorchDFTD3Calculator(
124 | damping=damping, xc=xc, device=device, old=old, bidirectional=False
125 | )
126 | if np.all(atoms.pbc):
127 | # TODO: bidirectional=False is not implemented for pbc now.
128 | with pytest.raises(NotImplementedError):
129 | _assert_energy_force_stress_equal(torch_dftd3_calc, atoms)
130 | else:
131 | _assert_energy_force_stress_equal(torch_dftd3_calc, atoms)
132 |
133 |
134 | @pytest.mark.parametrize("atoms", _create_atoms())
135 | @pytest.mark.parametrize("damping,old", damping_method_list)
136 | def test_calc_energy_force_stress_cutoff_smoothing(atoms, damping, old):
137 |     """Test with cutoff_smoothing."""
138 | device = "cpu"
139 | xc = "pbe"
140 | cutoff_smoothing = "poly"
141 | torch_dftd3_calc = TorchDFTD3Calculator(
142 | damping=damping,
143 | xc=xc,
144 | device=device,
145 | old=old,
146 | bidirectional=False,
147 | cutoff_smoothing=cutoff_smoothing,
148 | )
149 | try:
150 | _assert_energy_force_stress_equal(torch_dftd3_calc, atoms)
151 | except NotImplementedError:
152 | print("NotImplementedError with atoms", atoms)
153 |         # bidirectional=False is not implemented for some systems (e.g. with pbc).
154 | pass
155 |
156 |
157 | def test_calc_energy_force_stress_with_dft():
158 | """Test with `dft` argument"""
159 | atoms = molecule("H")
160 |     # Use EMT as the base calculator; it provides rough parameters for H & C, which is enough for this test.
161 | # https://wiki.fysik.dtu.dk/ase/ase/calculators/emt.html#module-ase.calculators.emt
162 | dft = EMT()
163 | damping = "bj"
164 | old = False
165 | device = "cpu"
166 | xc = "pbe"
167 | torch_dftd3_calc = TorchDFTD3Calculator(
168 | damping=damping, xc=xc, device=device, old=old, bidirectional=False, dft=dft
169 | )
170 | _assert_energy_force_stress_equal(torch_dftd3_calc, atoms)
171 |
172 |
173 | @pytest.mark.parametrize("damping,old", damping_method_list)
174 | @pytest.mark.parametrize("atoms", _create_atoms())
175 | @pytest.mark.parametrize("device", ["cpu", "cuda:0"])
176 | @pytest.mark.parametrize("dtype", [torch.float64])
177 | @pytest.mark.parametrize("abc", [True])
178 | def test_calc_energy_force_stress_device_abc(damping, old, atoms, device, dtype, abc):
179 |     """Test: check the three-body (abc) term with device and dtype dependency."""
180 | xc = "pbe"
181 | _test_calc_energy_force_stress(
182 | damping, xc, old, atoms, device=device, dtype=dtype, abc=abc, cnthr=7.0
183 | )
184 |
185 |
186 | if __name__ == "__main__":
187 | pytest.main([__file__, "-v", "-s"])
188 |
--------------------------------------------------------------------------------
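The behaviour covered above in one line: a structure with no neighbor pairs inside the cutoff (e.g. a single isolated atom) yields zero dispersion energy. A hedged sketch:

    from ase.build import molecule
    from torch_dftd.torch_dftd3_calculator import TorchDFTD3Calculator

    atoms = molecule("H")  # single hydrogen atom, no edges within the cutoff
    atoms.calc = TorchDFTD3Calculator(damping="bj", xc="pbe", device="cpu")
    print(atoms.get_potential_energy())  # ~0.0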
/torch_dftd/nn/base_dftd_module.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, List, Optional, Tuple
2 |
3 | import numpy as np
4 | import torch
5 | from ase.neighborlist import primitive_neighbor_list
6 | from ase.units import Bohr
7 | from torch import Tensor, nn
8 |
9 |
10 | class BaseDFTDModule(nn.Module):
11 | """BaseDFTDModule"""
12 |
13 | def calc_energy_batch(
14 | self,
15 | Z: Tensor,
16 | pos: Tensor,
17 | edge_index: Tensor,
18 | cell: Optional[Tensor] = None,
19 | pbc: Optional[Tensor] = None,
20 | shift_pos: Optional[Tensor] = None,
21 | batch: Optional[Tensor] = None,
22 | batch_edge: Optional[Tensor] = None,
23 | damping: str = "zero",
24 | ) -> Tensor:
25 |         """Forward computation to calculate the dispersion energy (summed per graph).
26 |
27 | Each subclass should override this method
28 |
29 | Args:
30 | Z (Tensor): (n_atoms,) atomic numbers.
31 |             pos (Tensor): (n_atoms, 3) atom positions in angstrom
32 |             edge_index (Tensor): (2, n_edges) edge index within cutoff
33 |             cell (Tensor): (3, 3) or (bs, 3, 3) cell size in angstrom, None for non periodic system.
34 |             pbc (Tensor): (3,) or (bs, 3) pbc condition, None for non periodic system.
35 |             shift_pos (Tensor): (n_edges, 3) shift vector (length unit).
36 |             batch (Tensor): (n_atoms,) Specify which graph this atom belongs to
37 |             batch_edge (Tensor): (n_edges,) Specify which graph this edge belongs to
38 |             damping (str): damping method. "zero", "bj", "zerom", "bjm"
39 |
40 | Returns:
41 |             energy (Tensor): (n_graphs,) dispersion energy per graph
42 | """
43 | raise NotImplementedError()
44 |
45 | def calc_energy(
46 | self,
47 | Z: Tensor,
48 | pos: Tensor,
49 | edge_index: Tensor,
50 | cell: Optional[Tensor] = None,
51 | pbc: Optional[Tensor] = None,
52 | shift_pos: Optional[Tensor] = None,
53 | batch: Optional[Tensor] = None,
54 | batch_edge: Optional[Tensor] = None,
55 | damping: str = "zero",
56 | ) -> List[Dict[str, Any]]:
57 | """Forward computation of dispersion energy
58 |
59 | Backward computation is skipped for fast computation of only energy.
60 |
61 | Args:
62 | Z (Tensor): (n_atoms,) atomic numbers.
63 | pos (Tensor): atom positions in angstrom
64 | edge_index (Tensor):
65 | cell (Tensor): cell size in angstrom, None for non periodic system.
66 | pbc (Tensor): pbc condition, None for non periodic system.
67 |             shift_pos (Tensor): (n_edges, 3) shift vector (length unit).
68 | batch (Tensor):
69 | batch_edge (Tensor):
70 | damping (str): damping method. "zero", "bj", "zerom", "bjm"
71 |
72 | Returns:
73 |             results_list (list): calculated results. Each dict contains the energy under the "energy" key.
74 | """
75 | with torch.no_grad():
76 | E_disp = self.calc_energy_batch(
77 | Z, pos, edge_index, cell, pbc, shift_pos, batch, batch_edge, damping=damping
78 | )
79 | if batch is None:
80 | return [{"energy": E_disp.item()}]
81 | else:
82 | if batch.size()[0] == 0:
83 | n_graphs = 1
84 | else:
85 | n_graphs = int(batch[-1]) + 1
86 | return [{"energy": E_disp[i].item()} for i in range(n_graphs)]
87 |
88 | def calc_energy_and_forces(
89 | self,
90 | Z: Tensor,
91 | pos: Tensor,
92 | edge_index: Tensor,
93 | cell: Optional[Tensor] = None,
94 | pbc: Optional[Tensor] = None,
95 | shift_pos: Optional[Tensor] = None,
96 | batch: Optional[Tensor] = None,
97 | batch_edge: Optional[Tensor] = None,
98 | damping: str = "zero",
99 | ) -> List[Dict[str, Any]]:
100 | """Forward computation of dispersion energy, force and stress
101 |
102 | Args:
103 | Z (Tensor): (n_atoms,) atomic numbers.
104 | pos (Tensor): atom positions in angstrom
105 | cell (Tensor): cell size in angstrom, None for non periodic system.
106 | pbc (Tensor): pbc condition, None for non periodic system.
107 |             shift_pos (Tensor): (n_edges, 3) shift vector (length unit).
108 | damping (str): damping method. "zero", "bj", "zerom", "bjm"
109 |
110 | Returns:
111 | results (list): calculated results. Contains following:
112 | "energy": ()
113 | "forces": (n_atoms, 3)
114 | "stress": (6,)
115 | """
116 | pos.requires_grad_(True)
117 | if cell is not None:
118 |             # pos depends on the `cell` size.
119 |             # This dependency must be included explicitly so that the cell gradient
120 |             # (needed for the stress) can be computed.
121 |             # pos is assumed to lie inside `cell`, i.e. its fractional coordinates are between 0 and 1.
122 | assert isinstance(shift_pos, Tensor)
123 | shift_pos.requires_grad_(True)
124 |
125 | E_disp = self.calc_energy_batch(
126 | Z, pos, edge_index, cell, pbc, shift_pos, batch, batch_edge, damping=damping
127 | )
128 |
129 | E_disp.sum().backward()
130 | forces = -pos.grad # [eV/angstrom]
131 | if batch is None:
132 | results_list = [{"energy": E_disp.item(), "forces": forces.cpu().numpy()}]
133 | else:
134 | if batch.size()[0] == 0:
135 | n_graphs = 1
136 | else:
137 | n_graphs = int(batch[-1]) + 1
138 | results_list = [{"energy": E_disp[i].item()} for i in range(n_graphs)]
139 | for i in range(n_graphs):
140 | results_list[i]["forces"] = forces[batch == i].cpu().numpy()
141 |
142 | if cell is not None:
143 | # stress = torch.mm(cell_grad, cell.T) / cell_volume
144 | # Get stress in Voigt notation (xx, yy, zz, yz, xz, xy)
145 | assert isinstance(shift_pos, Tensor)
146 | voigt_left = [0, 1, 2, 1, 2, 0]
147 | voigt_right = [0, 1, 2, 2, 0, 1]
148 | if batch is None:
149 | cell_volume = torch.det(cell).abs()
150 | cell_grad = torch.sum(
151 | (pos[:, voigt_left] * pos.grad[:, voigt_right]).to(torch.float64), dim=0
152 | )
153 | cell_grad += torch.sum(
154 | (shift_pos[:, voigt_left] * shift_pos.grad[:, voigt_right]).to(torch.float64),
155 | dim=0,
156 | )
157 | stress = cell_grad.to(cell.dtype) / cell_volume
158 | results_list[0]["stress"] = stress.detach().cpu().numpy()
159 | else:
160 | assert isinstance(batch_edge, Tensor)
161 | # cell (bs, 3, 3)
162 | cell_volume = torch.det(cell).abs()
163 | cell_grad = pos.new_zeros((n_graphs, 6), dtype=torch.float64)
164 | cell_grad.scatter_add_(
165 | 0,
166 | batch.view(batch.size()[0], 1).expand(batch.size()[0], 6),
167 | (pos[:, voigt_left] * pos.grad[:, voigt_right]).to(torch.float64),
168 | )
169 | cell_grad.scatter_add_(
170 | 0,
171 | batch_edge.view(batch_edge.size()[0], 1).expand(batch_edge.size()[0], 6),
172 | (shift_pos[:, voigt_left] * shift_pos.grad[:, voigt_right]).to(torch.float64),
173 | )
174 | stress = cell_grad.to(cell.dtype) / cell_volume[:, None]
175 | stress = stress.detach().cpu().numpy()
176 | for i in range(n_graphs):
177 | results_list[i]["stress"] = stress[i]
178 | return results_list
179 |
--------------------------------------------------------------------------------
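A short reading of the stress evaluation above: since every atomic position and every edge shift vector scales linearly with the cell, the Voigt stress components reduce to sigma_ab = (1/V) * [ sum_atoms pos_a * dE/dpos_b + sum_edges shift_a * dE/dshift_b ], evaluated for (a, b) in the order (xx, yy, zz, yz, xz, xy). The `voigt_left` / `voigt_right` index pairs select exactly these components, and in the batched branch the two `scatter_add_` calls accumulate them per graph before dividing by the per-cell volume.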
/torch_dftd/torch_dftd3_calculator.py:
--------------------------------------------------------------------------------
1 | from typing import Dict, Optional, Tuple
2 |
3 | import torch
4 | from ase import Atoms
5 | from ase.calculators.calculator import Calculator, PropertyNotImplementedError, all_changes
6 | from ase.units import Bohr
7 | from torch import Tensor
8 | from torch_dftd.dftd3_xc_params import get_dftd3_default_params
9 | from torch_dftd.functions.edge_extraction import calc_edge_index
10 | from torch_dftd.nn.dftd2_module import DFTD2Module
11 | from torch_dftd.nn.dftd3_module import DFTD3Module
12 |
13 |
14 | class TorchDFTD3Calculator(Calculator):
15 | """ase compatible DFTD3 calculator using pytorch
16 |
17 | Args:
18 | dft (Calculator or None): base dft calculator can be set here
19 | atoms (Atoms):
20 | damping (str): damping method. "zero", "bj", "zerom", "bjm"
21 | xc (str): exchange correlation functional
22 | old (bool): Use DFTD2 method when `True`, DFTD3 method is used when `False`
23 | device (str): torch device. Ex. "cuda:0" to use GPU ID 0
24 | cutoff (float): cutoff distance in angstrom. Default value is 95bohr := 50 angstrom.
25 | cnthr (float): coordination number cutoff distance in angstrom.
26 | Default value is 40bohr := 21 angstrom.
27 | abc (bool): ATM 3-body interaction
28 | dtype (dtype): internal calculation is done in this precision.
29 | bidirectional (bool): calculated `edge_index` is bidirectional or not.
30 | cutoff_smoothing (str): cutoff smoothing makes gradient smooth at `cutoff` distance
31 | **kwargs:
32 | """
33 |
34 | name = "TorchDFTD3Calculator"
35 | implemented_properties = ["energy", "forces", "stress"]
36 |
37 | def __init__(
38 | self,
39 | dft: Optional[Calculator] = None,
40 |         atoms: Optional[Atoms] = None,
41 | damping: str = "zero",
42 | xc: str = "pbe",
43 | old: bool = False,
44 | device: str = "cpu",
45 | cutoff: float = 95.0 * Bohr,
46 | cnthr: float = 40.0 * Bohr,
47 | abc: bool = False,
48 | # --- torch dftd3 specific params ---
49 | dtype: torch.dtype = torch.float32,
50 | bidirectional: bool = True,
51 | cutoff_smoothing: str = "none",
52 | **kwargs,
53 | ):
54 | self.dft = dft
55 | self.params = get_dftd3_default_params(damping, xc, old=old)
56 | self.damping = damping
57 | self.abc = abc
58 | self.old = old
59 | self.device = torch.device(device)
60 | if old:
61 | self.dftd_module: torch.nn.Module = DFTD2Module(
62 | self.params,
63 | cutoff=cutoff,
64 | dtype=dtype,
65 | bidirectional=bidirectional,
66 | cutoff_smoothing=cutoff_smoothing,
67 | )
68 | else:
69 | self.dftd_module = DFTD3Module(
70 | self.params,
71 | cutoff=cutoff,
72 | cnthr=cnthr,
73 | abc=abc,
74 | dtype=dtype,
75 | bidirectional=bidirectional,
76 | cutoff_smoothing=cutoff_smoothing,
77 | )
78 | self.dftd_module.to(device)
79 | self.dtype = dtype
80 | self.cutoff = cutoff
81 | self.bidirectional = bidirectional
82 | super(TorchDFTD3Calculator, self).__init__(atoms=atoms, **kwargs)
83 |
84 | def _calc_edge_index(
85 | self,
86 | pos: Tensor,
87 | cell: Optional[Tensor] = None,
88 | pbc: Optional[Tensor] = None,
89 | ) -> Tuple[Tensor, Tensor]:
90 | return calc_edge_index(
91 | pos, cell, pbc, cutoff=self.cutoff, bidirectional=self.bidirectional
92 | )
93 |
94 | def _preprocess_atoms(self, atoms: Atoms) -> Dict[str, Optional[Tensor]]:
95 | pos = torch.tensor(atoms.get_positions(), device=self.device, dtype=self.dtype)
96 | Z = torch.tensor(atoms.get_atomic_numbers(), device=self.device)
97 | if any(atoms.pbc):
98 | cell: Optional[Tensor] = torch.tensor(
99 | atoms.get_cell(), device=self.device, dtype=self.dtype
100 | )
101 | else:
102 | cell = None
103 | pbc = torch.tensor(atoms.pbc, device=self.device)
104 | edge_index, S = self._calc_edge_index(pos, cell, pbc)
105 | if cell is None:
106 | shift_pos = S
107 | else:
108 | shift_pos = torch.mm(S, cell.detach())
109 | input_dicts = dict(
110 | pos=pos, Z=Z, cell=cell, pbc=pbc, edge_index=edge_index, shift_pos=shift_pos
111 | )
112 | return input_dicts
113 |
114 | def calculate(self, atoms=None, properties=["energy"], system_changes=all_changes):
115 | Calculator.calculate(self, atoms, properties, system_changes)
116 | input_dicts = self._preprocess_atoms(atoms)
117 |
118 | if "forces" in properties or "stress" in properties:
119 | results = self.dftd_module.calc_energy_and_forces(**input_dicts, damping=self.damping)[
120 | 0
121 | ]
122 | else:
123 | results = self.dftd_module.calc_energy(**input_dicts, damping=self.damping)[0]
124 | self.results["energy"] = results["energy"]
125 | self.results["free_energy"] = self.results["energy"]
126 |
127 |         # Follow the reference (ASE) DFTD3 calculator implementation.
128 | if self.dft is not None:
129 | try:
130 | efree = self.dft.get_potential_energy(force_consistent=True)
131 | self.results["free_energy"] += efree
132 | except PropertyNotImplementedError:
133 | pass
134 |
135 | if "forces" in results:
136 | self.results["forces"] = results["forces"]
137 | if "stress" in results:
138 | self.results["stress"] = results["stress"]
139 |
140 | def get_property(self, name, atoms=None, allow_calculation=True):
141 | dft_result = None
142 | if self.dft is not None:
143 | dft_result = self.dft.get_property(name, atoms, allow_calculation)
144 |
145 | dftd3_result = Calculator.get_property(self, name, atoms, allow_calculation)
146 |
147 | if dft_result is None and dftd3_result is None:
148 | return None
149 | elif dft_result is None:
150 | return dftd3_result
151 | elif dftd3_result is None:
152 | return dft_result
153 | else:
154 | return dft_result + dftd3_result
155 |
156 | def batch_calculate(self, atoms_list=None, properties=["energy"], system_changes=all_changes):
157 | # Calculator.calculate(self, atoms, properties, system_changes)
158 | input_dicts_list = [self._preprocess_atoms(atoms) for atoms in atoms_list]
159 | # --- Make batch ---
160 | n_nodes_list = [d["Z"].shape[0] for d in input_dicts_list]
161 | shift_index_array = torch.cumsum(torch.tensor([0] + n_nodes_list), dim=0)
162 | cell_batch = torch.stack(
163 | [
164 | torch.eye(3, device=self.device, dtype=self.dtype)
165 | if d["cell"] is None
166 | else d["cell"]
167 | for d in input_dicts_list
168 | ]
169 | )
170 | batch_dicts = dict(
171 | Z=torch.cat([d["Z"] for d in input_dicts_list], dim=0), # (n_nodes,)
172 |             pos=torch.cat([d["pos"] for d in input_dicts_list], dim=0),  # (n_nodes, 3)
173 | cell=cell_batch, # (bs, 3, 3)
174 | pbc=torch.stack([d["pbc"] for d in input_dicts_list]), # (bs, 3)
175 |             shift_pos=torch.cat([d["shift_pos"] for d in input_dicts_list], dim=0),  # (n_edges, 3)
176 | )
177 |
178 | batch_dicts["edge_index"] = torch.cat(
179 | [d["edge_index"] + shift_index_array[i] for i, d in enumerate(input_dicts_list)],
180 | dim=1,
181 | )
182 | batch_dicts["batch"] = torch.cat(
183 | [
184 | torch.full((n_nodes,), i, dtype=torch.long, device=self.device)
185 | for i, n_nodes in enumerate(n_nodes_list)
186 | ],
187 | dim=0,
188 | )
189 | batch_dicts["batch_edge"] = torch.cat(
190 | [
191 | torch.full((d["edge_index"].shape[1],), i, dtype=torch.long, device=self.device)
192 | for i, d in enumerate(input_dicts_list)
193 | ],
194 | dim=0,
195 | )
196 |
197 | if "forces" in properties or "stress" in properties:
198 | results_list = self.dftd_module.calc_energy_and_forces(
199 | **batch_dicts, damping=self.damping
200 | )
201 | else:
202 | results_list = self.dftd_module.calc_energy(**batch_dicts, damping=self.damping)
203 | return results_list
204 |
--------------------------------------------------------------------------------
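A hedged usage sketch of the calculator above, standalone and stacked on a base calculator through the `dft` argument (the D3 correction is then added on top of the base energy and forces):

    from ase.build import molecule
    from ase.calculators.emt import EMT
    from torch_dftd.torch_dftd3_calculator import TorchDFTD3Calculator

    atoms = molecule("CH3CH2OCH3")
    atoms.calc = TorchDFTD3Calculator(damping="bj", xc="pbe", device="cpu")
    print("D3 correction only:", atoms.get_potential_energy())

    atoms.calc = TorchDFTD3Calculator(damping="bj", xc="pbe", device="cpu", dft=EMT())
    print("EMT + D3:", atoms.get_potential_energy())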
/tests/test_torch_dftd3_calculator.py:
--------------------------------------------------------------------------------
1 | """
2 | The DFTD3 program needs to be installed to run these tests.
3 | """
4 | import tempfile
5 | from typing import List
6 |
7 | import numpy as np
8 | import pytest
9 | import torch
10 | from ase import Atoms
11 | from ase.build import bulk, fcc111, molecule
12 | from ase.calculators.dftd3 import DFTD3
13 | from ase.calculators.emt import EMT
14 | from torch_dftd.testing.damping import damping_method_list, damping_xc_combination_list
15 | from torch_dftd.torch_dftd3_calculator import TorchDFTD3Calculator
16 |
17 |
18 | @pytest.fixture(
19 | params=[
20 | pytest.param("mol", id="mol"),
21 | pytest.param("slab", id="slab"),
22 | pytest.param("large", marks=[pytest.mark.slow], id="large"),
23 | ]
24 | )
25 | def atoms(request) -> Atoms:
26 | """Initialization"""
27 | mol = molecule("CH3CH2OCH3")
28 |
29 | slab = fcc111("Au", size=(2, 1, 3), vacuum=80.0)
30 | slab.set_cell(
31 | slab.get_cell().array @ np.array([[1.0, 0.1, 0.2], [0.0, 1.0, 0.3], [0.0, 0.0, 1.0]])
32 | )
33 | slab.pbc = np.array([True, True, True])
34 |
35 | large_bulk = bulk("Pt", "fcc") * (4, 4, 4)
36 |
37 | atoms_dict = {"mol": mol, "slab": slab, "large": large_bulk}
38 |
39 | return atoms_dict[request.param]
40 |
41 |
42 | def _assert_energy_equal(calc1, calc2, atoms: Atoms):
43 | calc1.reset()
44 | atoms.calc = calc1
45 | e1 = atoms.get_potential_energy()
46 |
47 | calc2.reset()
48 | atoms.calc = calc2
49 | e2 = atoms.get_potential_energy()
50 | assert np.allclose(e1, e2, atol=1e-4, rtol=1e-4)
51 |
52 |
53 | def _test_calc_energy(damping, xc, old, atoms, device="cpu", dtype=torch.float64, abc=False):
54 | cutoff = 25.0 # Make test faster
55 | with tempfile.TemporaryDirectory() as tmpdirname:
56 | dftd3_calc = DFTD3(
57 | damping=damping,
58 | xc=xc,
59 | grad=True,
60 | old=old,
61 | cutoff=cutoff,
62 | directory=tmpdirname,
63 | abc=abc,
64 | )
65 | torch_dftd3_calc = TorchDFTD3Calculator(
66 | damping=damping, xc=xc, device=device, dtype=dtype, old=old, cutoff=cutoff, abc=abc
67 | )
68 | _assert_energy_equal(dftd3_calc, torch_dftd3_calc, atoms)
69 |
70 |
71 | def _assert_energy_force_stress_equal(calc1, calc2, atoms: Atoms, force_tol: float = 1e-5):
72 | calc1.reset()
73 | atoms.calc = calc1
74 | f1 = atoms.get_forces()
75 | e1 = atoms.get_potential_energy()
76 | if np.all(atoms.pbc == np.array([True, True, True])):
77 | s1 = atoms.get_stress()
78 |
79 | calc2.reset()
80 | atoms.calc = calc2
81 | f2 = atoms.get_forces()
82 | e2 = atoms.get_potential_energy()
83 | assert np.allclose(e1, e2, atol=1e-4, rtol=1e-4)
84 | assert np.allclose(f1, f2, atol=force_tol, rtol=force_tol)
85 | if np.all(atoms.pbc == np.array([True, True, True])):
86 | s2 = atoms.get_stress()
87 | assert np.allclose(s1, s2, atol=1e-5, rtol=1e-5)
88 |
89 |
90 | def _test_calc_energy_force_stress(
91 | damping,
92 | xc,
93 | old,
94 | atoms,
95 | device="cpu",
96 | dtype=torch.float64,
97 | bidirectional=True,
98 | abc=False,
99 | cnthr=15.0,
100 | ):
101 | cutoff = 22.0 # Make test faster
102 | force_tol = 1e-5
103 | if dtype == torch.float32:
104 | force_tol = 1.0e-4
105 | with tempfile.TemporaryDirectory() as tmpdirname:
106 | dftd3_calc = DFTD3(
107 | damping=damping,
108 | xc=xc,
109 | grad=True,
110 | old=old,
111 | cutoff=cutoff,
112 | cnthr=cnthr,
113 | directory=tmpdirname,
114 | abc=abc,
115 | )
116 | torch_dftd3_calc = TorchDFTD3Calculator(
117 | damping=damping,
118 | xc=xc,
119 | device=device,
120 | dtype=dtype,
121 | old=old,
122 | cutoff=cutoff,
123 | cnthr=cnthr,
124 | abc=abc,
125 | bidirectional=bidirectional,
126 | )
127 | _assert_energy_force_stress_equal(dftd3_calc, torch_dftd3_calc, atoms, force_tol=force_tol)
128 |
129 |
130 | @pytest.mark.parametrize("damping,xc,old", damping_xc_combination_list)
131 | def test_calc_energy(damping, xc, old, atoms):
132 | """Test1-1: check damping,xc,old combination works for energy"""
133 | _test_calc_energy(damping, xc, old, atoms, device="cpu")
134 |
135 |
136 | @pytest.mark.parametrize("damping,xc,old", damping_xc_combination_list)
137 | def test_calc_energy_force_stress(damping, xc, old, atoms):
138 | """Test1-2: check damping,xc,old combination works for energy, force & stress"""
139 | _test_calc_energy_force_stress(damping, xc, old, atoms, device="cpu")
140 |
141 |
142 | @pytest.mark.parametrize("damping,old", damping_method_list)
143 | @pytest.mark.parametrize("device", ["cpu", "cuda:0"])
144 | @pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
145 | def test_calc_energy_device(damping, old, atoms, device, dtype):
146 |     """Test2-1: check device and dtype dependency, varying only the damping method."""
147 | xc = "pbe"
148 | _test_calc_energy(damping, xc, old, atoms, device=device, dtype=dtype)
149 |
150 |
151 | @pytest.mark.parametrize("damping,old", damping_method_list)
152 | @pytest.mark.parametrize("device", ["cpu", "cuda:0"])
153 | @pytest.mark.parametrize("dtype", [torch.float32, torch.float64])
154 | def test_calc_energy_force_stress_device(damping, old, atoms, device, dtype):
155 | """Test2-2: check device, dtype dependency. with only various damping method."""
156 |     """Test2-2: check device and dtype dependency, varying only the damping method."""
157 | _test_calc_energy_force_stress(damping, xc, old, atoms, device=device, dtype=dtype)
158 |
159 |
160 | @pytest.mark.parametrize("damping,old", damping_method_list)
161 | def test_calc_energy_force_stress_bidirectional(atoms, damping, old):
162 | """Test with bidirectional=False"""
163 | device = "cpu"
164 | xc = "pbe"
165 | with tempfile.TemporaryDirectory() as tmpdirname:
166 | dftd3_calc = DFTD3(damping=damping, xc=xc, grad=True, old=old, directory=tmpdirname)
167 | torch_dftd3_calc = TorchDFTD3Calculator(
168 | damping=damping, xc=xc, device=device, old=old, bidirectional=False
169 | )
170 | if np.all(atoms.pbc):
171 | # TODO: bidirectional=False is not implemented for pbc now.
172 | with pytest.raises(NotImplementedError):
173 | _assert_energy_force_stress_equal(dftd3_calc, torch_dftd3_calc, atoms)
174 | else:
175 | _assert_energy_force_stress_equal(dftd3_calc, torch_dftd3_calc, atoms)
176 |
177 |
178 | @pytest.mark.parametrize("damping,old", damping_method_list)
179 | def test_calc_energy_force_stress_cutoff_smoothing(atoms, damping, old):
180 |     """Test with cutoff_smoothing."""
181 | device = "cpu"
182 | xc = "pbe"
183 | cutoff_smoothing = "poly"
184 | with tempfile.TemporaryDirectory() as tmpdirname:
185 | dftd3_calc = DFTD3(damping=damping, xc=xc, grad=True, old=old, directory=tmpdirname)
186 | torch_dftd3_calc = TorchDFTD3Calculator(
187 | damping=damping,
188 | xc=xc,
189 | device=device,
190 | old=old,
191 | bidirectional=False,
192 | cutoff_smoothing=cutoff_smoothing,
193 | )
194 | try:
195 | _assert_energy_force_stress_equal(dftd3_calc, torch_dftd3_calc, atoms)
196 | except NotImplementedError:
197 | print("NotImplementedError with atoms", atoms)
198 |             # bidirectional=False is not implemented for some systems (e.g. with pbc).
199 | pass
200 |
201 |
202 | def test_calc_energy_force_stress_with_dft():
203 | """Test with `dft` argument"""
204 | atoms = molecule("CH3CH2OCH3")
205 |     # Use EMT as the base calculator; it provides rough parameters for H & C, which is enough for this test.
206 | # https://wiki.fysik.dtu.dk/ase/ase/calculators/emt.html#module-ase.calculators.emt
207 | dft = EMT()
208 | damping = "bj"
209 | old = False
210 | device = "cpu"
211 | xc = "pbe"
212 | with tempfile.TemporaryDirectory() as tmpdirname:
213 | dftd3_calc = DFTD3(
214 | damping=damping, xc=xc, grad=True, old=old, directory=tmpdirname, dft=dft
215 | )
216 | torch_dftd3_calc = TorchDFTD3Calculator(
217 | damping=damping, xc=xc, device=device, old=old, bidirectional=False, dft=dft
218 | )
219 | _assert_energy_force_stress_equal(dftd3_calc, torch_dftd3_calc, atoms)
220 |
221 |
222 | @pytest.mark.parametrize("damping,old", damping_method_list)
223 | @pytest.mark.parametrize("device", ["cpu", "cuda:0"])
224 | @pytest.mark.parametrize("dtype", [torch.float64])
225 | @pytest.mark.parametrize("bidirectional", [True, False])
226 | @pytest.mark.parametrize("abc", [True])
227 | def test_calc_energy_force_stress_device_abc(
228 | damping, old, atoms, device, dtype, bidirectional, abc
229 | ):
230 |     """Test: check the three-body (abc) term with device and dtype dependency."""
231 | xc = "pbe"
232 |     if np.all(atoms.pbc) and not bidirectional:
233 | # TODO: bidirectional=False is not implemented for pbc now.
234 | with pytest.raises(NotImplementedError):
235 | _test_calc_energy_force_stress(
236 | damping,
237 | xc,
238 | old,
239 | atoms,
240 | device=device,
241 | dtype=dtype,
242 | bidirectional=bidirectional,
243 | abc=abc,
244 | cnthr=7.0,
245 | )
246 | else:
247 | _test_calc_energy_force_stress(
248 | damping,
249 | xc,
250 | old,
251 | atoms,
252 | device=device,
253 | dtype=dtype,
254 | bidirectional=bidirectional,
255 | abc=abc,
256 | cnthr=7.0,
257 | )
258 |
259 |
260 | if __name__ == "__main__":
261 | pytest.main([__file__, "-v", "-s"])
262 |
--------------------------------------------------------------------------------
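These comparison tests shell out to the reference `dftd3` binary via `ase.calculators.dftd3.DFTD3`, so that program must be on PATH. The large-bulk cases are marked `slow` and the GPU cases are parametrized with `cuda:0`; for a quick CPU-only run they can be deselected with standard pytest selection, for example `pytest tests/test_torch_dftd3_calculator.py -m "not slow" -k "not cuda"`.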
/torch_dftd/functions/dftd3.py:
--------------------------------------------------------------------------------
1 | """pytorch implementation of Grimme's D3 method""" # NOQA
2 | from typing import Dict, Optional
3 |
4 | import torch
5 | from torch import Tensor
6 | from torch_dftd.functions.distance import calc_distances
7 | from torch_dftd.functions.smoothing import poly_smoothing
8 | from torch_dftd.functions.triplets import calc_triplets
9 |
10 | # conversion factors used in grimme d3 code
11 |
12 | d3_autoang = 0.52917726 # for converting distance from bohr to angstrom
13 | d3_autoev = 27.21138505 # for converting a.u. to eV
14 |
15 | d3_k1 = 16.000
16 | d3_k2 = 4 / 3
17 | d3_k3 = -4.000
18 | d3_maxc = 5  # maximum number of reference coordination numbers per element in the C6 table
19 |
20 |
21 | def _ncoord(
22 | Z: Tensor,
23 | r: Tensor,
24 | idx_i: Tensor,
25 | idx_j: Tensor,
26 | rcov: Tensor,
27 | cutoff: Optional[float] = None,
28 | k1: float = d3_k1,
29 | cutoff_smoothing: str = "none",
30 | bidirectional: bool = False,
31 | ) -> Tensor:
32 | """Compute coordination numbers by adding an inverse damping function
33 |
34 | Args:
35 | Z: (n_atoms,)
36 | r: (n_edges,)
37 |         idx_i, idx_j: (n_edges,)
38 |         cutoff:
39 |         k1:
40 |         rcov: (n_atom_types,) covalent radii
41 |
42 | Returns:
43 | g (Tensor): (n_atoms,) coordination number for each atom
44 | """
45 | if cutoff is not None:
46 | # Calculate _ncoord only for r < cutoff
47 | within_cutoff = r <= cutoff
48 | r = r[within_cutoff]
49 | # Zi = Zi[within_cutoff]
50 | # Zj = Zj[within_cutoff]
51 | idx_i = idx_i[within_cutoff]
52 | idx_j = idx_j[within_cutoff]
53 | Zi = Z[idx_i]
54 | Zj = Z[idx_j]
55 | rco = rcov[Zi] + rcov[Zj] # (n_edges,)
56 | rr = rco.type(r.dtype) / r
57 | damp = 1.0 / (1.0 + torch.exp(-k1 * (rr - 1.0)))
58 | if cutoff is not None and cutoff_smoothing == "poly":
59 | damp *= poly_smoothing(r, cutoff)
60 |
61 | n_atoms = Z.shape[0]
62 | g = damp.new_zeros((n_atoms,))
63 | g = g.scatter_add_(0, idx_i, damp)
64 | if not bidirectional:
65 | g = g.scatter_add_(0, idx_j, damp)
66 | return g # (n_atoms,)
67 |
68 |
69 | def _getc6(
70 | Zi: Tensor, Zj: Tensor, nci: Tensor, ncj: Tensor, c6ab: Tensor, k3: float = d3_k3
71 | ) -> Tensor:
72 | """interpolate c6
73 |
74 | Args:
75 | Zi: (n_edges,)
76 | Zj: (n_edges,)
77 | nci: (n_edges,)
78 | ncj: (n_edges,)
79 | c6ab:
80 | k3:
81 |
82 | Returns:
83 | c6 (Tensor): (n_edges,)
84 | """
85 | # gather the relevant entries from the table
86 | # c6ab (95, 95, 5, 5, 3) --> c6ab_ (n_edges, 5, 5, 3)
87 | c6ab_ = c6ab[Zi, Zj].type(nci.dtype)
88 | # calculate c6 coefficients
89 |
90 | # cn0, cn1, cn2 (n_edges, 5, 5)
91 | cn0 = c6ab_[:, :, :, 0]
92 | cn1 = c6ab_[:, :, :, 1]
93 | cn2 = c6ab_[:, :, :, 2]
94 | r = (cn1 - nci[:, None, None]) ** 2 + (cn2 - ncj[:, None, None]) ** 2
95 |
96 | n_edges = r.shape[0]
97 | n_c6ab = r.shape[1] * r.shape[2]
98 | if cn0.size(0) == 0:
99 | k3_rnc = (k3 * r).view(n_edges, n_c6ab)
100 | else:
101 | k3_rnc = torch.where(cn0 > 0.0, k3 * r, -1.0e20 * torch.ones_like(r)).view(n_edges, n_c6ab)
102 | r_ratio = torch.softmax(k3_rnc, dim=1)
103 | c6 = (r_ratio * cn0.view(n_edges, n_c6ab)).sum(dim=1)
104 | return c6
105 |
106 |
107 | def edisp(
108 | Z: Tensor,
109 | r: Tensor,
110 | edge_index: Tensor,
111 | c6ab: Tensor,
112 | r0ab: Tensor,
113 | rcov: Tensor,
114 | r2r4: Tensor,
115 | params: Dict[str, float],
116 | cutoff: Optional[float] = None,
117 | cnthr: Optional[float] = None,
118 | batch: Optional[Tensor] = None,
119 | batch_edge: Optional[Tensor] = None,
120 | shift_pos: Optional[Tensor] = None,
121 | pos: Optional[Tensor] = None,
122 | cell: Optional[Tensor] = None,
123 | r2=None,
124 | r6=None,
125 | r8=None,
126 | k1=d3_k1,
127 | k2=d3_k2,
128 | k3=d3_k3,
129 | cutoff_smoothing: str = "none",
130 | damping: str = "zero",
131 | bidirectional: bool = False,
132 | abc: bool = False,
133 | ):
134 | """compute d3 dispersion energy in Hartree
135 |
136 | Args:
137 | Z (Tensor): (n_atoms,) atomic numbers
138 | r (Tensor): (n_edges,) distance in **bohr**
139 | edge_index (Tensor): (2, n_edges)
140 | c6ab (Tensor): (n_atom_types, n_atom_types, n_cn=5, n_cn=5, 3) Pre-computed C6AB parameter
141 | r0ab (Tensor): (n_atom_types, n_atom_types) Pre-computed R0AB parameter
142 | rcov (Tensor): (n_atom_types,) Pre-computed Rcov parameter
143 | r2r4 (Tensor): (n_atom_types,) Pre-computed R2R4 parameter
144 | params (dict): xc-dependent parameters. alp, s6, rs6, s18, rs18.
145 | cutoff (float or None): cutoff distance in **bohr**
146 | cnthr (float or None): cutoff distance for coordination number calculation in **bohr**
147 | batch (Tensor or None): (n_atoms,)
148 | batch_edge (Tensor or None): (n_edges,)
149 |         shift_pos (Tensor or None): (n_edges, 3) edge shift vectors, used to calculate the 3-body term when abc=True
150 | pos (Tensor): (n_atoms, 3) position in **bohr**
151 | cell (Tensor): (3, 3) cell size in **bohr**
152 | r2 (Tensor or None):
153 | r6 (Tensor or None):
154 | r8 (Tensor or None):
155 | k1 (float):
156 | k2 (float):
157 | k3 (float):
158 | cutoff_smoothing (str): cutoff smoothing makes gradient smooth at `cutoff` distance
159 |         damping (str): damping method. "zero", "bj", "zerom", "bjm" are supported.
160 | bidirectional (bool): calculated `edge_index` is bidirectional or not.
161 | abc (bool): ATM 3-body interaction
162 |
163 | Returns:
164 | energy: (n_graphs,) Energy in Hartree unit.
165 | """
166 | # compute all necessary powers of the distance
167 | if r2 is None:
168 | r2 = r ** 2 # square of distances
169 | if r6 is None:
170 | r6 = r2 ** 3
171 | if r8 is None:
172 | r8 = r6 * r2
173 |
174 | idx_i, idx_j = edge_index
175 | # compute all necessary quantities
176 | Zi = Z[idx_i] # (n_edges,)
177 | Zj = Z[idx_j]
178 |
179 | nc = _ncoord(
180 | Z,
181 | r,
182 | idx_i,
183 | idx_j,
184 | rcov=rcov,
185 | cutoff=cnthr,
186 | cutoff_smoothing=cutoff_smoothing,
187 | k1=k1,
188 | bidirectional=bidirectional,
189 | ) # coordination numbers (n_atoms,)
190 |
191 | nci = nc[idx_i]
192 | ncj = nc[idx_j]
193 | c6 = _getc6(Zi, Zj, nci, ncj, c6ab=c6ab, k3=k3) # c6 coefficients
194 |
195 | c8 = 3 * c6 * r2r4[Zi].type(c6.dtype) * r2r4[Zj].type(c6.dtype) # c8 coefficient
196 |
197 | s6 = params["s6"]
198 | s8 = params["s18"]
199 | if damping in ["bj", "bjm"]:
200 | a1 = params["rs6"]
201 | a2 = params["rs18"]
202 |
203 |         # Becke-Johnson (rational) damping: keeps the dispersion energy finite as r -> 0
204 |         # (zero damping is handled in the branch below).
205 | tmp = a1 * torch.sqrt(c8 / c6) + a2
206 | tmp2 = tmp ** 2
207 | tmp6 = tmp2 ** 3
208 | tmp8 = tmp6 * tmp2
209 | e6 = 1 / (r6 + tmp6)
210 | e8 = 1 / (r8 + tmp8)
211 | elif damping == "zero":
212 | rs6 = params["rs6"]
213 | rs8 = params["rs18"]
214 | alp = params["alp"]
215 | alp6 = alp
216 | alp8 = alp + 2.0
217 | tmp2 = r0ab[Zi, Zj]
218 | rr = tmp2 / r
219 | damp6 = 1.0 / (1.0 + 6.0 * (rs6 * rr) ** alp6)
220 | damp8 = 1.0 / (1.0 + 6.0 * (rs8 * rr) ** alp8)
221 | e6 = damp6 / r6
222 | e8 = damp8 / r8
223 | elif damping == "zerom":
224 | rs6 = params["rs6"]
225 | rs8 = params["rs18"]
226 | alp = params["alp"]
227 | alp6 = alp
228 | alp8 = alp + 2.0
229 | tmp2 = r0ab[Zi, Zj]
230 | r0_beta = rs8 * tmp2
231 | rr = r / tmp2
232 | tmp = rr / rs6 + r0_beta
233 | damp6 = 1.0 / (1.0 + 6.0 * tmp ** (-alp6))
234 | tmp = rr + r0_beta
235 | damp8 = 1.0 / (1.0 + 6.0 * tmp ** (-alp8))
236 | e6 = damp6 / r6
237 | e8 = damp8 / r8
238 | else:
239 | raise ValueError(f"[ERROR] Unexpected value damping={damping}")
240 |
241 | e6 = -0.5 * s6 * c6 * e6 # (n_edges,)
242 | e8 = -0.5 * s8 * c8 * e8 # (n_edges,)
243 | e68 = e6 + e8
244 |
245 | if cutoff is not None and cutoff_smoothing == "poly":
246 | e68 *= poly_smoothing(r, cutoff)
247 |
248 | if batch_edge is None:
249 | # (1,)
250 | g = e68.to(torch.float64).sum()[None]
251 | else:
252 | # (n_graphs,)
253 | if batch.size()[0] == 0:
254 | n_graphs = 1
255 | else:
256 | n_graphs = int(batch[-1]) + 1
257 | g = e68.new_zeros((n_graphs,), dtype=torch.float64)
258 | g.scatter_add_(0, batch_edge, e68.to(torch.float64))
259 |
260 | if not bidirectional:
261 | g *= 2.0
262 |
263 | if abc:
264 | within_cutoff = r <= cnthr
265 | # r_abc = r[within_cutoff]
266 | # r2_abc = r2[within_cutoff]
267 | edge_index_abc = edge_index[:, within_cutoff]
268 | batch_edge_abc = None if batch_edge is None else batch_edge[within_cutoff]
269 | # c6_abc = c6[within_cutoff]
270 | shift_abc = None if shift_pos is None else shift_pos[within_cutoff]
271 |
272 | n_atoms = Z.shape[0]
273 | if not bidirectional:
274 | # (2, n_edges) -> (2, n_edges * 2)
275 | edge_index_abc = torch.cat([edge_index_abc, edge_index_abc.flip(dims=[0])], dim=1)
276 | # (n_edges, ) -> (n_edges * 2, )
277 | batch_edge_abc = (
278 | None
279 | if batch_edge_abc is None
280 | else torch.cat([batch_edge_abc, batch_edge_abc], dim=0)
281 | )
282 | # (n_edges, ) -> (n_edges * 2, )
283 | shift_abc = None if shift_abc is None else torch.cat([shift_abc, -shift_abc], dim=0)
284 | with torch.no_grad():
285 | # triplet_node_index, triplet_edge_index = calc_triplets_cycle(edge_index_abc, n_atoms, shift=shift_abc)
286 | # Type hinting
287 | triplet_node_index: Tensor
288 | multiplicity: Tensor
289 | edge_jk: Tensor
290 | batch_triplets: Optional[Tensor]
291 | triplet_node_index, multiplicity, edge_jk, batch_triplets = calc_triplets(
292 | edge_index_abc,
293 | shift_pos=shift_abc,
294 | dtype=pos.dtype,
295 | batch_edge=batch_edge_abc,
296 | )
297 | batch_triplets = None if batch_edge is None else batch_triplets
298 |
299 | # Apply `cnthr` cutoff threshold for r_kj
300 | idx_j, idx_k = triplet_node_index[:, 1], triplet_node_index[:, 2]
301 | shift_jk = (
302 | None if shift_abc is None else shift_abc[edge_jk[:, 0]] - shift_abc[edge_jk[:, 1]]
303 | )
304 | r_jk = calc_distances(pos, torch.stack([idx_j, idx_k], dim=0), cell, shift_jk)
305 | kj_within_cutoff = r_jk <= cnthr
306 | del shift_jk
307 |
308 | triplet_node_index = triplet_node_index[kj_within_cutoff]
309 | multiplicity, edge_jk, batch_triplets = (
310 | multiplicity[kj_within_cutoff],
311 | edge_jk[kj_within_cutoff],
312 | None if batch_triplets is None else batch_triplets[kj_within_cutoff],
313 | )
314 |
315 | idx_i, idx_j, idx_k = (
316 | triplet_node_index[:, 0],
317 | triplet_node_index[:, 1],
318 | triplet_node_index[:, 2],
319 | )
320 | shift_ij = None if shift_abc is None else -shift_abc[edge_jk[:, 0]]
321 | shift_ik = None if shift_abc is None else -shift_abc[edge_jk[:, 1]]
322 |
323 | r_ij = calc_distances(pos, torch.stack([idx_i, idx_j], dim=0), cell, shift_ij)
324 | r_ik = calc_distances(pos, torch.stack([idx_i, idx_k], dim=0), cell, shift_ik)
325 | r_jk = r_jk[kj_within_cutoff]
326 |
327 | Zti, Ztj, Ztk = Z[idx_i], Z[idx_j], Z[idx_k]
328 | rrjk, rrij, rrik = r0ab[Ztk, Ztj] / r_jk, r0ab[Ztj, Zti] / r_ij, r0ab[Zti, Ztk] / r_ik
329 | rr3_jk, rr3_ij, rr3_ik = (
330 | (1.0 / rrjk) ** (1.0 / 3.0),
331 | (1.0 / rrij) ** (1.0 / 3.0),
332 | (1.0 / rrik) ** (1.0 / 3.0),
333 | )
334 | rav = (4.0 / 3.0) / (rr3_jk * rr3_ij * rr3_ik)
335 | alp = params["alp"]
336 | alp8 = alp + 2.0
337 | damp = 1.0 / (1.0 + 6.0 * rav ** alp8)
338 |
339 | c6_mem = torch.zeros((n_atoms, n_atoms), dtype=c6.dtype, device=c6.device)
340 | c6_mem[edge_index[0], edge_index[1]] = c6
341 | c6_mem[edge_index[1], edge_index[0]] = c6
342 |
343 | c9 = torch.sqrt(c6_mem[idx_k, idx_j] * c6_mem[idx_j, idx_i] * c6_mem[idx_i, idx_k])
344 | r2ik, r2jk, r2ij = r_ik ** 2, r_jk ** 2, r_ij ** 2
345 | t1 = r2jk + r2ij - r2ik
346 | t2 = r2ij + r2ik - r2jk
347 | t3 = r2ik + r2jk - r2ij
348 | tmp2 = r2ik * r2jk * r2ij
349 |         ang = (0.375 * t1 * t2 * t3 / tmp2 + 1.0) / (tmp2 ** 1.5)  # ATM angular factor: (3*cos(a)*cos(b)*cos(c) + 1) / (r_ij * r_jk * r_ik)^3
350 | e3 = damp * c9 * ang / multiplicity
351 |
352 | # ---------------------------------------------------------------
353 | # TODO: support cutoff_smoothing
354 | if batch_edge is None:
355 | e6abc = e3.to(torch.float64).sum()
356 | g += e6abc
357 | else:
358 | g.scatter_add_(0, batch_triplets, e3.to(torch.float64))
359 | return g # (n_graphs,)
360 |
--------------------------------------------------------------------------------
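The two-body branch above implements the standard DFT-D3 damping functions ("bj"/"bjm" and "zero"/"zerom"), and the `abc` branch adds the Axilrod-Teller-Muto 3-body term. As a rough standalone illustration of how the two damping choices behave at short range (a minimal sketch that does not touch the module's own parameter tensors; the C6/C8 coefficients and the r0 radius below are made-up numbers, while a1/a2 and rs6/alp are the "pbe" values tabulated in dftd3_xc_params.py):

import torch

r = torch.linspace(1.0, 10.0, 10)      # interatomic distances (Bohr), made up for illustration
c6 = torch.tensor(10.0)                # made-up C6 coefficient
c8 = torch.tensor(200.0)               # made-up C8 coefficient

# Becke-Johnson ("bj") damping of the r^-6 term: stays finite as r -> 0.
a1, a2 = 0.4289, 4.4407                # rs6 / rs18 for xc="pbe", damping="bj"
tmp6 = (a1 * torch.sqrt(c8 / c6) + a2) ** 6
e6_bj = 1.0 / (r ** 6 + tmp6)

# Zero ("zero") damping of the same term: damped all the way to zero at short range.
rs6, alp6 = 1.217, 14.0                # rs6 / alp for xc="pbe", damping="zero"
r0 = torch.tensor(3.0)                 # made-up cutoff radius standing in for r0ab[Zi, Zj]
e6_zero = 1.0 / (1.0 + 6.0 * (rs6 * r0 / r) ** alp6) / r ** 6

# In the dispersion function above, both variants are then scaled by -0.5 * s6 * c6
# per directed edge and scatter-added into a per-graph energy in float64.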
/torch_dftd/dftd3_xc_params.py:
--------------------------------------------------------------------------------
1 | from typing import Dict
2 |
3 |
4 | def get_dftd3_default_params(
5 | damping: str = "zero", xc: str = "pbe", tz: bool = False, old: bool = False
6 | ) -> Dict[str, float]:
7 |     """Get DFT-D3 default parameters for the specified damping method & exchange-correlation functional.
8 |
9 | Args:
10 |         damping (str): damping method. [zero, bj, zerom, bjm, dftd2] are supported.
11 | xc (str): exchange-correlation functional.
12 |         tz (bool): Use special parameters for TZ-type calculations.
13 |             Only effective when damping="zero".
14 |         old (bool): Use the DFT-D2 parameter set (requires damping="zero").
15 |
16 | Returns:
17 | params (Dict): Parameters for s6, rs6, s18, rs18, alp.
18 | """
19 | if old:
20 | assert damping == "zero", "Only zero damping is supported in DFT-D2"
21 | damping = "dftd2"
22 | if damping == "bjm": # version 6 of Original DFTD3
23 | # s6, rs6, s18, rs18, alp is used.
24 | s6 = 1.0
25 | alp = 14.0
26 | if xc == "b2-plyp":
27 | rs6 = 0.486434
28 | s18 = 0.672820
29 | rs18 = 3.656466
30 | s6 = 0.640000
31 | elif xc == "b3-lyp":
32 | rs6 = 0.278672
33 | s18 = 1.466677
34 | rs18 = 4.606311
35 | elif xc == "b97-d":
36 | rs6 = 0.240184
37 | s18 = 1.206988
38 | rs18 = 3.864426
39 | elif xc == "b-lyp":
40 | rs6 = 0.448486
41 | s18 = 1.875007
42 | rs18 = 3.610679
43 | elif xc == "b-p":
44 | rs6 = 0.821850
45 | s18 = 3.140281
46 | rs18 = 2.728151
47 | elif xc == "pbe":
48 | rs6 = 0.012092
49 | s18 = 0.358940
50 | rs18 = 5.938951
51 | elif xc == "pbe0":
52 | rs6 = 0.007912
53 | s18 = 0.528823
54 | rs18 = 6.162326
55 | elif xc == "lc-wpbe":
56 | rs6 = 0.563761
57 | s18 = 0.906564
58 | rs18 = 3.593680
59 | else:
60 | raise ValueError(f"[ERROR] Unexpected value xc={xc}")
61 | elif damping == "zerom": # version 5
62 | # s6, rs6, s18, rs18, alp is used.
63 | s6 = 1.0
64 | alp = 14.0
65 | if xc == "b2-plyp":
66 | rs6 = 1.313134
67 | s18 = 0.717543
68 | rs18 = 0.016035
69 | s6 = 0.640000
70 | elif xc == "b3-lyp":
71 | rs6 = 1.338153
72 | s18 = 1.532981
73 | rs18 = 0.013988
74 | elif xc == "b97-d":
75 | rs6 = 1.151808
76 | s18 = 1.020078
77 | rs18 = 0.035964
78 | elif xc == "b-lyp":
79 | rs6 = 1.279637
80 | s18 = 1.841686
81 | rs18 = 0.014370
82 | elif xc == "b-p":
83 | rs6 = 1.233460
84 | s18 = 1.945174
85 | rs18 = 0.000000
86 | elif xc == "pbe":
87 | rs6 = 2.340218
88 | s18 = 0.000000
89 | rs18 = 0.129434
90 | elif xc == "pbe0":
91 | rs6 = 2.077949
92 | s18 = 0.000081
93 | rs18 = 0.116755
94 | elif xc == "lc-wpbe":
95 | rs6 = 1.366361
96 | s18 = 1.280619
97 | rs18 = 0.003160
98 | else:
99 | raise ValueError(f"[ERROR] Unexpected value xc={xc}")
100 | elif damping == "bj":
101 | # version 4, Becke-Johnson finite-damping, variant 2 with their radii
102 | # s6, rs6, s18, rs18, alp is used.
103 | s6 = 1.0
104 | alp = 14.0
105 | if xc == "b-p":
106 | rs6 = 0.3946
107 | s18 = 3.2822
108 | rs18 = 4.8516
109 | elif xc == "b-lyp":
110 | rs6 = 0.4298
111 | s18 = 2.6996
112 | rs18 = 4.2359
113 | elif xc == "revpbe":
114 | rs6 = 0.5238
115 | s18 = 2.3550
116 | rs18 = 3.5016
117 | elif xc == "rpbe":
118 | rs6 = 0.1820
119 | s18 = 0.8318
120 | rs18 = 4.0094
121 | elif xc == "b97-d":
122 | rs6 = 0.5545
123 | s18 = 2.2609
124 | rs18 = 3.2297
125 | elif xc == "pbe":
126 | rs6 = 0.4289
127 | s18 = 0.7875
128 | rs18 = 4.4407
129 | elif xc == "rpw86-pbe":
130 | rs6 = 0.4613
131 | s18 = 1.3845
132 | rs18 = 4.5062
133 | elif xc == "b3-lyp":
134 | rs6 = 0.3981
135 | s18 = 1.9889
136 | rs18 = 4.4211
137 | elif xc == "tpss":
138 | rs6 = 0.4535
139 | s18 = 1.9435
140 | rs18 = 4.4752
141 | elif xc == "hf":
142 | rs6 = 0.3385
143 | s18 = 0.9171
144 | rs18 = 2.8830
145 | elif xc == "tpss0":
146 | rs6 = 0.3768
147 | s18 = 1.2576
148 | rs18 = 4.5865
149 | elif xc == "pbe0":
150 | rs6 = 0.4145
151 | s18 = 1.2177
152 | rs18 = 4.8593
153 | elif xc == "hse06":
154 | rs6 = 0.383
155 | s18 = 2.310
156 | rs18 = 5.685
157 | elif xc == "revpbe38":
158 | rs6 = 0.4309
159 | s18 = 1.4760
160 | rs18 = 3.9446
161 | elif xc == "pw6b95":
162 | rs6 = 0.2076
163 | s18 = 0.7257
164 | rs18 = 6.3750
165 | elif xc == "b2-plyp":
166 | rs6 = 0.3065
167 | s18 = 0.9147
168 | rs18 = 5.0570
169 | s6 = 0.64
170 | elif xc == "dsd-blyp":
171 | rs6 = 0.0000
172 | s18 = 0.2130
173 | rs18 = 6.0519
174 | s6 = 0.50
175 | elif xc == "dsd-blyp-fc":
176 | rs6 = 0.0009
177 | s18 = 0.2112
178 | rs18 = 5.9807
179 | s6 = 0.50
180 | elif xc == "bop":
181 | rs6 = 0.4870
182 | s18 = 3.2950
183 | rs18 = 3.5043
184 | elif xc == "mpwlyp":
185 | rs6 = 0.4831
186 | s18 = 2.0077
187 | rs18 = 4.5323
188 | elif xc == "o-lyp":
189 | rs6 = 0.5299
190 | s18 = 2.6205
191 | rs18 = 2.8065
192 | elif xc == "pbesol":
193 | rs6 = 0.4466
194 | s18 = 2.9491
195 | rs18 = 6.1742
196 | elif xc == "bpbe":
197 | rs6 = 0.4567
198 | s18 = 4.0728
199 | rs18 = 4.3908
200 | elif xc == "opbe":
201 | rs6 = 0.5512
202 | s18 = 3.3816
203 | rs18 = 2.9444
204 | elif xc == "ssb":
205 | rs6 = -0.0952
206 | s18 = -0.1744
207 | rs18 = 5.2170
208 | elif xc == "revssb":
209 | rs6 = 0.4720
210 | s18 = 0.4389
211 | rs18 = 4.0986
212 | elif xc == "otpss":
213 | rs6 = 0.4634
214 | s18 = 2.7495
215 | rs18 = 4.3153
216 | elif xc == "b3pw91":
217 | rs6 = 0.4312
218 | s18 = 2.8524
219 | rs18 = 4.4693
220 | elif xc == "bh-lyp":
221 | rs6 = 0.2793
222 | s18 = 1.0354
223 | rs18 = 4.9615
224 | elif xc == "revpbe0":
225 | rs6 = 0.4679
226 | s18 = 1.7588
227 | rs18 = 3.7619
228 | elif xc == "tpssh":
229 | rs6 = 0.4529
230 | s18 = 2.2382
231 | rs18 = 4.6550
232 | elif xc == "mpw1b95":
233 | rs6 = 0.1955
234 | s18 = 1.0508
235 | rs18 = 6.4177
236 | elif xc == "pwb6k":
237 | rs6 = 0.1805
238 | s18 = 0.9383
239 | rs18 = 7.7627
240 | elif xc == "b1b95":
241 | rs6 = 0.2092
242 | s18 = 1.4507
243 | rs18 = 5.5545
244 | elif xc == "bmk":
245 | rs6 = 0.1940
246 | s18 = 2.0860
247 | rs18 = 5.9197
248 | elif xc == "cam-b3lyp":
249 | rs6 = 0.3708
250 | s18 = 2.0674
251 | rs18 = 5.4743
252 | elif xc == "lc-wpbe":
253 | rs6 = 0.3919
254 | s18 = 1.8541
255 | rs18 = 5.0897
256 | elif xc == "b2gp-plyp":
257 | rs6 = 0.0000
258 | s18 = 0.2597
259 | rs18 = 6.3332
260 | s6 = 0.560
261 | elif xc == "ptpss":
262 | rs6 = 0.0000
263 | s18 = 0.2804
264 | rs18 = 6.5745
265 | s6 = 0.750
266 | elif xc == "pwpb95":
267 | rs6 = 0.0000
268 | s18 = 0.2904
269 | rs18 = 7.3141
270 | s6 = 0.820
271 | # special HF/DFT with eBSSE correction
272 | elif xc == "hf/mixed":
273 | rs6 = 0.5607
274 | s18 = 3.9027
275 | rs18 = 4.5622
276 | elif xc == "hf/sv":
277 | rs6 = 0.4249
278 | s18 = 2.1849
279 | rs18 = 4.2783
280 | elif xc == "hf/minis":
281 | rs6 = 0.1702
282 | s18 = 0.9841
283 | rs18 = 3.8506
284 | elif xc == "b3-lyp/6-31gd":
285 | rs6 = 0.5014
286 | s18 = 4.0672
287 | rs18 = 4.8409
288 | elif xc == "hcth120":
289 | rs6 = 0.3563
290 | s18 = 1.0821
291 | rs18 = 4.3359
292 | # DFTB3 old, deprecated parameters:
293 | # elif xc == "dftb3":
294 | # rs6=0.7461
295 | # s18=3.209
296 | # rs18=4.1906
297 | # special SCC-DFTB parametrization
298 | # full third order DFTB, self consistent charges, hydrogen pair damping with
299 | # exponent 4.2
300 | elif xc == "dftb3":
301 | rs6 = 0.5719
302 | s18 = 0.5883
303 | rs18 = 3.6017
304 | elif xc == "pw1pw":
305 | rs6 = 0.3807
306 | s18 = 2.3363
307 | rs18 = 5.8844
308 | elif xc == "pwgga":
309 | rs6 = 0.2211
310 | s18 = 2.6910
311 | rs18 = 6.7278
312 | elif xc == "hsesol":
313 | rs6 = 0.4650
314 | s18 = 2.9215
315 | rs18 = 6.2003
316 | # special HF-D3-gCP-SRB/MINIX parametrization
317 | elif xc == "hf3c":
318 | rs6 = 0.4171
319 | s18 = 0.8777
320 | rs18 = 2.9149
321 | # special HF-D3-gCP-SRB2/ECP-2G parametrization
322 | elif xc == "hf3cv":
323 | rs6 = 0.3063
324 | s18 = 0.5022
325 | rs18 = 3.9856
326 | # special PBEh-D3-gCP/def2-mSVP parametrization
327 | elif xc in ["pbeh3c", "pbeh-3c"]:
328 | rs6 = 0.4860
329 | s18 = 0.0000
330 | rs18 = 4.5000
331 | else:
332 | raise ValueError(f"[ERROR] Unexpected value xc={xc}")
333 | elif damping == "zero":
334 | # s6, s18, rs6, rs18, alp is used.
335 | s6 = 1.0
336 | rs18 = 1.0
337 | alp = 14.0
338 | if not tz:
339 | if xc == "slater-dirac-exchange":
340 | rs6 = 0.999
341 | s18 = -1.957
342 | rs18 = 0.697
343 | elif xc == "b-lyp":
344 | rs6 = 1.094
345 | s18 = 1.682
346 | elif xc == "b-p":
347 | rs6 = 1.139
348 | s18 = 1.683
349 | elif xc == "b97-d":
350 | rs6 = 0.892
351 | s18 = 0.909
352 | elif xc == "revpbe":
353 | rs6 = 0.923
354 | s18 = 1.010
355 | elif xc == "pbe":
356 | rs6 = 1.217
357 | s18 = 0.722
358 | elif xc == "pbesol":
359 | rs6 = 1.345
360 | s18 = 0.612
361 | elif xc == "rpw86-pbe":
362 | rs6 = 1.224
363 | s18 = 0.901
364 | elif xc == "rpbe":
365 | rs6 = 0.872
366 | s18 = 0.514
367 | elif xc == "tpss":
368 | rs6 = 1.166
369 | s18 = 1.105
370 | elif xc == "b3-lyp":
371 | rs6 = 1.261
372 | s18 = 1.703
373 | elif xc == "pbe0":
374 | rs6 = 1.287
375 | s18 = 0.928
376 |
377 | elif xc == "hse06":
378 | rs6 = 1.129
379 | s18 = 0.109
380 | elif xc == "revpbe38":
381 | rs6 = 1.021
382 | s18 = 0.862
383 | elif xc == "pw6b95":
384 | rs6 = 1.532
385 | s18 = 0.862
386 | elif xc == "tpss0":
387 | rs6 = 1.252
388 | s18 = 1.242
389 | elif xc == "b2-plyp":
390 | rs6 = 1.427
391 | s18 = 1.022
392 | s6 = 0.64
393 | elif xc == "pwpb95":
394 | rs6 = 1.557
395 | s18 = 0.705
396 | s6 = 0.82
397 | elif xc == "b2gp-plyp":
398 | rs6 = 1.586
399 | s18 = 0.760
400 | s6 = 0.56
401 | elif xc == "ptpss":
402 | rs6 = 1.541
403 | s18 = 0.879
404 | s6 = 0.75
405 | elif xc == "hf":
406 | rs6 = 1.158
407 | s18 = 1.746
408 | elif xc == "mpwlyp":
409 | rs6 = 1.239
410 | s18 = 1.098
411 | elif xc == "bpbe":
412 | rs6 = 1.087
413 | s18 = 2.033
414 | elif xc == "bh-lyp":
415 | rs6 = 1.370
416 | s18 = 1.442
417 | elif xc == "tpssh":
418 | rs6 = 1.223
419 | s18 = 1.219
420 | elif xc == "pwb6k":
421 | rs6 = 1.660
422 | s18 = 0.550
423 | elif xc == "b1b95":
424 | rs6 = 1.613
425 | s18 = 1.868
426 | elif xc == "bop":
427 | rs6 = 0.929
428 | s18 = 1.975
429 | elif xc == "o-lyp":
430 | rs6 = 0.806
431 | s18 = 1.764
432 | elif xc == "o-pbe":
433 | rs6 = 0.837
434 | s18 = 2.055
435 | elif xc == "ssb":
436 | rs6 = 1.215
437 | s18 = 0.663
438 | elif xc == "revssb":
439 | rs6 = 1.221
440 | s18 = 0.560
441 | elif xc == "otpss":
442 | rs6 = 1.128
443 | s18 = 1.494
444 | elif xc == "b3pw91":
445 | rs6 = 1.176
446 | s18 = 1.775
447 | elif xc == "revpbe0":
448 | rs6 = 0.949
449 | s18 = 0.792
450 | elif xc == "pbe38":
451 | rs6 = 1.333
452 | s18 = 0.998
453 | elif xc == "mpw1b95":
454 | rs6 = 1.605
455 | s18 = 1.118
456 | elif xc == "mpwb1k":
457 | rs6 = 1.671
458 | s18 = 1.061
459 | elif xc == "bmk":
460 | rs6 = 1.931
461 | s18 = 2.168
462 | elif xc == "cam-b3lyp":
463 | rs6 = 1.378
464 | s18 = 1.217
465 | elif xc == "lc-wpbe":
466 | rs6 = 1.355
467 | s18 = 1.279
468 | elif xc == "m05":
469 | rs6 = 1.373
470 | s18 = 0.595
471 | elif xc == "m052x":
472 | rs6 = 1.417
473 | s18 = 0.000
474 | elif xc == "m06l":
475 | rs6 = 1.581
476 | s18 = 0.000
477 | elif xc == "m06":
478 | rs6 = 1.325
479 | s18 = 0.000
480 | elif xc == "m062x":
481 | rs6 = 1.619
482 | s18 = 0.000
483 | elif xc == "m06hf":
484 | rs6 = 1.446
485 | s18 = 0.000
486 | # DFTB3 (zeta=4.0), old deprecated parameters
487 | # elif xc == "dftb3":
488 | # rs6=1.235
489 | # s18=0.673
490 | elif xc == "hcth120":
491 | rs6 = 1.221
492 | s18 = 1.206
493 | else:
494 | raise ValueError(f"[ERROR] Unexpected value xc={xc}")
495 | else:
496 | # special TZVPP parameter
497 | if xc == "b-lyp":
498 | rs6 = 1.243
499 | s18 = 2.022
500 | elif xc == "b-p":
501 | rs6 = 1.221
502 | s18 = 1.838
503 | elif xc == "b97-d":
504 | rs6 = 0.921
505 | s18 = 0.894
506 | elif xc == "revpbe":
507 | rs6 = 0.953
508 | s18 = 0.989
509 | elif xc == "pbe":
510 | rs6 = 1.277
511 | s18 = 0.777
512 | elif xc == "tpss":
513 | rs6 = 1.213
514 | s18 = 1.176
515 | elif xc == "b3-lyp":
516 | rs6 = 1.314
517 | s18 = 1.706
518 | elif xc == "pbe0":
519 | rs6 = 1.328
520 | s18 = 0.926
521 | elif xc == "pw6b95":
522 | rs6 = 1.562
523 | s18 = 0.821
524 | elif xc == "tpss0":
525 | rs6 = 1.282
526 | s18 = 1.250
527 | elif xc == "b2-plyp":
528 | rs6 = 1.551
529 | s18 = 1.109
530 | s6 = 0.5
531 | else:
532 | raise ValueError(f"[ERROR] Unexpected value xc={xc}")
533 | elif damping == "dftd2": # version 2, "old=True"
534 | # s6, rs6, s18, alp is used.
535 | rs6 = 1.1
536 | s18 = 0.0
537 | alp = 20.0
538 | rs18 = None # Not used.
539 | if xc == "b-lyp":
540 | s6 = 1.2
541 | elif xc == "b-p":
542 | s6 = 1.05
543 | elif xc == "b97-d":
544 | s6 = 1.25
545 | elif xc == "revpbe":
546 | s6 = 1.25
547 | elif xc == "pbe":
548 | s6 = 0.75
549 | elif xc == "tpss":
550 | s6 = 1.0
551 | elif xc == "b3-lyp":
552 | s6 = 1.05
553 | elif xc == "pbe0":
554 | s6 = 0.6
555 | elif xc == "pw6b95":
556 | s6 = 0.5
557 | elif xc == "tpss0":
558 | s6 = 0.85
559 | elif xc == "b2-plyp":
560 | s6 = 0.55
561 | elif xc == "b2gp-plyp":
562 | s6 = 0.4
563 | elif xc == "dsd-blyp":
564 | s6 = 0.41
565 | alp = 60.0
566 | else:
567 | raise ValueError(f"[ERROR] Unexpected value xc={xc}")
568 | else:
569 | raise ValueError(f"[ERROR] damping={damping} not supported.")
570 | return {"s6": s6, "rs6": rs6, "s18": s18, "rs18": rs18, "alp": alp}
571 |
--------------------------------------------------------------------------------
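For reference, a short usage sketch of the function above (the import path follows the repository layout; the values in the comments correspond to the branches listed in the source):

from torch_dftd.dftd3_xc_params import get_dftd3_default_params

# Becke-Johnson damping parameters for the PBE functional.
params = get_dftd3_default_params(damping="bj", xc="pbe")
# -> {"s6": 1.0, "rs6": 0.4289, "s18": 0.7875, "rs18": 4.4407, "alp": 14.0}

# old=True selects the DFT-D2 parameter set (and requires damping="zero"); "rs18" is returned as None.
params_d2 = get_dftd3_default_params(damping="zero", xc="pbe", old=True)
# -> {"s6": 0.75, "rs6": 1.1, "s18": 0.0, "rs18": None, "alp": 20.0}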