├── .pylintrc
├── .flake8
├── setup.cfg
├── docs
├── argon-lj.json
├── ode.png
├── ag-lj.png
├── argon.db
├── lj-vs-dft.png
├── restored-result.png
├── sensitivity-analysis.png
├── dap.tf.hooke.org
├── dap.xsf.org
├── dap.tf.lennardjones.org
├── dap.py.bpnn.org
├── dap.ag.org
├── requirements.txt
├── .gitignore
├── development.org
├── dap
├── __init__.py
├── tests
│ ├── __init__.py
│ ├── test_tf_chebyshev.py
│ ├── test_tf_hook.py
│ ├── test_py_bpnn.py
│ ├── test_ag_emt.py
│ ├── test_ag.py
│ ├── test_tf_lennardjones.py
│ ├── test_tf_utils.py
│ └── test_tf_neighborlist.py
├── tf
│ ├── __init__.py
│ ├── ipython.py
│ ├── chebyshev.py
│ ├── hooke.py
│ ├── visualize.py
│ ├── utils.py
│ ├── neighborlist.py
│ └── lennardjones.py
├── py
│ ├── __init__.py
│ ├── xsf.py
│ └── bpnn.py
└── ag
│ ├── __init__.py
│ ├── lennardjones.py
│ ├── emt.py
│ └── neighborlist.py
├── CONTRIBUTING.md
├── setup.py
├── .travis.yml
├── dap.el
├── README.org
└── LICENSE
/.pylintrc:
--------------------------------------------------------------------------------
1 | [FORMAT]
2 | indent-string="  "
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | [flake8]
2 | ignore = E111
3 | max-line-length = 80
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [flake8]
2 | ignore = E111
3 | max-line-length = 80
--------------------------------------------------------------------------------
/docs/argon-lj.json:
--------------------------------------------------------------------------------
1 | {"sigma": 3.7112412621703967, "epsilon": 0.005270791268325109}
--------------------------------------------------------------------------------
/docs/ode.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/differentiable-atomistic-potentials/HEAD/docs/ode.png
--------------------------------------------------------------------------------
/docs/ag-lj.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/differentiable-atomistic-potentials/HEAD/docs/ag-lj.png
--------------------------------------------------------------------------------
/docs/argon.db:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/differentiable-atomistic-potentials/HEAD/docs/argon.db
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | scipy
3 | matplotlib
4 | nose
5 | ase
6 | ipython
7 | tensorflow
8 | graphviz
9 |
--------------------------------------------------------------------------------
/docs/lj-vs-dft.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/differentiable-atomistic-potentials/HEAD/docs/lj-vs-dft.png
--------------------------------------------------------------------------------
/docs/restored-result.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/differentiable-atomistic-potentials/HEAD/docs/restored-result.png
--------------------------------------------------------------------------------
/docs/sensitivity-analysis.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/google/differentiable-atomistic-potentials/HEAD/docs/sensitivity-analysis.png
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.pyo
3 | __pycache__
4 | *.egg-info
5 | *.eggs
6 | release.org
7 |
8 | *.meta
9 | *.index
10 | *.data*
11 | checkpoint
--------------------------------------------------------------------------------
/development.org:
--------------------------------------------------------------------------------
1 | * Tests
2 |
3 | In the project directory you can run these commands to run the tests.
4 |
5 | #+BEGIN_SRC sh
6 | python setup.py test
7 | #+END_SRC
8 |
9 | or
10 |
11 | #+BEGIN_SRC sh
12 | python -m unittest discover
13 | #+END_SRC
14 |
--------------------------------------------------------------------------------
/dap/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/dap/tests/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/dap/tf/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
--------------------------------------------------------------------------------
/dap/py/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Pure python modules related to this project.
15 |
16 | These are not differentiable, but provide benchmark functions.
17 | """
18 |
--------------------------------------------------------------------------------
/docs/dap.tf.hooke.org:
--------------------------------------------------------------------------------
1 | #+TITLE: Hookean Spring in Tensorflow
2 |
3 | This is a model calculator to show how to use Tensorflow in an ASE style calculator.
4 |
5 | Here is an example usage.
6 |
7 | #+BEGIN_SRC python :results output org drawer
8 | import numpy as np
9 | from ase.build import molecule
10 | from dap.tf.hooke import HookeanSpring
11 |
12 |
13 | atoms = molecule('H2')
14 | atoms.set_calculator(HookeanSpring())
15 |
16 | print('Analytical energy: ', 0.5 * 1.0 * (1 - atoms.get_distance(0, 1))**2)
17 | print('Hookean energy: ', atoms.get_potential_energy())
18 |
19 | print('Analytical force magnitude: ', np.abs(-1.0 * (1 - atoms.get_distance(0, 1))))
20 | print('Hookean forces: ', [np.linalg.norm(f)
21 | for f in atoms.get_forces()])
22 | #+END_SRC
23 |
24 | #+RESULTS:
25 | :RESULTS:
26 | Analytical energy: 0.034540855778
27 | Hookean energy: 0.034540855778
28 | Analytical force magnitude: 0.262834
29 | Hookean forces: [0.262834, 0.262834]
30 | :END:
31 |
--------------------------------------------------------------------------------
/dap/ag/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | """Autograd modules.
16 |
17 | These are mostly experimental implementations of differentiable potentials using
18 | autograd (https://github.com/HIPS/autograd).
19 |
20 | Autograd is essentially limited to functions. It does not support gradients on
21 | classes. Autograd is built on top of numpy though, which makes it an all Python
22 | approach to differentiable programming that is pretty easy to use.
23 |
24 | """
25 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to Contribute
2 |
3 | We'd love to accept your patches and contributions to this project. There are
4 | just a few small guidelines you need to follow.
5 |
6 | ## Contributor License Agreement
7 |
8 | Contributions to this project must be accompanied by a Contributor License
9 | Agreement. You (or your employer) retain the copyright to your contribution;
10 | this simply gives us permission to use and redistribute your contributions as
11 | part of the project. Head over to to see
12 | your current agreements on file or to sign a new one.
13 |
14 | You generally only need to submit a CLA once, so if you've already submitted one
15 | (even if it was for a different project), you probably don't need to do it
16 | again.
17 |
18 | ## Code reviews
19 |
20 | All submissions, including submissions by project members, require review. We
21 | use GitHub pull requests for this purpose. Consult
22 | [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
23 | information on using pull requests.
24 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from setuptools import setup, find_packages
16 | 
17 | with open('README.org') as f:
18 |   readme = f.read()
19 | 
20 | with open('LICENSE') as f:
21 |   license = f.read()  # NOTE(review): shadows the builtin and passes the full license text below; setuptools' `license` field expects a short identifier -- confirm
22 | 
23 | setup(
24 |     name='dap',
25 |     version='0.0.1',
26 |     description='Differentiable atomistic potentials',
27 |     long_description=readme,
28 |     author='John Kitchin',
29 |     author_email='kitchin@google.com',
30 |     license=license,
31 |     setup_requires=['nose>=1.0'],
32 |     data_files=['requirements.txt', 'LICENSE'],
33 |     packages=find_packages(exclude=('tests', 'docs')))
34 | 
35 | # python setup.py register to setup user
36 | # to push to pypi - (shell-command "python setup.py sdist upload")
37 | 
--------------------------------------------------------------------------------
/dap/tf/ipython.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Visualization utilities for tensorflow graphs."""
15 |
16 | import tensorflow as tf
17 | from graphviz import Digraph
18 |
19 |
20 | def tf_graph_to_dot(graph):
21 | ('Adapted from '
22 | 'https://blog.jakuba.net/2017/05/30/tensorflow-visualization.html')
23 | dot = Digraph()
24 |
25 | for n in g.as_graph_def().node:
26 | dot.node(n.name, label=n.name)
27 |
28 | for i in n.input:
29 | dot.edge(i, n.name)
30 | dot.format = 'svg'
31 | return dot.pipe().decode('utf-8')
32 |
33 |
34 | ip = get_ipython()  # NOTE(review): `get_ipython` is not imported -- assumes this module is imported from inside an IPython session; confirm
35 | svg_f = ip.display_formatter.formatters['image/svg+xml']
36 | svg_f.for_type_by_name('tensorflow.python.framework.ops', 'Graph',
37 |                        tf_graph_to_dot)
38 |
--------------------------------------------------------------------------------
/dap/tests/test_tf_chebyshev.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import numpy as np
16 | import tensorflow as tf
17 |
18 | from dap.tf.chebyshev import (chebvander_py, chebvander)
19 | import os
20 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
21 |
22 |
23 | class TestTFChebyshev(tf.test.TestCase):
24 |   """Check the python and TF chebvander ports against numpy's reference."""
25 |   def test_chebvander_py(self):
26 |     """Test the python implementation."""
27 |     x = np.linspace(0, 1, 11)
28 | 
29 |     for deg in range(1, 49):
30 |       ref = np.polynomial.chebyshev.chebvander(x, deg)
31 |       Tn = chebvander_py(x, deg)
32 |       self.assertTrue(np.allclose(ref, Tn))
33 | 
34 |   def test_chebvander_tf(self):
35 |     """Test the Tensorflow implementation."""
36 |     x = np.linspace(0, 1, 11)
37 | 
38 |     with self.test_session():
39 |       for deg in range(1, 49):
40 |         ref = np.polynomial.chebyshev.chebvander(x, deg)
41 |         Tn = chebvander(x, deg)
42 |         self.assertTrue(np.allclose(ref, Tn.eval()))
43 | 
43 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | # adapted from https://conda.io/docs/travis.html
2 |
3 | group: deprecated-2017Q4
4 | dist: trusty
5 | sudo: required
6 | language: python
7 | python:
8 | # We don't actually use the Travis Python, but this keeps it organized.
9 | # - "2.7"
10 | # - "3.5"
11 | - "3.6"
12 | install:
13 | - sudo apt-get update
14 | - sudo apt-get install gfortran
15 | # We do this conditionally because it saves us some downloading if the
16 | # version is the same.
17 | - if [[ "$TRAVIS_PYTHON_VERSION" == "2.7" ]]; then
18 | wget https://repo.continuum.io/miniconda/Miniconda2-latest-Linux-x86_64.sh -O miniconda.sh;
19 | else
20 | wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh;
21 | fi
22 | - bash miniconda.sh -b -p $HOME/miniconda
23 | - export PATH="$HOME/miniconda/bin:$PATH"
24 | - hash -r
25 | - conda config --set always_yes yes --set changeps1 no
26 | - conda update -q conda
27 | # Useful for debugging any issues with conda
28 | - conda info -a
29 |
30 | # Replace dep1 dep2 ... with your dependencies
31 | - conda create -q -n test-environment python=$TRAVIS_PYTHON_VERSION numpy scipy matplotlib nose
32 | - source activate test-environment
33 | - python setup.py install
34 | - pip install python-coveralls nose-cov coverage
35 | - pip install ase
36 | - pip install autograd
37 | - pip install tensorflow
38 | - pushd .
39 | # Installation of amp
40 | - sudo ln -s `which f2py` `which f2py`3
41 | - git clone https://bitbucket.org/andrewpeterson/amp.git
42 | - cd amp/amp
43 | - make python3
44 | - cd ..
45 | - pip install -e .
46 | - popd
47 |
48 | # Run test
49 | script:
50 | - pwd
51 | - nosetests -v --with-coverage
52 |
53 | after_success:
54 | coveralls
55 |
--------------------------------------------------------------------------------
/dap/tf/chebyshev.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | """Chebyshev module for tensorflow.
16 |
17 | This implementation was inspired by https://arxiv.org/pdf/1511.09199v1.pdf.
18 |
19 |
20 | """
21 |
22 | import numpy as np
23 | import tensorflow as tf
24 |
25 |
26 | def chebvander_py(x, deg):
27 |   """python implementation of pydoc:numpy.polynomial.chebyshev.chebvander.
28 |
29 |   This is not intended for usage. It is here to document a transition to the
30 |   tensorflow function below.
31 |
32 |   """
33 |   x = np.array(x)
34 |   v = [x * 0 + 1]  # T_0(x) = 1, broadcast to the shape of x
35 |   # Use forward recursion to generate the entries.
36 | 
37 |   x2 = 2 * x
38 |   v += [x]  # T_1(x) = x
39 |   for i in range(2, deg + 1):
40 |     v += [v[i - 1] * x2 - v[i - 2]]  # T_n(x) = 2x T_{n-1}(x) - T_{n-2}(x)
41 |   v = np.stack(v)
42 | 
43 |   roll = list(np.arange(v.ndim))[1:] + [0]  # cycle axes so the degree axis moves last
44 |   return v.transpose(roll)
45 |
46 |
47 | def chebvander(x, deg):
48 |   """TF implementation of pydoc:numpy.polynomial.chebyshev.chebvander."""
49 |   x = tf.convert_to_tensor(x)
50 | 
51 |   v = [x * 0 + 1]  # T_0(x) = 1, broadcast to the shape of x
52 | 
53 |   x2 = 2 * x
54 |   v += [x]  # T_1(x) = x
55 | 
56 |   for i in range(2, deg + 1):
57 |     v += [v[i - 1] * x2 - v[i - 2]]  # T_n(x) = 2x T_{n-1}(x) - T_{n-2}(x)
58 | 
59 |   v = tf.stack(v)
60 | 
61 |   roll = tf.unstack(tf.range(len(v.get_shape().as_list())))  # axis indices [0..ndim-1]
62 |   roll = tf.stack([roll[1:], [roll[0]]], axis=1)  # NOTE(review): stacking requires equal lengths -- appears to assume 1-d x (ndim == 2); confirm
63 |   roll = tf.squeeze(roll)
64 |   v = tf.transpose(v, roll)  # degree axis moves last, matching numpy's layout
65 |   return v
66 |
--------------------------------------------------------------------------------
/dap.el:
--------------------------------------------------------------------------------
1 | ;;; dap.el --- Emacs setup for dap.
2 | ;; * Convenience function for tensorflow blocks in org-mode
3 |
4 | ;;; Commentary:
5 | ;;
6 |
7 | ;;; Code:
8 |
9 | (defalias 'org-babel-execute:tf 'org-babel-execute:python)
10 | (defalias 'org-babel-prep-session:tf 'org-babel-prep-session:python)
11 | (defalias 'org-babel-tf-initiate-session 'org-babel-python-initiate-session)
12 |
13 | (add-to-list 'org-src-lang-modes '("tf" . python))
14 | (add-to-list 'org-latex-minted-langs '(tf "python"))
15 |
16 | (setq org-src-block-faces '(("tf" (:background "#EEE2FF"))))
17 |
18 | (add-to-list 'org-structure-template-alist
19 | '("tf" "#+BEGIN_SRC tf :results output drawer org\nimport tensorflow as tf\n?\n\nwith tf.Session():\n #+END_SRC"
20 | "\n?\n"))
21 |
22 | (require 'color)
23 |
24 | (defface org-block-tf
25 | `((t (:background ,(color-lighten-name "LightSalmon1" 0.50))))
26 | "Face for tensorflow python blocks")
27 |
28 |
29 | (defun dap-insert-header ()
30 | (interactive)
31 | (goto-char (point-min))
32 | (insert "# Copyright 2018 Google Inc.
33 | #
34 | # Licensed under the Apache License, Version 2.0 (the \"License\");
35 | # you may not use this file except in compliance with the License.
36 | # You may obtain a copy of the License at
37 | #
38 | # http://www.apache.org/licenses/LICENSE-2.0
39 | #
40 | # Unless required by applicable law or agreed to in writing, software
41 | # distributed under the License is distributed on an \"AS IS\" BASIS,
42 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
43 | # See the License for the specific language governing permissions and
44 | # limitations under the License.
45 | "))
46 |
47 |
48 | (defun dap-run-test-in-shell ()
49 |   "Run the test that point is in in a shell.
50 | Assumes the test is in a class in a file in dap/tests/"
51 |   (interactive)
52 |   (let (cmd file class method)
53 |     (save-excursion
54 |       (re-search-backward "^class \\(.*\\)(")
55 |       (setq class (s-trim (match-string 1))))
56 |     (save-excursion
57 |       (re-search-backward "def \\(.*\\)(")
58 |       (setq method (match-string 1)))
59 |     (setq file (file-name-base (buffer-file-name)))
60 |     (setq cmd (format "python -m unittest dap.tests.%s.%s.%s" file class method))
61 |     (message "%s" (shell-command-to-string cmd))))  ; "%s" guards against % in test output being treated as format escapes
62 |
63 | (provide 'dap)
64 |
65 | ;;; dap.el ends here
66 |
--------------------------------------------------------------------------------
/dap/tests/test_tf_hook.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Tests for the tensorflow HookeanSpring module
15 |
16 | pydoc:dap.tf.hooke
17 | """
18 |
19 | import numpy as np
20 | import tensorflow as tf
21 | from ase.build import molecule
22 | from dap.tf.hooke import HookeanSpring
23 |
24 | import os
25 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
26 |
27 |
28 | class TestHookeanSpring(tf.test.TestCase):
29 | """Tests for the model HookeanSpring Calculator."""
30 |
31 | def test_default(self):
32 | """Test energy and forces with default settings."""
33 | atoms = molecule('H2')
34 | atoms.set_calculator(HookeanSpring())
35 | e0 = 0.5 * 1.0 * (1 - atoms.get_distance(0, 1))**2
36 | e1 = atoms.get_potential_energy()
37 | self.assertTrue(e0 == e1)
38 | fmag = np.abs(-1.0 * (1 - atoms.get_distance(0, 1)))
39 | f = atoms.get_forces()
40 | self.assertTrue(fmag == np.linalg.norm(f[0]))
41 | self.assertTrue(fmag == np.linalg.norm(f[1]))
42 |
43 | def test_custom(self):
44 | """Test energy and forces with custom settings."""
45 | atoms = molecule('H2')
46 | k, x0 = 1.5, 0.9
47 | atoms.set_calculator(HookeanSpring(k=k, x0=x0))
48 | e0 = 0.5 * k * (x0 - atoms.get_distance(0, 1))**2
49 | e1 = atoms.get_potential_energy()
50 | self.assertTrue(e0 == e1)
51 | fmag = np.abs(-k * (x0 - atoms.get_distance(0, 1)))
52 | f = atoms.get_forces()
53 | self.assertTrue(fmag == np.linalg.norm(f[0]))
54 | self.assertTrue(fmag == np.linalg.norm(f[1]))
55 |
56 | def test_custom(self):
57 | """Test energy and forces with custom settings."""
58 | atoms = molecule('H2O')
59 | atoms.set_calculator(HookeanSpring())
60 | with self.assertRaisesRegexp(Exception,
61 | 'You can only use a two atom systems'):
62 | e = atoms.get_potential_energy()
63 |
--------------------------------------------------------------------------------
/docs/dap.xsf.org:
--------------------------------------------------------------------------------
1 | #+TITLE: XSF utilities
2 |
3 | aenet uses a modified version of the [[http://www.xcrysden.org/doc/XSF.html][xsf]] file format as the starting point for training.
4 |
5 | #+BEGIN_SRC ipython
6 | from dap.py.xsf import write_xsf
7 | from ase.calculators.emt import EMT
8 | from ase.build import bulk
9 |
10 | atoms = bulk('Cu', 'fcc', a=3.6).repeat((2, 2, 2))
11 | atoms.rattle()
12 | atoms.set_calculator(EMT())
13 |
14 |
15 | print(write_xsf('0001.xsf', atoms))
16 | #+END_SRC
17 |
18 | #+RESULTS:
19 | :RESULTS:
20 | # Out[4]:
21 | # output
22 | : # total energy = -0.0534529034684148 eV
23 | :
24 | : CRYSTAL
25 | : PRIMVEC
26 | : 0.0 3.6 3.6
27 | : 3.6 0.0 3.6
28 | : 3.6 3.6 0.0
29 | : PRIMCOORD
30 | : 8 1
31 | : Cu 0.000496714153 -0.000138264301 0.000647688538 0.000318579371 -0.004676393488 -0.008212937162
32 | : Cu 1.801523029856 1.799765846625 -0.000234136957 -0.007259597937 0.005102141267 -0.006461956839
33 | : Cu 1.801579212816 0.000767434729 1.799530525614 -0.002357531707 -0.009663570323 0.003836331513
34 | : Cu 3.600542560044 1.799536582307 1.799534270246 -0.000747250509 0.003763298575 0.010677371668
35 | : Cu 0.000241962272 1.798086719755 1.798275082167 0.001160114038 0.014347070461 0.008689609585
36 | : Cu 1.799437712471 3.598987168880 1.800314247333 -0.001563826330 -0.004978775118 -0.000069725327
37 | : Cu 1.799091975924 1.798587696299 3.601465648769 0.006418871442 0.007411118538 -0.012801846980
38 | : Cu 3.599774223700 3.600067528205 3.598575251814 0.004030641631 -0.011304889912 0.004343153543
39 | :
40 | :END:
41 |
42 | Note that periodic boundary conditions are implied to be in effect (i.e. this will not read non-periodic xsf files at this time).
43 |
44 | #+BEGIN_SRC ipython
45 | from dap.py.xsf import read_xsf
46 |
47 | atoms = read_xsf('0001.xsf')
48 | print(atoms)
49 | print(atoms.get_potential_energy())
50 | print(atoms.get_forces())
51 | #+END_SRC
52 |
53 | #+RESULTS:
54 | :RESULTS:
55 | # Out[5]:
56 | # output
57 | : Atoms(symbols='Cu8', pbc=True, cell=[[0.0, 3.6, 3.6], [3.6, 0.0, 3.6], [3.6, 3.6, 0.0]], calculator=SinglePointCalculator(...))
58 | : -0.0534529034684148
59 | : [[ 3.18579371e-04 -4.67639349e-03 -8.21293716e-03]
60 | : [-7.25959794e-03 5.10214127e-03 -6.46195684e-03]
61 | : [-2.35753171e-03 -9.66357032e-03 3.83633151e-03]
62 | : [-7.47250509e-04 3.76329857e-03 1.06773717e-02]
63 | : [ 1.16011404e-03 1.43470705e-02 8.68960959e-03]
64 | : [-1.56382633e-03 -4.97877512e-03 -6.97253270e-05]
65 | : [ 6.41887144e-03 7.41111854e-03 -1.28018470e-02]
66 | : [ 4.03064163e-03 -1.13048899e-02 4.34315354e-03]]
67 | :
68 | :END:
69 |
--------------------------------------------------------------------------------
/dap/py/xsf.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """XSF file utilities.
15 | """
16 | import numpy as np
17 | import re
18 | import ase.io
19 |
20 |
21 | def read_xsf(xsfile):
22 |   """Return an atoms with energy and forces for the aenet xsfile."""
23 |   atoms = ase.io.read(xsfile)
24 |   calc = atoms.get_calculator()
25 | 
26 |   with open(xsfile, 'r') as f:
27 |     lines = f.readlines()
28 |     for i, line in enumerate(lines):
29 |       if line.startswith('# total energy'):
30 |         m = re.findall(r'[-+]?\d*\.\d+|\d+', line)
31 |         energy = float(m[0])  # first numeric token on the header line is the energy
32 |         break
33 |   calc.results['energy'] = energy
34 | 
35 |   forces = []
36 |   with open(xsfile, 'r') as f:
37 |     while True:
38 |       l = f.readline()
39 |       if not l:
40 |         break  # EOF reached without PRIMCOORD
41 |       if l.startswith('PRIMCOORD'):
42 |         break
43 |     count = int(f.readline().split()[0])  # PRIMCOORD header line is "<natoms> 1"
44 |     for i in range(count):
45 |       fields = f.readline().split()
46 |       forces += [[float(x) for x in fields[4:]]]  # skip symbol and xyz; rest are force components (see write_xsf format)
47 |   calc.results['forces'] = np.array(forces)
48 | 
49 |   return atoms
50 |
51 |
52 | def write_xsf(xsfile, atoms):
53 |   """Create an aenet compatible xsf file in FNAME for ATOMS.
54 | 
55 |   fname: a string for the filename.
56 |   atoms: an ase atoms object with an attached calculator containing energy and
57 |   forces.
58 | 
59 |   returns the string that is written to the file.
60 |   """
61 |   energy = atoms.get_potential_energy()
62 |   forces = atoms.get_forces()
63 | 
64 |   xsf = ['# total energy = {} eV'.format(energy), '']
65 | 
66 |   if True in atoms.pbc:  # periodic in at least one direction -> crystal format
67 |     xsf += ['CRYSTAL', 'PRIMVEC']
68 |     for v in atoms.get_cell():
69 |       xsf += ['{} {} {}'.format(*v)]
70 |     xsf += ['PRIMCOORD', '{} 1'.format(len(atoms))]
71 | 
72 |   else:
73 |     xsf += ['ATOMS']
74 | 
75 |   # One row per atom: symbol, x, y, z, fx, fy, fz.
76 |   S = ('{atom.symbol:<3s} {atom.x: .12f} {atom.y: .12f} {atom.z: .12f} {f[0]: '
77 |        '.12f} {f[1]: .12f} {f[2]: .12f}')
78 |   xsf += [S.format(atom=atom, f=forces[i]) for i, atom in enumerate(atoms)]
79 | 
80 |   output = '\n'.join(xsf)
81 |   with open(xsfile, 'w') as f:
82 |     f.write(output)
83 | 
84 |   return output
84 |
--------------------------------------------------------------------------------
/dap/tf/hooke.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """A prototype ASE calculator using Tensorflow."""
15 |
16 | import numpy as np
17 | import tensorflow as tf
18 | from ase.calculators.calculator import Calculator, all_changes
19 |
20 |
class HookeanSpring(Calculator):
  """An ASE calculator for a Hookean spring between exactly two atoms.

  The energy is 0.5 * k * (x - x0)**2, where x is the distance between the
  two atoms. Forces come from automatic differentiation of the energy with
  Tensorflow.

  Keyword parameters (stored in self.parameters via the Calculator base):
  k: float, the spring constant.
  x0: float, the equilibrium spring length.
  """
  implemented_properties = ['energy', 'forces']
  default_parameters = {'k': 1.0, 'x0': 1.0}

  def __init__(self, **kwargs):
    Calculator.__init__(self, **kwargs)
    # One session is reused for every evaluation; closed in __del__.
    # (A leftover debug print of kwargs was removed here.)
    self.sess = tf.Session()

  def __del__(self):
    self.sess.close()

  def _energy(self, positions):
    """Compute the energy of the spring.

    Parameters
    ----------
    positions: an array (2, 3) of positions.

    Returns
    -------
    a tensor containing the energy.
    """
    k = self.parameters.k
    x0 = self.parameters.x0
    positions = tf.convert_to_tensor(positions)
    x = tf.norm(positions[1] - positions[0])
    e = 0.5 * k * (x - x0)**2
    # We return a tensor here and eval it later.
    return e

  def _forces(self, positions):
    """Compute the forces on the atoms.

    Parameters
    ----------
    positions: an array (2, 3) of positions.

    Returns
    -------
    a tensor containing the forces.
    """
    positions = tf.convert_to_tensor(positions)
    # Forces are the negative gradient of the energy wrt the positions.
    f = tf.gradients(-self._energy(positions), positions)[0]
    # We return a tensor here and eval it later.
    return f

  def calculate(self,
                atoms=None,
                properties=['energy'],
                system_changes=all_changes):
    """Run the calculator.

    You don't usually call this, it is usually called by methods on the Atoms.

    Raises
    ------
    ValueError: if atoms does not contain exactly two atoms.
    """
    if len(atoms) != 2:
      # A spring is only defined between a pair of atoms.
      raise ValueError('You can only use a two atom system.')
    Calculator.calculate(self, atoms, properties, system_changes)

    self.results['energy'] = self._energy(
        atoms.positions).eval(session=self.sess)
    self.results['forces'] = self._forces(
        atoms.positions).eval(session=self.sess)
86 |
--------------------------------------------------------------------------------
/dap/tests/test_py_bpnn.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import os
16 | import unittest
17 | import numpy as np
18 | import numpy.testing as npt
19 | from ase.build import molecule
20 | from amp.descriptor.gaussian import *
21 | from amp.utilities import hash_images
22 |
23 | from dap.ag.neighborlist import get_distances
24 | from dap.py.bpnn import pad, cosine_cutoff, G2
25 | import os
26 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
27 |
28 |
class TestPadding(unittest.TestCase):
  """Tests for the zero-padding helper."""

  def test0(self):
    """pad should zero-fill a (2, 3) array of ones out to (3, 3)."""
    original = np.ones((2, 3))
    expected = np.ones((3, 3))
    expected[2, :] = 0

    result = pad(original, (3, 3))

    self.assertTrue(np.all(result == expected))
40 |
41 |
class TestG2(unittest.TestCase):
  """Run comparisons of the python G2 functions against Amp."""

  def setUp(self):
    # amp saves these and we want to eliminate them
    os.system('rm -fr amp-data*')

  def tearDown(self):
    os.system('rm -fr amp-data*')

  def test_radial_G2(self):
    """Check our G2 fingerprints match Amp's on a gas-phase water molecule."""
    atoms = molecule('H2O')
    atoms.cell = 100 * np.eye(3)

    # Amp setup: two G2 functions (eta = 0.05 and 1.0) for each element.
    sf = {
        'H': make_symmetry_functions(['H', 'O'], 'G2', [0.05, 1.0]),
        'O': make_symmetry_functions(['H', 'O'], 'G2', [0.05, 1.0])
    }

    descriptor = Gaussian(Gs=sf)
    images = hash_images([atoms], ordered=True)
    descriptor.calculate_fingerprints(images)

    # Collect Amp's fingerprint vectors, one row per atom. The loop variable
    # was renamed from `hash`, which shadowed the builtin, and the unused
    # enumerate index was dropped.
    fparray = []
    for image_hash in images.keys():
      for fp in descriptor.fingerprints[image_hash]:
        fparray += [fp[1]]
    fparray = np.array(fparray)

    # This module setup
    positions = atoms.positions
    cell = atoms.cell
    atom_mask = [[1] for atom in atoms]

    numbers = list(np.unique(atoms.numbers))

    # One column per element; 1 where the atom is that element.
    species_mask = np.stack(
        [[atom.number == el for atom in atoms] for el in numbers],
        axis=1).astype(int)

    config = {'cutoff_radius': 6.5}
    d, _ = get_distances(positions, cell, config['cutoff_radius'])

    g0 = G2(0, 0.05, 0.0)
    g1 = G2(1, 0.05, 0.0)
    g2 = G2(0, 1.0, 0.0)
    g3 = G2(1, 1.0, 0.0)

    # This builds the array of fingerprints
    this = np.concatenate(
        (g0(config, d, atom_mask, species_mask),
         g1(config, d, atom_mask, species_mask),
         g2(config, d, atom_mask, species_mask),
         g3(config, d, atom_mask, species_mask)),
        axis=1)

    npt.assert_almost_equal(fparray, this, 5)
101 |
--------------------------------------------------------------------------------
/dap/py/bpnn.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import numpy as np
16 |
17 |
def pad(array, shape):
  """Return `array` zero-padded out to `shape`.

  Parameters
  ----------
  array : array-like
    The input array. Each dimension must be no larger than the
    corresponding entry of `shape`.
  shape : tuple of int
    The shape of the padded result; must have the same rank as `array`.

  Returns
  -------
  A new float array of shape `shape` with `array` in the leading corner and
  zeros elsewhere.
  """
  array = np.asarray(array)
  padded = np.zeros(shape)
  # Generalized from the original 2D-only version (`r, c = array.shape`):
  # place the input in the leading corner whatever its rank.
  padded[tuple(slice(0, extent) for extent in array.shape)] = array
  return padded
26 |
27 |
def cosine_cutoff(config, distances, atom_mask=None):
  """Cosine cutoff function.

  Parameters
  ----------
  config: a dictionary containing 'cutoff_radius' as a float
  distances: the distance array from get_distances
  atom_mask: An array of ones for atoms, 0 for non-atoms.
    Defaults to all atoms.
  """
  Rc = config.get('cutoff_radius', 6.0)
  d = np.array(distances)

  if atom_mask is None:
    mask = np.ones((len(d), 1))
  else:
    mask = np.array(atom_mask)

  # Smooth decay from 1 at d = 0 to 0 at d = Rc.
  result = 0.5 * (np.cos(np.pi * d / Rc) + 1.0)
  # Zero outside the cutoff and on the self-distance entries (d == 0).
  result = result * ((d > 0.0) & (d <= Rc))
  # Mask out non-atom entries along both atom axes.
  result = result * mask * mask[:, None]
  return result
53 |
54 |
def G2(species_index, eta, Rs):
  """G2 function generator.

  This is a radial function between an atom and atoms with some chemical
  symbol. It is defined in cite:khorshidi-2016-amp, eq. 6. This version is
  scaled a little differently than the one Behler uses.

  Parameters
  ----------
  species_index : integer
    species index for this function. Elements that do not have this index
    will be masked out

  eta : float
    The gaussian width

  Rs : float
    The gaussian center or shift

  Returns
  -------
  The g2 function with the cosine_cutoff function integrated into it.
  """

  def g2(config, distances, atom_mask, species_masks):
    d = np.array(distances)
    amask = np.array(atom_mask)
    smasks = np.array(species_masks)

    # Zero out distances to atoms that are not the requested species, and
    # to masked (non-atom) entries along both atom axes.
    selected = smasks[:, species_index][:, None]
    d = d * selected * amask * amask[:, None]

    Rc = config.get('cutoff_radius', 6.5)
    # Gaussian term; masked entries (d == 0) contribute nothing.
    gauss = np.where(d > 0, np.exp(-eta * ((d - Rs)**2 / Rc**2)), 0.0)
    gauss = gauss * cosine_cutoff(config, d, amask)

    # Sum over neighbor atoms and periodic offsets; keep a column vector
    # with one fingerprint value per atom.
    return np.sum(gauss, (1, 2))[:, None]

  g2.__desc__ = 'g2({species_index}, eta={eta}, Rs={Rs})'.format(**locals())
  return g2
101 |
--------------------------------------------------------------------------------
/dap/tests/test_ag_emt.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Tests for the autograd EMT module.
15 |
16 | pydoc:dap.ag.emt.
17 | """
18 |
19 | import unittest
20 | import autograd.numpy as np
21 | from ase.build import bulk
22 | from ase.calculators.emt import EMT
23 | from dap.ag.emt import (parameters, energy, forces, stress)
24 | import os
25 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
26 |
27 |
class TestAgEmt(unittest.TestCase):
  """Compare the autograd EMT implementation against the ASE EMT calculator."""

  @staticmethod
  def _strained_energy(atoms, cell0, positions0, index, delta):
    """Return the potential energy of a symmetrically strained geometry.

    The strain has magnitude `delta` at flattened position `index` of a
    3x3 strain tensor and is always applied to the unstrained reference
    geometry (cell0, positions0).
    """
    strain = np.zeros(9)
    strain[index] = delta
    strain = strain.reshape((3, 3))
    # Symmetrize and add the identity to get the deformation matrix.
    strain = (strain + strain.T) / 2 + np.eye(3)

    atoms.cell = np.dot(strain, cell0.T).T
    atoms.positions = np.dot(strain, positions0.T).T
    return atoms.get_potential_energy()

  def test0(self):
    atoms = bulk('Cu', 'fcc', a=3.61).repeat((2, 2, 2))
    atoms.rattle(0.2)
    atoms.set_calculator(EMT())

    ase_e = atoms.get_potential_energy()
    age = energy(parameters, atoms.positions, atoms.numbers, atoms.cell)
    self.assertEqual(ase_e, age)

    ase_f = atoms.get_forces()
    ag_f = forces(parameters, atoms.positions, atoms.numbers, atoms.cell)
    self.assertTrue(np.allclose(ase_f, ag_f))

  def test_energy_forces(self):
    for structure in ('fcc', 'bcc', 'hcp', 'diamond', 'sc'):
      for repeat in ((1, 1, 1), (1, 2, 3)):
        for a in [3.0, 4.0]:
          atoms = bulk('Cu', structure, a=a).repeat(repeat)
          atoms.rattle()
          atoms.set_calculator(EMT())
          ase_energy = atoms.get_potential_energy()
          emt_energy = energy(parameters, atoms.positions, atoms.numbers,
                              atoms.cell)
          self.assertEqual(ase_energy, emt_energy)

          ase_f = atoms.get_forces()
          ag_f = forces(parameters, atoms.positions, atoms.numbers,
                        atoms.cell)
          self.assertTrue(np.allclose(ase_f, ag_f))

  def test_stress(self):
    for structure in ('fcc', 'bcc', 'hcp', 'diamond', 'sc'):
      for repeat in ((1, 1, 1), (1, 2, 3)):
        for a in [3.0, 4.0]:
          atoms = bulk('Cu', structure, a=a).repeat(repeat)
          atoms.rattle()
          atoms.set_calculator(EMT())

          # Numerically calculate the ase stress
          d = 1e-9  # a delta strain

          ase_stress = np.empty(9)
          # Save the unstrained geometry. The original version only saved
          # the cell, so each successive strain was applied on top of the
          # previous strained positions instead of the reference geometry.
          cell0 = atoms.cell.copy()
          positions0 = atoms.positions.copy()

          # Use a finite difference approach that is centered.
          for i in [0, 4, 8, 5, 2, 1]:
            ep = self._strained_energy(atoms, cell0, positions0, i, d)
            em = self._strained_energy(atoms, cell0, positions0, i, -d)
            ase_stress[i] = (ep - em) / (2 * d)

          # Restore the unstrained geometry before computing the volume and
          # the analytic (autograd) stress.
          atoms.cell = cell0
          atoms.positions = positions0
          ase_stress /= atoms.get_volume()

          # Convert to Voigt order to match the analytic stress.
          ase_stress = np.take(ase_stress.reshape((3, 3)),
                               [0, 4, 8, 5, 2, 1])

          ag_stress = stress(parameters, atoms.positions, atoms.numbers,
                             atoms.cell)

          # I picked the 0.03 tolerance here. I thought it should be closer,
          # but it is a simple numerical difference I am using for the
          # derivative, and I am not sure it is totally correct.
          self.assertTrue(
              np.all(np.abs(ase_stress - ag_stress) <= 0.03),
              f'''
              ase: {ase_stress}
              ag : {ag_stress}
              diff {ase_stress - ag_stress}
              ''')
116 |
--------------------------------------------------------------------------------
/README.org:
--------------------------------------------------------------------------------
1 | #+TITLE: Differentiable atomistic potentials
2 | #+AUTHOR: John Kitchin
3 |
4 | This is not an official Google product.
5 | #+BEGIN_EXPORT html
6 |
7 |
8 | #+END_EXPORT
9 |
10 |
11 | * Summary
12 |
13 | An atomistic potential is a function that takes atomic coordinates, atom types and a unit cell and calculates the potential energy of that atomic configuration. These potentials are used in molecular simulations such as molecular dynamics and Monte Carlo Simulations. Historically, the forces and stress were derived by analytical or numerical derivatives of the potential energy function, then implemented in a program. In this project, we use automatic differentiation to compute forces and stresses from the potential energy program. This project focuses on materials that are described by periodic boundary conditions.
14 |
15 | The first demonstration of this approach is for the Lennard Jones potential, which is fully implemented in TensorFlow to compute energies, forces and stress of periodic atomic systems containing one kind of chemical element. The potential is trainable from a database of reference data, e.g. density functional theory calculations. An example is illustrated in [[./docs/dap.tf.lennardjones.org]].
16 |
17 | * Installation
18 |
19 | If you have root access, and you want to install the bleeding edge version system-wide, it should be sufficient to run:
20 |
21 | #+BEGIN_SRC sh
pip install git+https://github.com/google/differentiable-atomistic-potentials
23 | #+END_SRC
24 |
25 | If you want an editable, developer installation you might prefer this:
26 |
27 | #+BEGIN_SRC sh
28 | git clone https://github.com/google/differentiable-atomistic-potentials.git
29 | cd differentiable-atomistic-potentials
30 | pip install --user -e .
31 | #+END_SRC
32 |
33 | * Related projects
34 |
35 | The earliest code we are aware of is [[http://www.theochem.ruhr-uni-bochum.de/~joerg.behler/runner.htm][Runner]], but it is only available by request from the authors.
36 |
37 | - [[https://bitbucket.org/andrewpeterson/amp][Amp]] cite:khorshidi-2016-amp is an open-source Python/Fortran package for machine learned neural network potentials.
38 | - [[https://biklooost.github.io/PROPhet/][PROPhet]] cite:kolb-2017-discov-charg is an open-source package for machine learned neural network potentials.
39 | - [[http://ann.atomistic.net/Documentation/][aenet]] cite:artrith-2016 is an open source Fortran package for machine learned neural network potentials.
40 |
41 | This project is complementary to those projects. We share a common goal of open-source machine learned neural network potentials. Our approach differs primarily in the use of automatic differentiation to enable efficient training as well as automatic forces and stresses. We also aim to make it possible to generate hybrid potentials comprised of a classical potential and a neural network potential.
42 |
43 | - TensorMol https://github.com/jparkhill/TensorMol is an open source package focused on molecular properties. It also uses TensorFlow.
44 |
45 | - DiffiQult https://github.com/aspuru-guzik-group/DiffiQult (https://arxiv.org/abs/1711.08127) is an open source autodifferentiable quantum chemistry package.
46 |
47 | - DeePMD https://arxiv.org/abs/1707.09571 Deep Potential Molecular Dynamics: a scalable model with the accuracy of quantum mechanics
48 |
49 | ** Automatic differentiation toolkits
50 |
51 | Here are a few of the AD toolkits that are currently around.
52 |
53 | - [[https://github.com/HIPS/autograd][autograd]] :: A Numpy/Python framework
54 | - [[https://www.tensorflow.org/][Tensorflow]] :: An open-source machine learning framework for everyone
55 | - [[https://github.com/google/tangent][tangent]] :: Source-to-Source Debuggable Derivatives in Pure Python
56 |
57 | I have not tried these
58 | - [[https://pypi.python.org/pypi/algopy][algopy]] :: a tool for Algorithmic Differentiation (AD) and Taylor polynomial approximations.
59 | - pytorch :: Another machine learning framework in Python
60 | - Chainer :: Another machine learning framework in Python
61 |
62 | * Roadmap
63 |
64 | These projects are planned in the future.
65 | - [X] Vectorized neighborlists for periodic boundary conditions [[./dap/ag/neighborlist.py][autograd]] and [[./dap/tf/neighborlist.py][TensorFlow]].
66 | - [X] Vectorized one-way neighborlists for periodic boundary conditions [[./dap/ag/neighborlist.py][autograd]] and Tensorflow
67 | - [X] Lennard Jones potential in [[./dap/ag/lennardjones.py][autograd]] and [[./dap/tf/lennardjones.py][Tensorflow]].
68 | - [ ] Effective medium theory for multicomponent alloys in [[./dap/ag/emt.py][autograd]] and Tensorflow
69 | - [ ] Behler-Parinello Neural Network for multicomponent systems
70 |
71 | * Requirements
72 |
73 | This project is written for Python 3.6.
74 |
75 | See [[./requirements.txt]] for a list of required Python packages.
76 |
77 |
--------------------------------------------------------------------------------
/dap/tf/visualize.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 |
16 | """Visualization utilities for tensorflow graphs."""
17 |
18 |
19 | import tensorflow as tf
20 | from graphviz import Digraph
21 | import tempfile
22 | import hashlib
23 | import numpy as np
24 | import os
25 | import webbrowser
26 | from IPython.display import clear_output, Image, display, HTML
27 | import time
28 |
29 |
def tf_to_dot(graph=None, fname=None, format=None):
  """
  Create an image from a tensorflow graph.

  graph: The tensorflow graph to visualize. Defaults to tf.get_default_graph()
  fname: Filename to save the graph image in
  format: Optional image extension. If you do not use this, the extension is
    derived from the fname.

  Returns an org-mode link to the path where the image is.

  Adapted from https://blog.jakuba.net/2017/05/30/tensorflow-visualization.html

  Note: This can make very large images for complex graphs.
  """
  if graph is None:
    graph = tf.get_default_graph()

  node_shapes = {'Const': 'circle',
                 'Placeholder': 'oval'}

  dot = Digraph()
  for node in graph.as_graph_def().node:
    # Label each node with its name and its (possibly empty) tensor shape.
    dims = node.attr['value'].tensor.tensor_shape.dim
    tensor_shape = tuple(d.size for d in dims)
    dot.node(node.name,
             label=f'{node.name} {tensor_shape}',
             shape=node_shapes.get(node.op, None))
    # Connect each input to this node.
    for source in node.input:
      dot.edge(source, node.name)

  if fname is None:
    # Default to a content-derived name so identical graphs map to the
    # same file.
    digest = hashlib.md5(str(dot).encode('utf-8')).hexdigest()
    fname = 'tf-graph-' + digest

  if format is None:
    base, ext = os.path.splitext(fname)
    fname = base
    format = ext[1:] or 'png'

  dot.format = format
  dot.render(fname)
  # render writes both the dot source (fname) and the image; keep only the
  # image file.
  os.unlink(fname)
  print(f'{fname}, {format}')
  return f'[[./{fname}.{format}]]'
79 |
80 |
81 | # Tensorboard visualizations
82 | # Adapted from https://gist.githubusercontent.com/yaroslavvb/97504b8221a8529e7a51a50915206d68/raw/f1473d2873676c0e885b9fbd363c882a7a83b28a/show_graph
83 |
84 |
def strip_consts(graph_def, max_const_size=32):
  """Strip large constant values from graph_def."""
  stripped = tf.GraphDef()
  for node in graph_def.node:
    copied = stripped.node.add()
    copied.MergeFrom(node)
    if copied.op != 'Const':
      continue
    # Replace oversized constant payloads with empty bytes so the
    # serialized graph stays small enough to embed.
    tensor = copied.attr['value'].tensor
    if len(tensor.tensor_content) > max_const_size:
      tensor.tensor_content = b''
  return stripped
97 |
98 |
def show_graph(graph_def=None, browser=True,
               width=1200, height=800,
               max_const_size=32, ungroup_gradients=False):
  """Open a graph in Tensorboard. By default this is done in a browser. If you set
  browser to False, then html will be emitted that shows up in a Jupyter
  notebook.

  graph_def: a GraphDef, or an object with as_graph_def(); defaults to the
    default graph.
  browser: when True, write a temp html file and open it with the system
    browser; otherwise display inline via IPython.
  max_const_size: constants larger than this are stripped (see strip_consts).
  ungroup_gradients: rename '"gradients/' name prefixes to '"b_' in the
    serialized graph text.
  """
  if not graph_def:
    graph_def = tf.get_default_graph().as_graph_def()

  """Visualize TensorFlow graph."""
  if hasattr(graph_def, 'as_graph_def'):
    graph_def = graph_def.as_graph_def()
  strip_def = strip_consts(graph_def, max_const_size=max_const_size)
  data = str(strip_def)
  if ungroup_gradients:
    data = data.replace('"gradients/', '"b_')
  #print(data)
  # NOTE(review): the html/script templates below appear truncated (empty
  # format strings); the upstream gist embeds a tf-graph-basic <script> and
  # <div> template here -- confirm against the source this was adapted from
  # before relying on this function.
  code = """

""".format(data=repr(data), id='graph'+str(np.random.rand()))

  iframe = """

""".format(code.replace('"', '"'))
  if browser:
    # Write the html to a temp file and open it with the default browser.
    fh, tmpf = tempfile.mkstemp(prefix='tf-graph-', suffix='.html')
    os.close(fh)
    with open(tmpf, 'w') as f:
      f.write(iframe)
    webbrowser.open('file://' + tmpf)
  else:
    display(HTML(iframe))
142 |
--------------------------------------------------------------------------------
/dap/tests/test_ag.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import unittest
16 | import autograd.numpy as np
17 | from ase.build import bulk
18 | from ase.neighborlist import NeighborList
19 | from ase.calculators.lj import LennardJones
20 |
21 | from dap.ag.neighborlist import (get_distances, get_neighbors,
22 | get_neighbors_oneway)
23 | from dap.ag.lennardjones import (energy, forces, stress, energy_oneway,
24 | forces_oneway, stress_oneway)
25 | import os
26 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
27 |
28 |
class TestAGNeighborListBothways(unittest.TestCase):
  """Compare the autograd distance-based neighbor counts with ASE."""

  def test0(self):
    """Check the fcc cell neighbors in a variety of repeats."""
    a = 3.6
    # The original list contained (2, 1, 1) twice; the duplicate is removed.
    repeats = ((1, 1, 1), (2, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2),
               (2, 2, 1), (2, 2, 2), (1, 2, 3), (4, 1, 1))
    for cutoff_radius in np.linspace(a / 2, 5 * a, 10):
      for rep in repeats:
        atoms = bulk('Cu', 'fcc', a=a).repeat(rep)

        nl = NeighborList(
            [cutoff_radius / 2] * len(atoms),
            skin=0.01,
            self_interaction=False,
            bothways=True)
        nl.update(atoms)
        nns_ase = [len(nl.get_neighbors(i)[0]) for i in range(len(atoms))]

        # Count neighbors within the cutoff from the distance array.
        d, _ = get_distances(atoms.positions, atoms.cell, cutoff_radius)
        inds = (d <= (cutoff_radius + 0.01)) & (d > 0.00)
        nns = inds.sum((1, 2))

        self.assertTrue(np.all(nns_ase == nns))
52 |
53 |
class TestAGNeighborListOneWay(unittest.TestCase):
  """Compare the autograd one-way neighborlist with ASE's."""

  def test0(self):
    """check one-way neighborlist for fcc on different repeats."""
    a = 3.6
    # The original list contained (2, 1, 1) twice; the duplicate is removed.
    repeats = ((1, 1, 1), (2, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 2),
               (2, 2, 1), (2, 2, 2), (1, 2, 3), (4, 1, 1))
    for rep in repeats:
      for cutoff_radius in np.linspace(a / 2.1, 5 * a, 5):
        atoms = bulk('Cu', 'fcc', a=a).repeat(rep)
        # It is important to rattle the atoms off the lattice points.
        # Otherwise, float tolerances makes it hard to count correctly.
        atoms.rattle(0.02)
        nl = NeighborList(
            [cutoff_radius / 2] * len(atoms),
            skin=0.0,
            self_interaction=False,
            bothways=False)
        nl.update(atoms)

        neighbors, displacements = get_neighbors_oneway(
            atoms.positions, atoms.cell, cutoff_radius, skin=0.0)

        for i in range(len(atoms)):
          an, ad = nl.get_neighbors(i)
          # Check the same number of neighbors
          self.assertEqual(len(neighbors[i]), len(an))
          # Check the same indices
          self.assertCountEqual(neighbors[i], an)

        # I am not sure how to test for the displacements.
84 |
85 |
class TestAGLennardJones(unittest.TestCase):
  """Compare the autograd Lennard-Jones results with ASE's calculator."""

  def test_fcc(self):
    'Check LJ with structures and repeats with different symmetries.'
    structures = ['fcc', 'bcc', 'diamond']
    repeats = [(1, 1, 1), (1, 1, 2), (1, 2, 1), (2, 1, 1), (1, 2, 3),
               (2, 2, 2)]
    for struct in structures:
      for repeat in repeats:
        atoms = bulk('Cu', struct, a=3.7).repeat(repeat)
        atoms.rattle(0.02)
        atoms.set_calculator(LennardJones())

        # Energy, forces and stress should all agree with ASE.
        self.assertAlmostEqual(atoms.get_potential_energy(),
                               energy({}, atoms.positions, atoms.cell))
        self.assertTrue(
            np.allclose(atoms.get_forces(),
                        forces({}, atoms.positions, atoms.cell)))
        self.assertTrue(
            np.allclose(atoms.get_stress(),
                        stress({}, atoms.positions, atoms.cell)))
108 |
109 |
class TestLennardJonesOneWay(unittest.TestCase):
  """Compare the one-way-neighborlist Lennard-Jones results with ASE's."""

  def test_fcc(self):
    """Check LJ with oneway neighbors.
    Uses structures and repeats with different symmetries."""
    structures = ['fcc', 'bcc', 'diamond']
    repeats = [(1, 1, 1), (1, 1, 2), (1, 2, 1), (2, 1, 1), (1, 2, 3),
               (2, 2, 2)]
    for struct in structures:
      for repeat in repeats:
        atoms = bulk('Cu', struct, a=3.7).repeat(repeat)
        atoms.rattle(0.02)
        atoms.set_calculator(LennardJones())

        # Energy, forces and stress should all agree with ASE.
        self.assertAlmostEqual(atoms.get_potential_energy(),
                               energy_oneway({}, atoms.positions, atoms.cell))
        self.assertTrue(
            np.allclose(atoms.get_forces(),
                        forces_oneway({}, atoms.positions, atoms.cell)))
        self.assertTrue(
            np.allclose(atoms.get_stress(),
                        stress_oneway({}, atoms.positions, atoms.cell)))
133 |
--------------------------------------------------------------------------------
/dap/tf/utils.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Tensorflow utilities.
15 | These are mostly functions that bring additional numpy functionality to
16 | Tensorflow.
17 | """
18 | import itertools
19 | import numpy as np
20 | import tensorflow as tf
21 |
22 |
def swish(x, beta=1.0):
  """Return the swish activation, x * sigmoid(beta * x).

  See https://arxiv.org/abs/1710.05941
  """
  with tf.name_scope("swish"):
    return tf.nn.sigmoid(beta * x) * x
27 |
28 |
def tri(N, M=None, k=0, dtype=tf.bool):
  """
  An array with ones at and below the given diagonal and zeros elsewhere.

  Parameters
  ----------
  N : int
    Number of rows in the array
  M : int, optional
    Number of columns in the array. Defaults to number of rows.
  k : The subdiagonal at and below which the array is filled, optional
  dtype : tf dtype of the result, optional.
    Defaults to tf.bool, which is what every call previously received: the
    old signature advertised tf.float64 but silently ignored the parameter.
    Callers in this file (triu_indices/tril_indices) rely on a bool result
    for tf.where.

  Returns
  -------
  out : tensor of shape (N, M)
  Modeled after pydoc:numpy.tri.
  """

  if M is None:
    M = N

  rows = tf.range(N)
  cols = tf.range(-k, M - k)
  # True at and below the k-th diagonal.
  mask = tf.greater_equal(rows[:, None], cols[None, :])
  # Honor the requested dtype (previously ignored).
  return tf.cast(mask, dtype)
51 |
52 |
def triu_indices(n, k=0, m=None):
  """Return indices for upper triangle of an (n, m) array.

  Parameters
  ----------
  n : int
    number of rows in the array.
  k : int, optional
    diagonal offset.
  m : int, optional
    number of columns in the array. Defaults to `n`.

  Returns
  -------
  inds : a tensor, shape = (None, 2)
    column 0 is one set of indices, column 1 is the other set.
  modeled after pydoc:numpy.triu_indices.
  """
  # The upper triangle at offset k is the complement of the lower triangle
  # at offset k - 1.
  upper = tf.logical_not(tri(n, m, k=k - 1))
  coords = tf.where(upper)
  return coords[:, 0], coords[:, 1]
72 |
73 |
def triu_indices_from(arr, k=0):
  """Return the indices for the upper-triangle of arr.

  Parameters
  ----------
  arr : tensor or array.
  k : diagonal.
  see pydoc:numpy.triu_indices_from.
  """
  tensor = tf.convert_to_tensor(arr)
  shape = tensor.get_shape().as_list()
  if len(shape) != 2:
    raise ValueError("Tensor must be 2d")
  nrows, ncols = shape
  return triu_indices(nrows, k=k, m=ncols)
87 |
88 |
def tril_indices(n, k=0, m=None):
  """Return indices for lower triangle of an (n, m) array.

  Parameters
  ----------
  n : int
    number of rows in the array.
  k : int, optional
    diagonal offset.
  m : int, optional
    number of columns in the array. Defaults to `n`.

  Returns
  -------
  inds : a tensor, shape = (None, 2)
    column 0 is one set of indices, column 1 is the other set.
  modeled after pydoc:numpy.tril_indices.
  """
  # tri() is True at and below the k-th diagonal; its coordinates are
  # exactly the lower-triangle indices.
  coords = tf.where(tri(n, m, k=k))
  return coords[:, 0], coords[:, 1]
108 |
109 |
def tril_indices_from(arr, k=0):
  """Return the indices for the lower-triangle of arr.

  Parameters
  ----------
  arr : tensor or array.
  k : diagonal.
  see pydoc:numpy.tril_indices_from.
  """
  tensor = tf.convert_to_tensor(arr)
  shape = tensor.get_shape().as_list()
  if len(shape) != 2:
    raise ValueError("Tensor must be 2d")
  nrows, ncols = shape
  return tril_indices(nrows, k=k, m=ncols)
123 |
124 |
def combinations(arr, k):
  """Return tensor of combinations of k elements.

  Parameters
  ----------
  arr : 1D array or tensor
  k : number of elements to make combinations of.

  Returns
  -------
  a 2D tensor of combinations. Each row is a combination, and each element
  of the combination is in the columns.

  Raises
  ------
  ValueError : if arr is not 1d.

  Related: pydoc:itertools.combinations
  """
  tensor = tf.convert_to_tensor(arr)

  shape = tensor.get_shape().as_list()
  if len(shape) != 1:
    raise ValueError("Tensor must be 1d")

  # Enumerate the index combinations on the host (the redundant list
  # comprehension around itertools.combinations was removed), then gather
  # each combination's elements.
  index_combinations = itertools.combinations(range(shape[0]), k)
  return tf.stack([tf.gather(arr, inds) for inds in index_combinations])
149 |
150 |
def slices_values_to_sparse_tensor(slices, values, dense_shape):
  """Convert a tensor of slices and corresponding values to a sparse tensor.

  Given a 2D tensor of slices, where each row corresponds to the row the slice
  is from in another tensor, and the columns are the indices in that row, and a
  tensor of corresponding values, create a tf.SparseTensor representation.

  For example, to create a sparse representation of the top_k results
  (note the argument order: the indices are the slices, the values are the
  values):
  >> arr = [[1, 2, 3], [3, 2, 1], [2, 1, 3]]
  >> kv, ki = tf.nn.top_k(arr, k)
  >> sparse_tensor = slices_values_to_sparse_tensor(ki, kv, arr.shape)

  This is useful to then make a dense tensor comprised of those values, with
  some other default value for the rest.

  Here the default other values are zero.

  >> dst = tf.sparse_tensor_to_dense(sparse_tensor, validate_indices=False)
  """

  # Indices must be int64 for tf.SparseTensor.
  slices = tf.cast(tf.convert_to_tensor(slices), dtype=tf.int64)
  values = tf.convert_to_tensor(values)

  shape = tf.shape(slices, out_type=tf.int64)

  # Row i of `slices` holds column indices into row i of the result.
  nrows = shape[0]
  row_inds = tf.range(nrows)

  # Encode each (row, col) pair as col * nrows + row, then decode into the
  # (row, col) pairs SparseTensor expects: idx % nrows recovers the row,
  # idx // nrows recovers the column.
  # NOTE(review): this encoding is only unambiguous when every column index
  # pairs with a row index < nrows, which holds here since row_inds =
  # range(nrows) -- but confirm behavior for non-square dense_shape.
  flattened_indices = tf.reshape(slices * nrows + row_inds[:, None], [-1])
  twod_inds = tf.stack(
      [flattened_indices % nrows, flattened_indices // nrows], axis=1)
  return tf.SparseTensor(
      twod_inds, values=tf.reshape(values, [-1]), dense_shape=dense_shape)
184 |
--------------------------------------------------------------------------------
/dap/ag/lennardjones.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """A periodic Lennard Jones potential using autograd"""
15 |
16 | import autograd.numpy as np
17 | from autograd import elementwise_grad
18 | from dap.ag.neighborlist import get_distances, get_neighbors_oneway
19 |
20 |
def energy(params, positions, cell, strain=np.zeros((3, 3))):
  """Compute the energy of a Lennard-Jones system.

  Parameters
  ----------

  params : dictionary of parameters.
    Defaults to {'sigma': 1.0, 'epsilon': 1.0}

  positions : array of floats. Shape = (natoms, 3)

  cell: array of unit cell vectors. Shape = (3, 3)

  strain: array of strains to apply to cell. Shape = (3, 3)

  Returns
  -------
  energy : float
  """

  sigma = params.get('sigma', 1.0)
  epsilon = params.get('epsilon', 1.0)

  # Fixed cutoff at three sigma.
  rc = 3 * sigma

  # e0 is the pair energy at the cutoff; subtracting it per pair makes the
  # energy continuous (zero) at rc.
  e0 = 4 * epsilon * ((sigma / rc)**12 - (sigma / rc)**6)

  # Apply the strain to the cell and positions.
  strain_tensor = np.eye(3) + strain
  cell = np.dot(strain_tensor, cell.T).T
  positions = np.dot(strain_tensor, positions.T).T

  r2 = get_distances(positions, cell, rc, 0.01)[0]**2

  # get_distances zeroes self-distances and pairs outside the cutoff.
  # Dividing by `adjusted` (ones substituted at those zeros) instead of r2
  # keeps the autograd gradient free of nans from division by zero; the
  # second np.where re-masks those entries back to zero.
  zeros = np.equal(r2, 0.0)
  adjusted = np.where(zeros, np.ones_like(r2), r2)

  c6 = np.where((r2 <= rc**2) & (r2 > 0.0), (sigma**2 / adjusted)**3,
                np.zeros_like(r2))
  c6 = np.where(zeros, np.zeros_like(r2), c6)
  # Each interacting pair (nonzero c6 entry) is shifted by e0.
  energy = -e0 * (c6 != 0.0).sum()
  c12 = c6**2
  energy += np.sum(4 * epsilon * (c12 - c6))

  # get_distances double counts the interactions, so we divide by two.
  return energy / 2
66 |
67 |
def forces(params, positions, cell):
  """Compute the forces on each atom of a Lennard-Jones system.

  Parameters
  ----------
  params : dictionary of parameters.
    Defaults to {'sigma': 1.0, 'epsilon': 1.0}
  positions : array of floats. Shape = (natoms, 3)
  cell : array of unit cell vectors. Shape = (3, 3)

  Returns
  -------
  forces : an array of forces. Shape = (natoms, 3)
  """
  # The force is the negative gradient of the energy with respect to the
  # atomic positions (argument 1 of `energy`).
  denergy_dpositions = elementwise_grad(energy, 1)
  return -denergy_dpositions(params, positions, cell)
88 |
89 |
def stress(params, positions, cell, strain=np.zeros((3, 3))):
  """Compute the stress on a Lennard-Jones system.

  Parameters
  ----------
  params : dictionary of parameters.
    Defaults to {'sigma': 1.0, 'epsilon': 1.0}
  positions : array of floats. Shape = (natoms, 3)
  cell : array of unit cell vectors. Shape = (3, 3)

  Returns
  -------
  stress : an array of stress components. Shape = (6,)
    [sxx, syy, szz, syz, sxz, sxy]
  """
  # The stress is the derivative of the energy with respect to strain
  # (argument 3 of `energy`), symmetrized and normalized by the cell volume.
  denergy_dstrain = elementwise_grad(energy, 3)
  derivative = denergy_dstrain(params, positions, cell, strain)

  volume = np.abs(np.linalg.det(cell))
  symmetrized = 0.5 * (derivative + derivative.T) / volume

  # Flattened 3x3 entries [0, 4, 8, 5, 2, 1] are the six Voigt components.
  return np.take(symmetrized, [0, 4, 8, 5, 2, 1])
116 |
117 |
118 | # Oneway LennardJones potential
119 |
120 |
def energy_oneway(params, positions, cell, strain=np.zeros((3, 3))):
  """Compute the energy of a Lennard-Jones system with a one-way neighborlist.

  Parameters
  ----------

  params : dictionary of parameters.
    Defaults to {'sigma': 1.0, 'epsilon': 1.0}

  positions : array of floats. Shape = (natoms, 3)

  cell: array of unit cell vectors. Shape = (3, 3)

  strain: array of strains to apply to cell. Shape = (3, 3)

  Returns
  -------
  energy : float
  """

  sigma = params.get('sigma', 1.0)
  epsilon = params.get('epsilon', 1.0)

  # Fixed cutoff at three sigma.
  rc = 3 * sigma

  # Pair energy at the cutoff; subtracted per pair so the energy is
  # continuous (zero) at rc.
  e0 = 4 * epsilon * ((sigma / rc)**12 - (sigma / rc)**6)

  # Apply the strain to the cell and positions.
  strain_tensor = np.eye(3) + strain
  cell = np.dot(strain_tensor, cell.T).T
  positions = np.dot(strain_tensor, positions.T).T

  inds, disps = get_neighbors_oneway(positions, cell, rc)

  natoms = len(positions)
  energy = 0.0

  # The one-way list contains each pair once, so no double-count correction
  # is needed here (unlike `energy`).
  for a in range(natoms):
    neighbors = inds[a]
    offsets = disps[a]
    # Convert unit-cell offsets to cartesian displacements.
    cells = np.dot(offsets, cell)
    d = positions[neighbors] + cells - positions[a]
    r2 = (d**2).sum(1)
    c6 = np.where(r2 <= rc**2, (sigma**2 / r2)**3, np.zeros_like(r2))
    # Shift each interacting pair (nonzero c6) by e0.
    energy -= e0 * (c6 != 0.0).sum()
    c12 = c6**2
    energy += 4 * epsilon * (c12 - c6).sum()
  return energy
168 |
169 |
def forces_oneway(params, positions, cell):
  """Compute the forces on each atom, using the one-way neighborlist energy.

  Parameters
  ----------
  params : dictionary of parameters.
    Defaults to {'sigma': 1.0, 'epsilon': 1.0}
  positions : array of floats. Shape = (natoms, 3)
  cell : array of unit cell vectors. Shape = (3, 3)

  Returns
  -------
  forces : an array of forces. Shape = (natoms, 3)
  """
  # Negative gradient of `energy_oneway` with respect to positions
  # (argument 1).
  denergy_dpositions = elementwise_grad(energy_oneway, 1)
  return -denergy_dpositions(params, positions, cell)
190 |
191 |
def stress_oneway(params, positions, cell, strain=np.zeros((3, 3))):
  """Compute the stress, using the one-way neighborlist energy.

  Parameters
  ----------
  params : dictionary of parameters.
    Defaults to {'sigma': 1.0, 'epsilon': 1.0}
  positions : array of floats. Shape = (natoms, 3)
  cell : array of unit cell vectors. Shape = (3, 3)

  Returns
  -------
  stress : an array of stress components. Shape = (6,)
    [sxx, syy, szz, syz, sxz, sxy]
  """
  # Derivative of `energy_oneway` with respect to strain (argument 3),
  # symmetrized and normalized by the cell volume.
  denergy_dstrain = elementwise_grad(energy_oneway, 3)
  derivative = denergy_dstrain(params, positions, cell, strain)

  volume = np.abs(np.linalg.det(cell))
  symmetrized = 0.5 * (derivative + derivative.T) / volume

  # Flattened 3x3 entries [0, 4, 8, 5, 2, 1] are the six Voigt components.
  return np.take(symmetrized, [0, 4, 8, 5, 2, 1])
218 |
--------------------------------------------------------------------------------
/dap/ag/emt.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Differentiable effective medium theory potential
15 |
16 | Adapted from https://wiki.fysik.dtu.dk/ase/_modules/ase/calculators/emt.html#EMT
17 |
18 | """
19 |
20 | import autograd.numpy as np
21 | from autograd import elementwise_grad
22 | from ase.data import chemical_symbols
23 | from ase.units import Bohr
24 | from dap.ag.neighborlist import get_neighbors_oneway
25 |
# EMT parameter set, one tuple per chemical symbol. Units are as noted in
# the column header; the Bohr-based entries are rescaled with ase.units.Bohr
# inside `energy` before use.
parameters = {
    # E0 s0 V0 eta2 kappa lambda n0
    # eV bohr eV bohr^-1 bohr^-1 bohr^-1 bohr^-3
    'Al': (-3.28, 3.00, 1.493, 1.240, 2.000, 1.169, 0.00700),
    'Cu': (-3.51, 2.67, 2.476, 1.652, 2.740, 1.906, 0.00910),
    'Ag': (-2.96, 3.01, 2.132, 1.652, 2.790, 1.892, 0.00547),
    'Au': (-3.80, 3.00, 2.321, 1.674, 2.873, 2.182, 0.00703),
    'Ni': (-4.44, 2.60, 3.673, 1.669, 2.757, 1.948, 0.01030),
    'Pd': (-3.90, 2.87, 2.773, 1.818, 3.107, 2.155, 0.00688),
    'Pt': (-5.85, 2.90, 4.067, 1.812, 3.145, 2.192, 0.00802),
    # extra parameters - just for fun ...
    'H': (-3.21, 1.31, 0.132, 2.652, 2.790, 3.892, 0.00547),
    'C': (-3.50, 1.81, 0.332, 1.652, 2.790, 1.892, 0.01322),
    'N': (-5.10, 1.88, 0.132, 1.652, 2.790, 1.892, 0.01222),
    'O': (-4.60, 1.95, 0.332, 1.652, 2.790, 1.892, 0.00850)
}

beta = 1.809  # (16 * pi / 3)**(1.0 / 3) / 2**0.5, preserve historical rounding
44 |
45 |
def energy(parameters, positions, numbers, cell, strain=np.zeros((3, 3))):
  """Compute the energy using Effective medium theory.

  Parameters
  ----------

  parameters : dict mapping a chemical symbol to a tuple of
    (E0, s0, V0, eta2, kappa, lambda, n0), e.g. the module-level
    `parameters` dict.

  positions : array of floats. Shape = (natoms, 3)

  numbers : array of integers of atomic numbers (natoms,)

  cell: array of unit cell vectors. Shape = (3, 3)

  strain: array of strains to apply to cell. Shape = (3, 3)

  Returns
  -------
  energy : float

  Raises
  ------
  NotImplementedError : if an element in `numbers` has no entry in
    `parameters`.
  """
  # Apply the strain to the cell and positions.
  strain_tensor = np.eye(3) + strain
  cell = np.dot(strain_tensor, cell.T).T
  positions = np.dot(strain_tensor, positions.T).T

  # Cutoff derived from the largest s0 in the parameter set.
  # NOTE(review): this scans *all* entries of `parameters`, not only the
  # elements actually present in `numbers`, so unused entries influence the
  # cutoff; preserved as-is for backward compatibility.
  maxseq = max(p[1] for p in parameters.values()) * Bohr
  rc = beta * maxseq * 0.5 * (np.sqrt(3) + np.sqrt(4))
  rr = rc * 2 * np.sqrt(4) / (np.sqrt(3) + np.sqrt(4))
  acut = np.log(9999.0) / (rr - rc)

  # The neighborlist uses a slightly larger cutoff than the energy cutoff.
  rc_list = rc + 0.5

  # Derived per-element parameters, keyed by atomic number.
  par = {}
  for Z in numbers:
    if Z not in par:
      sym = chemical_symbols[Z]
      if sym not in parameters:
        raise NotImplementedError('No EMT-potential for {0}'.format(sym))
      p = parameters[sym]
      s0 = p[1] * Bohr
      eta2 = p[3] / Bohr
      kappa = p[4] / Bohr
      gamma1 = 0.0
      gamma2 = 0.0
      # Sum over three shells at radii s0 * beta * sqrt(1), sqrt(2), sqrt(3)
      # weighted by 12, 6 and 24, smoothly truncated by the acut/rc factor.
      for i, n in enumerate([12, 6, 24]):
        r = s0 * beta * np.sqrt(i + 1)
        x = n / (12 * (1.0 + np.exp(acut * (r - rc))))
        gamma1 += x * np.exp(-eta2 * (r - beta * s0))
        gamma2 += x * np.exp(-kappa / beta * (r - beta * s0))

      par[Z] = {
          'E0': p[0],
          's0': s0,
          'V0': p[2],
          'eta2': eta2,
          'kappa': kappa,
          'lambda': p[5] / Bohr,
          'n0': p[6] / Bohr**3,
          'rc': rc,
          'gamma1': gamma1,
          'gamma2': gamma2
      }

  # Pairwise n0 ratios between the element types present.
  ksi = {}
  for s1, p1 in par.items():
    ksi[s1] = {}
    for s2, p2 in par.items():
      ksi[s1][s2] = p2['n0'] / p1['n0']

  natoms = len(positions)
  sigma1 = [0.0] * natoms

  all_neighbors, all_offsets = get_neighbors_oneway(
      positions, cell, rc_list, skin=0.0)

  # Calculate
  energy = 0.0

  for a1 in range(natoms):
    Z1 = numbers[a1]
    p1 = par[Z1]
    _ksi = ksi[Z1]
    neighbors, offsets = all_neighbors[a1], all_offsets[a1]
    offsets = np.dot(offsets, cell)
    for a2, offset in zip(neighbors, offsets):
      d = positions[a2] + offset - positions[a1]
      r = np.sqrt(np.dot(d, d))
      if r < rc_list:
        Z2 = numbers[a2]
        p2 = par[Z2]
        # Smooth cutoff factor.
        x = np.exp(acut * (r - rc))
        theta = 1.0 / (1.0 + x)
        # The one-way list visits each pair once, so both directions of the
        # pair interaction are accumulated here.
        y1 = (0.5 * p1['V0'] * np.exp(-p2['kappa'] * (
            r / beta - p2['s0'])) * _ksi[Z2] / p1['gamma2'] * theta)
        y2 = (0.5 * p2['V0'] * np.exp(-p1['kappa'] * (
            r / beta - p1['s0'])) / _ksi[Z2] / p2['gamma2'] * theta)
        energy = energy - (y1 + y2)

        # Accumulate the density-like sums on both atoms of the pair.
        sa = (
            np.exp(-p2['eta2'] *
                   (r - beta * p2['s0'])) * _ksi[Z2] * theta / p1['gamma1'])
        sigma1[a1] = sigma1[a1] + sa

        sa = (
            np.exp(-p1['eta2'] *
                   (r - beta * p1['s0'])) / _ksi[Z2] * theta / p2['gamma1'])

        sigma1[a2] = sigma1[a2] + sa

  # Per-atom energy from the accumulated sigma1 sums.
  for a in range(natoms):
    Z = numbers[a]
    p = par[Z]
    try:
      ds = -np.log(sigma1[a] / 12) / (beta * p['eta2'])
    except (OverflowError, ValueError):
      # NOTE(review): np.log on a nonpositive float returns nan/-inf with a
      # warning rather than raising, so with numpy scalars this branch is
      # unlikely to fire; kept from the scalar-math original.
      energy -= p['E0']
      continue
    x = p['lambda'] * ds
    y = np.exp(-x)
    z = 6 * p['V0'] * np.exp(-p['kappa'] * ds)
    energy += p['E0'] * ((1 + x) * y - 1) + z

  return energy
169 |
170 |
def forces(parameters, positions, numbers, cell):
  """Compute the forces on each atom of an EMT system.

  Parameters
  ----------
  parameters : dict of per-symbol EMT parameter tuples.
  positions : array of floats. Shape = (natoms, 3)
  numbers : array of integers of atomic numbers (natoms,)
  cell : array of unit cell vectors. Shape = (3, 3)

  Returns
  -------
  forces : an array of forces. Shape = (natoms, 3)
  """
  # Negative gradient of the EMT energy with respect to positions
  # (argument 1).
  denergy_dpositions = elementwise_grad(energy, 1)
  return -denergy_dpositions(parameters, positions, numbers, cell)
190 |
191 |
def stress(parameters, positions, numbers, cell, strain=np.zeros((3, 3))):
  """Compute the stress on an EMT system.

  Parameters
  ----------
  parameters : dict of per-symbol EMT parameter tuples.
  positions : array of floats. Shape = (natoms, 3)
  numbers : array of integers of atomic numbers (natoms,)
  cell : array of unit cell vectors. Shape = (3, 3)

  Returns
  -------
  stress : an array of stress components. Shape = (6,)
    [sxx, syy, szz, syz, sxz, sxy]
  """
  # Derivative of the EMT energy with respect to strain (argument 4),
  # symmetrized and normalized by the cell volume.
  denergy_dstrain = elementwise_grad(energy, 4)
  derivative = denergy_dstrain(parameters, positions, numbers, cell, strain)

  volume = np.abs(np.linalg.det(cell))
  symmetrized = 0.5 * (derivative + derivative.T) / volume

  # Flattened 3x3 entries [0, 4, 8, 5, 2, 1] are the six Voigt components.
  return np.take(symmetrized, [0, 4, 8, 5, 2, 1])
217 |
--------------------------------------------------------------------------------
/dap/tests/test_tf_lennardjones.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Tests for the tensorflow LennardJones module.
15 |
16 | pydoc:dap.tf.lennardjones.
17 | """
18 |
19 | import tensorflow as tf
20 | from ase.build import bulk
21 | from ase.calculators.lj import LennardJones as aseLJ
22 | from dap.tf.lennardjones import (energy, forces, stress)
23 | from dap.tf.lennardjones import LennardJones as TFLJ
24 | import os
25 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
26 |
27 |
class TestLJ(tf.test.TestCase):
  """Tests comparing the TF and ASE LennardJones implementations.

  The energy, forces and stress are compared in several crystal structures
  with different symmetries, repeats and lattice constants. The atoms are
  rattled in each structure to further break symmetry.
  """

  def _rattled_systems(self):
    """Yield rattled bulk Ar systems with the ASE LJ calculator attached."""
    for structure in ('fcc', 'bcc', 'hcp', 'diamond', 'sc'):
      for repeat in ((1, 1, 1), (1, 2, 3)):
        for lattice_constant in [3.0, 4.0]:
          atoms = bulk('Ar', structure, a=lattice_constant).repeat(repeat)
          atoms.rattle()
          atoms.set_calculator(aseLJ())
          yield atoms

  def test_energy(self):
    pos = tf.placeholder(tf.float64, (None, 3))
    cell = tf.placeholder(tf.float64, (3, 3))
    lj_energy = energy(pos, cell)

    for atoms in self._rattled_systems():
      ase_energy = atoms.get_potential_energy()
      init = tf.global_variables_initializer()
      with self.test_session() as sess:
        sess.run(init)
        tf_energy = sess.run(
            lj_energy, feed_dict={pos: atoms.positions, cell: atoms.cell})
        self.assertAllClose(ase_energy, tf_energy)

  def test_forces(self):
    pos = tf.placeholder(tf.float64, (None, 3))
    cell = tf.placeholder(tf.float64, (3, 3))
    lj_forces = forces(pos, cell)

    for atoms in self._rattled_systems():
      ase_forces = atoms.get_forces()
      init = tf.global_variables_initializer()
      with self.test_session() as sess:
        sess.run(init)
        tf_forces = sess.run(
            lj_forces, feed_dict={pos: atoms.positions, cell: atoms.cell})
        self.assertAllClose(ase_forces, tf_forces)

  def test_stress(self):
    pos = tf.placeholder(tf.float64, (None, 3))
    cell = tf.placeholder(tf.float64, (3, 3))
    lj_stress = stress(pos, cell)

    for atoms in self._rattled_systems():
      ase_stress = atoms.get_stress()
      init = tf.global_variables_initializer()
      with self.test_session() as sess:
        sess.run(init)
        tf_stress = sess.run(
            lj_stress, feed_dict={pos: atoms.positions, cell: atoms.cell})
        self.assertAllClose(ase_stress, tf_stress)
100 |
101 |
class TestLJ_1way(tf.test.TestCase):
  """Tests comparing the TF calculator class and ASE LennardJones.

  Tests the energy, forces and stress in different crystal structures with
  different symmetries and repeats. The atoms are rattled in each structure to
  further break symmetry. Unlike TestLJ, these tests exercise the TFLJ
  calculator interface and manage the TF default graph explicitly so nodes
  from one iteration do not accumulate into the next.
  """

  def test_energy_1way(self):
    """Test oneway list"""
    import warnings
    warnings.filterwarnings('ignore')

    for structure in ('hcp', 'fcc', 'bcc', 'hcp', 'diamond', 'sc'):
      for repeat in ((2, 1, 1), (1, 1, 1), (2, 2, 2), (1, 2, 3)):
        for a in [2.0, 3.0]:
          # Progress/diagnostic output: current graph size and the case
          # being tested.
          print(len([n.name for n in tf.get_default_graph().as_graph_def().node]))
          print(f'{structure} {repeat} {a}')
          atoms = bulk('Ar', structure, a=a).repeat(repeat)
          atoms.rattle()
          atoms.set_calculator(aseLJ())
          ase_energy = atoms.get_potential_energy()
          # This context manager forces a new graph for each iteration.
          # Otherwise, your graph accumulates nodes and slows down.
          with tf.Graph().as_default():
            atoms.set_calculator(TFLJ())
            lj_energy = atoms.get_potential_energy()
            self.assertAllClose(ase_energy, lj_energy)

  def test_forces_1way(self):
    import warnings
    warnings.filterwarnings('ignore')
    for structure in ('fcc', 'bcc', 'hcp', 'diamond', 'sc'):
      for repeat in ((1, 1, 1), (2, 2, 2), (2, 1, 1), (1, 2, 3)):
        for a in [2.0, 3.0]:
          # Clear leftover nodes from the previous iteration before the ASE
          # reference calculation; the TFLJ work happens in a fresh graph.
          tf.reset_default_graph()
          print(f'{structure} {repeat} {a}')
          atoms = bulk('Ar', structure, a=a).repeat(repeat)
          atoms.rattle()
          atoms.set_calculator(aseLJ())
          ase_forces = atoms.get_forces()
          with tf.Graph().as_default():
            atoms.set_calculator(TFLJ())
            lj_forces = atoms.get_forces()
            self.assertAllClose(ase_forces, lj_forces)

  def test_stress_1way(self):
    import numpy as np
    np.set_printoptions(precision=3, suppress=True)
    import warnings
    warnings.filterwarnings('ignore')
    for structure in ('fcc', 'bcc', 'hcp', 'diamond', 'sc'):
      for repeat in ((1, 1, 1), (2, 2, 2), (2, 1, 1), (1, 2, 3)):
        for a in [2.0, 3.0]:
          tf.reset_default_graph()
          atoms = bulk('Ar', structure, a=a).repeat(repeat)
          atoms.rattle()
          atoms.set_calculator(aseLJ())
          ase_stress = atoms.get_stress()

          with tf.Graph().as_default():
            atoms.set_calculator(TFLJ())
            lj_stress = atoms.get_stress()
            # TODO. I am suspicious about the need for this high tolerance. The
            # test does not pass without it, due to some stress differences that
            # are about 0.005 in magnitude. This is not that large, but neither
            # zero. The biggest issue is fcc (1, 1, 1) 3.0
            # [ 0.005 0.005 0.005 0. 0. -0. ]
            # The rest of them seem fine.
            delta = ase_stress - lj_stress
            if delta.max() > 0.0001:
              print(f'{structure} {repeat} {a}')
              print(delta)
            self.assertAllClose(ase_stress, lj_stress, 0.006, 0.006)
177 |
--------------------------------------------------------------------------------
/dap/ag/neighborlist.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """A neighborlist for autograd-based potentials"""
15 | import autograd.numpy as np
16 |
17 |
def get_distances(positions,
                  cell,
                  cutoff_distance,
                  skin=0.01,
                  strain=np.zeros((3, 3))):
  """Get distances to atoms in a periodic unitcell.

  Parameters
  ----------

  positions: atomic positions. array-like (natoms, 3)
  cell: unit cell. array-like (3, 3)
  cutoff_distance: Maximum distance to get neighbor distances for. float
  skin: A tolerance for the cutoff_distance. float
  strain: array-like (3, 3)

  Returns
  -------

  distances : array of shape (natoms, natoms, noffsets), where noffsets is
  the number of periodic images needed to tile enough space that every
  neighbor within the cutoff is found. Entries beyond (cutoff_distance +
  skin), including self-distances, are zeroed.

  offsets : integer array (noffsets, 3) of the unit-cell offsets used.
  """
  positions = np.array(positions)
  cell = np.array(cell)

  # Apply the strain to the cell and positions.
  strain_tensor = np.eye(3) + strain
  cell = np.dot(strain_tensor, cell.T).T
  positions = np.dot(strain_tensor, positions.T).T

  inverse_cell = np.linalg.inv(cell)
  # Number of cell repeats along each axis needed to reach the cutoff.
  num_repeats = cutoff_distance * np.linalg.norm(inverse_cell, axis=0)

  fractional_coords = np.dot(positions, inverse_cell) % 1
  mins = np.min(np.floor(fractional_coords - num_repeats), axis=0)
  maxs = np.max(np.ceil(fractional_coords + num_repeats), axis=0)

  # Broadcast the per-axis integer ranges against the cartesian unit
  # vectors, then sum them into the full (noffsets, 3) grid of cell offsets.
  basis = np.eye(3)
  va = np.arange(mins[0], maxs[0])[:, None] * basis[0][None, :]
  vb = np.arange(mins[1], maxs[1])[:, None] * basis[1][None, :]
  vc = np.arange(mins[2], maxs[2])[:, None] * basis[2][None, :]

  grid = va[:, None, None] + vb[None, :, None] + vc[None, None, :]
  offsets = np.int_(grid.reshape(-1, 3))

  # Cartesian translation for every cell offset.
  cart_offsets = np.dot(offsets, cell)

  # Every atom position shifted by every offset: (atom_j, offset, 3).
  shifted_positions = positions[:, None] + cart_offsets[None, :]

  # Displacement vectors (atom_i, atom_j, offset, 3).
  displacements = shifted_positions - positions[:, None, None]

  # Squared distances (atom_i, atom_j, offset).
  d2 = np.sum(displacements**2, axis=3)

  # The gradient of sqrt is nan at r=0, so substitute ones under the sqrt
  # at the zero entries and mask them back to zero afterwards.
  is_zero = np.equal(d2, 0.0)
  safe_d2 = np.where(is_zero, np.ones_like(d2), d2)
  d = np.where(is_zero, np.zeros_like(d2), np.sqrt(safe_d2))

  distances = np.where(d < (cutoff_distance + skin), d, np.zeros_like(d))
  return distances, offsets
100 |
101 |
def get_neighbors(i, distances, offsets, oneway=False):
  """Get the indices and cell offsets of neighbors to atom i.

  Parameters
  ----------

  i: int, index of the atom to get neighbors for.
  distances: the (natoms, natoms, noffsets) array returned from
    `get_distances`; entries outside the cutoff are zero.
  offsets: the (noffsets, 3) unit-cell offsets returned from `get_distances`.
  oneway: unused. NOTE(review): accepted but never read; kept for interface
    compatibility.

  Returns
  -------
  indices: a list of indices for the neighbors corresponding to the index of
  the original atom list.
  offsets: a list of unit cell offsets to generate the position of the
  neighbor.
  """
  # A zero entry marks either the atom itself or a pair beyond the cutoff,
  # so any positive entry of row i is a neighbor. np.nonzero gives the
  # (atom_j, offset) index pairs in one pass instead of two np.where calls.
  neighbor_inds, offset_inds = np.nonzero(distances[i] > 0.0)
  return neighbor_inds, np.int_(offsets[offset_inds])
128 |
129 |
def get_neighbors_oneway(positions,
                         cell,
                         cutoff_radius,
                         skin=0.01,
                         strain=np.zeros((3, 3))):
  """A one-way neighbor list.

  Each pair of atoms appears only once: in-cell pairs are stored on the
  lower-index atom, and only half of the periodic image offsets are visited.

  Parameters
  ----------

  positions: atomic positions. array-like (natoms, 3)
  cell: unit cell. array-like (3, 3)
  cutoff_radius: Maximum radius to get neighbor distances for. float
  skin: A tolerance for the cutoff_radius. float
    NOTE(review): the skin is added to the *squared* cutoff below
    (cutoff_radius**2 + skin), not to the radius — confirm this is intended.
  strain: array-like (3, 3)

  Returns
  -------
  indices, offsets : two lists of length natoms; element a holds the
  neighbor atom indices of atom a and the integer unit-cell offsets that
  generate each neighbor position.

  Note: this function works with autograd, but it has been very difficult to
  translate to Tensorflow. The challenge is because TF treats iteration
  differently, and does not like when you try to modify variables outside the
  iteration scope.

  """

  # Apply the strain to the cell and positions.
  strain_tensor = np.eye(3) + strain
  cell = np.dot(strain_tensor, cell.T).T
  positions = np.dot(strain_tensor, positions.T).T
  inverse_cell = np.linalg.inv(cell)
  # h: characteristic spacings derived from the inverse cell; N: number of
  # cell repeats per axis needed to cover twice the cutoff.
  h = 1 / np.linalg.norm(inverse_cell, axis=0)
  N = np.floor(2 * cutoff_radius / h) + 1

  scaled = np.dot(positions, inverse_cell)
  scaled0 = np.dot(positions, inverse_cell) % 1.0

  # Integer offsets that wrap each atom into the home cell.
  # this is autograd compatible.
  offsets = np.int_((scaled0 - scaled).round())

  # Wrapped atomic positions.
  positions0 = positions + np.dot(offsets, cell)
  natoms = len(positions)
  indices = np.arange(natoms)

  # Offset grid: the first axis only runs over non-negative values; the
  # remaining half-space is excluded in offset_mapfn below so that each
  # periodic image pair is visited once.
  v0_range = np.arange(0, N[0] + 1)
  v1_range = np.arange(-N[1], N[1] + 1)
  v2_range = np.arange(-N[2], N[2] + 1)

  xhat = np.array([1, 0, 0])
  yhat = np.array([0, 1, 0])
  zhat = np.array([0, 0, 1])

  v0_range = v0_range[:, None] * xhat[None, :]
  v1_range = v1_range[:, None] * yhat[None, :]
  v2_range = v2_range[:, None] * zhat[None, :]

  # N is reused here as the (noffsets, 3) grid of integer cell offsets.
  N = (
      v0_range[:, None, None] + v1_range[None, :, None] +
      v2_range[None, None, :])

  N = N.reshape(-1, 3)

  # Per-atom accumulators, mutated by the closures below.
  neighbors = [np.empty(0, int) for a in range(natoms)]
  displacements = [np.empty((0, 3), int) for a in range(natoms)]

  def offset_mapfn(n):
    # Process one cell offset (n1, n2, n3) for all atoms.
    n1, n2, n3 = n
    # Skip the lexicographically negative half of the n1 == 0 plane so each
    # image pair is counted one way only.
    if n1 == 0 and (n2 < 0 or (n2 == 0 and n3 < 0)):
      return
    displacement = np.dot((n1, n2, n3), cell)

    def atoms_mapfn(a):
      # Distances from atom a to all wrapped atoms in this image.
      d = positions0 + displacement - positions0[a]
      i = indices[(d**2).sum(1) < (cutoff_radius**2 + skin)]
      # In the home cell, keep only higher-index partners (one-way).
      if n1 == 0 and n2 == 0 and n3 == 0:
        i = i[i > a]
      neighbors[a] = np.concatenate((neighbors[a], i))
      disp = np.empty((len(i), 3), int)
      disp[:] = (n1, n2, n3)
      # Correct for the wrapping applied to both atoms of the pair.
      disp += offsets[i] - offsets[a]
      displacements[a] = np.concatenate((displacements[a], disp))

    list(map(atoms_mapfn, range(natoms)))

  list(map(offset_mapfn, N))

  return neighbors, displacements
217 |
--------------------------------------------------------------------------------
/dap/tests/test_tf_utils.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Tests for tensorflow module."""
15 | import itertools
16 | import numpy as np
17 | import tensorflow as tf
18 |
19 | from dap.tf.utils import (tri, triu_indices, tril_indices, triu_indices_from,
20 | tril_indices_from, combinations,
21 | slices_values_to_sparse_tensor)
22 | import os
23 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
24 |
25 |
class TestTFUtils_tri(tf.test.TestCase):
  """Check dap.tf.utils.tri against numpy.tri for several shapes/diagonals."""

  def _check(self, expected, tft):
    """Evaluate the tf tensor and compare it elementwise with `expected`."""
    with self.test_session():
      self.assertTrue(np.all(expected == tft.eval()))

  def test_tri(self):
    self._check(np.tri(3, dtype=np.bool), tri(3))

  def test_above(self):
    self._check(np.tri(3, k=1, dtype=np.bool), tri(3, k=1))

  def test_below(self):
    self._check(np.tri(3, k=-1, dtype=np.bool), tri(3, k=-1))

  def test_notsquare(self):
    self._check(np.tri(3, 4, dtype=np.bool), tri(3, 4))

  def test_notsquare_above(self):
    self._check(np.tri(3, 4, k=1, dtype=np.bool), tri(3, 4, k=1))

  def test_notsquare_below(self):
    self._check(np.tri(3, 4, k=-1, dtype=np.bool), tri(3, 4, k=-1))
63 |
64 |
class TestTFUtils_triu(tf.test.TestCase):
  """Check dap.tf.utils.triu_indices against numpy.triu_indices."""

  def _check(self, expected, result):
    """Compare the (row, col) index tensors in `result` with `expected`."""
    rows, cols = result
    with self.test_session():
      self.assertTrue(np.all(expected[0] == rows.eval()))
      self.assertTrue(np.all(expected[1] == cols.eval()))

  def test_triu(self):
    self._check(np.triu_indices(3), triu_indices(3))

  def test_triu_k_over(self):
    self._check(np.triu_indices(3, k=1), triu_indices(3, k=1))

  def test_triu_k_under(self):
    self._check(np.triu_indices(3, k=-1), triu_indices(3, k=-1))

  def test_triu_nonsquare(self):
    self._check(np.triu_indices(3, m=4), triu_indices(3, m=4))

  def test_triu_nonsquare_long(self):
    self._check(np.triu_indices(3, m=2), triu_indices(3, m=2))
101 |
102 |
class TestTFUtils_tril(tf.test.TestCase):
  """Check that tril_indices matches np.tril_indices for several shapes."""

  def _check(self, n, **kwargs):
    # Compare the row and column index vectors element-wise against numpy.
    expected = np.tril_indices(n, **kwargs)
    rows, cols = tril_indices(n, **kwargs)
    with self.test_session():
      self.assertTrue(np.all(expected[0] == rows.eval()))
      self.assertTrue(np.all(expected[1] == cols.eval()))

  def test_tril(self):
    self._check(3)

  def test_tril_k_over(self):
    self._check(3, k=1)

  def test_tril_k_under(self):
    self._check(3, k=-1)

  def test_tril_nonsquare(self):
    self._check(3, m=4)

  def test_tril_nonsquare_long(self):
    self._check(3, m=2)
139 |
140 |
class TestTFUtils_triu_indices_from(tf.test.TestCase):
  """Check triu_indices_from against np.triu_indices_from."""

  def _check(self, **kwargs):
    # Compare both index vectors for a 3x3 array against numpy.
    square = np.zeros((3, 3))
    want_rows, want_cols = np.triu_indices_from(square, **kwargs)
    got_rows, got_cols = triu_indices_from(square, **kwargs)
    with self.test_session():
      self.assertTrue(np.all(want_rows == got_rows.eval()))
      self.assertTrue(np.all(want_cols == got_cols.eval()))

  def test_triu_indices_from(self):
    self._check()

  def test_triu_indices_from_kover(self):
    self._check(k=1)

  def test_triu_indices_from_kunder(self):
    self._check(k=-1)

  def test_triu_indices_from_non2d(self):
    # Inputs that are not 2-d must be rejected.
    cube = np.zeros((3, 3, 3))
    with self.test_session():
      with self.assertRaises(ValueError):
        triu_indices_from(cube)
177 |
178 |
class TestTFUtils_tril_indices_from(tf.test.TestCase):
  """Check tril_indices_from against np.tril_indices_from."""

  def _check(self, **kwargs):
    # Compare both index vectors for a 3x3 array against numpy.
    square = np.zeros((3, 3))
    want_rows, want_cols = np.tril_indices_from(square, **kwargs)
    got_rows, got_cols = tril_indices_from(square, **kwargs)
    with self.test_session():
      self.assertTrue(np.all(want_rows == got_rows.eval()))
      self.assertTrue(np.all(want_cols == got_cols.eval()))

  def test_tril_indices_from(self):
    self._check()

  def test_tril_indices_from_kover(self):
    self._check(k=1)

  def test_tril_indices_from_kunder(self):
    self._check(k=-1)

  def test_tril_indices_from_non2d(self):
    # Inputs that are not 2-d must be rejected.
    cube = np.zeros((3, 3, 3))
    with self.test_session():
      with self.assertRaises(ValueError):
        tril_indices_from(cube)
215 |
216 |
class TestTFUtils_combinations(tf.test.TestCase):
  """Check combinations against itertools.combinations."""

  def test_combinations_2(self):
    pool = [0, 1, 2, 3, 4]
    for size in [2, 3]:
      # itertools is the reference implementation for the expected output.
      expected = np.array(list(itertools.combinations(pool, size)))
      with self.test_session():
        actual = combinations(pool, size).eval()
        self.assertTrue(np.all(expected == actual))

  def test_combinations_non1d(self):
    # A 2-d input must be rejected.
    nested = [[0, 1, 2, 3, 4]]
    with self.assertRaises(ValueError):
      with self.test_session():
        combinations(nested, 2).eval()
232 |
233 |
class TestTFUtils_slices(tf.test.TestCase):
  """Check slices_values_to_sparse_tensor round-trips top_k slices."""

  def test(self):
    dense_input = [[1, 2, 3], [3, 2, 1], [2, 1, 3]]
    k = 2
    # top_k yields the k largest values per row plus their column indices.
    top_values, top_indices = tf.nn.top_k(dense_input, k)
    built = slices_values_to_sparse_tensor(top_indices, top_values, (3, 3))

    # Hand-written sparse tensor holding the same (index, value) pairs.
    expected = tf.SparseTensor([[0, 1], [0, 2], [1, 0], [1, 1], [2, 0], [2, 2]],
                               [2, 3, 3, 2, 2, 3], (3, 3))

    dense_built = tf.sparse_tensor_to_dense(built, validate_indices=False)
    dense_expected = tf.sparse_tensor_to_dense(
        expected,
        validate_indices=False,
    )

    with self.test_session():
      self.assertTrue(np.all((tf.equal(dense_built, dense_expected).eval())))
253 |
--------------------------------------------------------------------------------
/docs/dap.tf.lennardjones.org:
--------------------------------------------------------------------------------
1 | #+TITLE: Lennard-Jones in Tensorflow with gradient forces and stress
2 | #+AUTHOR: John Kitchin
3 |
4 |
5 | The main purpose of this document is to show, from start to finish, that you can define an energy model in Tensorflow and then get forces and stresses using tf.gradients. A second goal is to show that batch training is possible. Both of these goals have been achieved.
6 |
7 | A potential use of this code is to run molecular simulations, e.g. molecular dynamics, on accelerated hardware. I don't have a sense for how well it will perform for large numbers of atoms. The neighbor distances code is fully vectorized, but state of the art MD codes are even smarter. In a simulation, they use a skin, and only update the distance matrix when an atom has moved further than a skin tolerance (which could cause it to move across the cutoff radius). This code is not that clever.
8 |
9 | * The training database
10 |
11 | The [[./argon.db]] database contains 139 DFT calculations of Ar in different crystal structures at different volumes where the atoms have been rattled several times in each calculation. Here is a brief summary of the contents of the database.
12 |
13 | #+BEGIN_SRC python :results output org drawer
14 | import ase.db
15 |
16 | db = ase.db.connect('argon.db')
17 | data = db.select()
18 |
19 | count = 0
20 | structures = set()
21 | volumes = set()
22 |
23 | for row in data:
24 | structures.update([row.structure])
25 | volumes.update([float(row.volume)])
26 | count += 1
27 |
28 | print(f'Number of calculations = {count}')
29 | print(f'structures: {structures}')
30 | print(f'volumes: {volumes}')
31 | #+END_SRC
32 |
33 | #+RESULTS:
34 | :RESULTS:
35 | Number of calculations = 139
36 | structures: {'fcc', 'diamond', 'bcc', 'hcp', 'sc'}
37 | volumes: {64.923518, 97.46901300288376, 129.731256178007, 68.92099999999999, 69.09420649999996, 37.8434835, 71.05491046091358, 171.53224199999997, 235.2980000000001, 48.77799999999999, 50.243408999999986, 51.911500000000004, 313.181638, 91.73385100000003}
38 | :END:
39 |
40 | * Training the parameters on energies and forces
41 |
42 | Here, I train the model to a database of DFT calculations on Argon. I ran these calculations on my cluster at CMU. The arrays are padded as needed. We train on 50 of the data points here.
43 |
44 | #+BEGIN_SRC python :results output org drawer
45 | import warnings
46 | warnings.filterwarnings("ignore")
47 |
48 | import ase.db
49 | from dap.tf.lennardjones import *
50 |
51 | db = ase.db.connect('argon.db')
52 |
53 | N = 50
54 | ATOMS = [row.toatoms() for row in db.select(limit=N)]
55 |
56 | print('Atoms in set: ', set([len(atoms) for atoms in ATOMS]))
57 |
58 | maxnatoms = max([len(atoms) for atoms in ATOMS])
59 |
60 | POSITIONS = [row.positions for row in db.select(limit=N)]
61 | CELLS = [row.cell for row in db.select(limit=N)]
62 |
63 | ENERGIES = np.array([row.energy for row in db.select(limit=N)])
64 | FORCES = np.array([row.forces for row in db.select(limit=N)])
65 |
66 | # PADDING
67 | MASKS = [np.ones(maxnatoms, dtype=np.float64) for atoms in ATOMS]
68 | PADDED_POSITIONS = [np.zeros((maxnatoms, 3), dtype=np.float64)
69 | for atoms in ATOMS]
70 |
71 | PADDED_FORCES = [np.zeros((maxnatoms, 3), dtype=np.float64)
72 | for atoms in ATOMS]
73 |
74 | for i, atoms in enumerate(ATOMS):
75 | MASKS[i][len(atoms):] = 0.0
76 | PADDED_POSITIONS[i][:len(atoms),:] = atoms.positions
77 | PADDED_FORCES[i][:len(atoms),:] = atoms.get_forces()
78 |
79 | PADDED_FORCES = np.array(PADDED_FORCES)
80 |
81 | # Initial guesses. These are close to what I have previously fitted
82 | with tf.variable_scope("sigma", reuse=tf.AUTO_REUSE):
83 | sigma = tf.get_variable(
84 | "sigma",
85 | initializer=tf.constant(3.71, dtype=tf.float64))
86 |
87 | with tf.variable_scope("epsilon", reuse=tf.AUTO_REUSE):
88 | epsilon = tf.get_variable(
89 | "epsilon",
90 | initializer=tf.constant(0.0058, dtype=tf.float64))
91 |
92 |
93 | predicted_energies = energy_batch(PADDED_POSITIONS, CELLS, MASKS)
94 | e_errs = tf.convert_to_tensor(ENERGIES) - predicted_energies
95 |
96 | predicted_forces = forces_batch(PADDED_POSITIONS, CELLS, MASKS)
97 | f_errs = tf.square(tf.convert_to_tensor(PADDED_FORCES) - predicted_forces)
98 |
99 | loss = tf.reduce_mean(tf.square(e_errs)) + tf.reduce_mean(f_errs)
100 |
101 | optimizer = tf.train.AdamOptimizer(3e-4)
102 | train = optimizer.minimize(loss)
103 |
104 | init = tf.global_variables_initializer()
105 | sess = tf.Session()
106 | sess.run(init)
107 |
108 | for i in range(1000):
109 | if i % 200 and sess.run(loss) < 1e-5:
110 | print(sess.run([epsilon, sigma, loss]))
111 | break
112 |
113 | sess.run(train)
114 | if i % 200 == 0:
115 | print(sess.run([epsilon, sigma, loss]))
116 | #+END_SRC
117 |
118 | #+RESULTS:
119 | :RESULTS:
120 | Atoms in set: {1, 2}
121 | [0.0055000025750425409, 3.7102998552198923, 1.2074265234698833e-05]
122 | [0.0054327672115345414, 3.7138452508823128, 9.5131816405131827e-06]
123 | :END:
124 |
125 | Now we can use the parameters above to predict new energies.
126 |
127 | #+BEGIN_SRC python :results output org drawer
128 | from dap.tf.lennardjones import *
129 | import ase.db
130 |
131 | db = ase.db.connect('argon.db')
132 |
133 |
134 | ATOMS = [row.toatoms() for row in db.select()]
135 |
136 | STRUCTURES = [row.structure for row in db.select()]
137 | # Indices for plotting
138 | train = np.arange(50)
139 | fcc = [i for i in range(len(STRUCTURES)) if STRUCTURES[i] == 'fcc']
140 | bcc = [i for i in range(len(STRUCTURES)) if STRUCTURES[i] == 'bcc']
141 | hcp = [i for i in range(len(STRUCTURES)) if STRUCTURES[i] == 'hcp']
142 | sc = [i for i in range(len(STRUCTURES)) if STRUCTURES[i] == 'sc']
143 | diamond = [i for i in range(len(STRUCTURES)) if STRUCTURES[i] == 'diamond']
144 |
145 | maxnatoms = max([len(atoms) for atoms in ATOMS])
146 |
147 | POSITIONS = [row.positions for row in db.select()]
148 | CELLS = [row.cell for row in db.select()]
149 |
150 | ENERGIES = np.array([row.energy for row in db.select()])
151 |
152 | # PADDING
153 | MASKS = [np.ones(maxnatoms, dtype=np.float64) for atoms in ATOMS]
154 | PADDED_POSITIONS = [np.zeros((maxnatoms, 3), dtype=np.float64)
155 | for atoms in ATOMS]
156 |
157 | for i, atoms in enumerate(ATOMS):
158 | MASKS[i][len(atoms):] = 0.0
159 | PADDED_POSITIONS[i][:len(atoms),:] = atoms.positions
160 |
161 |
162 | # These are copied from the fitting results above.
163 | with tf.variable_scope("sigma", reuse=tf.AUTO_REUSE):
164 | sigma = tf.get_variable(
165 | "sigma",
166 | initializer=tf.constant(3.7138452508823128, dtype=tf.float64))
167 |
168 | with tf.variable_scope("epsilon", reuse=tf.AUTO_REUSE):
169 | epsilon = tf.get_variable(
170 | "epsilon",
171 | initializer=tf.constant(0.0054327672115345414, dtype=tf.float64))
172 |
173 |
174 | predicted_energies = energy_batch(PADDED_POSITIONS, CELLS, MASKS)
175 | e_errs = tf.convert_to_tensor(ENERGIES) - predicted_energies
176 |
177 | init = tf.global_variables_initializer()
178 | sess = tf.Session()
179 | sess.run(init)
180 |
181 | pe, ee = sess.run([predicted_energies, e_errs])
182 |
183 | import matplotlib.pyplot as plt
184 | plt.plot(ENERGIES[fcc], pe[fcc], 'bo', label='fcc')
185 | plt.plot(ENERGIES[bcc], pe[bcc], 'go', label='bcc')
186 | plt.plot(ENERGIES[hcp], pe[hcp], 'ro', label='hcp')
187 | plt.plot(ENERGIES[sc], pe[sc], 'ys', label='sc')
188 | plt.plot(ENERGIES[diamond], pe[diamond], 'ks', label='diamond')
189 | plt.plot(ENERGIES[train], pe[train], 'w.', label='train')
190 |
191 | plt.legend(loc='best')
192 | plt.xlabel('DFT energies (eV)')
193 | plt.ylabel('LJ energies (eV)')
194 | plt.savefig('lj-vs-dft.png')
195 | #+END_SRC
196 |
197 | #+RESULTS:
198 | :RESULTS:
199 | :END:
200 |
201 | [[./lj-vs-dft.png]]
202 |
203 | This figure shows the training data (white circles on top of the symbols) was mostly on the fcc and bcc structures and some of the hcp structures. The energies are not very spread out, and the variations are largely due to the changes in volume. It is evident here that the most close-packed structures (fcc, bcc and hcp) fall closest to parity, while the more open and directional structures (sc and diamond) deviate. This is expected for a simple pair-wise potential with no angular dependence.
204 |
205 | * Tensorboard graphs
206 |
207 | These blocks will launch a Tensorboard visualization of the graphs for each function.
208 |
209 | ** energy
210 | #+BEGIN_SRC python :results output org drawer
211 | from ase.build import bulk
212 | from dap.tf.visualize import show_graph
213 | import dap.tf.lennardjones as lj
214 |
215 | atoms = bulk('Ar', 'fcc', a=3)
216 |
217 | e = lj.energy(atoms.positions, atoms.cell)
218 |
219 | show_graph()
220 | #+END_SRC
221 |
222 | #+RESULTS:
223 | :RESULTS:
224 | Created new window in existing browser session.
225 | :END:
226 |
227 | ** forces
228 |
229 | #+BEGIN_SRC python :results output org drawer
230 | from ase.build import bulk
231 | from dap.tf.visualize import show_graph
232 | import dap.tf.lennardjones as lj
233 |
234 | atoms = bulk('Ar', 'fcc', a=3)
235 |
236 | e = lj.forces(atoms.positions, atoms.cell)
237 |
238 | show_graph()
239 | #+END_SRC
240 |
241 | #+RESULTS:
242 | :RESULTS:
243 | Created new window in existing browser session.
244 | :END:
245 |
246 | ** stress
247 |
248 | #+BEGIN_SRC python :results output org drawer
249 | from ase.build import bulk
250 | from dap.tf.visualize import show_graph
251 | import dap.tf.lennardjones as lj
252 |
253 | atoms = bulk('Ar', 'fcc', a=3)
254 |
255 | e = lj.stress(atoms.positions, atoms.cell)
256 |
257 | show_graph()
258 | #+END_SRC
259 |
260 | #+RESULTS:
261 | :RESULTS:
262 | Created new window in existing browser session.
263 | :END:
264 |
265 |
266 | * The Lennard Jones calculator
267 |
268 |
269 | #+BEGIN_SRC python :results output org drawer
270 | from dap.tf.lennardjones import LennardJones
271 | from ase.build import bulk
272 |
273 |
274 | atoms = bulk('Ar', 'fcc', a=2.5).repeat((2, 1, 1))
275 | atoms.rattle()
276 | atoms.set_calculator(LennardJones())
277 |
278 | print(atoms.get_potential_energy())
279 | print(atoms.get_forces())
280 | print(atoms.get_stress())
281 | #+END_SRC
282 |
283 | #+RESULTS:
284 | :RESULTS:
285 | -1.5205998507878264
286 | [[-0.0041116 0.00362234 0.00403562]
287 | [ 0.0041116 -0.00362234 -0.00403562]]
288 | [ 4.01225582e-01 4.01225198e-01 4.01225601e-01 -1.81750310e-07
289 | -5.36816118e-07 9.27398348e-08]
290 | :END:
291 |
292 |
293 | * Class LJ
294 |
295 |
296 | #+BEGIN_SRC python :results output org drawer
297 | from dap.tf.lennardjones import LennardJones
298 |
299 | lj = LennardJones()
300 |
301 | lj.train('Ar', 'argon.db')
302 | #+END_SRC
303 |
304 | #+RESULTS:
305 | :RESULTS:
306 | :END:
307 |
--------------------------------------------------------------------------------
/dap/tests/test_tf_neighborlist.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Tests for the tensorflow neighborlist module.
15 |
16 | pydoc:dap.tf.neighborlist
17 | """
18 |
19 | import numpy as np
20 | import tensorflow as tf
21 | from ase.build import bulk
22 | from ase.neighborlist import NeighborList
23 |
24 | # import sys
25 | # sys.path.insert(0, '.')
26 | # from .ase_nl import NeighborList
27 |
28 | from dap.tf.neighborlist import (get_distances, get_neighbors_oneway)
import os
# Quiet the TensorFlow C++ backend before any graph work; level '3'
# suppresses all but fatal log messages.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
31 |
32 |
class TestNeighborlist(tf.test.TestCase):
  """Tests comparing the TF version and ASE neighborlist implementations.

  """

  def test_basic(self):
    """Basic neighborlist test in TF"""
    a = 3.6
    # Rc is chosen so 2 * Rc is the fcc nearest-neighbor distance for this
    # lattice constant, i.e. only the first shell is counted.
    Rc = a / np.sqrt(2) / 2
    atoms = bulk('Cu', 'fcc', a=a)

    nl = NeighborList(
        [Rc] * len(atoms), skin=0.01, self_interaction=False, bothways=True)
    nl.update(atoms)

    distances = get_distances({
        'cutoff_radius': 2 * Rc
    }, atoms.positions, atoms.cell, np.ones((len(atoms), 1)))

    # A neighbor is any entry within the cutoff; exactly-zero distances are
    # the atom itself and are masked out.
    mask = (distances <= 2 * Rc) & (distances > 0)
    tf_nneighbors = tf.reduce_sum(tf.cast(mask, tf.int32), axis=[1, 2])

    with self.test_session():
      for i, atom in enumerate(atoms):
        inds, disps = nl.get_neighbors(i)
        ase_nneighbors = len(inds)
        # Per-atom neighbor counts must agree with ase.
        self.assertEqual(ase_nneighbors, tf_nneighbors.eval()[i])

  def test_structure_repeats(self):
    'Check several structures and repeats for consistency with ase.'
    for repeat in ((1, 1, 1), (2, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 3),
                   (4, 1, 1)):
      for structure in ('fcc', 'bcc', 'sc', 'hcp', 'diamond'):
        a = 3.6
        # Float tolerances are tricky. The 0.01 in the next line is important.
        # This test fails without it due to subtle differences in computed
        # positions.
        Rc = 2 * a + 0.01
        atoms = bulk('Cu', structure, a=a).repeat(repeat)
        nl = NeighborList(
            [Rc] * len(atoms), skin=0.0, self_interaction=False, bothways=True)
        nl.update(atoms)
        distances = get_distances({
            'cutoff_radius': 2 * Rc
        }, atoms.positions, atoms.cell, np.ones((len(atoms), 1)))

        mask = (distances <= 2 * Rc) & (distances > 0)
        tf_nneighbors = tf.reduce_sum(tf.cast(mask, tf.int32), axis=[1, 2])

        with self.test_session():
          for i, atom in enumerate(atoms):
            inds, disps = nl.get_neighbors(i)
            ase_nneighbors = len(inds)
            self.assertEqual(ase_nneighbors, tf_nneighbors.eval()[i])

            # These are the indices of each neighbor in the atom list.
            tf_inds = tf.where(mask[i])[:, 0].eval()
            # Order-insensitive comparison: ase and tf may list neighbors in
            # different orders.
            self.assertCountEqual(inds, tf_inds)

  def test_atom_types(self):
    """Tests if the neighbor indices agree with ase.

    This is important to find the
    chemical element associated with a specific neighbor.

    """
    a = 3.6
    Rc = a / np.sqrt(2) / 2 + 0.01

    atoms = bulk('Cu', 'fcc', a=a).repeat((3, 1, 1))
    atoms[1].symbol = 'Au'

    nl = NeighborList(
        [Rc] * len(atoms), skin=0.01, self_interaction=False, bothways=True)
    nl.update(atoms)
    nns = [nl.get_neighbors(i) for i in range(len(atoms))]
    # ase reference: number of Au neighbors (atomic number 79) for each atom.
    ase_nau = [np.sum(atoms.numbers[inds] == 79) for inds, offs in nns]

    # 1 where the atom is Au, 0 otherwise.
    au_mask = tf.convert_to_tensor(atoms.numbers == 79, tf.int32)

    distances = get_distances({
        'cutoff_radius': 2 * Rc
    }, atoms.positions, atoms.cell)
    mask = (distances <= (2 * Rc)) & (distances > 0)

    # Weight each in-cutoff entry by whether the neighbor is Au, then sum
    # over the neighbor axes to get a per-atom Au-neighbor count.
    nau = tf.reduce_sum(tf.cast(mask, tf.int32) * au_mask[:, None], [1, 2])

    with self.test_session():
      self.assertTrue(np.all(ase_nau == nau.eval()))
122 |
123 |
class TestOneWayNeighborlist(tf.test.TestCase):
  """These tests are a pain.

  The actual neighbors are pretty sensitive to the unit cell, and it is hard to
  get exact agreement on the number of neighbors.

  """

  def test0(self):
    """Single bcc cell: oneway list must match ase with bothways=False."""
    import warnings
    warnings.filterwarnings('ignore')

    a = 3.6
    Rc = 5
    atoms = bulk('Cu', 'bcc', a=a).repeat((1, 1, 1))
    # Rattle to break symmetry so neighbor ordering is well defined.
    atoms.rattle(0.02)
    nl = NeighborList(
        [Rc] * len(atoms), skin=0.0, self_interaction=False, bothways=False)
    nl.update(atoms)

    inds, dists, N = get_neighbors_oneway(
        atoms.positions, atoms.cell, 2 * Rc, skin=0.0)

    with self.test_session() as sess:
      inds, dists, N = sess.run([inds, dists, N])

    for i in range(len(atoms)):
      ase_inds, ase_offs = nl.get_neighbors(i)

      # Each row of inds pairs a central atom (x[0]) with a neighbor (x[1]),
      # and N holds the corresponding cell offsets row-for-row.
      these_inds = np.array([x[1] for x in inds if x[0] == i])
      these_offs = N[np.where(inds[:, 0] == i)]

      self.assertAllClose(ase_inds, these_inds)
      self.assertAllClose(ase_offs, these_offs)

  def test_molecules(self):
    """Tests oneway list on a bunch of molecules.

    These are in large unit cells, so practically they don't have periodic
    boundary conditions.

    """
    from ase.build import molecule
    from ase.collections import g2

    Rc = 2.0
    # Build the graph once with placeholders and feed each molecule into it.
    pos = tf.placeholder(tf.float64, [None, 3])
    cell = tf.placeholder(tf.float64, [3, 3])
    inds, dists, N = get_neighbors_oneway(pos, cell, 2 * Rc, skin=0.0)

    with self.test_session() as sess:
      for mlc in g2.names:
        atoms = molecule(mlc)
        atoms.set_cell((50.0, 50.0, 50.0))
        atoms.center()

        # Single atoms have no neighbors; nothing to compare.
        if len(atoms) < 2:
          continue

        nl = NeighborList(
            [Rc] * len(atoms), skin=0.0, bothways=False, self_interaction=0)
        nl.update(atoms)

        _inds, _N = sess.run(
            [inds, N], feed_dict={
                pos: atoms.positions,
                cell: atoms.cell
            })

        for i in range(len(atoms)):
          ase_inds, ase_offs = nl.get_neighbors(i)

          these_inds = np.array([x[1] for x in _inds if x[0] == i])
          these_offs = _N[np.where(_inds[:, 0] == i)]

          # Check indices are the same
          self.assertAllClose(ase_inds, these_inds)

          # Check offsets are the same
          if ase_offs.shape[0] > 0:
            self.assertAllClose(ase_offs, these_offs)

  def test_structure_repeats(self):
    'Check several structures and repeats for consistency with ase.'
    import warnings
    warnings.filterwarnings('ignore')

    import numpy as np
    np.set_printoptions(precision=3, suppress=True)

    a = 3.6
    # Large cutoff so multiple neighbor shells are exercised; the 0.01 guards
    # against float-tolerance misses at exactly 2a.
    Rc = 2 * a + 0.01

    pos = tf.placeholder(tf.float64, [None, 3])
    cell = tf.placeholder(tf.float64, [3, 3])
    inds, dists, N = get_neighbors_oneway(pos, cell, 2 * Rc, skin=0.0)

    for repeat in ((1, 1, 1), (2, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 3)):
      for structure in ('fcc', 'bcc', 'sc', 'hcp', 'diamond'):
        print('\n', structure, repeat, '\n')
        print('============================\n')
        atoms = bulk('Cu', structure, a=a).repeat(repeat)
        atoms.rattle(0.02)
        nl = NeighborList(
            [Rc] * len(atoms), skin=0.0, self_interaction=False, bothways=False)
        nl.update(atoms)

        with tf.Session() as sess:

          _inds, _dists, _N = sess.run(
              [inds, dists, N],
              feed_dict={
                  pos: atoms.positions,
                  cell: atoms.cell
              })

          for i in range(len(atoms)):
            ase_inds, ase_offs = nl.get_neighbors(i)

            these_inds = np.array([x[1] for x in _inds if x[0] == i])
            these_offs = np.array(
                [offset for x, offset in zip(_inds, _N) if x[0] == i])

            # Check indices are the same
            #print('Indices are equal: ', np.all(ase_inds == these_inds))
            #print(ase_inds)
            #print(these_inds)
            self.assertAllClose(ase_inds, these_inds)

            # Check offsets are the same
            if ase_offs.shape[0] > 0:
              #print(ase_offs[1], these_offs[1])
              #print('\n\nOffset differences:\n', ase_offs - these_offs, '\n\n')
              #print('****** offsets:\n',
              self.assertAllClose(ase_offs, these_offs)

  def test_structure_repeats_2(self):
    """This test was put in to debug the LJ oneway calculator.

    I noticed that for some unit cells the forces were not correct, and this is
    to check that it is not due to the neighborlist. If this test is passing,
    the neighborlist from tf and the one from ase are in agreement.

    """
    import warnings
    warnings.filterwarnings('ignore')

    import numpy as np
    np.set_printoptions(precision=3, suppress=True)

    Rc = 3.0

    pos = tf.placeholder(tf.float64, [None, 3])
    cell = tf.placeholder(tf.float64, [3, 3])
    inds, dists, N = get_neighbors_oneway(pos, cell, 2 * Rc, skin=0.0)

    for repeat in ((1, 1, 1), (2, 1, 1), (1, 2, 1), (1, 1, 2), (1, 2, 3)):
      for structure in ('fcc', 'bcc', 'sc', 'hcp', 'diamond'):
        # Two lattice constants to vary the cell size relative to the cutoff.
        for a in [3.0, 4.0]:
          print('\n', structure, repeat, a, '\n')
          print('============================\n')

          atoms = bulk('Ar', structure, a=a).repeat(repeat)
          atoms.rattle()

          nl = NeighborList(
              [Rc] * len(atoms),
              skin=0.0,
              self_interaction=False,
              bothways=False)
          nl.update(atoms)

          with tf.Session() as sess:

            _inds, _dists, _N = sess.run(
                [inds, dists, N],
                feed_dict={
                    pos: atoms.positions,
                    cell: atoms.cell
                })

            for i in range(len(atoms)):
              ase_inds, ase_offs = nl.get_neighbors(i)

              these_inds = np.array([x[1] for x in _inds if x[0] == i])
              these_offs = _N[np.where(_inds[:, 0] == i)]

              self.assertAllClose(ase_inds, these_inds)

              # Check offsets are the same
              if ase_offs.shape[0] > 0:
                self.assertAllClose(ase_offs, these_offs)
316 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
--------------------------------------------------------------------------------
/docs/dap.py.bpnn.org:
--------------------------------------------------------------------------------
1 | #+TITLE: dap.py.bpnn
2 |
3 | This code is a pure python, vectorized implementation of the main pieces of a BPNN. It exists because it was useful to develop them in pure python, easy to benchmark and validate them against Amp, and potentially useful later.
4 |
5 | I did not implement forces in this module, because it would require analytical derivatives, and the whole point of this project is to avoid those. If these make it into the dap.ag module, there might be forces then.
6 |
7 |
8 | * Amp benchmark
9 |
10 | Here is a water fingerprint for radial functions in Amp.
11 |
12 | #+BEGIN_SRC python :results output org drawer
13 | import os
14 |
15 | os.system('rm -fr amp-data*')
16 |
17 | from ase.structure import molecule
18 | atoms = molecule('H2O')
19 |
20 | from amp.descriptor.gaussian import *
21 |
22 | sf = {'H': make_symmetry_functions(['H', 'O'], 'G2', [0.05, 0.1]),
23 | 'O': make_symmetry_functions(['H', 'O'], 'G2', [0.05, 0.1])}
24 |
25 | descriptor = Gaussian(Gs=sf)
26 |
27 | from amp.utilities import hash_images
28 | images = hash_images([atoms], ordered=True)
29 | descriptor.calculate_fingerprints(images)
30 |
31 | fparray = []
32 | for index, hash in enumerate(images.keys()):
33 | for fp in descriptor.fingerprints[hash]:
34 | fparray += [fp[1]]
35 | print(np.array(fparray))
36 | os.system('rm -fr amp-data*')
37 | #+END_SRC
38 |
39 | #+RESULTS:
40 | :RESULTS:
41 | [[1.89031441 0. 1.88821695 0. ]
42 | [0.867586 0.9451572 0.86519688 0.94410847]
43 | [0.867586 0.9451572 0.86519688 0.94410847]]
44 | :END:
45 |
46 | Here is a water fingerprint for angular functions in amp
47 |
48 | #+BEGIN_SRC python :results output org drawer
49 | import os
50 |
51 | os.system('rm -fr amp-data*')
52 |
53 | from ase.structure import molecule
54 | atoms = molecule('H2O')
55 |
56 | from amp.descriptor.gaussian import *
57 |
58 | sf = {'H': make_symmetry_functions(['H', 'O'], 'G4', [0.05], zetas=[1], gammas=[+1]),
59 | 'O': make_symmetry_functions(['H', 'O'], 'G4', [0.05], zetas=[1], gammas=[+1])}
60 |
61 | for el in sf:
62 | print(el)
63 | for f in sf[el]:
64 | print(f' {f}')
65 |
66 | descriptor = Gaussian(Gs=sf)
67 |
68 | from amp.utilities import hash_images
69 | images = hash_images([atoms], ordered=True)
70 | descriptor.calculate_fingerprints(images)
71 |
72 | fparray = []
73 | for index, hash in enumerate(images.keys()):
74 | for fp in descriptor.fingerprints[hash]:
75 | fparray += [fp[1]]
76 | print(np.array(fparray))
77 | os.system('rm -fr amp-data*')
78 | #+END_SRC
79 |
80 | #+RESULTS:
81 | :RESULTS:
82 | H
83 | {'type': 'G4', 'elements': ['H', 'H'], 'eta': 0.05, 'gamma': 1, 'zeta': 1}
84 | {'type': 'G4', 'elements': ['H', 'O'], 'eta': 0.05, 'gamma': 1, 'zeta': 1}
85 | {'type': 'G4', 'elements': ['O', 'O'], 'eta': 0.05, 'gamma': 1, 'zeta': 1}
86 | O
87 | {'type': 'G4', 'elements': ['H', 'H'], 'eta': 0.05, 'gamma': 1, 'zeta': 1}
88 | {'type': 'G4', 'elements': ['H', 'O'], 'eta': 0.05, 'gamma': 1, 'zeta': 1}
89 | {'type': 'G4', 'elements': ['O', 'O'], 'eta': 0.05, 'gamma': 1, 'zeta': 1}
90 | [[0.58753778 0. 0. ]
91 | [0. 1.38576822 0. ]
92 | [0. 1.38576822 0. ]]
93 | :END:
94 |
95 | * dap.py.bpnn
96 |
97 | We take a different approach to computing fingerprints than Amp in using a fully vectorized approach. This style is challenging to implement, so we use Amp to benchmark and validate our code.
98 |
99 | ** Cosine cutoff function
100 |
101 | A critical difference between dap and Amp is that we compute all the data for every atom in a multidimensional array. This requires us to remember some things.
102 |
103 | 1. The array may have more "rows" than atoms, and then you must provide an atom_mask
104 |
105 | #+BEGIN_SRC python :results output org drawer
106 | import numpy as np
107 | from dap.py.bpnn import cosine_cutoff, G2
108 | from dap.ag.neighborlist import get_distances
109 |
110 | from ase.structure import molecule
111 | atoms = molecule('H2O')
112 | atoms.cell = 100 * np.eye(3)
113 | print(atoms)
114 |
115 | positions = atoms.positions
116 | cell = atoms.cell
117 | atom_mask = [[1] for atom in atoms]
118 |
119 | config = {'cutoff_radius': 6.5}
120 | d = get_distances(positions, cell, config['cutoff_radius'])
121 |
122 | print(cosine_cutoff(config, d, atom_mask))
123 | #+END_SRC
124 |
125 | #+RESULTS:
126 | :RESULTS:
127 | Atoms(symbols='OH2', pbc=False, cell=[100.0, 100.0, 100.0])
128 | [[[0. 0. 0. 0. 0. 0.
129 | 0. 0. 0. 0. 0. 0.
130 | 0. 0. 0. 0. 0. 0. ]
131 | [0. 0. 0. 0. 0. 0.
132 | 0. 0. 0. 0. 0. 0.
133 | 0. 0.9462071 0. 0. 0. 0. ]
134 | [0. 0. 0. 0. 0. 0.
135 | 0. 0. 0. 0. 0. 0.
136 | 0. 0.9462071 0. 0. 0. 0. ]]
137 |
138 | [[0. 0. 0. 0. 0. 0.
139 | 0. 0. 0. 0. 0. 0.
140 | 0. 0.9462071 0. 0. 0. 0. ]
141 | [0. 0. 0. 0. 0. 0.
142 | 0. 0. 0. 0. 0. 0.
143 | 0. 0. 0. 0. 0. 0. ]
144 | [0. 0. 0. 0. 0. 0.
145 | 0. 0. 0. 0. 0. 0.
146 | 0. 0.86998172 0. 0. 0. 0. ]]
147 |
148 | [[0. 0. 0. 0. 0. 0.
149 | 0. 0. 0. 0. 0. 0.
150 | 0. 0.9462071 0. 0. 0. 0. ]
151 | [0. 0. 0. 0. 0. 0.
152 | 0. 0. 0. 0. 0. 0.
153 | 0. 0.86998172 0. 0. 0. 0. ]
154 | [0. 0. 0. 0. 0. 0.
155 | 0. 0. 0. 0. 0. 0.
156 | 0. 0. 0. 0. 0. 0. ]]]
157 | :END:
158 |
Here is an example of using a mask. There are three atoms in the molecule, but the positions array has four "rows", perhaps because this is part of a batch where the largest molecule has 4 atoms, and we need all the arrays to be the same size. We provide a mask with three ones and a zero in the last row, which masks all the invalid atom distances to zero.
160 |
161 | #+BEGIN_SRC python :results output org drawer
162 | import numpy as np
163 | from dap.py.bpnn import cosine_cutoff, G2
164 | from dap.ag.neighborlist import get_distances
165 |
166 | from ase.structure import molecule
167 | atoms = molecule('H2O')
168 | atoms.cell = 100 * np.eye(3)
169 | print(atoms)
170 |
171 | positions = np.zeros((4, 3))
172 | positions[0:3, :] = atoms.positions
173 | cell = atoms.cell
174 | atom_mask = np.ones((4, 1))
175 | atom_mask[-1] = 0
176 |
177 | config = {'cutoff_radius': 6.5}
178 | d = get_distances(positions, cell, config['cutoff_radius'])
179 |
180 | print(cosine_cutoff(config, d, atom_mask))
181 | #+END_SRC
182 |
183 | #+RESULTS:
184 | :RESULTS:
185 | Atoms(symbols='OH2', pbc=False, cell=[100.0, 100.0, 100.0])
186 | [[[0. 0. 0. 0. 0. 0.
187 | 0. 0. 0. 0. 0. 0.
188 | 0. 0. 0. 0. 0. 0. ]
189 | [0. 0. 0. 0. 0. 0.
190 | 0. 0. 0. 0. 0. 0.
191 | 0. 0.9462071 0. 0. 0. 0. ]
192 | [0. 0. 0. 0. 0. 0.
193 | 0. 0. 0. 0. 0. 0.
194 | 0. 0.9462071 0. 0. 0. 0. ]
195 | [0. 0. 0. 0. 0. 0.
196 | 0. 0. 0. 0. 0. 0.
197 | 0. 0. 0. 0. 0. 0. ]]
198 |
199 | [[0. 0. 0. 0. 0. 0.
200 | 0. 0. 0. 0. 0. 0.
201 | 0. 0.9462071 0. 0. 0. 0. ]
202 | [0. 0. 0. 0. 0. 0.
203 | 0. 0. 0. 0. 0. 0.
204 | 0. 0. 0. 0. 0. 0. ]
205 | [0. 0. 0. 0. 0. 0.
206 | 0. 0. 0. 0. 0. 0.
207 | 0. 0.86998172 0. 0. 0. 0. ]
208 | [0. 0. 0. 0. 0. 0.
209 | 0. 0. 0. 0. 0. 0.
210 | 0. 0. 0. 0. 0. 0. ]]
211 |
212 | [[0. 0. 0. 0. 0. 0.
213 | 0. 0. 0. 0. 0. 0.
214 | 0. 0.9462071 0. 0. 0. 0. ]
215 | [0. 0. 0. 0. 0. 0.
216 | 0. 0. 0. 0. 0. 0.
217 | 0. 0.86998172 0. 0. 0. 0. ]
218 | [0. 0. 0. 0. 0. 0.
219 | 0. 0. 0. 0. 0. 0.
220 | 0. 0. 0. 0. 0. 0. ]
221 | [0. 0. 0. 0. 0. 0.
222 | 0. 0. 0. 0. 0. 0.
223 | 0. 0. 0. 0. 0. 0. ]]
224 |
225 | [[0. 0. 0. 0. 0. 0.
226 | 0. 0. 0. 0. 0. 0.
227 | 0. 0. 0. 0. 0. 0. ]
228 | [0. 0. 0. 0. 0. 0.
229 | 0. 0. 0. 0. 0. 0.
230 | 0. 0. 0. 0. 0. 0. ]
231 | [0. 0. 0. 0. 0. 0.
232 | 0. 0. 0. 0. 0. 0.
233 | 0. 0. 0. 0. 0. 0. ]
234 | [0. 0. 0. 0. 0. 0.
235 | 0. 0. 0. 0. 0. 0.
236 | 0. 0. 0. 0. 0. 0. ]]]
237 | :END:
238 |
239 | ** G2 function
240 |
241 | The G2 function is adapted from the one used in Amp. It also differs in the use of vectorization, and utilizes an atom_mask and species_masks. The G2 function returns a function that computes the fingerprint on each atom as a column vector.
242 |
243 | #+BEGIN_SRC python :results output org drawer
244 | import numpy as np
245 | from dap.py.bpnn import cosine_cutoff, G2
246 | from dap.ag.neighborlist import get_distances
247 |
248 | from ase.structure import molecule
249 | atoms = molecule('H2O')
250 | atoms.cell = 100 * np.eye(3)
251 | print(atoms)
252 |
253 | positions = atoms.positions
254 | cell = atoms.cell
255 | atom_mask = [[1] for atom in atoms]
256 |
257 | numbers = list(np.unique(atoms.numbers))
258 |
259 | species_mask = np.stack([[atom.number == el for atom in atoms]
260 | for el in numbers], axis=1).astype(int)
261 |
262 | config = {'cutoff_radius': 6.5}
263 | d = get_distances(positions, cell, config['cutoff_radius'])
264 |
265 | g0 = G2(0, 0.05, 0.0)
266 | g1 = G2(1, 0.05, 0.0)
267 | print(np.concatenate((g0(config, d, atom_mask, species_mask),
268 | g1(config, d, atom_mask, species_mask)), axis=1))
269 | #+END_SRC
270 |
271 | #+RESULTS:
272 | :RESULTS:
273 | Atoms(symbols='OH2', pbc=False, cell=[100.0, 100.0, 100.0])
274 | [[1.89031441 0. ]
275 | [0.867586 0.9451572 ]
276 | [0.867586 0.9451572 ]]
277 | :END:
278 |
--------------------------------------------------------------------------------
/dap/tf/neighborlist.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """Neighborlist functions for tensorflow."""
15 |
16 | from collections import namedtuple
17 | import tensorflow as tf
18 | import numpy as np
19 |
20 |
def get_distances(config, positions, cell, atom_mask=None):
  """Get distances to neighboring atoms with periodic boundary conditions.

  The way this function works is it tiles a volume with unit cells to at least
  fill a sphere with a radius of config['cutoff_radius']. That means some
  atoms will be outside the cutoff radius. Those are included in the results.
  Then we get distances to all atoms in the tiled volume. This is always the
  same number for every atom, so we have consistent sized arrays.

  Args:
    config: A dictionary containing 'cutoff_radius' with a float value, the
      radius we want neighboring atoms within.
    positions: array-like or Tensor shape=(numatoms, 3)
      Array of cartesian coordinates of atoms in a unit cell.
    cell: array-like shape=(3, 3)
      Array of unit cell vectors in cartesian basis. Each row is a unit cell
      vector.
    atom_mask: array-like shape=(numatoms, 1)
      ones for atoms, zero for padded positions. If None, defaults to all
      ones.

  Returns:
    distances: shape=(maxnatoms, maxnatoms, nunitcells) containing the
    distances between all pairs of atoms in the tiled volume. Distances to
    masked (padded) atoms are zeroed out.

  Related
  -------

  pydoc:pymatgen.core.lattice.Lattice.get_points_in_sphere

  """
  with tf.name_scope('get_distances'):
    positions = tf.convert_to_tensor(positions)
    cell = tf.convert_to_tensor(cell)

    # With no mask provided, every row of positions is treated as a real atom.
    if atom_mask is None:
      natoms = positions.get_shape()[0]
      atom_mask = tf.ones((natoms, 1), dtype=cell.dtype)
    else:
      atom_mask = tf.convert_to_tensor(atom_mask, dtype=cell.dtype)
    cutoff_radius = tf.convert_to_tensor(
        config['cutoff_radius'], dtype=cell.dtype)
    # Next we get the inverse unit cell, which will be used to compute the
    # unit cell offsets required to tile space inside the sphere.
    inverse_cell = tf.matrix_inverse(cell)

    # Fractional coordinates wrapped into [0, 1) (tf.mod handles atoms that
    # started outside the unit cell).
    fractional_coords = tf.mod(
        tf.matmul(positions, inverse_cell), tf.ones_like(positions))

    # Number of cell repeats needed along each cell vector to cover the
    # cutoff sphere.
    num_cell_repeats = cutoff_radius * tf.norm(inverse_cell, axis=0)

    mins = tf.reduce_min(tf.floor(fractional_coords - num_cell_repeats), axis=0)
    maxs = tf.reduce_max(tf.ceil(fractional_coords + num_cell_repeats), axis=0)

    # Now we generate a set of cell offsets. We start with the repeats in each
    # unit cell direction.
    v0_range = tf.range(mins[0], maxs[0])
    v1_range = tf.range(mins[1], maxs[1])
    v2_range = tf.range(mins[2], maxs[2])

    # Then we expand them in each dimension
    xhat = tf.constant([1.0, 0.0, 0.0], dtype=inverse_cell.dtype)
    yhat = tf.constant([0.0, 1.0, 0.0], dtype=inverse_cell.dtype)
    zhat = tf.constant([0.0, 0.0, 1.0], dtype=inverse_cell.dtype)

    v0_range = v0_range[:, None] * xhat[None, :]
    v1_range = v1_range[:, None] * yhat[None, :]
    v2_range = v2_range[:, None] * zhat[None, :]

    # And combine them to get an offset vector for each cell
    offsets = (
        v0_range[:, None, None] + v1_range[None, :, None] +
        v2_range[None, None, :])

    offsets = tf.reshape(offsets, (-1, 3))

    # Now we have a vector of unit cell offsets (offset_index, 3) in the inverse
    # unit cell basis. We convert that to cartesian coordinate offsets here.
    cart_offsets = tf.matmul(offsets, cell)

    # we need to offset each atom coordinate by each offset.
    # This array is (atom_index, offset, 3)
    shifted_cart_coords = positions[:, None] + cart_offsets[None, :]

    # Next, we subtract each position from the array of positions.
    # This leads to (atom_i, atom_j, positionvector, xhat)
    relative_positions = shifted_cart_coords - positions[:, None, None]

    # This is the distance squared. This leads to (atom_i, atom_j, distance2)
    distances2 = tf.reduce_sum(relative_positions**2, axis=3)

    # We zero out masked distances.
    # Broadcasting the mask once over rows and once over columns zeroes any
    # pair involving a padded atom.
    distances2 *= atom_mask
    distances2 *= atom_mask[:, None]

    # We do not mask out the values greater than cutoff_radius here. That is
    # done later in the energy function. The zero masking here is due to the
    # fact that the gradient of the square_root at x=0 is nan, so we have to
    # avoid the zeros. Here we replace the zeros temporarily with ones, take the
    # sqrt, and then return the right parts.
    zeros = tf.equal(distances2, 0.0)
    adjusted = tf.where(zeros, tf.ones_like(distances2), distances2)
    distance = tf.sqrt(adjusted)
    return tf.where(zeros, tf.zeros_like(distance), distance)
125 |
126 |
def get_neighbors_oneway(positions,
                         cell,
                         cutoff_distance,
                         skin=0.01,
                         strain=None,
                         debug=False):
  """Oneway neighborlist.

  Each neighbor pair is counted only once: the candidate cell offsets are
  restricted to half of space (n1 >= 0, with half of the n1 == 0 plane
  removed), and self-pairs in the home cell are filtered with i > a.

  Args:
    positions: array-like or Tensor shape=(natoms, 3)
      Cartesian coordinates of atoms in the unit cell.
    cell: array-like or Tensor shape=(3, 3)
      Unit cell vectors in cartesian basis. Each row is a cell vector.
    cutoff_distance: float
      Two atoms closer than cutoff_distance + skin are neighbors.
    skin: float
      Extra margin added to cutoff_distance when selecting neighbors.
    strain: array-like shape=(3, 3) or None
      Strain tensor applied to positions and cell before the search.
      None (the default) means zero strain.
    debug: bool
      If True, insert tf.Print ops that log intermediate values.

  Returns
  -------

  (indices, distances, displacements):
    indices: int32 Tensor shape=(npairs, 2) of (atom_index, neighbor_index)
      pairs, i.e. the atom at neighbor_index is a neighbor of the atom at
      atom_index.
    distances: Tensor shape=(npairs,) of the corresponding distances.
    displacements: int32 Tensor shape=(npairs, 3) of unit cell offsets
      locating each neighbor image.

  Adapted from
  https://wiki.fysik.dtu.dk/ase/_modules/ase/neighborlist.html#NeighborList.

  """
  # Use a None sentinel instead of a mutable numpy default argument.
  if strain is None:
    strain = np.zeros((3, 3))

  positions = tf.convert_to_tensor(positions)
  cell = tf.convert_to_tensor(cell)
  strain = tf.convert_to_tensor(strain, dtype=cell.dtype)

  # Apply the strain to both the positions and the cell vectors.
  strain_tensor = tf.eye(3, dtype=cell.dtype) + strain
  positions = tf.transpose(tf.matmul(strain_tensor, tf.transpose(positions)))
  cell = tf.transpose(tf.matmul(strain_tensor, tf.transpose(cell)))

  # Number of cell repeats along each cell direction needed to cover the
  # cutoff distance.
  inverse_cell = tf.matrix_inverse(cell)
  h = 1 / tf.norm(inverse_cell, axis=0)
  N = tf.floor(cutoff_distance / h) + 1

  if debug:
    N = tf.Print(N, [N], ' tf N: ')

  # Wrap atoms into the home cell, remembering the integer offsets that did
  # the wrapping so displacements can be corrected later.
  scaled = tf.matmul(positions, inverse_cell)
  scaled0 = tf.matmul(positions, inverse_cell) % 1.0

  offsets = tf.round(scaled0 - scaled)
  if debug:
    offsets = tf.Print(offsets, [offsets], ' tf offsets:', summarize=100)

  positions0 = positions + tf.matmul(offsets, cell)
  if debug:
    positions0 = tf.Print(
        positions0, [positions0], ' tf positions: ', summarize=100)

  # Candidate offsets: only half of space (n1 >= 0) for a oneway list.
  v0_range = tf.range(0, N[0] + 1)
  v1_range = tf.range(-N[1], N[1] + 1)
  v2_range = tf.range(-N[2], N[2] + 1)

  xhat = tf.constant([1, 0, 0], dtype=cell.dtype)
  yhat = tf.constant([0, 1, 0], dtype=cell.dtype)
  zhat = tf.constant([0, 0, 1], dtype=cell.dtype)

  v0_range = v0_range[:, None] * xhat[None, :]
  v1_range = v1_range[:, None] * yhat[None, :]
  v2_range = v2_range[:, None] * zhat[None, :]

  N = (
      v0_range[:, None, None] + v1_range[None, :, None] +
      v2_range[None, None, :])

  N = tf.reshape(N, (-1, 3))

  n1 = N[:, 0]
  n2 = N[:, 1]
  n3 = N[:, 2]

  # Remove the half of the n1 == 0 plane that would double-count pairs.
  mask = tf.logical_not(
      tf.logical_and(
          tf.equal(n1, 0.0),
          tf.logical_or(
              tf.less(n2, 0.0),
              tf.logical_and(tf.equal(n2, 0.0), tf.less(n3, 0.0)))))
  N = tf.boolean_mask(N, mask)
  if debug:
    N = tf.Print(N, [N], 'tf offsets', summarize=20)
  noffsets = tf.shape(N)[0]
  natoms = tf.shape(positions)[0]
  indices = tf.range(natoms)
  # Finally, we have to run two loops, one over the offsets, and one over the
  # positions. We will accumulate the neighbors as we go. I like to save all the
  # loop vars in one place.
  # n is a counter for offsets
  # a is a counter for atom index
  # k is a counter for neighbors
  # indices contains a list of (a, index): the index of the neighbor of atom a.
  # displacements is a list of (n1, n2, n3) corresponding to displacements for
  # each neighbor.
  LV = namedtuple('LoopVariables', 'n, a, k, indices, distances, displacements')

  lv0 = LV(
      tf.constant(0, dtype=tf.int32),  # n, unit cell offset counter
      tf.constant(0, dtype=tf.int32),  # a, counter for atom index
      tf.constant(0, dtype=tf.int32),  # k, neighbor counter
      tf.Variable(tf.zeros((0, 2), dtype=tf.int32), dtype=tf.int32),  # indices
      # distances
      tf.Variable(tf.zeros((0,), dtype=positions.dtype), dtype=positions.dtype),
      tf.Variable(tf.zeros((0, 3), dtype=tf.int32),
                  dtype=tf.int32)  # displacements
  )

  # Shape invariants: the accumulator tensors grow along their first axis.
  shiv = LV(
      tf.TensorShape(None), tf.TensorShape(None), tf.TensorShape(None),
      tf.TensorShape([None, 2]), tf.TensorShape(None), tf.TensorShape([None,
                                                                       3]))

  def outer_cond(nt):
    return tf.less(nt.n, noffsets)

  def outer_body(nt):
    """This is the loop over the offsets."""

    n1, n2, n3 = tf.unstack(N[nt.n])

    displacement = tf.matmul(tf.cast(N[nt.n][None, :], dtype=cell.dtype), cell)

    if debug:
      # BUG FIX: this previously referenced an undefined name `n`, raising
      # NameError whenever debug=True. Use the loop counter nt.n instead.
      displacement = tf.Print(displacement, [nt.n, displacement],
                              'tf displacement: ')

    # Now we loop over each atom
    def inner_cond(nt):
      return tf.less(nt.a, natoms)

    def inner_body(nt):
      """This is a loop over each atom."""
      # Distances from atom a to every (offset) atom; zeros are temporarily
      # replaced by ones before sqrt to avoid a nan gradient at x=0.
      _p = positions0 + displacement - positions0[nt.a]
      _p2 = tf.reduce_sum(_p**2, axis=1)
      _m0 = tf.equal(_p2, 0.0)
      _mp = tf.where(_m0, tf.ones_like(_p2), _p2)
      _d = tf.sqrt(_mp)

      # These are the distances to the neighbors
      d = tf.where(_m0, tf.zeros_like(_p2), _d)

      # get indices where the distance is within the cutoff distance
      # skip self (d == 0).
      neighbor_mask = tf.logical_and(d > 0.0, d < (cutoff_distance + skin))
      i = tf.boolean_mask(indices, neighbor_mask)
      d = tf.boolean_mask(d, neighbor_mask)

      # ug. you have to specify the shape here since i, and hence m is not know
      # in advance. Without it you get:

      # "Number of mask dimensions must be specified, even if some
      # dimensions" ValueError: Number of mask dimensions must be specified,
      # even if some dimensions are None. E.g. shape=[None] is ok, but
      # shape=None is not.

      def self_interaction():
        # In the home cell keep only i > a so each pair appears once.
        m = tf.greater(i, nt.a)
        m.set_shape([None])
        return tf.boolean_mask(i, m), tf.boolean_mask(d, m)

      i, d = tf.cond(
          tf.reduce_all([tf.equal(n1, 0),
                         tf.equal(n2, 0),
                         tf.equal(n3, 0)]),
          true_fn=self_interaction,
          false_fn=lambda: (i, d))

      # Now we need to add tuples of (nt.a, ind) for ind in i if there is
      # anything in i, and also the index of the offset.

      n_inds = tf.shape(i)[0]

      # Correct the displacement for the wrapping offsets applied earlier.
      disp = N[nt.n][None, :]
      disp += tf.gather(offsets, i)
      disp -= offsets[nt.a]

      def nind_cond(nt):
        return tf.less(nt.k, n_inds)

      def nind_body(nt):
        tups = tf.concat(
            [
                nt.indices,
                [(
                    nt.a,  # atom to get neighbors for
                    i[nt.k],  # index of neighbor equivalent atom.
                )]
            ],
            axis=0)

        dists = tf.concat([nt.distances, [d[nt.k]]], axis=0)

        disps = tf.concat(
            [nt.displacements, [tf.cast(disp[nt.k], tf.int32)]], axis=0)

        return LV(nt.n, nt.a, nt.k + 1, tups, dists, disps),

      nt, = tf.while_loop(nind_cond, nind_body, [nt], [shiv])
      return LV(nt.n, nt.a + 1, 0, nt.indices, nt.distances, nt.displacements),

    nt, = tf.while_loop(inner_cond, inner_body, [nt], [shiv])

    return LV(nt.n + 1, 0, 0, nt.indices, nt.distances, nt.displacements),

  lv1, = tf.while_loop(outer_cond, outer_body, [lv0], [shiv])

  return lv1.indices, lv1.distances, lv1.displacements
330 |
--------------------------------------------------------------------------------
/docs/dap.ag.org:
--------------------------------------------------------------------------------
1 | #+TITLE: Autograd modules
2 |
3 | * Neighborlists
4 |
Neighborlists are one of the most critical components of molecular simulation. They are challenging to implement in vectorized code that is compatible with Tensorflow. This module implements some neighborlists in pure Python that are compatible with autograd.
6 |
7 | ** Bothways neighbor lists
8 |
In a bothways list, you get all the neighbors within a cutoff distance of each atom in the system. That means each atom will be the neighbor of another atom, and vice versa, so if you loop over all of them there will be double counting. It is, however, exactly what you want for descriptors of local environments.
10 |
11 | For reference, here is what a neighborlist from pydoc:ase.neighborlist.NeighborList looks like. It returns a list of indices and offsets to generate the positions. You have to construct the position vectors to each neighbor afterwards.
12 |
13 | #+BEGIN_SRC python :results output org drawer
14 | from ase.build import molecule
15 | from ase.neighborlist import NeighborList
16 |
17 | atoms = molecule('H2O')
18 | atoms.set_cell((20, 20, 20))
19 |
20 | nl = NeighborList([3.0 / 2] * len(atoms), skin=0,
21 | bothways=True, self_interaction=False)
22 | nl.update(atoms)
23 |
24 | indices, offsets = nl.get_neighbors(0)
25 |
26 | print(f'ase inds: {indices}')
27 | print(f'ase offsets:\n{offsets}')
28 | print('Positions of neighbors')
29 |
30 | for i, offset in zip(indices, offsets):
31 | print(atoms.positions[i] + offset @ atoms.get_cell())
32 | #+END_SRC
33 |
34 | #+RESULTS:
35 | :RESULTS:
36 | ase inds: [1 2]
37 | ase offsets:
38 | [[0 0 0]
39 | [0 0 0]]
40 | Positions of neighbors
41 | [ 0. 0.763239 -0.477047]
42 | [ 0. -0.763239 -0.477047]
43 | :END:
44 |
45 | pydoc:dap.ag.neighborlist.get_distances returns a multidimensional array of distances, and offset vectors. Distances that are greater than the cutoff distance are masked to zero. Here is an example output for the distance array for a water molecule.
46 |
47 | #+BEGIN_SRC python :results output org drawer
48 | from ase.build import molecule
49 | from dap.ag.neighborlist import get_distances
50 |
51 | atoms = molecule('H2O')
52 | atoms.set_cell((20, 20, 20))
53 | atoms.center()
54 |
55 |
56 | dists, offsets = get_distances(atoms.positions, atoms.cell, 3.0)
57 | print(dists)
58 | #+END_SRC
59 |
60 | #+RESULTS:
61 | :RESULTS:
62 | [[[0. ]
63 | [0.96856502]
64 | [0.96856502]]
65 |
66 | [[0.96856502]
67 | [0. ]
68 | [1.526478 ]]
69 |
70 | [[0.96856502]
71 | [1.526478 ]
72 | [0. ]]]
73 | :END:
74 |
75 | It is sometimes easier to work more directly with the neighborlist, e.g. to get a list of the equivalent indices, and offset vectors. pydoc:dap.ag.neighborlist.get_neighbors provides some tools for that.
76 |
77 |
78 | #+BEGIN_SRC python :results output org drawer
79 | from ase.build import molecule
80 | from dap.ag.neighborlist import get_distances, get_neighbors
81 |
82 | atoms = molecule('H2O')
83 | atoms.set_cell((20, 20, 20))
84 | atoms.center()
85 |
86 | dists, offsets = get_distances(atoms.positions, atoms.cell, 3.0)
87 |
88 | inds, offs = get_neighbors(0, dists, offsets)
89 |
90 | print(inds)
91 | print(offs)
92 | #+END_SRC
93 |
94 | #+RESULTS:
95 | :RESULTS:
96 | [1 2]
97 | [[0 0 0]
98 | [0 0 0]]
99 | :END:
100 |
101 | *** on a real periodic system
102 |
103 | In ASE:
104 |
105 | #+BEGIN_SRC python :results output org drawer
106 | import numpy as np
107 | np.set_printoptions(precision=3, suppress=True)
108 |
109 | from ase.build import bulk
110 | from ase.neighborlist import NeighborList
111 |
112 | a = 3.6
113 | Rc = a / np.sqrt(2) / 2
114 | cutoff_distance = 2 * Rc
115 |
116 | atoms = bulk('Cu', 'fcc', a=a).repeat((2, 1, 1))
117 | atoms.rattle()
118 | nl = NeighborList([Rc] * len(atoms), skin=0.01,
119 | bothways=True, self_interaction=False)
120 | nl.update(atoms)
121 |
122 | indices, offsets = nl.get_neighbors(0)
123 |
124 | isort = np.argsort(offsets.view('f8,f8,f8'), axis=0,
125 | order=['f0', 'f1', 'f2']).flatten()
126 |
127 | print(f'ase inds: {indices[isort]}')
128 | print(f'ase offsets:')
129 | print(offsets[isort])
130 | print('Positions of neighbors')
131 |
132 | for i, offset in zip(indices[isort], offsets[isort]):
133 | print(atoms.positions[i] + offset @ atoms.get_cell())
134 | #+END_SRC
135 |
136 | #+RESULTS:
137 | :RESULTS:
138 | ase inds: [1 0 1 0 0 0 0 1 0 1 1 1]
139 | ase offsets:
140 | [[ 0 0 0]
141 | [ 0 0 1]
142 | [ 0 0 -1]
143 | [ 0 0 -1]
144 | [ 0 1 0]
145 | [ 0 1 -1]
146 | [ 0 -1 0]
147 | [ 0 -1 0]
148 | [ 0 -1 1]
149 | [-1 0 0]
150 | [-1 0 1]
151 | [-1 1 0]]
152 | Positions of neighbors
153 | [0.002 1.8 1.8 ]
154 | [1.8 1.8 0.001]
155 | [-1.798 -0. 1.8 ]
156 | [-1.8 -1.8 0.001]
157 | [ 1.8 -0. 1.801]
158 | [ 0. -1.8 1.801]
159 | [-1.8 -0. -1.799]
160 | [-1.798 1.8 -0. ]
161 | [ 0. 1.8 -1.799]
162 | [ 0.002 -1.8 -1.8 ]
163 | [ 1.802 -0. -1.8 ]
164 | [ 1.802 -1.8 -0. ]
165 | :END:
166 |
167 | Here is the version from this module. The order of the neighbors may differ.
168 |
169 | #+BEGIN_SRC python :results output org drawer
170 | import numpy as np
171 | np.set_printoptions(precision=3, suppress=True)
172 |
173 | from ase.build import bulk
174 | from dap.ag.neighborlist import get_distances, get_neighbors
175 |
176 | a = 3.6
177 | Rc = a / np.sqrt(2) / 2
178 | cutoff_distance = 2 * Rc
179 |
180 | atoms = bulk('Cu', 'fcc', a=a).repeat((2, 1, 1))
181 | atoms.rattle()
182 |
183 | dists, doffsets = get_distances(atoms.positions, atoms.cell, cutoff_distance)
184 |
185 | indices, offsets = get_neighbors(0, dists, doffsets)
186 |
187 | isort = np.argsort(offsets.view('f8,f8,f8'), axis=0,
188 | order=['f0', 'f1', 'f2']).flatten()
189 |
190 | print(f'ase inds: {indices[isort]}')
191 | print(f'ase offsets:')
192 | print(offsets[isort])
193 | print('Positions of neighbors')
194 |
195 | for i, offset in zip(indices[isort], offsets[isort]):
196 | print(atoms.positions[i] + offset @ atoms.get_cell())
197 | #+END_SRC
198 |
199 | #+RESULTS:
200 | :RESULTS:
201 | ase inds: [1 0 0 1 0 0 0 1 0 1 1 1]
202 | ase offsets:
203 | [[ 0 0 0]
204 | [ 0 0 1]
205 | [ 0 0 -1]
206 | [ 0 0 -1]
207 | [ 0 1 0]
208 | [ 0 1 -1]
209 | [ 0 -1 0]
210 | [ 0 -1 0]
211 | [ 0 -1 1]
212 | [-1 0 0]
213 | [-1 0 1]
214 | [-1 1 0]]
215 | Positions of neighbors
216 | [0.002 1.8 1.8 ]
217 | [1.8 1.8 0.001]
218 | [-1.8 -1.8 0.001]
219 | [-1.798 -0. 1.8 ]
220 | [ 1.8 -0. 1.801]
221 | [ 0. -1.8 1.801]
222 | [-1.8 -0. -1.799]
223 | [-1.798 1.8 -0. ]
224 | [ 0. 1.8 -1.799]
225 | [ 0.002 -1.8 -1.8 ]
226 | [ 1.802 -0. -1.8 ]
227 | [ 1.802 -1.8 -0. ]
228 | :END:
229 |
230 | ** Oneway lists
231 |
232 | There is a "one-way" concept in a neighborlist. In the "bothways" approach, each pair of atoms has two distances: one from atom i to j, and one from j to i. It is not always desirable to double count these.
233 |
234 | Here is a one-way list in ASE.
235 |
236 | #+BEGIN_SRC python :results output org drawer
237 | from ase.build import molecule
238 | from ase.neighborlist import NeighborList
239 |
240 | atoms = molecule('H2O')
241 | atoms.set_cell((20, 20, 20))
242 |
243 | nl = NeighborList([3.0 / 2] * len(atoms), skin=0,
244 | bothways=False, self_interaction=False)
245 | nl.update(atoms)
246 |
247 | for i, atom in enumerate(atoms):
248 | indices, offsets = nl.get_neighbors(i)
249 | print(f'Atom {i}')
250 | print(f' ase inds: {indices}')
251 | print(f' ase offsets:\n{offsets}')
252 | print(' Positions of neighbors')
253 |
254 | for j, offset in zip(indices, offsets):
255 | print(' ', atoms.positions[j] + offset @ atoms.get_cell())
256 | #+END_SRC
257 |
258 | #+RESULTS:
259 | :RESULTS:
260 | Atom 0
261 | ase inds: [1 2]
262 | ase offsets:
263 | [[0 0 0]
264 | [0 0 0]]
265 | Positions of neighbors
266 | [ 0. 0.763239 -0.477047]
267 | [ 0. -0.763239 -0.477047]
268 | Atom 1
269 | ase inds: [2]
270 | ase offsets:
271 | [[0 0 0]]
272 | Positions of neighbors
273 | [ 0. -0.763239 -0.477047]
274 | Atom 2
275 | ase inds: []
276 | ase offsets:
277 | []
278 | Positions of neighbors
279 | :END:
280 |
281 | *** get_neighbors_oneway
282 |
283 | This is a near direct translation of the one-way algorithm from ase, and it returns the same results. It is used in the dap.ag.emt module, and it is differentiable. It is not that easy, however, to implement in Tensorflow.
284 |
285 |
286 |
287 | * Training the Lennard Jones parameters
288 |
289 | There is a database of DFT calculations of Ar in [[./argon.db]]. Here is a brief description of the database. It contains five structures at three different volumes each. For each volume and structure the atoms were randomly displaced many times, and the energy and forces were computed using DFT (Vasp).
290 |
291 | #+BEGIN_SRC python :results output org
292 | from collections import Counter
293 | import ase.db
294 |
295 | db = ase.db.connect('argon.db')
296 | data = db.select()
297 |
298 | keys, cnt = {}, 0
299 | for entry in data:
300 | cnt += 1
301 | for k, v in entry.key_value_pairs.items():
302 |
303 | if k in keys:
304 | keys[k] += [v]
305 | else:
306 | keys[k] = [v]
307 |
308 | print ('{0:15s} {1:15s} {2} calculations total'.format('keyword', 'value',cnt))
309 |
310 | print('------------------------------------------------------')
311 |
312 | for k, v in keys.items():
313 | vals = list(set(v))
314 |
315 | if len(vals) <= 5:
316 | val = ", ".join(str(e)[:5] for e in vals)
317 | print('{0:5}: {1}'.format(k, val))
318 |
319 | else:
320 | val = ", ".join(str(e)[:5] for e in vals[:5])
321 | print('{0:5s}:{1}, etc...'.format(k, val))
322 | #+END_SRC
323 |
324 | #+RESULTS:
325 | #+BEGIN_SRC org
326 | keyword value 139 calculations total
327 | ------------------------------------------------------
328 | i :0, 1, 2, 3, 4, etc...
329 | structure: fcc, hcp, sc, diamo, bcc
330 | f : 0.9, 1.0, 1.1
331 | #+END_SRC
332 |
333 |
334 | We can use the built-in pydoc:autograd.misc.optimizers.adam optimizer to find the best set of parameters. We have to provide an initial guess.
335 |
336 | #+BEGIN_SRC python :results output org drawer
337 | import autograd.numpy as np
338 | from dap.ag.lennardjones import energy
339 | from autograd.misc.optimizers import adam
340 | from autograd import grad
341 |
342 | import matplotlib.pyplot as plt
343 |
344 | import ase.db
345 | db = ase.db.connect('argon.db')
346 |
347 | known_energies = [row.energy for row in db.select()]
348 | all_positions = [row.positions for row in db.select()]
349 | all_cells = [row.cell for row in db.select()]
350 |
351 | # Initial guess
352 | params = {'epsilon': 0.1, 'sigma': 3.5}
353 |
354 | def objective(params, step):
355 | energies = [energy(params, pos, cell) for pos, cell in zip(all_positions, all_cells)]
356 | errs = np.array(energies) - np.array(known_energies)
357 | return np.mean(np.abs(errs))
358 |
359 | max_steps = 251
360 | loss_goal = 0.01
361 |
362 | def callback(params, step, gradient):
363 | if step % 100 == 0:
364 | loss = objective(params, step)
365 | print(f"step {i * max_steps + step:4d} objective {loss:1.4f} {params}")
366 |
367 | for i in range(10):
368 | if objective(params, None) <= loss_goal:
369 | break
370 |
371 | params = adam(grad(objective), params,
372 | step_size=0.001, num_iters=max_steps, callback=callback)
373 |
374 |
375 | print(f'Final params = {params} with a MAE of {objective(params, None):1.2e}')
376 |
377 | # Save for reuse later. autograd saves the params as 0d arrays, which are not
378 | # JSON-serializable, so we cast them as floats here.
379 | import json
380 | with open('argon-lj.json', 'w') as f:
381 | f.write(json.dumps({'sigma': float(params['sigma']),
382 | 'epsilon': float(params['epsilon'])}))
383 | #+END_SRC
384 |
385 | #+RESULTS:
386 | :RESULTS:
387 | step 0 objective 0.6260 {'epsilon': array(0.1), 'sigma': array(3.5)}
388 | step 100 objective 0.0236 {'epsilon': array(0.00399268), 'sigma': array(3.42282772)}
389 | step 200 objective 0.0168 {'epsilon': array(0.00638201), 'sigma': array(3.43248715)}
390 | step 251 objective 0.0166 {'epsilon': array(0.00631611), 'sigma': array(3.44008306)}
391 | step 351 objective 0.0141 {'epsilon': array(0.00604479), 'sigma': array(3.54264976)}
392 | step 451 objective 0.0107 {'epsilon': array(0.00569821), 'sigma': array(3.65304244)}
393 | Final params = {'epsilon': array(0.00527079), 'sigma': array(3.71124126)} with a MAE of 9.04e-03
394 | :END:
395 |
396 | Now that we have fitted it, we can reuse it.
397 |
398 | #+BEGIN_SRC python :results output org drawer
399 | import json
400 | import matplotlib.pyplot as plt
401 | import ase.db
402 | import numpy as np
403 | from dap.ag.lennardjones import energy
404 |
405 | with open('argon-lj.json') as f:
406 | params = json.loads(f.read())
407 |
408 | db = ase.db.connect('argon.db')
409 |
410 | plt.subplot(121)
411 | for structure, spec in [('fcc', 'b.'),
412 | ('hcp', 'r.'),
413 | ('bcc', 'g.'),
414 | ('diamond', 'gd'),
415 | ('sc', 'bs')]:
416 |
417 | ke, pe = [], []
418 | for row in db.select(structure=structure):
419 | ke += [row.energy]
420 | atoms = row.toatoms()
421 |
422 | pe += [energy(params, atoms.positions, atoms.cell)]
423 | plt.plot(ke, pe, spec, label=structure)
424 |
425 | plt.plot([-0.1, 0], [-0.1, 0], 'k-', label='parity')
426 | plt.legend()
427 | plt.xlabel('DFT')
428 | plt.ylabel('LJ')
429 |
430 | err = np.array(ke) - np.array(pe)
431 |
432 | plt.subplot(122)
433 | plt.hist(err)
434 | plt.xlabel('error')
435 | plt.tight_layout()
436 | plt.savefig('ag-lj.png')
437 | #+END_SRC
438 |
439 | #+RESULTS:
440 | :RESULTS:
441 | :END:
442 |
443 | [[./ag-lj.png]]
444 |
445 | See also: http://kitchingroup.cheme.cmu.edu/blog/2017/11/19/Training-the-ASE-Lennard-Jones-potential-to-DFT-calculations/
446 |
447 | * EMT - effective medium theory
448 |
449 | ASE implements an effective medium theory cite:jacobsen-1996-semi-empir calculator (https://wiki.fysik.dtu.dk/ase/ase/calculators/emt.html#module-ase.calculators.emt), but it is notably lacking an ability to compute stress. One of the key features of differentiable atomistic potentials is that the stress is straightforward to compute. It is tricky to verify the stress is correct, and in the tests we use a numerical approximation to the stress for comparison.
450 |
451 | Here is an example usage.
452 |
453 | #+BEGIN_SRC python :results output org drawer
454 | from ase.build import bulk
455 | from ase.calculators.emt import EMT
456 | from dap.ag.emt import parameters, energy, forces, stress
457 | import numpy as np
458 |
459 | atoms = bulk('Cu', 'fcc', a=3.6).repeat((2, 2, 2))
460 | atoms.set_calculator(EMT())
461 | atoms.rattle()
462 |
463 |
464 | e = energy(parameters, atoms.positions, atoms.numbers, atoms.cell)
465 | print(f'ase: {atoms.get_potential_energy()}\nag : {e}')
466 |
467 | f = forces(parameters, atoms.positions, atoms.numbers, atoms.cell)
468 | print(f'Forces equal: {np.allclose(f, atoms.get_forces())}')
469 |
470 | # Good luck getting this from ASE!
471 | s = stress(parameters, atoms.positions, atoms.numbers, atoms.cell)
472 | print(s)
473 | #+END_SRC
474 |
475 | #+RESULTS:
476 | :RESULTS:
477 | ase: -0.0534529034684148
478 | ag : -0.0534529034684148
479 | Forces equal: True
480 | [ 6.99783243e-03 6.99779486e-03 6.99766900e-03 -7.41208181e-07
481 | 3.16443974e-07 -2.32564931e-07]
482 | :END:
483 |
484 | ** Training your own EMT
485 |
486 | One of the main points of this is to get a trainable EMT, so you can use it for what you want. The parameters are in dap.ag.emt.parameters. We can use this in autograd.
487 |
488 | These are a set of clusters we could use for input data.
489 |
490 | #+BEGIN_SRC ipython
491 | from ase.cluster.icosahedron import Icosahedron
492 |
493 | atoms = Icosahedron('Au', noshells=2)
494 | print(atoms)
495 |
496 | atoms = Icosahedron('Au', noshells=3)
497 | print(atoms)
498 | #+END_SRC
499 |
500 | #+RESULTS:
501 | :RESULTS:
502 | # Out[3]:
503 | # output
504 | : Atoms(symbols='Au', pbc=False, tags=...)
505 | : Atoms(symbols='Au13', pbc=False, cell=[4.908247792861572, 4.908247792861572, 4.908247792861572], tags=...)
506 | : Atoms(symbols='Au55', pbc=False, cell=[9.816495585723144, 9.816495585723144, 9.816495585723144], tags=...)
507 | :END:
508 |
509 | #+BEGIN_SRC ipython
510 | from ase.cluster.octahedron import Octahedron
511 |
512 | atoms = Octahedron('Au', 2)
513 | print(atoms)
514 |
515 | atoms = Octahedron('Au', 3)
516 | print(atoms)
517 |
518 | atoms = Octahedron('Au', 4)
519 | print(atoms)
520 | #+END_SRC
521 |
522 | #+RESULTS:
523 | :RESULTS:
524 | # Out[7]:
525 | # output
526 | : Cluster(symbols='Au6', pbc=False, cell=[4.08, 4.08, 4.08])
527 | : Cluster(symbols='Au19', pbc=False, cell=[8.16, 8.16, 8.16])
528 | : Cluster(symbols='Au44', pbc=False, cell=[12.24, 12.24, 12.24])
529 | :END:
530 |
531 | #+BEGIN_SRC ipython
532 | from ase.cluster.decahedron import Decahedron
533 |
534 | atoms = Decahedron('Au', 2, 2, 0)
535 | print(atoms)
536 |
537 | atoms = Decahedron('Au', 2, 2, 1)
538 | print(atoms)
539 |
540 | atoms = Decahedron('Au', 3, 2, 0)
541 | print(atoms)
542 |
543 | atoms = Decahedron('Au', 2, 3, 0)
544 | print(atoms)
545 |
546 | atoms = Decahedron('Au', 3, 3, 0)
547 | print(atoms)
548 | #+END_SRC
549 |
550 | #+RESULTS:
551 | :RESULTS:
552 | # Out[12]:
553 | # output
554 | : Atoms(symbols='Au13', pbc=False, cell=[4.752390490203045, 4.519791943686726, 5.769991334482228])
555 | : Atoms(symbols='Au75', pbc=False, cell=[12.441919831163347, 11.832968930649798, 11.539982668964456])
556 | : Atoms(symbols='Au39', pbc=False, cell=[9.50478098040609, 9.039583887373452, 8.654987001723342])
557 | : Atoms(symbols='Au19', pbc=False, cell=[4.752390490203045, 4.519791943686726, 8.654987001723342])
558 | : Atoms(symbols='Au55', pbc=False, cell=[9.50478098040609, 9.039583887373452, 11.539982668964456])
559 | :END:
560 |
561 | * Bibtex entries
562 |
563 | #+BEGIN_SRC text
564 | @article{jacobsen-1996-semi-empir,
565 | author = {K.W. Jacobsen and P. Stoltze and J.K. N{\o}rskov},
566 | title = {A Semi-Empirical Effective Medium Theory for Metals and
567 | Alloys},
568 | journal = {Surface Science},
569 | volume = 366,
570 | number = 2,
571 | pages = {394-402},
572 | year = 1996,
573 | doi = {10.1016/0039-6028(96)00816-3},
574 | url = {https://doi.org/10.1016/0039-6028(96)00816-3},
575 | DATE_ADDED = {Tue Nov 21 15:34:51 2017},
576 | }
577 |
578 |
579 |
580 | #+END_SRC
581 |
--------------------------------------------------------------------------------
/dap/tf/lennardjones.py:
--------------------------------------------------------------------------------
1 | # Copyright 2018 Google Inc.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | """A Tensorflow implementation of a LennardJones Potential for a single element.
15 |
16 | This is a standalone module by design.
17 | """
18 |
19 | import numpy as np
20 | import tensorflow as tf
21 | from dap.tf.neighborlist import get_neighbors_oneway
22 | from ase.calculators.calculator import Calculator, all_changes
23 | import ase.db
24 |
25 |
def get_Rij(positions, cell, mask, cutoff_radius):
  """Compute distances to atoms in periodically repeated images of the cell.

  Space is tiled with unit-cell images until a sphere of radius
  `cutoff_radius` is covered. Some of the returned distances therefore exceed
  the cutoff; callers are expected to filter those out. The number of images
  is the same for every atom, which keeps the intermediate arrays
  rectangular.

  This function is tailored to the Lennard-Jones potential as noted in the
  comments below.

  Args:
    positions: array-like shape=(numatoms, 3)
      Cartesian coordinates of the atoms in the unit cell.
    cell: array-like shape=(3, 3)
      Unit-cell vectors in a cartesian basis, one vector per row.
    mask: array-like (numatoms,)
      ones for real atoms, zeros for padded positions.
    cutoff_radius: float
      Radius of the sphere that must be covered by cell images.

  Returns:
    A flat 1-D tensor of distances to all neighbor images, with the zero
    self-distances removed.

  Notes:

    The Rii = 0 self-distance has undefined gradients for 1/r-type
    potentials, so it is removed by the final boolean mask; no other masking
    strategy was found that keeps the gradients well defined. This is not an
    issue for potentials without a 1/r form.

    Adapted from:
    Related: pydoc:pymatgen.core.lattice.Lattice.get_points_in_sphere

  """
  with tf.name_scope("get_Rij"):
    positions = tf.convert_to_tensor(positions)
    cell = tf.convert_to_tensor(cell)
    mask = tf.convert_to_tensor(mask, dtype=cell.dtype)

    with tf.name_scope("get_offsets"):
      # Fractional coordinates, wrapped into [0, 1), via the reciprocal cell.
      recip_cell = tf.matrix_inverse(cell)
      frac = tf.mod(
          tf.matmul(positions, recip_cell), tf.ones_like(positions))

      # How many cell repeats are needed along each axis to cover the sphere.
      pad = cutoff_radius * tf.norm(recip_cell, axis=0)
      lower = tf.reduce_min(tf.floor(frac - pad), axis=0)
      upper = tf.reduce_max(tf.ceil(frac + pad), axis=0)

      # Integer offset ranges along each cell direction, lifted onto the
      # three coordinate axes.
      ex = tf.constant([1.0, 0.0, 0.0], dtype=recip_cell.dtype)
      ey = tf.constant([0.0, 1.0, 0.0], dtype=recip_cell.dtype)
      ez = tf.constant([0.0, 0.0, 1.0], dtype=recip_cell.dtype)

      avec = tf.range(lower[0], upper[0])[:, None] * ex[None, :]
      bvec = tf.range(lower[1], upper[1])[:, None] * ey[None, :]
      cvec = tf.range(lower[2], upper[2])[:, None] * ez[None, :]

      # Cartesian product of the three ranges: one offset vector per image
      # cell, expressed in the fractional (inverse-cell) basis.
      offsets = (
          avec[:, None, None] + bvec[None, :, None] + cvec[None, None, :])
      offsets = tf.reshape(offsets, (-1, 3))

    # Convert the fractional offsets to cartesian displacement vectors.
    cart_offsets = tf.matmul(offsets, cell)

    # (atom_j, image, 3): every atom shifted into every image cell.
    images = positions[:, None] + cart_offsets[None, :]

    # (atom_i, atom_j, image, 3): displacement from each atom to each image.
    rij = images - positions[:, None, None]

    # Squared distances, shape (atom_i, atom_j, image).
    d2 = tf.reduce_sum(rij**2, axis=3)

    # Zero out contributions from padded entries. This needs both atom
    # dimensions: non-atoms in the j dimension, then non-atoms in the i
    # dimension.
    d2 *= mask[:, None] * mask[:, None, None]

    # All atoms are treated as identical here, so a flat list suffices. The
    # boolean mask below would flatten the array anyway, since it removes a
    # varying number of elements per sub-array.
    d2 = tf.reshape(d2, [-1])

    # Exclude the self-interaction by keeping only strictly positive squared
    # distances; for this potential that is required here to avoid nan
    # gradients. The sqrt is not strictly needed for 1/r^6 terms, but this
    # function was intended to be reusable by potentials that use Rij
    # directly. Distances beyond cutoff_radius are intentionally kept; the
    # energy function masks them later.
    return tf.sqrt(tf.boolean_mask(d2, d2 > 0.0))
142 |
143 |
def energy(positions, cell, mask=None, strain=None):
  """Total Lennard-Jones energy of a (possibly strained) periodic system.

  Args:
    positions: array-like shape=(numatoms, 3)
      Cartesian coordinates of the atoms in the unit cell.
    cell: array-like shape=(3, 3)
      Unit-cell vectors in a cartesian basis, one vector per row.
    mask: array-like (numatoms,)
      ones for real atoms, zeros for padded positions. Defaults to all ones.
    strain: array-like shape=(3, 3)
      Strain applied to the cell and positions. Defaults to zero strain.

  Returns: float
    The total energy from the Lennard Jones potential.
  """

  with tf.name_scope("LennardJones"):
    with tf.name_scope("setup"):
      positions = tf.convert_to_tensor(positions)
      cell = tf.convert_to_tensor(cell)
      mask = tf.ones_like(positions[:, 0]) if mask is None else mask
      mask = tf.convert_to_tensor(mask)
      if strain is None:
        strain = tf.zeros_like(cell)

      strain = tf.convert_to_tensor(strain)

      # Deform both the cell and the positions by (I + strain).
      deformation = tf.eye(3, dtype=cell.dtype) + strain
      strained_cell = tf.matmul(cell, deformation)
      strained_positions = tf.matmul(positions, deformation)

    # sigma and epsilon are graph variables so they can be trained;
    # AUTO_REUSE lets repeated energy() calls share a single pair.
    with tf.variable_scope("sigma", reuse=tf.AUTO_REUSE):
      sigma = tf.get_variable(
          "sigma",
          dtype=cell.dtype,
          initializer=tf.constant(1.0, dtype=cell.dtype))

    with tf.variable_scope("epsilon", reuse=tf.AUTO_REUSE):
      epsilon = tf.get_variable(
          "epsilon",
          dtype=cell.dtype,
          initializer=tf.constant(1.0, dtype=cell.dtype))

    # Cutoff radius is tied to sigma.
    rc = 3 * sigma

    with tf.name_scope("calculate_energy"):
      # Energy of an isolated pair exactly at the cutoff; subtracted per
      # neighbor so the potential goes smoothly to zero at rc.
      e0 = 4 * epsilon * ((sigma / rc)**12 - (sigma / rc)**6)

      d = get_Rij(strained_positions, strained_cell, mask, rc)
      within = tf.less_equal(d, tf.ones_like(d) * rc)

      total = 0.0
      total -= e0 * tf.reduce_sum(tf.cast(within, e0.dtype))
      c6 = (sigma**2 / tf.boolean_mask(d, within)**2)**3
      c12 = c6**2
      total += tf.reduce_sum(4 * epsilon * (c12 - c6))

      # Every pair was counted twice (i->j and j->i).
      return total / 2.0
205 |
206 |
def forces(positions, cell, mask=None, strain=None):
  """Forces as the negative gradient of the energy w.r.t. positions.

  Args:
    positions: array-like shape=(numatoms, 3)
      Cartesian coordinates of the atoms in the unit cell.
    cell: array-like shape=(3, 3)
      Unit-cell vectors in a cartesian basis, one vector per row.
    mask: array-like (numatoms,)
      ones for real atoms, zeros for padded positions. Defaults to all ones.
    strain: array-like shape=(3, 3)
      Strain to compute the forces at. Defaults to zero strain.

  Returns:
    array: shape=(natoms, 3)
  """
  with tf.name_scope("forces"):
    positions = tf.convert_to_tensor(positions)
    cell = tf.convert_to_tensor(cell)
    if mask is None:
      mask = tf.ones_like(positions[:, 0])
    mask = tf.convert_to_tensor(mask)
    if strain is None:
      strain = tf.zeros_like(cell)
    # F = -dE/dR
    e = energy(positions, cell, mask, strain)
    return tf.gradients(-e, positions)[0]
233 |
234 |
def stress(positions, cell, mask=None, strain=None):
  """Stress as the volume-normalized gradient of the energy w.r.t. strain.

  Args:
    positions: array-like shape=(numatoms, 3)
      Cartesian coordinates of the atoms in the unit cell.
    cell: array-like shape=(3, 3)
      Unit-cell vectors in a cartesian basis, one vector per row.
    mask: array-like (numatoms,)
      ones for real atoms, zeros for padded positions. Defaults to all ones.
    strain: array-like shape=(3, 3)
      Strain to compute the stress at. Defaults to zero strain.

  Returns:
    The stress components [sxx, syy, szz, syz, sxz, sxy]
    array: shape=(6,)
  """
  with tf.name_scope("stress"):
    with tf.name_scope("setup"):
      positions = tf.convert_to_tensor(positions)
      cell = tf.convert_to_tensor(cell)
      if mask is None:
        mask = tf.ones_like(positions[:, 0])
      mask = tf.convert_to_tensor(mask)
      if strain is None:
        strain = tf.zeros_like(cell)

    with tf.name_scope("get_stress"):
      volume = tf.abs(tf.matrix_determinant(cell))
      # dE/dstrain, normalized by the cell volume.
      dEds = tf.gradients(energy(positions, cell, mask, strain), strain)[0]
      dEds /= volume
      # Pick the Voigt components [xx, yy, zz, yz, xz, xy] from the
      # flattened 3x3 tensor.
      return tf.gather(tf.reshape(dEds, (9,)), [0, 4, 8, 5, 2, 1])
268 |
269 |
def energy_batch(POSITIONS, CELLS, MASKS, strain=None):
  """A batched version of `energy'.

  Args:
    POSITIONS: array-like shape=(batch, maxnumatoms, 3)
      batched array of position arrays. Each position array should be padded
      if there are fewer atoms than maxnatoms.
    CELLS: array-like shape=(batch, 3, 3)
    MASKS: array-like shape=(batch, maxnatoms)
    strain: array-like shape=(3, 3), or None for zero strain.
      The same strain is applied to every system in the batch.

  Returns:
    energies: array-like shape=(batch,)
  """
  # A mutable (ndarray) default argument would be shared across all calls,
  # so the zero default is constructed per call instead.
  if strain is None:
    strain = np.zeros((3, 3), dtype=np.float64)
  return tf.convert_to_tensor([
      energy(positions, cell, mask, strain)
      for positions, cell, mask in zip(POSITIONS, CELLS, MASKS)
  ])
292 |
293 |
def forces_batch(POSITIONS, CELLS, MASKS, strain=None):
  """A batched version of `forces'.

  Args:
    POSITIONS: array-like shape=(batch, maxnumatoms, 3)
      batched array of position arrays. Each position array should be padded
      if there are fewer atoms than maxnatoms.
    CELLS: array-like shape=(batch, 3, 3)
    MASKS: array-like shape=(batch, maxnatoms)
    strain: array-like shape=(3, 3), or None for zero strain.
      The same strain is applied to every system in the batch.

  Returns:
    forces: array-like shape=(batch, maxnatoms, 3)
  """
  # A mutable (ndarray) default argument would be shared across all calls,
  # so the zero default is constructed per call instead.
  if strain is None:
    strain = np.zeros((3, 3), dtype=np.float64)
  return tf.convert_to_tensor([
      forces(positions, cell, mask, strain)
      for positions, cell, mask in zip(POSITIONS, CELLS, MASKS)
  ])
316 |
317 |
def stress_batch(POSITIONS, CELLS, MASKS, strain=None):
  """A batched version of `stress'.

  Args:
    POSITIONS: array-like shape=(batch, maxnumatoms, 3)
      batched array of position arrays. Each position array should be padded
      if there are fewer atoms than maxnatoms.
    CELLS: array-like shape=(batch, 3, 3)
    MASKS: array-like shape=(batch, maxnatoms)
    strain: array-like shape=(3, 3), or None for zero strain.
      The same strain is applied to every system in the batch.

  Returns:
    stresses: array-like shape=(batch, 6)
  """
  # A mutable (ndarray) default argument would be shared across all calls,
  # so the zero default is constructed per call instead.
  if strain is None:
    strain = np.zeros((3, 3), dtype=np.float64)
  return tf.convert_to_tensor([
      stress(positions, cell, mask, strain)
      for positions, cell, mask in zip(POSITIONS, CELLS, MASKS)
  ])
339 |
340 |
341 | # * One way list class
342 |
343 |
class LennardJones(Calculator):
  """An ASE calculator for a Lennard-Jones potential driven by Tensorflow.

  The whole energy/forces/stress graph is built once in __init__ against
  placeholders; `calculate` feeds atomic data into that graph. sigma and
  epsilon are tf variables so they can in principle be trained.
  """
  implemented_properties = ["energy", "forces", "stress"]

  default_parameters = {"sigma": 1.0, "epsilon": 1.0}

  def __init__(self, **kwargs):
    """Build the session and the energy/forces/stress graph."""
    Calculator.__init__(self, **kwargs)
    self.sess = tf.Session()

    # These variable scopes match the ones in the module-level energy
    # function, so the variables are shared with it via AUTO_REUSE.
    with tf.variable_scope("sigma", reuse=tf.AUTO_REUSE):
      sigma = tf.get_variable(
          "sigma",
          dtype=tf.float64,
          initializer=tf.constant(self.parameters.sigma, dtype=tf.float64))

    with tf.variable_scope("epsilon", reuse=tf.AUTO_REUSE):
      epsilon = tf.get_variable(
          "epsilon",
          dtype=tf.float64,
          initializer=tf.constant(self.parameters.epsilon, dtype=tf.float64))

    self.sigma = sigma
    self.epsilon = epsilon

    # Placeholders fed by calculate().
    self._positions = tf.placeholder(dtype=tf.float64, shape=(None, 3))
    self._cell = tf.placeholder(dtype=tf.float64, shape=(3, 3))
    self._strain = tf.placeholder(dtype=tf.float64, shape=(3, 3))

    with tf.name_scope("LennardJones"):
      with tf.name_scope("setup"):
        # Deform the cell and positions by (I + strain).
        strain_tensor = tf.eye(3, dtype=self._cell.dtype) + self._strain
        strained_cell = tf.matmul(self._cell, strain_tensor)
        strained_positions = tf.matmul(self._positions, strain_tensor)

        sigma = self.sigma
        epsilon = self.epsilon
        # Cutoff radius is tied to sigma.
        rc = 3 * sigma

      with tf.name_scope("calculate_energy"):
        # Energy of an isolated pair exactly at the cutoff; subtracted per
        # neighbor so the potential goes to zero at rc.
        e0 = 4 * epsilon * ((sigma / rc)**12 - (sigma / rc)**6)
        _energy = 0.0

        inds, dists, displacements = get_neighbors_oneway(
            strained_positions, strained_cell, rc)

        m = dists < rc
        m.set_shape([None])
        r2 = tf.boolean_mask(dists, m)**2
        c6 = (sigma**2 / r2)**3
        c12 = c6**2
        n = tf.ones_like(r2)

        # One-way neighbor list: each pair appears once, so no factor 1/2.
        _energy -= tf.reduce_sum(e0 * n)
        _energy += tf.reduce_sum(4 * epsilon * (c12 - c6))

      self._energy = tf.identity(_energy, name='_energy')

      with tf.name_scope("forces"):
        # F = -dE/dR
        f = tf.gradients(-self._energy, self._positions)[0]
        self._forces = tf.identity(tf.convert_to_tensor(f), name='_forces')

      with tf.name_scope("stress"):
        with tf.name_scope("get_stress"):
          volume = tf.abs(tf.matrix_determinant(self._cell))
          # dE/dstrain normalized by volume, in Voigt order
          # [sxx, syy, szz, syz, sxz, sxy].
          g = tf.gradients(self._energy, self._strain)
          stress = tf.convert_to_tensor(g[0])
          stress /= volume
          stress = tf.gather(tf.reshape(stress, (9,)), [0, 4, 8, 5, 2, 1])
          self._stress = tf.identity(stress, name='_stress')

  def calculate(self,
                atoms=None,
                properties=["energy"],
                system_changes=all_changes):
    """Run the calculator.
    You don't usually call this, it is usually called by methods on the Atoms.
    """
    Calculator.calculate(self, atoms, properties, system_changes)
    # NOTE(review): re-initializing on every call resets sigma/epsilon to
    # their initializers, which would clobber values restored by load() or
    # changed by training — confirm this is intended.
    self.sess.run(tf.global_variables_initializer())
    feed_dict = {self._positions: atoms.positions,
                 self._cell: atoms.cell,
                 self._strain: np.zeros_like(atoms.cell)}
    # One session run for all three properties avoids executing the graph
    # three times.
    e, f, s = self.sess.run([self._energy, self._forces, self._stress],
                            feed_dict=feed_dict)
    self.results["energy"] = e
    self.results["forces"] = f
    self.results["stress"] = s

  def save(self, label):
    "Save the graph and variables to label-prefixed checkpoint files."
    saver = tf.train.Saver()
    saver.save(self.sess, label)

  def load(self, label):
    "Load variables previously saved with save(label)."
    saver = tf.train.import_meta_graph(label + ".meta")
    # Saver.restore runs the restore ops itself and returns None, so it must
    # not be wrapped in sess.run; it also takes the checkpoint prefix
    # (label), not the .meta file path.
    saver.restore(self.sess, label)
    g = tf.get_default_graph()
    # The variables were created inside variable scopes of the same name, so
    # their full names are scope-qualified.
    self.sigma = g.get_tensor_by_name("sigma/sigma:0")
    self.epsilon = g.get_tensor_by_name("epsilon/epsilon:0")
    print(f'Loaded {self.sigma} and {self.epsilon}')

  def train(self, label, dbfile, nepochs=10,
            learning_rate=0.001,
            shuffle=True, percenttest=0.1):
    """Train the potential against the data in a database.

    NOTE(review): this method is incomplete — the loss and optimizer below
    are commented out, so the loop only evaluates the energy tensor and no
    variables are actually updated.

    Parameters
    ----------
    label: string, used for saving the results.
    dbfile: the path to an ase database containing training examples.
    nepochs: int, number of passes over the training data.
    learning_rate: float, step size for the (not yet wired up) optimizer.
    shuffle: boolean, if True, shuffle the data.
    percenttest: float, fraction of data to use only for testing
    """

    with ase.db.connect(dbfile) as db:
      data = [(row.toatoms(), row.energy) for row in db.select()]

    if shuffle:
      import random
      random.shuffle(data)

    N_train = int(len(data) * (1 - percenttest))

    train_data = data[0:N_train]
    test_data = data[N_train:]

    known_energies = tf.placeholder(tf.float64, None)
    tf_energies = tf.placeholder(tf.float64, None)

    #loss = tf.reduce_mean(tf.square(tf_energies - known_energies))
    #opt = tf.train.AdamOptimizer(learning_rate).minimize(loss)

    for i in range(nepochs):
      for atoms, ke in train_data:
        atoms.set_calculator(self)
        te = atoms.get_calculator()._energy

        _loss = self.sess.run([te])
--------------------------------------------------------------------------------