├── .editorconfig ├── .flake8 ├── .gitattributes ├── .gitignore ├── .pre-commit-config.yaml ├── .readthedocs.yaml ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── docs ├── Makefile ├── make.bat ├── qimpy.ico ├── qimpy.svg └── source │ ├── .gitignore │ ├── _ext │ └── yamldoc.py │ ├── _static │ └── css │ │ └── custom.css │ ├── _templates │ ├── class.rst │ └── module.rst │ ├── api.rst │ ├── conf.py │ ├── development │ └── index.rst │ ├── index.rst │ ├── inputfile.rst │ ├── install.rst │ ├── transport │ ├── index.rst │ └── inputfile.rst │ └── tutorials │ ├── getting-started.rst │ ├── index.rst │ ├── md │ ├── index.rst │ ├── ovito_example.png │ ├── si_aimd_energy.png │ └── silicon.rst │ ├── molecules │ ├── firstcalc.rst │ ├── geometryopt.rst │ ├── index.rst │ ├── openshell.rst │ ├── water.gif │ └── water.png │ └── solids │ ├── Pt_bandstructure.png │ ├── Pt_soc_bandstructure.png │ ├── Si_bandstructure.png │ ├── band_structures.rst │ ├── bz.rst │ ├── index.rst │ ├── metals.rst │ └── soc.rst ├── environment.yml ├── examples ├── .gitignore ├── CoSi │ └── totalE.yaml ├── Cu │ ├── bandstruct.yaml │ └── totalE.yaml ├── Graphene │ ├── bandstruct.yaml │ └── totalE.yaml ├── Jellium │ └── totalE.yaml ├── MD │ ├── H2 │ │ ├── .gitignore │ │ └── md.py │ └── Si │ │ ├── .gitignore │ │ └── md.py ├── Molecules │ ├── H2.yaml │ ├── H2O-triclinic.yaml │ ├── H2O.yaml │ ├── Hatom-SOC.yaml │ ├── Hatom.yaml │ ├── O2.yaml │ └── OH-.yaml ├── NaCl-ClVac │ ├── totalE.py │ └── totalE.yaml ├── Pt-SOC │ └── totalE.yaml ├── Si │ ├── bandstruct.yaml │ └── totalE.yaml └── Transport │ ├── .gitignore │ ├── curved-tile-split.svg │ ├── curved-tile.svg │ ├── ping_pong │ ├── README.md │ ├── plot.yaml │ └── rect-domain.svg │ ├── plot.yaml │ ├── rect-domain.svg │ ├── rect-domain.yaml │ ├── rect-periodic.svg │ ├── rect-periodic.yaml │ ├── rect-with-leads-plot.yaml │ ├── rect-with-leads.svg │ ├── rect-with-leads.yaml │ ├── semicircle.svg │ └── semicircle.yaml ├── mpi_print_from_head.sh ├── pyproject.toml 
├── setup.cfg ├── setup.py ├── src ├── README.md ├── mypy.ini └── qimpy │ ├── README.md │ ├── __init__.py │ ├── _energy.py │ ├── _tree.py │ ├── _version.py │ ├── algorithms │ ├── README.md │ ├── __init__.py │ ├── _gradable.py │ ├── _minimize.py │ ├── _minimize_cg.py │ ├── _minimize_lbfgs.py │ ├── _minimize_line.py │ ├── _optimizable.py │ ├── _pulay.py │ └── test_minimize.py │ ├── conftest.py │ ├── dft │ ├── README.md │ ├── __init__.py │ ├── __main__.py │ ├── _main.py │ ├── _system.py │ ├── electrons │ │ ├── README.md │ │ ├── __init__.py │ │ ├── _basis.py │ │ ├── _basis_ops.py │ │ ├── _basis_real.py │ │ ├── _chefsi.py │ │ ├── _davidson.py │ │ ├── _electrons.py │ │ ├── _fillings.py │ │ ├── _hamiltonian.py │ │ ├── _lcao.py │ │ ├── _scf.py │ │ ├── _wavefunction.py │ │ ├── _wavefunction_arithmetic.py │ │ ├── _wavefunction_dot.py │ │ ├── _wavefunction_init.py │ │ ├── _wavefunction_slice.py │ │ ├── _wavefunction_split.py │ │ ├── test_fillings.py │ │ ├── test_wavefunction_init_split.py │ │ └── xc │ │ │ ├── README.md │ │ │ ├── __init__.py │ │ │ ├── _plus_U.py │ │ │ ├── _xc.py │ │ │ ├── functional.py │ │ │ ├── gga.py │ │ │ ├── lda.py │ │ │ └── mgga.py │ ├── export │ │ ├── README.md │ │ ├── __init__.py │ │ ├── _bgw.py │ │ └── _export.py │ ├── geometry │ │ ├── README.md │ │ ├── __init__.py │ │ ├── _dynamics.py │ │ ├── _fixed.py │ │ ├── _geometry.py │ │ ├── _gradient.py │ │ ├── _history.py │ │ ├── _relax.py │ │ ├── _stepper.py │ │ └── thermostat.py │ └── ions │ │ ├── README.md │ │ ├── __init__.py │ │ ├── _ions.py │ │ ├── _ions_atomic.py │ │ ├── _ions_projectors.py │ │ ├── _ions_update.py │ │ ├── _lowdin.py │ │ ├── _pseudo_quantum_numbers.py │ │ ├── _pseudopotential.py │ │ ├── _read_upf.py │ │ ├── symbols.py │ │ └── test_pseudopotential.py │ ├── grid │ ├── README.md │ ├── __init__.py │ ├── _change.py │ ├── _embed.py │ ├── _fft.py │ ├── _field.py │ ├── _field_symmetrizer.py │ ├── _grid.py │ ├── coulomb │ │ ├── __init__.py │ │ ├── _coulomb.py │ │ ├── _isolated.py │ │ ├── 
_periodic.py │ │ ├── _slab.py │ │ ├── _wire.py │ │ └── test_coulomb.py │ ├── test_change.py │ ├── test_common.py │ ├── test_embed.py │ ├── test_fft.py │ └── test_field.py │ ├── interfaces │ ├── README.md │ ├── __init__.py │ ├── ase.py │ ├── bandstructure.py │ ├── cif.py │ └── xsf.py │ ├── io │ ├── README.md │ ├── __init__.py │ ├── _checkpoint.py │ ├── _default.py │ ├── _error.py │ ├── _log_config.py │ ├── _tensor.py │ ├── _unit.py │ ├── dict.py │ └── yaml.py │ ├── lattice │ ├── README.md │ ├── __init__.py │ ├── _kpoints.py │ ├── _lattice.py │ ├── _lattice_systems.py │ ├── _wigner_seitz.py │ └── test_wigner_seitz.py │ ├── math │ ├── __init__.py │ ├── _integer.py │ ├── _linalg.py │ ├── _radial_function.py │ ├── _spherical_harmonics_data.py │ ├── _spherical_harmonics_generate.py │ ├── quintic_spline.py │ ├── random.py │ ├── spherical_bessel.py │ ├── spherical_harmonics.py │ ├── test_quintic_spline.py │ ├── test_spherical_bessel.py │ └── test_spherical_harmonics.py │ ├── mpi │ ├── README.md │ ├── __init__.py │ ├── _async_reduce.py │ ├── _bufferview.cpp │ ├── _bufferview.pyi │ ├── _process_grid.py │ ├── _sparse_matrices.py │ ├── _taskdivision.py │ ├── _waitable.py │ ├── globalreduce.py │ └── test_sparse.py │ ├── pre_init.py │ ├── profiler.py │ ├── py.typed │ ├── rc.py │ ├── symmetries │ ├── README.md │ ├── __init__.py │ ├── _grid.py │ ├── _lattice.py │ ├── _positions.py │ └── _symmetries.py │ └── transport │ ├── README.md │ ├── __init__.py │ ├── __main__.py │ ├── _main.py │ ├── _time_evolution.py │ ├── _transport.py │ ├── advect.py │ ├── collide │ └── __init__.py │ ├── geometry │ ├── __init__.py │ ├── _geometry.py │ ├── _parameter_grid.py │ ├── _patch.py │ ├── _patch_set.py │ ├── _spline.py │ ├── _subdivide.py │ ├── _svg.py │ ├── _tensor_list.py │ ├── test_advect.py │ └── test_svg.py │ ├── material │ ├── __init__.py │ ├── _fermi_circle.py │ ├── _material.py │ └── ab_initio │ │ ├── __init__.py │ │ ├── _ab_initio.py │ │ ├── _light.py │ │ ├── _lindblad.py │ │ ├── 
_packed_hermitian.py │ │ ├── _pulse_b.py │ │ ├── _relaxation_time.py │ │ └── read.py │ └── plot.py └── versioneer.py /.editorconfig: -------------------------------------------------------------------------------- 1 | # https://EditorConfig.org 2 | 3 | # top-most EditorConfig file 4 | root = true 5 | 6 | # Unix-style newlines with a newline ending every file 7 | [*] 8 | end_of_line = lf 9 | insert_final_newline = true 10 | 11 | # 4 space indentation for Python (PEP8) with black recommended line length 12 | [{*.py,*.cfg}] 13 | indent_style = space 14 | indent_size = 4 15 | max_line_length = 88 16 | 17 | # 2 space indentation for YAML 18 | [{*.yml,*.yaml}] 19 | indent_style = space 20 | indent_size = 2 21 | 22 | # Tab indentation for Makefiles 23 | [Makefile] 24 | indent_style = tab 25 | 26 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 88 3 | select = C, E, F, W, B, B950 4 | extend-ignore = E203, E501, E741, W503 5 | 6 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | src/qimpy/_version.py export-subst 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a 
python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # KDevelop projects 114 | *.kdev4 115 | 116 | # PyCharm projects 117 | .idea 118 | 119 | # Spyder project settings 120 | .spyderproject 121 | .spyproject 122 | 123 | # Rope project settings 124 | .ropeproject 125 | 126 | # mkdocs documentation 127 | /site 128 | 129 | # mypy 130 | .mypy_cache/ 131 | .dmypy.json 132 | dmypy.json 133 | 134 | # Pyre type checker 135 | .pyre/ 136 | 137 | # vim swap files 138 | *.sw* 139 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/psf/black 3 | rev: 22.12.0 4 | hooks: 5 | - id: black 6 | - repo: https://github.com/pycqa/flake8 7 | rev: 6.0.0 8 | hooks: 9 | - id: flake8 10 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # Configuration for website and doc generation on RTD 2 | version: 2 3 | 4 | build: 5 | os: ubuntu-22.04 6 | tools: 7 | python: "mambaforge-22.9" 8 | 9 | conda: 10 | environment: environment.yml 11 | 12 | python: 13 | install: 14 | - method: setuptools 15 | path: . 16 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2021, QimPy collaboration 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. 
Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
30 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include versioneer.py 2 | include src/qimpy/_version.py 3 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: precommit 2 | precommit: 3 | pre-commit run --all-files 4 | 5 | .PHONY: typecheck 6 | typecheck: 7 | cd src && mypy -p qimpy 8 | 9 | .PHONY: test-nompi 10 | test-nompi: 11 | python -m pytest 12 | 13 | .PHONY: test-mpi 14 | test-mpi: 15 | mpirun ./mpi_print_from_head.sh python -m pytest --with-mpi 16 | 17 | .PHONY: test 18 | test: test-nompi test-mpi 19 | 20 | .PHONY: check 21 | check: precommit typecheck test 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![QimPy](docs/qimpy.svg) 2 | 3 | --- 4 | 5 | QimPy (pronounced [/'kɪm-paɪ'/](https://en.wikipedia.org/wiki/Help:IPA/English)) 6 | is a Python package for Quantum-Integrated Multi-PhYsics. 7 | 8 | # Coding style 9 | 10 | The repository provides a .editorconfig with indentation and line-length rules, 11 | and a pre-commit configuration to run black and flake8 to enforce and verify style. 12 | Please install this pre-commit hook by running `pre-commit install` 13 | within the working directory. 14 | While this hook will run automatically on filed modified in each commit, 15 | you can also use `make precommit` to manually run it on all code files. 16 | 17 | Function/method signatures and class attributes must use type hints. 18 | Document class attributes using doc comments on the type hints when possible. 19 | Run `make typecheck` to perform a static type check using mypy before pushing code. 
20 | 21 | For all log messages, use f-strings as far as possible for maximum readability. 22 | 23 | Run `make test` to invoke all configured pytest tests. To only run mpi or 24 | non-mpi tests specifically, use `make test-mpi` or `make test-nompi`. 25 | 26 | Best practice: run `make check` to invoke the precommit, typecheck 27 | and test targets before commiting code. 28 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. 
Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/qimpy.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shankar1729/qimpy/7940322f3bc00e7d87814675dbde282c234c87e2/docs/qimpy.ico -------------------------------------------------------------------------------- /docs/source/.gitignore: -------------------------------------------------------------------------------- 1 | api 2 | yamldoc 3 | 4 | -------------------------------------------------------------------------------- /docs/source/_static/css/custom.css: -------------------------------------------------------------------------------- 1 | .wy-table-responsive table td { 2 | white-space: normal !important; 3 | } 4 | .wy-table-responsive { 5 | overflow: visible !important; 6 | } 7 | 8 | .yamlkey { 9 | color: purple; 10 | } 11 | 12 | .yamlparam { 13 | color: purple; 14 | font-weight: bold; 15 | } 16 | 17 | .yamltype { 18 | font-weight: bold; 19 | color: blue; 20 | } 21 | 22 | .yamlcomment { 23 | color: grey; 24 | } 25 | -------------------------------------------------------------------------------- /docs/source/_templates/class.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline}} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. autoclass:: {{ objname }} 6 | :members: 7 | :show-inheritance: 8 | 9 | {% block methods %} 10 | .. automethod:: __init__ 11 | 12 | {% if methods %} 13 | .. rubric:: {{ _('Methods') }} 14 | 15 | .. 
autosummary:: 16 | :nosignatures: 17 | {% for item in methods %} 18 | ~{{ name }}.{{ item }} 19 | {%- endfor %} 20 | {% endif %} 21 | {% endblock %} 22 | 23 | {% block attributes %} 24 | {% if attributes %} 25 | .. rubric:: {{ _('Attributes') }} 26 | 27 | .. autosummary:: 28 | {% for item in attributes %} 29 | ~{{ name }}.{{ item }} 30 | {%- endfor %} 31 | {% endif %} 32 | {% endblock %} 33 | -------------------------------------------------------------------------------- /docs/source/_templates/module.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline}} 2 | 3 | .. automodule:: {{ fullname }} 4 | 5 | {% block attributes %} 6 | {% if attributes %} 7 | .. rubric:: Module Attributes 8 | 9 | .. autosummary:: 10 | :toctree: 11 | {% for item in attributes %} 12 | {{ item }} 13 | {%- endfor %} 14 | {% endif %} 15 | {% endblock %} 16 | 17 | {% block functions %} 18 | {% if functions %} 19 | .. rubric:: {{ _('Functions') }} 20 | 21 | .. autosummary:: 22 | :toctree: 23 | :nosignatures: 24 | {% for item in functions %} 25 | {{ item }} 26 | {%- endfor %} 27 | {% endif %} 28 | {% endblock %} 29 | 30 | {% block classes %} 31 | {% if classes %} 32 | .. rubric:: {{ _('Classes') }} 33 | 34 | .. autosummary:: 35 | :nosignatures: 36 | :toctree: 37 | :template: class.rst 38 | {% for item in classes %} 39 | {{ item }} 40 | {%- endfor %} 41 | {% endif %} 42 | {% endblock %} 43 | 44 | {% block exceptions %} 45 | {% if exceptions %} 46 | .. rubric:: {{ _('Exceptions') }} 47 | 48 | .. autosummary:: 49 | :toctree: 50 | :nosignatures: 51 | {% for item in exceptions %} 52 | {{ item }} 53 | {%- endfor %} 54 | {% endif %} 55 | {% endblock %} 56 | 57 | {% block modules %} 58 | {% if modules %} 59 | .. rubric:: Modules 60 | 61 | .. 
autosummary:: 62 | :toctree: 63 | :nosignatures: 64 | :template: module.rst 65 | :recursive: 66 | {% for item in modules %} 67 | {% if not (('.test' in item) or item.endswith('test')) %} 68 | {{ item }} 69 | {% endif %} 70 | {%- endfor %} 71 | {% endif %} 72 | {% endblock %} 73 | -------------------------------------------------------------------------------- /docs/source/api.rst: -------------------------------------------------------------------------------- 1 | API Reference 2 | ============= 3 | 4 | .. autosummary:: 5 | :toctree: api 6 | :template: module.rst 7 | :recursive: 8 | 9 | qimpy 10 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | 13 | import os 14 | import sys 15 | 16 | sys.path.append(os.path.abspath("../../src/")) 17 | sys.path.append(os.path.abspath("./_ext/")) 18 | 19 | 20 | # -- Project information ----------------------------------------------------- 21 | 22 | project = "QimPy" 23 | copyright = "2023, QimPy Collaboration" 24 | author = "QimPy Collaboration" 25 | 26 | 27 | # -- General configuration --------------------------------------------------- 28 | 29 | # Add any Sphinx extension module names here, as strings. They can be 30 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 31 | # ones. 
32 | extensions = [ 33 | "sphinx.ext.autodoc", # Core library for html generation from docstrings 34 | "sphinx.ext.autosummary", # Create neat summary tables 35 | "sphinx.ext.coverage", # Report missing documentation 36 | "sphinx.ext.napoleon", # NumPy style docstrings 37 | "yamldoc", # Extract YAML input file documentation from docstrings 38 | ] 39 | autosummary_generate = True 40 | autosummary_imported_members = True 41 | autosummary_ignore_module_all = False 42 | coverage_show_missing_items = True 43 | 44 | # Add any paths that contain templates here, relative to this directory. 45 | templates_path = ["_templates"] 46 | 47 | # List of patterns, relative to source directory, that match files and 48 | # directories to ignore when looking for source files. 49 | # This pattern also affects html_static_path and html_extra_path. 50 | exclude_patterns = [] 51 | 52 | # Move type hints from call signature to description 53 | autodoc_typehints = "description" 54 | 55 | # Order entries by type: 56 | autodoc_member_order = "groupwise" 57 | 58 | # Suppress unnecessary paths in class / function names: 59 | add_module_names = False 60 | python_use_unqualified_type_names = True 61 | 62 | # -- Options for HTML output ------------------------------------------------- 63 | 64 | # The theme to use for HTML and HTML Help pages. See the documentation for 65 | # a list of builtin themes. 66 | # 67 | html_theme = "sphinx_rtd_theme" 68 | html_theme_options = {"style_nav_header_background": "#000000"} 69 | 70 | # Add any paths that contain custom static files (such as style sheets) here, 71 | # relative to this directory. They are copied after the builtin static files, 72 | # so a file named "default.css" will overwrite the builtin "default.css". 
73 | html_static_path = ["_static"] 74 | html_css_files = ["css/custom.css"] 75 | html_logo = "../qimpy.svg" 76 | html_favicon = "../qimpy.ico" 77 | -------------------------------------------------------------------------------- /docs/source/development/index.rst: -------------------------------------------------------------------------------- 1 | Development 2 | =========== 3 | 4 | To get started with QimPy development, fetch QimPy from 5 | `git `_ 6 | and install it in develop mode as discussed in :doc:`/install`. 7 | QimPy maintains a consistent object heirarchy in the API, 8 | input and checkpoint files, so once you get familiar with 9 | where to specify a setting in the input, you now also know 10 | where to find the corresponding outputs in the HDF5 checkpoint 11 | as well as the underlying source code related to that setting. 12 | 13 | In fact, the :doc:`/inputfile` and :doc:`/api` are both generated 14 | from the same documentation strings within the code. 15 | The main objects in the object heirarchy for QimPy all derive from 16 | :class:`qimpy.TreeNode`, which sets up a consistent tree structure for 17 | the objects in memory, the checkpoint and the YAML input file. 18 | See the particularly detailed doc strings for the `__init__` of 19 | any such class, *e.g.*, starting with :class:`qimpy.dft.System`, 20 | the root object created for DFT calculations. 21 | The parameters whose documentation contain a `:yaml:` tag 22 | are those that can be specified from the input file, 23 | while the rest are used internally in the code alone. 24 | 25 | To get started with QimPy development, a great place to start is the 26 | `QimPy issues `_ page. 27 | In particular, look for issues labeled with `good first issue`. 28 | We of course greatly appreciate any and all feature contributions. 
29 | If you like using the code but are not yet comfortable modifying it, 30 | expansion of the tutorials and improvement of the documentation 31 | will also be invaluable contributions! 32 | 33 | 34 | Coding style 35 | ------------ 36 | 37 | The repository provides a .editorconfig with indentation and line-length rules, 38 | and a pre-commit configuration to run black and flake8 to enforce and verify style. 39 | Please install this pre-commit hook by running `pre-commit install` 40 | within the working directory. 41 | While this hook will run automatically on filed modified in each commit, 42 | you can also use `make precommit` to manually run it on all code files. 43 | 44 | Function/method signatures and class attributes must use type hints. 45 | Document class attributes using doc comments on the type hints when possible. 46 | Run `make typecheck` to perform a static type check using mypy before pushing code. 47 | 48 | For all log messages, use f-strings as far as possible for maximum readability. 49 | 50 | Run `make test` to invoke all configured pytest tests. To only run mpi or 51 | non-mpi tests specifically, use `make test-mpi` or `make test-nompi`. 52 | 53 | Best practice: run `make check` to invoke the precommit, typecheck 54 | and test targets before commiting code. 55 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | QimPy: Quantum-Integrated Multi-PhYsics 2 | ======================================= 3 | 4 | QimPy (pronounced `kɪm-paɪ `_) 5 | is an open-source electronic structure software designed to enable 6 | tight integration with classical multi-physics and multi-scale modeling. 
7 | A key focus of this code is facilitating simultaneous performance and rapid technique development, 8 | making it as easy to develop new electronic-structure-integrated features, 9 | as it is to apply to materials and chemistry modeling. 10 | 11 | Designed from the ground up in 2021, this code takes advantage of modern Python 12 | for ease of development and PyTorch for high performance on a wide range of computing hardware. 13 | It is intended as a successor of `JDFTx `_ and will develop a full feature set 14 | for first-principles electrochemistry, carrier dynamics and transport in 2023 and 2024. 15 | At the moment, QimPy is a fully-functional plane-wave DFT code with norm-conserving pseudopotentials, 16 | supporing electronic structure, geometry optimization and *ab initio* molecular dynamics calciulations. 17 | 18 | QimPy is built on PyTorch as a hardware abstraction layer, and fully supports CPUs, 19 | NVIDIA GPUs and likely also AMD GPUs through the corresponding PyTorch device layers. 20 | The use of ML libraries as the underlying layer presents a unique advantage: 21 | use of the specialized tensor cores in the GPUs without the need for hand-tuned kernels. 22 | QimPy is designed to scale to large numbers of GPUs by efficiently overlapping 23 | all inter-GPU communications with the primary computation involving wavefunction transforms. 24 | 25 | 26 | .. toctree:: 27 | :maxdepth: 1 28 | 29 | install 30 | tutorials/index 31 | inputfile 32 | api 33 | development/index 34 | 35 | See the `QimPy github page `_ for the source code. 36 | 37 | Auxiliary packages 38 | ------------------ 39 | 40 | .. toctree:: 41 | :maxdepth: 1 42 | 43 | transport/index 44 | -------------------------------------------------------------------------------- /docs/source/inputfile.rst: -------------------------------------------------------------------------------- 1 | Input file documentation 2 | ======================== 3 | 4 | QimPy uses the `YAML format `_ for input files. 
5 | See below for a complete list of settings. 6 | Most settings have sensible defaults: 7 | start with the :doc:`/tutorials/index` to see the most commonly used settings 8 | and then use the following as a reference for more information on each setting. 9 | 10 | Click on each setting to open a more detailed documentation, 11 | and also to link to the corresponding entry in the :doc:`/api`. 12 | In addition to settings listed below, the special keyword `include` 13 | can be used at any level to effectively load another YAML file 14 | into that level of the heirarchy. 15 | 16 | .. yamldoc:: qimpy.dft.System 17 | -------------------------------------------------------------------------------- /docs/source/transport/index.rst: -------------------------------------------------------------------------------- 1 | QimPy transport 2 | =============== 3 | 4 | The transport package of QimPy, currently in development, 5 | will facilitate device-scale simulation of charge and spin transport 6 | using *ab initio* electronic structure and scattering kernels. 7 | 8 | .. toctree:: 9 | :maxdepth: 1 10 | 11 | inputfile 12 | 13 | -------------------------------------------------------------------------------- /docs/source/transport/inputfile.rst: -------------------------------------------------------------------------------- 1 | Input file documentation for qimpy.transport 2 | ============================================ 3 | 4 | QimPy uses the `YAML format `_ for input files. 5 | See below for a complete list of settings. 6 | Most settings have sensible defaults: 7 | start with the :doc:`/tutorials/index` to see the most commonly used settings 8 | and then use the following as a reference for more information on each setting. 9 | 10 | Click on each setting to open a more detailed documentation, 11 | and also to link to the corresponding entry in the :doc:`/api`. 
12 | In addition to settings listed below, the special keyword `include` 13 | can be used at any level to effectively load another YAML file 14 | into that level of the heirarchy. 15 | 16 | .. yamldoc:: qimpy.transport.Transport 17 | -------------------------------------------------------------------------------- /docs/source/tutorials/getting-started.rst: -------------------------------------------------------------------------------- 1 | Getting started 2 | =============== 3 | 4 | Follow :doc:`/install` to setup QimPy in a python virtual environment 5 | or conda environment, and make sure that environment is active 6 | before running any of the tutorials. 7 | We'll assume you called that environment `qimpy`, 8 | indicated by the prefix (qimpy) on all the shells shown here. 9 | 10 | The tutorials will guide you through the construction of a YAML input file, say `in.yaml`. 11 | To run QimPy using a single process and all available CPU threads: 12 | 13 | .. code-block:: bash 14 | 15 | (qimpy) $ python -m qimpy.dft -i in.yaml 16 | 17 | To use multiple processes using MPI, *e.g.*, using 4 processes and assuming OpenMPI: 18 | 19 | .. code-block:: bash 20 | 21 | (qimpy) $ mpirun -n 4 python -m qimpy.dft -i in.yaml 22 | 23 | With MPI and to leverage GPUs, *e.g.*, assuming 4 GPUs available on the system: 24 | 25 | .. code-block:: bash 26 | 27 | (qimpy) $ CUDA_VISIBLE_DEVICES="0,1,2,3" mpirun -n 4 python -m qimpy.dft -i in.yaml 28 | 29 | Note that QimPy will not use GPUs unless explicitly instructed to using CUDA_VISIBLE_DEVICES. 30 | 31 | Within a SLURM batch job file, request cores and GPUs as specified for your HPC resource and: 32 | 33 | .. code-block:: bash 34 | 35 | (qimpy) $ srun python -m qimpy.dft -i in.yaml 36 | 37 | In this case, SLURM will set all required environment variables, including CUDA_VISIBLE_DEVICES 38 | and SLURM_CPUS_PER_TASK, which QimPy will use to select the appropriate GPU and CPUs. 
39 | 40 | 41 | Pseudopotentials 42 | ---------------- 43 | 44 | QimPy currently supports norm-conserving UPF pseudopotentials, 45 | but does not distribute any pseudopotentials with the code. 46 | You can refer to pseudopotentials with relative or absolute paths 47 | in each calculation, but this can be cumbersome. 48 | The instructions below will get you started with the 49 | `SG15 pseudopotentials `_ 50 | in a path specified to QimPy with an environment variable, 51 | so that you do not need to specify absolute paths for each calculation. 52 | The tutorials assume that you have the SG15 pseudopotentials set up this way. 53 | 54 | Create a directory, say /path/to/pseudos where you want to store your pseudopotentials: 55 | 56 | .. code-block:: bash 57 | 58 | $ cd /path/to/pseudos 59 | pseudos$ wget https://raw.githubusercontent.com/shankar1729/jdftx/master/jdftx/pseudopotentials/SG15.tgz 60 | pseudos$ wget https://raw.githubusercontent.com/shankar1729/jdftx/master/jdftx/pseudopotentials/SG15-pulay.tgz 61 | pseudos$ tar xvzf SG15.tgz 62 | pseudos$ tar xvzf SG15-pulay.tgz 63 | 64 | Alternately, if you have `JDFTx `_ installed, you already have these files 65 | and /path/to/pseudos can just be taken as jdftx-build-dir/pseudopotentials. 66 | 67 | Set the environment variable before running QimPy calculations: 68 | 69 | .. code-block:: bash 70 | 71 | $ export QIMPY_PSEUDO_DIR=/path/to/pseudos 72 | 73 | and add this to your .bashrc so that this takes effect in all subsequent terminal sessions. 74 | -------------------------------------------------------------------------------- /docs/source/tutorials/index.rst: -------------------------------------------------------------------------------- 1 | Tutorials 2 | ========= 3 | 4 | .. 
toctree:: 5 | :maxdepth: 1 6 | 7 | getting-started 8 | molecules/index 9 | solids/index 10 | md/index 11 | 12 | -------------------------------------------------------------------------------- /docs/source/tutorials/md/index.rst: -------------------------------------------------------------------------------- 1 | Molecular Dynamics tutorials 2 | ============================ 3 | 4 | .. toctree:: 5 | :maxdepth: 1 6 | 7 | silicon 8 | -------------------------------------------------------------------------------- /docs/source/tutorials/md/ovito_example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shankar1729/qimpy/7940322f3bc00e7d87814675dbde282c234c87e2/docs/source/tutorials/md/ovito_example.png -------------------------------------------------------------------------------- /docs/source/tutorials/md/si_aimd_energy.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shankar1729/qimpy/7940322f3bc00e7d87814675dbde282c234c87e2/docs/source/tutorials/md/si_aimd_energy.png -------------------------------------------------------------------------------- /docs/source/tutorials/md/silicon.rst: -------------------------------------------------------------------------------- 1 | Silicon AIMD 2 | =================== 3 | 4 | We will show an *ab-initio* molecular dynamics simulation with a silicon crystal 5 | using QimPy. *Ab-initio* molecular dynamics (AIMD) is a technique for performing 6 | molecular dynamics simulations using electronic structure calculations to 7 | compute forces. Although AIMD is significantly more computationally intensive 8 | than conventional molecular dynamics, you should find that the calculation in 9 | this tutorial runs on ordinary consumer hardware, with significant benefits to 10 | GPU-based computation if it is available. 
11 | 12 | The following input file defines an AIMD calculation on a perturbed 13 | diamond-cubic silicon lattice with a Nose-Hoover thermostat. Save this text to 14 | ``Silicon_md.yaml`` in your calculation directory: 15 | 16 | .. code-block:: yaml 17 | 18 | # Diamond-cubic silicon 19 | lattice: 20 | system: cubic 21 | modification: face-centered 22 | a: 5.4437022209 Angstrom 23 | movable: no 24 | 25 | ions: 26 | pseudopotentials: 27 | - SG15/$ID_ONCV_PBE-1.0.upf 28 | coordinates: 29 | - [Si, 0.75, 0.75, 0.25] 30 | - [Si, 0.00, 0.50, 0.50] 31 | - [Si, 0.75, 0.25, 0.75] 32 | - [Si, 0.00, 0.00, 0.00] 33 | - [Si, 0.25, 0.75, 0.75] 34 | - [Si, 0.50, 0.50, 0.00] 35 | - [Si, 0.25, 0.25, 0.25] 36 | - [Si, 0.50, 0.00, 0.50] 37 | 38 | geometry: 39 | dynamics: 40 | T0: 300 K 41 | n-steps: 5000 42 | dt: 10. fs 43 | thermostat: 44 | nose-hoover: 45 | chain-length-T: 3 46 | t-damp-T: 50 fs 47 | 48 | checkpoint: null # disable reading checkpoint 49 | checkpoint-out: Silicon_md.h5 # but still create it 50 | 51 | Now you are ready to perform the calculation: 52 | 53 | .. code-block:: bash 54 | 55 | (qimpy) $ python -m qimpy.dft -i Silicon_md.yaml | tee Silicon_md.out 56 | 57 | The standard output of this run will be saved to ``Silicon_md.out``, and 58 | raw data (including forces, trajectories, and all run parameters) will be saved 59 | to ``Silicon_md.h5`` (as instructed by ``checkpoint-out``). If this calculation 60 | ends and you would like to restart or continue it from a checkpoint, simply 61 | change ``checkpoint: null`` to ``checkpoint: Silicon_md.h5``. The trajectories 62 | from this new run will be seamlessly appended to the existing checkpoint file. 63 | 64 | .. code-block:: python 65 | 66 | import h5py 67 | import matplotlib.pyplot as plt 68 | import numpy as np 69 | 70 | silicon_md = h5py.File("Silicon_md.h5", "r") 71 | 72 | # Energy per step 73 | E = np.array(silicon_md["geometry"]["action"]["history"]["energy"]) 74 | 75 | dt = 10. 
# fs 76 | steps = len(E) 77 | time = np.linspace(0, steps*dt, num=steps) 78 | 79 | # Temperature per step (as an example) 80 | T = np.array(silicon_md["geometry"]["action"]["history"]["T"]) 81 | 82 | # Make a time series plot 83 | plt.title("Energy vs. Time (Silicon AIMD)") 84 | plt.ylabel("Energy (Ha)") 85 | plt.xlabel("Time (fs)") 86 | plt.plot(time, E) 87 | 88 | plt.savefig("si_aimd_energy.png") 89 | 90 | Save this script as ``energy_plot.py`` within your calculation directory (make 91 | sure the ``Silicon_md.h5`` checkpoint file is available), and run it to produce 92 | the following time-series plot of the system's energy: 93 | 94 | .. figure:: si_aimd_energy.png 95 | :align: center 96 | 97 | You may just as easily extract all other time series parameters of your run as 98 | numpy arrays for analysis (e.g. forces, positions, etc.). 99 | 100 | Using QimPy's XSF interface, you can also easily extract this data to create an 101 | animated XSF file for analysis with standard tools such as Ovito. You can do 102 | this by running the following script within your calculation directory: 103 | 104 | .. code-block:: bash 105 | 106 | (qimpy) $ python -m qimpy.interfaces.xsf --animated -c Silicon_md.h5 -x Silicon_md.xsf 107 | 108 | The ``--animated`` flag makes sure that this data is parsed into an animated XSF 109 | file. You may now open this file in Ovito, and you will be able to view an 110 | animation of your calculation. 111 | 112 | .. image:: ovito_example.png 113 | :align: center 114 | :scale: 80 % 115 | -------------------------------------------------------------------------------- /docs/source/tutorials/molecules/index.rst: -------------------------------------------------------------------------------- 1 | Molecules tutorials 2 | =================== 3 | 4 | .. 
toctree:: 5 | :maxdepth: 1 6 | 7 | firstcalc 8 | geometryopt 9 | openshell 10 | -------------------------------------------------------------------------------- /docs/source/tutorials/molecules/openshell.rst: -------------------------------------------------------------------------------- 1 | Open-shell systems 2 | ================== 3 | 4 | This tutorial covers the basics for spin-polarized and open-shell calculations in QimPy, 5 | using the most basic system of all, a hydrogen atom. 6 | 7 | First, let's set up a hydrogen atom calculation exactly as we would based on the previous tutorials. 8 | Save the following to `Hatom.yaml`: 9 | 10 | .. code-block:: yaml 11 | 12 | lattice: 13 | system: 14 | name: cubic 15 | modification: face-centered 16 | a: 20. # bohrs 17 | 18 | ions: 19 | pseudopotentials: 20 | - SG15/$ID_ONCV_PBE.upf 21 | coordinates: 22 | - [H, 0., 0., 0.] 23 | 24 | electrons: 25 | basis: 26 | ke-cutoff: 30.0 27 | 28 | checkpoint: null # disable reading checkpoint 29 | checkpoint-out: Hatom.h5 # but still create it 30 | 31 | and run 32 | 33 | .. code-block:: bash 34 | 35 | (qimpy) $ python -m qimpy.dft -i Hatom.yaml | tee Hatom.out 36 | 37 | Since there is only one atom, we don't need geometry optimization. 38 | Notice that the final energy F = -0.4601 Hartrees, which is rather different 39 | from the analytical exact energy -0.5 Hartree (= -1 Rydberg = -13.6 eV). 40 | 41 | The reason for this discrepancy is that, by default, this DFT calculation is spin-unpolarized, 42 | that is, it assumes an equal number of up and down spin electrons. 43 | This assumption is correct for the water molecule with a closed shell of 8 valence electrons 44 | that we dealt with so far, but is incorrect for the hydrogen atom which has only one electron. 45 | This electron must be either an up or down spin, so that the magnetization (Nup - Ndn) is +1 or -1. 
46 | We can invoke a spin-polarized calculation and specify the magnetization by adding the following 47 | key-value pairs to `Hatom.yaml` and rerun QimPy: 48 | 49 | .. code-block:: yaml 50 | 51 | electrons: 52 | basis: 53 | ke-cutoff: 30.0 54 | spin-polarized: yes 55 | fillings: 56 | M: 1 57 | 58 | Now we find F = -0.4997 Hartrees, in much better agreement with the analytical result. 59 | Check that using magnetization -1 produces exactly the same result. 60 | -------------------------------------------------------------------------------- /docs/source/tutorials/molecules/water.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shankar1729/qimpy/7940322f3bc00e7d87814675dbde282c234c87e2/docs/source/tutorials/molecules/water.gif -------------------------------------------------------------------------------- /docs/source/tutorials/molecules/water.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shankar1729/qimpy/7940322f3bc00e7d87814675dbde282c234c87e2/docs/source/tutorials/molecules/water.png -------------------------------------------------------------------------------- /docs/source/tutorials/solids/Pt_bandstructure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shankar1729/qimpy/7940322f3bc00e7d87814675dbde282c234c87e2/docs/source/tutorials/solids/Pt_bandstructure.png -------------------------------------------------------------------------------- /docs/source/tutorials/solids/Pt_soc_bandstructure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shankar1729/qimpy/7940322f3bc00e7d87814675dbde282c234c87e2/docs/source/tutorials/solids/Pt_soc_bandstructure.png -------------------------------------------------------------------------------- 
/docs/source/tutorials/solids/Si_bandstructure.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/shankar1729/qimpy/7940322f3bc00e7d87814675dbde282c234c87e2/docs/source/tutorials/solids/Si_bandstructure.png -------------------------------------------------------------------------------- /docs/source/tutorials/solids/band_structures.rst: -------------------------------------------------------------------------------- 1 | Band structure calculations 2 | =========================== 3 | 4 | .. image:: Si_bandstructure.png 5 | 6 | In the previous tutorial, we calculated the total energy of silicon and explored its Brillouin zone convergence. This tutorial illustrates calculations of the electronic band structure, specifically, the variation of the Kohn-Sham eigenvalues along a special kpoint path in the Brillouin zone. 7 | 8 | First lets specify the bulk silicon energy calculation in Si.yaml, or reuse the file from the :doc:`bz`: 9 | 10 | .. code-block:: yaml 11 | 12 | lattice: 13 | system: 14 | name: cubic 15 | modification: face-centered 16 | a: 10.263 17 | 18 | ions: 19 | pseudopotentials: 20 | - SG15/$ID_ONCV_PBE.upf 21 | coordinates: 22 | - [Si, 0, 0, 0] 23 | - [Si, 0.25, 0.25, 0.25] 24 | 25 | electrons: 26 | k-mesh: 27 | offset: [0.5, 0.5, 0.5] #Monkhorst-Pack 28 | size: [8, 8, 8] 29 | 30 | grid: 31 | ke-cutoff: 100 #Hartree 32 | 33 | checkpoint_out: Si_out.h5 34 | 35 | which can be run with 36 | 37 | .. code-block:: bash 38 | 39 | (qimpy) $ python -m qimpy.dft -i Si.yaml -o Si.out 40 | 41 | Next, we list high-symmetry points in the Brillouin zone laying out a path along which we want the band structure, which can be put in kpoints.yaml: 42 | 43 | .. 
code-block:: yaml 44 | 45 | include: Si.yaml 46 | 47 | electrons: 48 | 49 | fillings: 50 | n-bands: 10 51 | n-bands-extra: 5 52 | 53 | fixed-H: Si_out.h5 #fixed Hamiltonian so there's no more SCF 54 | 55 | k-mesh: null #de-specify the k-mesh from Si.yaml 56 | 57 | k-path: 58 | dk: 0.05 59 | points: 60 | - [0, 0, 0, $\Gamma$] 61 | - [0, 0.5, 0.5, X] 62 | - [0.25, 0.75, 0.5, W] 63 | - [0.5, 0.5, 0.5, L] 64 | - [0, 0, 0, $\Gamma$] 65 | - [0.375, 0.75, 0.375, K] 66 | 67 | checkpoint-out: null #de-specify the checkpoint file creation from Si.yaml 68 | 69 | Note that you can overwrite previously specified values when using **include**. The high symmetry points for many structures can be found easily online, such as this `course website `_ . Or you can use the more complete `Bilbao database `_ but you'll need to know the point group of your crystal. 70 | 71 | Then you can calculate the band structure along the k-point path with 72 | 73 | .. code-block:: bash 74 | 75 | (qimpy) $ python -m qimpy.dft -i kpoints.yaml -o kpoints.out 76 | 77 | and if you investigate the electron fillings using :code:`h5dump -g electrons/fillings kpoints.h5` then you can see that half of the bands are filled completely, while the other half are completely empty, in line with what is expected for a semiconductor. 78 | 79 | And to generate the band structure plot, run 80 | 81 | .. code-block:: bash 82 | 83 | (qimpy) $ python -m qimpy.interfaces.bandstructure -c kpoints.h5 -o Si_bandstructure.png 84 | 85 | which should produce 86 | 87 | .. image:: Si_bandstructure.png 88 | 89 | Notice that at the Gamma point, the lowest band is single while the next three higher bands are degenerate: these line up with the s and p valence orbitals on the Silicon atoms. These degeneracies change in different parts of the Brillouin zone: the XW segment has two pairs of degenerate bands, while the WL and Gamma-K segments have no degeneracies. 
90 | -------------------------------------------------------------------------------- /docs/source/tutorials/solids/index.rst: -------------------------------------------------------------------------------- 1 | Solids tutorials 2 | ================ 3 | 4 | .. toctree:: 5 | :maxdepth: 1 6 | 7 | bz 8 | band_structures 9 | metals 10 | soc 11 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: qimpy 2 | channels: 3 | - conda-forge 4 | - nodefaults 5 | dependencies: 6 | - python=3.11 7 | - pyyaml 8 | - pytorch 9 | - psutil 10 | - numpy 11 | - h5py 12 | - mpi4py 13 | - scipy 14 | - svg.path 15 | - typing-extensions 16 | - sphinx_rtd_theme 17 | -------------------------------------------------------------------------------- /examples/.gitignore: -------------------------------------------------------------------------------- 1 | bandstruct.pdf 2 | *.h5 3 | *.h5.* 4 | *.out 5 | *.xsf 6 | -------------------------------------------------------------------------------- /examples/CoSi/totalE.yaml: -------------------------------------------------------------------------------- 1 | lattice: 2 | system: 3 | name: Cubic 4 | a: 8.37 5 | compute-stress: yes 6 | 7 | ions: 8 | pseudopotentials: 9 | - SG15/$ID_ONCV_PBE.upf 10 | coordinates: 11 | - [Co, 0.7, 0.5, 0.2] 12 | - [Co, 0.5, 0.2, 0.7] 13 | - [Co, 0.2, 0.7, 0.5] 14 | - [Co, 0.0, 0.0, 0.0] 15 | - [Si, 0.0, 0.2, 0.5] 16 | - [Si, 0.2, 0.5, 0.0] 17 | - [Si, 0.5, 0.0, 0.2] 18 | - [Si, 0.7, 0.7, 0.7] 19 | 20 | electrons: 21 | basis: 22 | ke-cutoff: 30 23 | fillings: 24 | smearing: Gauss 25 | sigma: 0.004 26 | k-mesh: 27 | size: [6, 6, 6] 28 | -------------------------------------------------------------------------------- /examples/Cu/bandstruct.yaml: -------------------------------------------------------------------------------- 1 | include: totalE.yaml 2 | 3 | electrons: 4 | fixed-H: 
totalE.h5 5 | fillings: 6 | n-bands: 15 7 | n-bands-extra: 5 8 | k-mesh: null # disable k-mesh included from totalE.yaml 9 | k-path: 10 | dk: 0.02 11 | points: 12 | - [0.000, 0.000, 0.000, $\Gamma$] 13 | - [0.000, 0.500, 0.500, X] 14 | - [0.250, 0.750, 0.500, W] 15 | - [0.500, 0.500, 0.500, L] 16 | - [0.000, 0.000, 0.000, $\Gamma$] 17 | - [0.375, 0.750, 0.375, K] 18 | 19 | geometry: fixed 20 | checkpoint: null 21 | checkpoint-out: bandstruct.h5 22 | 23 | -------------------------------------------------------------------------------- /examples/Cu/totalE.yaml: -------------------------------------------------------------------------------- 1 | lattice: 2 | system: 3 | name: cubic 4 | modification: face-centered 5 | a: 3.61 Å 6 | movable: yes 7 | 8 | ions: 9 | pseudopotentials: 10 | - $ID_ONCV_PBE-1.0.upf 11 | coordinates: 12 | - [Cu, 0., 0., 0.] 13 | 14 | electrons: 15 | basis: 16 | ke-cutoff: 30 17 | fillings: 18 | smearing: Gauss 19 | sigma: 0.02 20 | k-mesh: 21 | offset: [0.5, 0.5, 0.5] #Monkhorst-Pack 22 | size: [8, 8, 8] 23 | xc: 24 | functional: gga-pbe 25 | #plus_U: 26 | # Cu d: 0.080 eV 27 | save-wavefunction: no #keeps checkpoint small 28 | 29 | geometry: 30 | relax: 31 | n-iterations: 10 32 | 33 | checkpoint: null # disable reading checkpoint 34 | checkpoint-out: totalE.h5 # but still create it 35 | -------------------------------------------------------------------------------- /examples/Graphene/bandstruct.yaml: -------------------------------------------------------------------------------- 1 | include: totalE.yaml 2 | 3 | electrons: 4 | fixed-H: totalE.h5 5 | k-mesh: null # disable k-mesh included from totalE.yaml 6 | k-path: 7 | dk: 0.02 8 | points: 9 | - [0.0000000, 0.0000000, 0., $\Gamma$] 10 | - [0.5000000, 0.0000000, 0., M] 11 | - [0.6666667, -0.3333333, 0., K] 12 | - [0.0000000, 0.0000000, 0., $\Gamma$] 13 | fillings: 14 | n-bands: 8 15 | n-bands-extra: 4 16 | 17 | checkpoint-out: bandstruct.h5 18 | 
-------------------------------------------------------------------------------- /examples/Graphene/totalE.yaml: -------------------------------------------------------------------------------- 1 | lattice: 2 | system: 3 | name: hexagonal 4 | a: 4.651 5 | c: 15 6 | compute-stress: yes 7 | 8 | ions: 9 | pseudopotentials: 10 | - SG15/$ID_ONCV_PBE.upf 11 | coordinates: 12 | - [C, 0.00000000, 0.00000000, 0.0] 13 | - [C, 0.33333333, -0.33333333, 0.0] 14 | 15 | electrons: 16 | fillings: 17 | smearing: Gauss 18 | sigma: 0.01 19 | k-mesh: 20 | size: [12, 12, 1] 21 | xc: 22 | functional: gga-pbe 23 | save-wavefunction: no #keeps checkpoint small 24 | 25 | checkpoint: null # disable reading checkpoint 26 | checkpoint-out: totalE.h5 # but still create it 27 | -------------------------------------------------------------------------------- /examples/Jellium/totalE.yaml: -------------------------------------------------------------------------------- 1 | # Free electron gas: 2 | lattice: 3 | system: 4 | name: tetragonal 5 | modification: body-centered 6 | a: 5 # bohrs 7 | c: 7 # bohrs 8 | compute-stress: yes 9 | 10 | electrons: 11 | fillings: 12 | charge: -4 13 | k-mesh: 14 | size: 35.0 # supercell dimension 15 | -------------------------------------------------------------------------------- /examples/MD/H2/.gitignore: -------------------------------------------------------------------------------- 1 | *.pdf 2 | -------------------------------------------------------------------------------- /examples/MD/H2/md.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import matplotlib.pyplot as plt 3 | import qimpy as qp 4 | from qimpy.io import Unit 5 | 6 | 7 | def main() -> None: 8 | qp.io.log_config() # default set up to log from MPI head alone 9 | qp.log.info("Using QimPy " + qp.__version__) 10 | qp.rc.init() 11 | torch.manual_seed(1234) 12 | 13 | # Callback function to analyze trajectory: 14 | def analyze(dynamics: 
qp.dft.geometry.Dynamics, i_iter: int) -> None: 15 | positions = dynamics.system.ions.positions 16 | lattice = dynamics.system.lattice 17 | dpos = positions[1] - positions[0] 18 | dpos -= (dpos + 0.5).floor() # periodic wrap (minimum image convention) 19 | bond_distance = (lattice.Rbasis @ dpos).norm().item() 20 | bond_distances.append(bond_distance) 21 | 22 | bond_distances = [] # to be populated by analyze() 23 | system = qp.dft.System( 24 | lattice=dict(system=dict(name="cubic", modification="face-centered", a=14.0)), 25 | ions=dict( 26 | pseudopotentials="SG15/$ID_ONCV_PBE.upf", 27 | coordinates=[["H", 0.0, 0.0, 0.0], ["H", 0.3, 0.2, 1.4]], 28 | fractional=False, 29 | ), 30 | electrons=dict( 31 | basis=dict(real_wavefunctions=True), 32 | fillings=dict(smearing=None), 33 | ), 34 | geometry=dict( 35 | dynamics=dict( 36 | dt=float(Unit(1.0, "fs")), 37 | n_steps=200, 38 | thermostat="berendsen", 39 | t_damp_T=Unit(10, "fs"), 40 | report_callback=analyze, 41 | ), 42 | ), 43 | ) 44 | system.run() 45 | qp.rc.report_end() 46 | qp.profiler.StopWatch.print_stats() 47 | 48 | # Visualize trajectory: 49 | plt.plot(bond_distances) 50 | plt.xlabel("Time step") 51 | plt.ylabel("Bond length [$a_0$]") 52 | plt.savefig("bond-length.pdf", bbox_inches="tight") 53 | plt.show() 54 | 55 | 56 | if __name__ == "__main__": 57 | main() 58 | -------------------------------------------------------------------------------- /examples/MD/Si/.gitignore: -------------------------------------------------------------------------------- 1 | *.pdf 2 | -------------------------------------------------------------------------------- /examples/MD/Si/md.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import matplotlib.pyplot as plt 3 | import qimpy as qp 4 | from qimpy.io import Unit 5 | 6 | 7 | def main() -> None: 8 | qp.io.log_config() # default set up to log from MPI head alone 9 | qp.log.info("Using QimPy " + qp.__version__) 10 | qp.rc.init() 
11 | torch.manual_seed(1234) 12 | 13 | # Callback function to analyze trajectory: 14 | def analyze(dynamics: qp.dft.geometry.Dynamics, i_iter: int) -> None: 15 | energies.append(float(dynamics.system.energy)) 16 | volumes.append(dynamics.system.lattice.volume / Unit.MAP["Angstrom"] ** 3) 17 | pressures.append(Unit.convert(dynamics.P, "bar").value) 18 | temperatures.append(Unit.convert(dynamics.T, "K").value) 19 | 20 | energies = [] # to be populated by analyze() 21 | volumes = [] # to be populated by analyze() 22 | pressures = [] # to be populated by analyze() 23 | temperatures = [] # to be populated by analyze() 24 | 25 | # Construct coordinates input: 26 | positions = [ 27 | [0.000, 0.000, 0.000], 28 | [0.003, 0.502, 0.501], 29 | [0.503, 0.001, 0.502], 30 | [0.501, 0.503, 0.002], 31 | [0.249, 0.251, 0.248], 32 | [0.250, 0.748, 0.751], 33 | [0.751, 0.248, 0.749], 34 | [0.748, 0.752, 0.249], 35 | ] 36 | velocities = [ 37 | [-9.446e-05, +6.233e-05, +6.401e-05], 38 | [+7.916e-05, -9.725e-06, -9.636e-05], 39 | [-9.603e-05, -4.774e-05, +1.696e-05], 40 | [-1.114e-04, -7.478e-05, +1.372e-04], 41 | [+1.999e-04, +1.307e-04, -4.277e-04], 42 | [-1.040e-04, -5.010e-05, +1.226e-05], 43 | [+7.844e-05, -7.251e-06, +1.675e-04], 44 | [+4.838e-05, -3.422e-06, +1.261e-04], 45 | ] 46 | coordinates = [ 47 | ["Si", *tuple(pos), {"v": v}] for pos, v in zip(positions, velocities) 48 | ] 49 | 50 | system = qp.dft.System( 51 | lattice=dict(system=dict(name="cubic", a=float(Unit(5.43, "Å"))), movable=True), 52 | ions=dict( 53 | pseudopotentials="SG15/$ID_ONCV_PBE.upf", 54 | coordinates=coordinates, 55 | ), 56 | electrons=dict( 57 | k_mesh=dict(size=[2, 2, 2], offset=[0.5, 0.5, 0.5]), 58 | basis=dict(ke_cutoff=10.0), 59 | fillings=dict(smearing=None), 60 | scf=dict(energy_threshold=1e-6), 61 | ), 62 | geometry=dict( 63 | dynamics=dict( 64 | dt=float(Unit(2.0, "fs")), 65 | n_steps=100, 66 | # thermostat=dict(berendsen=dict(B0=Unit(95.0, "GPa"))), 67 | thermostat="nose-hoover", 68 | 
t_damp_T=Unit(10, "fs"), 69 | t_damp_P=Unit(100, "fs"), 70 | report_callback=analyze, 71 | ), 72 | ), 73 | checkpoint="md.h5", 74 | ) 75 | system.run() 76 | qp.rc.report_end() 77 | qp.profiler.StopWatch.print_stats() 78 | 79 | if qp.rc.is_head: 80 | # Visualize trajectory properties: 81 | for quantity, ylabel, filename in ( 82 | (energies, "Energy [$E_h$]", "energy.pdf"), 83 | (volumes, r"Volume [$\AA^3$]", "volume.pdf"), 84 | (pressures, "$P$ [bar]", "pressure.pdf"), 85 | (temperatures, "$T$ [K]", "temperature.pdf"), 86 | ): 87 | plt.figure() 88 | plt.xlabel("Time step") 89 | plt.ylabel(ylabel) 90 | plt.plot(quantity) 91 | plt.savefig(filename, bbox_inches="tight") 92 | plt.show() 93 | 94 | 95 | if __name__ == "__main__": 96 | main() 97 | -------------------------------------------------------------------------------- /examples/Molecules/H2.yaml: -------------------------------------------------------------------------------- 1 | lattice: 2 | system: 3 | name: cubic 4 | a: 20. # bohrs 5 | compute-stress: yes 6 | 7 | ions: 8 | pseudopotentials: 9 | - SG15/$ID_ONCV_PBE.upf # has nonlocal s projectors 10 | fractional: no 11 | coordinates: 12 | - [H, 0., 0., -0.7] 13 | - [H, 0., 0., +0.7] 14 | 15 | electrons: 16 | basis: 17 | real-wavefunctions: yes 18 | xc: 19 | functional: gga-xc-pbe 20 | scf: 21 | mix-density: no # mix potential instead 22 | 23 | #checkpoint: null # disable reading checkpoint 24 | #checkpoint-out: H2.h5 # but still create it 25 | -------------------------------------------------------------------------------- /examples/Molecules/H2O-triclinic.yaml: -------------------------------------------------------------------------------- 1 | lattice: 2 | system: 3 | name: triclinic # for low-symmetry test of force/strain transformations and transpose correctness 4 | a: 20. 5 | b: 15. 6 | c: 18. 
7 | alpha: 80 deg 8 | beta: 95 deg 9 | gamma: 75 deg 10 | compute-stress: yes 11 | 12 | ions: 13 | pseudopotentials: 14 | - ../../../../JDFTx/build_testing/pseudopotentials/SG15/$ID_ONCV_PBE.upf 15 | fractional: no 16 | coordinates: 17 | - [H, 0., -1.432, +0.6, {Q: 0.2}] 18 | - [H, 0., +1.432, +0.6, {Q: 0.2}] 19 | - [O, 0., 0.000, -0.6, {Q: -0.4}] 20 | 21 | electrons: 22 | basis: 23 | real-wavefunctions: yes 24 | 25 | geometry: 26 | relax: 27 | n-iterations: 10 28 | 29 | checkpoint: null # disable reading checkpoint 30 | -------------------------------------------------------------------------------- /examples/Molecules/H2O.yaml: -------------------------------------------------------------------------------- 1 | lattice: 2 | system: 3 | name: cubic 4 | a: 10. # bohrs 5 | compute-stress: yes 6 | periodic: [no, no, no] 7 | center: [0., 0., 0.1] 8 | 9 | ions: 10 | pseudopotentials: 11 | - SG15/$ID_ONCV_PBE.upf 12 | fractional: no 13 | coordinates: 14 | - [H, 0., -1.432, +0.6, {Q: 0.2}] 15 | - [H, 0., +1.432, +0.6, {Q: 0.2}] 16 | - [O, 0., 0.000, -0.6, {Q: -0.4}] 17 | 18 | electrons: 19 | basis: 20 | real-wavefunctions: yes 21 | xc: 22 | functional: ${FUNC} 23 | scf: 24 | mix-density: no # mix potential instead 25 | 26 | geometry: 27 | relax: 28 | n-iterations: 10 29 | 30 | checkpoint: null # disable reading checkpoint 31 | # checkpoint-out: H2O.h5 32 | 33 | coulomb: 34 | analytic: yes 35 | radius: 5.0 36 | 37 | -------------------------------------------------------------------------------- /examples/Molecules/Hatom-SOC.yaml: -------------------------------------------------------------------------------- 1 | lattice: 2 | system: 3 | name: cubic 4 | modification: face-centered 5 | a: 20. # bohrs 6 | 7 | ions: 8 | pseudopotentials: 9 | - DOJO/FR/$ID.upf 10 | coordinates: 11 | - [H, 0., 0., 0.] 
12 | 13 | electrons: 14 | xc: 15 | functional: gga-xc-pbe 16 | spin-polarized: yes 17 | spinorial: yes 18 | fillings: 19 | M: [0, 0.8, -0.6] 20 | 21 | checkpoint: null 22 | -------------------------------------------------------------------------------- /examples/Molecules/Hatom.yaml: -------------------------------------------------------------------------------- 1 | lattice: 2 | system: 3 | name: cubic 4 | modification: face-centered 5 | a: 20. # bohrs 6 | 7 | ions: 8 | pseudopotentials: 9 | - SG15/$ID_ONCV_PBE.upf # has nonlocal s projectors 10 | coordinates: 11 | - [H, 0., 0., 0.] 12 | 13 | electrons: 14 | xc: 15 | functional: ${FUNC} 16 | spin-polarized: yes 17 | fillings: 18 | M: 1 19 | 20 | geometry: fixed 21 | 22 | checkpoint: null # disable reading checkpoint 23 | checkpoint-out: Hatom.h5 # but still create it 24 | -------------------------------------------------------------------------------- /examples/Molecules/O2.yaml: -------------------------------------------------------------------------------- 1 | lattice: 2 | system: 3 | name: cubic 4 | a: 20. # bohrs 5 | compute-stress: yes 6 | 7 | ions: 8 | pseudopotentials: 9 | - SG15/$ID_ONCV_PBE.upf 10 | fractional: no 11 | coordinates: 12 | - [O, 0., 0., -1.14] 13 | - [O, 0., 0., +1.14] 14 | 15 | electrons: 16 | basis: 17 | real-wavefunctions: yes 18 | spin-polarized: yes 19 | fillings: 20 | M: 2. 21 | M-constrain: no 22 | xc: 23 | functional: ${FUNC} 24 | 25 | checkpoint: null # disable reading checkpoint 26 | checkpoint-out: O2.h5 # but still create it 27 | -------------------------------------------------------------------------------- /examples/Molecules/OH-.yaml: -------------------------------------------------------------------------------- 1 | lattice: 2 | system: 3 | name: cubic 4 | a: 20. # bohrs 5 | 6 | ions: 7 | pseudopotentials: 8 | - SG15/$ID_ONCV_PBE.upf 9 | fractional: no 10 | coordinates: 11 | - [H, +0.92, 0., 0.] 12 | - [O, -0.92, 0., 0.] 
13 | 14 | electrons: 15 | fillings: 16 | charge: -1 17 | basis: 18 | real-wavefunctions: yes 19 | xc: 20 | functional: gga-xc-pbe 21 | 22 | checkpoint: null 23 | -------------------------------------------------------------------------------- /examples/NaCl-ClVac/totalE.py: -------------------------------------------------------------------------------- 1 | # Calculation analogous to totalE.yaml with Python input: 2 | import numpy as np 3 | import qimpy as qp 4 | 5 | qp.io.log_config() # default set up to log from MPI head alone 6 | qp.log.info("Using QimPy " + qp.__version__) 7 | qp.rc.init() 8 | 9 | # Create lattice object explicitly (eg. shared between two systems) 10 | n_sup = 2 # number of unit cells in each dimension 11 | lattice = qp.lattice.Lattice( 12 | system=dict(name="cubic", a=10.74, modification="face-centered"), 13 | scale=n_sup, 14 | ) 15 | 16 | # Ion parameters: 17 | coords_mesh_1d = np.arange(n_sup) * (1.0 / n_sup) 18 | coords_mesh = ( 19 | np.stack(np.meshgrid(*((coords_mesh_1d,) * 3), indexing="ij")).reshape(3, -1).T 20 | ) 21 | coordinates = [["Na", *tuple(coords + 0.5 / n_sup)] for coords in coords_mesh] 22 | coordinates.extend( 23 | [["Cl", *tuple(coords)] for coords in coords_mesh if np.linalg.norm(coords)] 24 | ) # omit Cl at (0,0,0) 25 | 26 | system = qp.dft.System( 27 | lattice=lattice, 28 | ions={ 29 | "pseudopotentials": "SG15/$ID_ONCV_PBE.upf", 30 | "coordinates": coordinates, 31 | }, 32 | electrons={ 33 | "basis": {"real-wavefunctions": True}, 34 | "xc": {"functional": "gga-xc-pbe"}, 35 | }, 36 | ) 37 | system.run() 38 | 39 | qp.rc.report_end() 40 | qp.profiler.StopWatch.print_stats() 41 | -------------------------------------------------------------------------------- /examples/NaCl-ClVac/totalE.yaml: -------------------------------------------------------------------------------- 1 | #NaCl 2x2x2 primitive supercell with a Cl vacancy 2 | 3 | lattice: 4 | vectors: 5 | - [0, 0.5, 0.5] # vector1 Cartesian coordinates (in rows of 3x3 
matrix) 6 | - [0.5, 0, 0.5] 7 | - [0.5, 0.5, 0] 8 | scale: 11.37 Å #for 2x2x2 supercell (could be a length 3 list/tuple) 9 | movable: yes #optimize lattice vectors 10 | 11 | ions: 12 | pseudopotentials: 13 | - SG15/$ID_ONCV_PBE.upf 14 | coordinates: 15 | - [Na, 0.25, 0.25, 0.25] 16 | - [Na, 0.25, 0.25, 0.75] 17 | - [Na, 0.25, 0.75, 0.25] 18 | - [Na, 0.25, 0.75, 0.75] 19 | - [Na, 0.75, 0.25, 0.25] 20 | - [Na, 0.75, 0.25, 0.75] 21 | - [Na, 0.75, 0.75, 0.25] 22 | - [Na, 0.75, 0.75, 0.75] 23 | - [Cl, 0.00, 0.50, 0.50] 24 | - [Cl, 0.50, 0.00, 0.50] 25 | - [Cl, 0.50, 0.50, 0.00] 26 | - [Cl, 0.50, 0.00, 0.00] 27 | - [Cl, 0.00, 0.50, 0.00] 28 | - [Cl, 0.00, 0.00, 0.50] 29 | - [Cl, 0.50, 0.50, 0.50] 30 | 31 | electrons: 32 | basis: 33 | real-wavefunctions: yes 34 | # chefsi: {filter-order: 10} 35 | scf: 36 | mix-density: no # mix potential instead 37 | 38 | geometry: 39 | relax: 40 | n-iterations: 10 41 | 42 | checkpoint: null # disable reading checkpoint 43 | #checkpoint-out: NaCl-ClVac.h5 # but still create it 44 | -------------------------------------------------------------------------------- /examples/Pt-SOC/totalE.yaml: -------------------------------------------------------------------------------- 1 | lattice: 2 | system: 3 | name: cubic 4 | modification: face-centered 5 | a: 7.41 # bohrs 6 | 7 | ions: 8 | pseudopotentials: 9 | - DOJO/FR/$ID.upf 10 | coordinates: 11 | - [Pt, 0., 0., 0.] 
12 | 13 | electrons: 14 | spinorial: yes 15 | fillings: 16 | smearing: Gauss 17 | sigma: 0.02 18 | k-mesh: 19 | size: [8, 8, 8] 20 | xc: 21 | functional: gga-xc-pbe 22 | 23 | checkpoint: null 24 | -------------------------------------------------------------------------------- /examples/Si/bandstruct.yaml: -------------------------------------------------------------------------------- 1 | include: totalE.yaml 2 | 3 | lattice: 4 | system: 5 | a: 10.3492926 # relaxed lattice constant 6 | 7 | electrons: 8 | fixed-H: totalE.h5 9 | fillings: 10 | n-bands: 10 11 | n-bands-extra: 5 12 | k-mesh: null # disable k-mesh included from totalE.yaml 13 | k-path: 14 | dk: 0.05 15 | points: 16 | - [0.000, 0.000, 0.000, $\Gamma$] 17 | - [0.000, 0.500, 0.500, X] 18 | - [0.250, 0.750, 0.500, W] 19 | - [0.500, 0.500, 0.500, L] 20 | - [0.000, 0.000, 0.000, $\Gamma$] 21 | - [0.375, 0.750, 0.375, K] 22 | 23 | geometry: fixed 24 | checkpoint-out: bandstruct.h5 25 | 26 | -------------------------------------------------------------------------------- /examples/Si/totalE.yaml: -------------------------------------------------------------------------------- 1 | # Diamond-cubic silicon 2 | lattice: 3 | system: 4 | name: cubic 5 | modification: face-centered 6 | a: 5.43 Angstrom 7 | movable: yes 8 | 9 | ions: 10 | pseudopotentials: 11 | - SG15/$ID_ONCV_PBE.upf 12 | coordinates: 13 | - [Si, 0.125, 0.07, 0.01] # specified off-center to test space group detection 14 | - [Si, 0.375, 0.32, 0.26] 15 | 16 | electrons: 17 | k-mesh: 18 | offset: [0.5, 0.5, 0.5] #Monkhorst-Pack 19 | size: [4, 4, 4] 20 | save-wavefunction: no #keeps checkpoint small 21 | 22 | grid: 23 | ke-cutoff: 100 24 | 25 | geometry: 26 | relax: 27 | n-iterations: 1 28 | 29 | checkpoint: null 30 | checkpoint_out: totalE.h5 31 | -------------------------------------------------------------------------------- /examples/Transport/.gitignore: -------------------------------------------------------------------------------- 1 | animation 2 
| *.dat 3 | *.pdf 4 | *.png 5 | *.avi 6 | 7 | -------------------------------------------------------------------------------- /examples/Transport/curved-tile.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 16 | 36 | 38 | 42 | 46 | 50 | 54 | 58 | 59 | 60 | -------------------------------------------------------------------------------- /examples/Transport/ping_pong/README.md: -------------------------------------------------------------------------------- 1 | (1) Check geometry: 2 | `python -m qimpy.transport.geometry.test_svg rect-domain.svg 1 --grid_spacing 0.01` 3 | 4 | (2) Run: 5 | `time mpirun -np 8 python -m qimpy.transport.geometry.test_advect --h 0.005 --Ntheta 128 --specularity 0 --sigma 0.1 --q0 0.5 0.5 --v0 0 1 --dt_save 0.01 --t_max 10 --svg rect-domain.svg` 6 | 7 | (3) Plot: 8 | `time mpirun -np 8 python -m qimpy.transport.plot plot.yaml` 9 | -------------------------------------------------------------------------------- /examples/Transport/ping_pong/plot.yaml: -------------------------------------------------------------------------------- 1 | checkpoints: animation/*.h5 2 | output: animation/frame{:04d}.png 3 | 4 | density: 5 | cmap: bwr 6 | interpolation: bilinear 7 | linthresh: 0.1 8 | -------------------------------------------------------------------------------- /examples/Transport/ping_pong/rect-domain.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 16 | 39 | 41 | 46 | 51 | 52 | 53 | -------------------------------------------------------------------------------- /examples/Transport/plot.yaml: -------------------------------------------------------------------------------- 1 | checkpoints: animation/*.h5 2 | output: animation/frame{:04d}.png 3 | 4 | density: 5 | cmap: bwr 6 | interpolation: bilinear 7 | linthresh: 0.1 8 | 9 | streamlines: 10 | transparency: no 11 | density: 2 12 | linewidth: 0.7 13 | arrowsize: 0.5 14 | 
-------------------------------------------------------------------------------- /examples/Transport/rect-domain.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 16 | 37 | 39 | 43 | 48 | 54 | 60 | 66 | 72 | 73 | 74 | -------------------------------------------------------------------------------- /examples/Transport/rect-domain.yaml: -------------------------------------------------------------------------------- 1 | fermi_circle: 2 | kF: 1.0 3 | vF: 1.5 4 | tau-p: .inf 5 | tau-ee: .inf 6 | N-theta: 256 7 | r_c: 10.0 8 | 9 | patch_set: 10 | svg_file: rect-domain.svg 11 | grid_spacing: 1.0 12 | 13 | contacts: 14 | source: 15 | dmu: +0.1 16 | drain: 17 | dmu: -0.1 18 | 19 | time_evolution: 20 | t_max: 1000.0 21 | dt_save: 5.0 22 | n_collate: 32 23 | 24 | checkpoint_out: animation/advect_{:04d}.h5 25 | -------------------------------------------------------------------------------- /examples/Transport/rect-periodic.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 16 | 38 | 40 | 44 | 50 | 56 | 60 | 64 | 65 | 66 | -------------------------------------------------------------------------------- /examples/Transport/rect-periodic.yaml: -------------------------------------------------------------------------------- 1 | ab_initio: 2 | fname: ldbd-CsPbBr3-dft.h5 3 | mu: 0.528824 eV 4 | eph_scatt: no 5 | 6 | patch_set: 7 | svg_file: rect-periodic.svg 8 | svg_unit: 100.0 nm 9 | grid_spacing: 100.0 nm 10 | 11 | contacts: 12 | source: 13 | dmu: 0.1 14 | Bfield: [0., 1. T, 0.] 15 | drain: 16 | dmu: -0.1 17 | Bfield: [0., -1. T, 0.] 
18 | 19 | time_evolution: 20 | t_max: 40.0 ps 21 | dt_save: 0.2 ps 22 | n_collate: 32 23 | 24 | checkpoint_out: animation/advect_{:04d}.h5 25 | -------------------------------------------------------------------------------- /examples/Transport/rect-with-leads-plot.yaml: -------------------------------------------------------------------------------- 1 | include: plot.yaml # common settings 2 | 3 | # Select aperture names to additionally plot (contacts always plotted): 4 | apertures: 5 | sourceA: # short name to use in plot 6 | name: apertureSourceOpening # name in SVG 7 | outward: [0, 1] # direction to pick "outward" normal 8 | drainA: # short name to use in plot 9 | name: apertureDrainOpening # name in SVG 10 | outward: [0, -1] # direction to pick "outward" normal 11 | 12 | density-prefix: rect-with-leads-density 13 | current-prefix: rect-with-leads-current 14 | -------------------------------------------------------------------------------- /examples/Transport/rect-with-leads.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 16 | 39 | 41 | 45 | 50 | 56 | 62 | 68 | 74 | 78 | 84 | 90 | 94 | 95 | 96 | -------------------------------------------------------------------------------- /examples/Transport/rect-with-leads.yaml: -------------------------------------------------------------------------------- 1 | fermi_circle: 2 | kF: 1.0 3 | vF: 1.5 4 | tau-p: .inf 5 | tau-ee: 10.0 6 | N-theta: 256 7 | specularity: 0.5 8 | 9 | patch_set: 10 | svg_file: rect-with-leads.svg 11 | grid_spacing: 1.0 12 | 13 | contacts: 14 | source: 15 | dmu: +0.1 16 | drain: 17 | dmu: -0.1 18 | 19 | time_evolution: 20 | t_max: 1000.0 21 | dt_save: 5.0 22 | n_collate: 32 23 | 24 | checkpoint_out: animation/advect_{:04d}.h5 25 | -------------------------------------------------------------------------------- /examples/Transport/semicircle.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 16 | 
37 | 39 | 43 | 48 | 52 | 56 | 62 | 68 | 74 | 75 | 76 | -------------------------------------------------------------------------------- /examples/Transport/semicircle.yaml: -------------------------------------------------------------------------------- 1 | fermi_circle: 2 | kF: 1.0 3 | vF: 1.5 4 | tau-p: .inf 5 | tau-ee: 5.0 6 | N-theta: 256 7 | 8 | patch_set: 9 | svg_file: semicircle.svg 10 | grid_spacing: 1.0 11 | 12 | contacts: 13 | source: 14 | dmu: +0.1 15 | drain: 16 | dmu: -0.1 17 | 18 | time_evolution: 19 | t_max: 400.0 20 | dt_save: 2.0 21 | n_collate: 32 22 | 23 | checkpoint_out: animation/advect_{:04d}.h5 24 | -------------------------------------------------------------------------------- /mpi_print_from_head.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [[ $OMPI_COMM_WORLD_RANK == 0 ]] 3 | then 4 | export COLUMNS=120 5 | "$@" 6 | else 7 | "$@" 1>/dev/null 2>/dev/null 8 | fi 9 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools>=42", 4 | "wheel", 5 | "pybind11", 6 | "torch" 7 | ] 8 | build-backend = "setuptools.build_meta" 9 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = qimpy 3 | author = QimPy Collaboration 4 | author_email = author@example.com 5 | description = Quantum-Integrated Multi-PhYsics 6 | long_description = file: README.md 7 | long_description_content_type = text/markdown 8 | url = https://qimpy.org 9 | project_urls = 10 | Bug Tracker = https://github.com/shankar1729/qimpy/issues 11 | classifiers = 12 | Programming Language :: Python :: 3 13 | License :: OSI Approved :: BSD License 14 | Operating System :: OS Independent 15 | 16 | [options] 17 | 
package_dir = 18 | = src 19 | packages = find: 20 | include-package-data = True 21 | python_requires = >=3.9 22 | install_requires = 23 | torch >1.10 24 | pyyaml 25 | numpy 26 | scipy 27 | mpi4py 28 | h5py 29 | psutil 30 | svg.path 31 | 32 | [options.packages.find] 33 | where = src 34 | 35 | [options.package_data] 36 | * = py.typed 37 | 38 | [versioneer] 39 | VCS = git 40 | style = pep440 41 | versionfile_source = src/qimpy/_version.py 42 | versionfile_build = qimpy/_version.py 43 | tag_prefix = v 44 | parentdir_prefix = qimpy- 45 | 46 | [tool:pytest] 47 | minversion = 6.0 48 | addopts = -v 49 | testpaths = 50 | src 51 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | from torch.utils import cpp_extension 3 | import sys 4 | import os 5 | 6 | 7 | def get_version_cmdclass(): 8 | import versioneer 9 | 10 | return versioneer.get_version(), versioneer.get_cmdclass() 11 | 12 | 13 | sys.path.append(os.path.dirname(__file__)) # needed for versioneer 14 | version, cmdclass = get_version_cmdclass() 15 | cmdclass["build_ext"] = cpp_extension.BuildExtension 16 | 17 | ext_modules = [ 18 | cpp_extension.CppExtension( 19 | "qimpy.mpi._bufferview", ["src/qimpy/mpi/_bufferview.cpp"] 20 | ) 21 | ] 22 | 23 | setuptools.setup(version=version, cmdclass=cmdclass, ext_modules=ext_modules) 24 | -------------------------------------------------------------------------------- /src/README.md: -------------------------------------------------------------------------------- 1 | See [API documentation](https://qimpy.org/en/latest/api.html) for an overview. 
2 | -------------------------------------------------------------------------------- /src/mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | exclude = _version.py 3 | plugins = numpy.typing.mypy_plugin 4 | 5 | [mypy-mpi4py.*] 6 | ignore_missing_imports = True 7 | 8 | [mypy-matplotlib.*] 9 | ignore_missing_imports = True 10 | 11 | [mypy-scipy.*] 12 | ignore_missing_imports = True 13 | 14 | [mypy-psutil.*] 15 | ignore_missing_imports = True 16 | 17 | [mypy-h5py.*] 18 | ignore_missing_imports = True 19 | 20 | [mypy-pylibxc.*] 21 | ignore_missing_imports = True 22 | 23 | [mypy-torch.nn.intrinsic.quantized._reference] 24 | ignore_missing_imports = True 25 | 26 | [mypy-ase.*] 27 | ignore_missing_imports = True 28 | 29 | [mypy-tqdm.*] 30 | ignore_missing_imports = True 31 | 32 | [mypy-svg.*] 33 | ignore_missing_imports = True 34 | -------------------------------------------------------------------------------- /src/qimpy/README.md: -------------------------------------------------------------------------------- 1 | See [API documentation](https://qimpy.org/en/latest/api/qimpy.html) for an overview of package `qimpy`. 2 | -------------------------------------------------------------------------------- /src/qimpy/__init__.py: -------------------------------------------------------------------------------- 1 | """QimPy: Quantum-Integrated Multi-PhYsics""" 2 | # List exported symbols for doc generation 3 | __all__ = ( 4 | "log", 5 | "set_gpu_visibility", 6 | "MPI", 7 | "rc", 8 | "profiler", 9 | "io", 10 | "mpi", 11 | "utils", 12 | "math", 13 | "TreeNode", 14 | "Energy", 15 | "algorithms", 16 | "lattice", 17 | "symmetries", 18 | "grid", 19 | "dft", 20 | "transport", 21 | ) 22 | 23 | # Module import definition 24 | from .pre_init import log, set_gpu_visibility 25 | from mpi4py import MPI #: Must initialize MPI after pre_init for correct GPU behavior. 26 | from . 
import rc, profiler, io, mpi, math 27 | from ._tree import TreeNode 28 | from ._energy import Energy 29 | from . import algorithms, lattice, symmetries, grid, dft, transport 30 | 31 | # Automatic versioning added by versioneer 32 | from ._version import get_versions 33 | 34 | __version__: str = get_versions()["version"] 35 | del get_versions 36 | -------------------------------------------------------------------------------- /src/qimpy/_energy.py: -------------------------------------------------------------------------------- 1 | from typing import Union, Optional 2 | 3 | import torch 4 | 5 | 6 | class Energy(dict[str, Union[float, torch.Tensor]]): 7 | """Energy of system with access to components""" 8 | 9 | def __float__(self) -> float: 10 | """Compute total energy from energy components""" 11 | return float(sum(self.values())) 12 | 13 | def __repr__(self) -> str: 14 | terms: list[list[str]] = [[], []] # collect terms with +/- separately 15 | for name, value in sorted(self.items()): 16 | term_index = 1 if (name[0] in "+-") else 0 17 | terms[term_index].append(f"{name:>9s} = {value:25.16f}") 18 | terms[0].extend(terms[1]) 19 | terms[0].append("-" * 37) # separator 20 | terms[0].append(f"{self.name:>9s} = {float(self):25.16f}") # total 21 | return "\n".join(terms[0]) 22 | 23 | @property 24 | def name(self) -> str: 25 | """Appropriate name of (free) energy based on components.""" 26 | if "Eband" in self: 27 | return "Eband" # Band structure energy 28 | if "-muN" in self: 29 | return "G" # Grand free energy 30 | if "-TS" in self: 31 | return "F" # Helmholtz free energy 32 | return "E" # Energy 33 | 34 | def sum_tensor(self) -> Optional[torch.Tensor]: 35 | result = None 36 | for value in self.values(): 37 | assert isinstance(value, torch.Tensor) 38 | result = value if (result is None) else (result + value) 39 | return result 40 | -------------------------------------------------------------------------------- /src/qimpy/algorithms/README.md: 
-------------------------------------------------------------------------------- 1 | See [API documentation](https://qimpy.org/en/latest/api/qimpy.algorithms.html) for an overview of package `qimpy.algorithms`. 2 | -------------------------------------------------------------------------------- /src/qimpy/algorithms/__init__.py: -------------------------------------------------------------------------------- 1 | """Shared algorithms for optimization and self-consistency.""" 2 | # List exported symbols for doc generation 3 | __all__ = ( 4 | "Gradable", 5 | "Optimizable", 6 | "ConvergenceCheck", 7 | "MatrixArray", 8 | "MinimizeState", 9 | "Minimize", 10 | "Pulay", 11 | ) 12 | 13 | from ._gradable import Gradable 14 | from ._optimizable import Optimizable, ConvergenceCheck, MatrixArray 15 | from ._minimize import Minimize, MinimizeState 16 | from ._pulay import Pulay 17 | -------------------------------------------------------------------------------- /src/qimpy/algorithms/_gradable.py: -------------------------------------------------------------------------------- 1 | from typing import Generic, TypeVar 2 | from abc import ABC, abstractmethod 3 | 4 | 5 | GradientType = TypeVar("GradientType") 6 | 7 | 8 | class Gradable(ABC, Generic[GradientType]): 9 | """Interface to store gradient w.r.t current object, analogous to pytorch.""" 10 | 11 | grad: GradientType #: optional gradient (of energy) with respect to this object. 12 | 13 | @property 14 | def requires_grad(self) -> bool: 15 | """Return whether gradient with respect to this object is needed.""" 16 | return self._requires_grad 17 | 18 | def requires_grad_(self, requires_grad: bool = True, clear: bool = False) -> None: 19 | """Set whether gradient with respect to this object is needed. 20 | If `clear`, also clear previous gradient / set to zero as needed. 
21 | """ 22 | self._requires_grad = requires_grad 23 | if clear: 24 | if requires_grad: 25 | self.grad = self.zeros_like() # prepare new zero'd gradient 26 | else: 27 | self.__dict__.pop("grad", None) # remove previous gradient (if any) 28 | 29 | def __init__(self) -> None: 30 | self._requires_grad = False 31 | 32 | @abstractmethod 33 | def zeros_like(self: GradientType) -> GradientType: 34 | ... 35 | -------------------------------------------------------------------------------- /src/qimpy/algorithms/_minimize_lbfgs.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import Generic, Deque 3 | from collections import deque 4 | from dataclasses import dataclass 5 | 6 | from qimpy import log, Energy, algorithms 7 | from ._minimize_line import LINE_MINIMIZE, Vector 8 | from ._minimize_cg import initialize_convergence_checks, check_convergence 9 | 10 | 11 | @dataclass 12 | class HistoryEntry(Generic[Vector]): 13 | """History of state and gradient changes within `_lbfgs`""" 14 | 15 | s: "Vector" #: Change in state (step_size * direction) 16 | Ky: "Vector" #: Corresponding change in preconditioned gradient 17 | rho: float #: Inverse state-gradient overlap, 1/(s.y) 18 | 19 | 20 | def lbfgs(self: algorithms.Minimize[Vector]) -> Energy: 21 | """L-BFGS implementation for `Minimize.minimize`""" 22 | assert self.method == "l-bfgs" 23 | 24 | # Initial energy and gradients: 25 | state = algorithms.MinimizeState[Vector]() 26 | E = self._compute(state, energy_only=False) 27 | E_prev = 0.0 28 | line_minimize = LINE_MINIMIZE[self.line_minimize] 29 | checks = initialize_convergence_checks(self, state) 30 | history: Deque[HistoryEntry] = deque(maxlen=self.n_history) 31 | gamma = 0.0 # current scaling factor, updated each iteration 32 | 33 | # Iterate till convergence (or iteration limit): 34 | for i_iter in range(self.i_iter_start, self.n_iterations + 1): 35 | # Optional reporting: 36 | if 
self.report(i_iter): 37 | log.info(f"{self.name}: State modified externally:" " resetting history.") 38 | E = self._compute(state, energy_only=False) 39 | history.clear() 40 | 41 | # Check and report convergence: 42 | E, E_prev, should_exit = check_convergence( 43 | self, state, i_iter, checks, E, E_prev 44 | ) 45 | if should_exit: 46 | return state.energy 47 | 48 | # Compute search direction: 49 | direction = (-1.0) * state.K_gradient 50 | alpha: Deque[float] = deque() # scale factors to each history entry 51 | for h in reversed(history): 52 | alpha_i = h.rho * self._sync(h.s.vdot(direction)) 53 | direction -= alpha_i * h.Ky 54 | alpha.append(alpha_i) 55 | if gamma: 56 | direction *= gamma # scaling to keep reasonable step size ~ 1 57 | for h in history: 58 | alpha_i = alpha.pop() 59 | beta = h.rho * self._sync(h.Ky.vdot(direction)) 60 | direction += (alpha_i - beta) * h.s 61 | direction = self.constrain(direction) 62 | if len(history) == self.n_history: 63 | history.popleft() # save memory by removing oldest here, when full 64 | 65 | # Line minimization: 66 | step_size_test = min(self.step_size.initial, self.safe_step_size(direction)) 67 | g_prev, Kg_prev = state.gradient, state.K_gradient 68 | E, step_size, success = line_minimize(self, direction, step_size_test, state) 69 | if not success: 70 | log.info(f"{self.name}: Undoing step.") 71 | self.step(direction, -step_size) 72 | E = self._compute(state, energy_only=False) 73 | if len(history): 74 | # Step failed, but not along gradient direction: 75 | log.info(f"{self.name}: Step failed: resetting history.") 76 | history.clear() 77 | gamma = 0.0 78 | continue 79 | else: 80 | # Step failed along gradient direction: 81 | log.info( 82 | f"{self.name}: Step failed along gradient: likely" 83 | " at roundoff / inner-solve accuracy limit." 
84 | ) 85 | return state.energy 86 | 87 | # Update history: 88 | y = state.gradient - g_prev 89 | Ky = state.K_gradient - Kg_prev 90 | del g_prev, Kg_prev # minimize # of gradient-like objects in memory 91 | direction *= step_size # Now equal to s, the change of state in step 92 | y_s = self._sync(y.vdot(direction)) 93 | gamma = y_s / self._sync(y.vdot(Ky)) 94 | history.append(HistoryEntry(s=direction, Ky=Ky, rho=1.0 / y_s)) 95 | del y, Ky, direction # minimize # of gradient-like objects in memory 96 | 97 | log.info(f"{self.name}: Not converged in {self.n_iterations}" " iterations.") 98 | return state.energy 99 | -------------------------------------------------------------------------------- /src/qimpy/algorithms/_optimizable.py: -------------------------------------------------------------------------------- 1 | from typing import Protocol, TypeVar, Deque 2 | from dataclasses import dataclass 3 | 4 | import torch 5 | 6 | from qimpy import MPI 7 | from qimpy.mpi import BufferView 8 | 9 | 10 | T = TypeVar("T") 11 | 12 | 13 | class Optimizable(Protocol): 14 | """Class requirements for use as vector space in optimization algorithms. 15 | This is required in :class:`Pulay` and :class:`Minimize`, for example.""" 16 | 17 | def __add__(self: T, other: T) -> T: 18 | ... 19 | 20 | def __iadd__(self: T, other: T) -> T: 21 | ... 22 | 23 | def __sub__(self: T, other: T) -> T: 24 | ... 25 | 26 | def __isub__(self: T, other: T) -> T: 27 | ... 28 | 29 | def __mul__(self: T, other: float) -> T: 30 | ... 31 | 32 | def __rmul__(self: T, other: float) -> T: 33 | ... 34 | 35 | def __imul__(self: T, other: float) -> T: 36 | ... 37 | 38 | def vdot(self: T, other: T) -> float: 39 | ... 
40 | 41 | 42 | class ConvergenceCheck(Deque[bool]): 43 | """Check quantity stays unchanged a certain number of times.""" 44 | 45 | threshold: float #: Convergence threshold 46 | n_check: int #: Number of consecutive checks that must pass at convergence 47 | 48 | def __init__(self, threshold: float, n_check: int = 2) -> None: 49 | """Initialize convergence check to specified `threshold`. 50 | The check must pass `n_check` consecutive times.""" 51 | self.threshold = threshold 52 | self.n_check = n_check 53 | super().__init__(maxlen=n_check) 54 | 55 | def check(self, v: float) -> bool: 56 | """Return if converged, given latest quantity `v` to check.""" 57 | self.append(abs(v) < self.threshold) 58 | if len(self) < self.n_check: 59 | return False # not enough data to pass yet 60 | return all(converged for converged in self) 61 | 62 | 63 | @dataclass 64 | class MatrixArray: 65 | """Array of matrices implementing the `Optimizable` protocol. 66 | This is convenient as an independent variable for optimizing over 67 | subspace rotations, such as in `LCAO` and `Wannier`.""" 68 | 69 | M: torch.Tensor #: Array of matrices with dimension ..., N x N 70 | comm: MPI.Comm #: Communicator where M is split on some dimension(s) 71 | 72 | def __add__(self, other: "MatrixArray") -> "MatrixArray": 73 | return MatrixArray(M=(self.M + other.M), comm=self.comm) 74 | 75 | def __iadd__(self, other: "MatrixArray") -> "MatrixArray": 76 | self.M += other.M 77 | return self 78 | 79 | def __sub__(self, other: "MatrixArray") -> "MatrixArray": 80 | return MatrixArray(M=(self.M - other.M), comm=self.comm) 81 | 82 | def __isub__(self, other: "MatrixArray") -> "MatrixArray": 83 | self.M -= other.M 84 | return self 85 | 86 | def __mul__(self, other: float) -> "MatrixArray": 87 | return MatrixArray(M=(self.M * other), comm=self.comm) 88 | 89 | __rmul__ = __mul__ 90 | 91 | def __imul__(self, other: float) -> "MatrixArray": 92 | self.M *= other 93 | return self 94 | 95 | def vdot(self, other: "MatrixArray") 
-> float: 96 | """Global vector-space dot product collected over `comm`.""" 97 | result = torch.vdot(self.M.flatten(), other.M.flatten()).real 98 | self.comm.Allreduce(MPI.IN_PLACE, BufferView(result), MPI.SUM) 99 | return result.item() 100 | -------------------------------------------------------------------------------- /src/qimpy/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from . import rc 4 | 5 | 6 | @pytest.hookimpl(hookwrapper=True) 7 | def pytest_report_teststatus(report, config): 8 | """Add timing to test result if passed.""" 9 | outcome = yield 10 | category, short_letter, verbose_word = outcome.get_result() 11 | if category == "passed": 12 | verbose_word = f"{verbose_word} in {report.duration:.2f}s" 13 | outcome.force_result((category, short_letter, verbose_word)) 14 | 15 | 16 | @pytest.fixture(scope="session", autouse=True) 17 | def init_run_config(): 18 | rc.init() 19 | 20 | 21 | def pytest_collection_modifyitems(config, items): 22 | # Modify pytest-mpi to deselect instead of skip mpi/non-mpi tests based on mode: 23 | with_mpi = config.getoption("--with-mpi") 24 | deselect_mark = "mpi_skip" if with_mpi else "mpi" 25 | removed = [] 26 | kept = [] 27 | for item in items: 28 | if item.get_closest_marker(deselect_mark): 29 | removed.append(item) 30 | else: 31 | kept.append(item) 32 | if removed: 33 | config.hook.pytest_deselected(items=removed) 34 | items[:] = kept 35 | -------------------------------------------------------------------------------- /src/qimpy/dft/README.md: -------------------------------------------------------------------------------- 1 | See [API documentation](https://qimpy.org/en/latest/api/qimpy.dft.html) for an overview of package `qimpy.dft`. 
2 | -------------------------------------------------------------------------------- /src/qimpy/dft/__init__.py: -------------------------------------------------------------------------------- 1 | """Electronic density-functional theory.""" 2 | # List exported symbols for doc generation 3 | __all__ = ("ions", "electrons", "geometry", "export", "System", "main") 4 | 5 | # Module import definition 6 | from . import ions, electrons, geometry, export 7 | from ._system import System 8 | from ._main import main 9 | -------------------------------------------------------------------------------- /src/qimpy/dft/__main__.py: -------------------------------------------------------------------------------- 1 | from . import main 2 | 3 | main() 4 | -------------------------------------------------------------------------------- /src/qimpy/dft/electrons/README.md: -------------------------------------------------------------------------------- 1 | See [API documentation](https://qimpy.org/en/latest/api/qimpy.dft.electrons.html) for an overview of package `qimpy.dft.electrons`. 2 | -------------------------------------------------------------------------------- /src/qimpy/dft/electrons/__init__.py: -------------------------------------------------------------------------------- 1 | """Electronic sub-system""" 2 | # List exported symbols for doc generation 3 | __all__ = ( 4 | "Fillings", 5 | "Basis", 6 | "Wavefunction", 7 | "Davidson", 8 | "CheFSI", 9 | "SCF", 10 | "LCAO", 11 | "xc", 12 | "Electrons", 13 | ) 14 | 15 | from ._fillings import Fillings 16 | from ._basis import Basis 17 | from ._wavefunction import Wavefunction 18 | from ._davidson import Davidson 19 | from ._chefsi import CheFSI 20 | from ._scf import SCF 21 | from ._lcao import LCAO 22 | from . 
def _hamiltonian(self: dft.electrons.Electrons, C: Wavefunction) -> Wavefunction:
    """Apply electronic Hamiltonian on wavefunction `C`"""
    basis = C.basis
    ions = basis.ions
    # Kinetic-energy contribution:
    HC = basis.apply_ke(C)
    # Local potential contribution (n_tilde.grad presumably holds dE/dn — set by SCF):
    HC += basis.apply_potential(self.n_tilde.grad, C)
    if self.xc.need_tau:
        # Meta-GGA contribution: -(1/2) sum_i d_i [ (tau_tilde.grad) (d_i C) ],
        # applied direction-by-direction as gradient -> potential -> gradient.
        for i_dir in range(3):
            HC -= 0.5 * basis.apply_gradient(
                basis.apply_potential(
                    self.tau_tilde.grad, basis.apply_gradient(C, i_dir)
                ),
                i_dir,
            )
    # Nonlocal ps:
    beta_C = C.proj  # projections of C onto nonlocal projectors
    # Pick projector set matching C's band parallelization layout:
    beta = ions.beta_full if C.band_division else ions.beta
    HC += beta @ (ions.D_all @ beta_C)

    # DFT+U:
    if self.xc.plus_U:
        HC += self.xc.plus_U(C)  # TODO (PlusU.__call__ is not yet implemented)
    return HC
def _add(
    self: electrons.Wavefunction, other: electrons.Wavefunction
) -> electrons.Wavefunction:
    """Return the elementwise sum of two wavefunctions on the same basis."""
    if not isinstance(other, electrons.Wavefunction):
        return NotImplemented
    assert self.basis is other.basis
    summed = electrons.Wavefunction(
        self.basis,
        coeff=self.coeff + other.coeff,
        band_division=self.band_division,
    )
    # Carry cached projections forward only when both operands hold current ones:
    if self._proj_is_valid() and other._proj_is_valid():
        proj_self = self._proj
        proj_other = other._proj
        assert proj_self is not None
        assert proj_other is not None
        summed._proj = proj_self + proj_other
        summed._proj_version = self._proj_version
    return summed
def _getitem(self: electrons.Wavefunction, index: Any) -> electrons.Wavefunction:
    """Slice wavefunction: apply `index` to coefficients and cached projections."""
    sliced = electrons.Wavefunction(
        self.basis,
        coeff=self.coeff[index],
        band_division=self.band_division,
    )
    if self._proj_is_valid():
        proj = self._proj
        assert proj is not None
        # Projections carry an extra projector dimension at position 2,
        # so the index is remapped through _proj_index:
        sliced._proj = proj[_proj_index(index)]
        sliced._proj_version = self._proj_version
    return sliced
other._proj_is_valid(): 27 | assert self._proj is not None 28 | assert other._proj is not None 29 | self._proj[_proj_index(index)] = other._proj 30 | else: 31 | self._proj_invalidate() 32 | 33 | 34 | def _proj_index(index: Any) -> Any: 35 | """Convert wavefunction index to projector index, adding extra dim when needed.""" 36 | if isinstance(index, tuple) and len(index) > 2: 37 | return index[:2] + (slice(None),) + index[2:] # insert projector index at 2 38 | else: 39 | return index # only slicing one or two dimensions (before projector index) 40 | 41 | 42 | def _cat( 43 | self: electrons.Wavefunction, 44 | other: electrons.Wavefunction, 45 | dim: int = 2, 46 | clear: bool = False, 47 | ) -> electrons.Wavefunction: 48 | """Join wavefunctions along specified dimension (default: 2 => bands). 49 | If `clear` is True, eagerly clear memory of the input operands. 50 | Note that this will leave `self` and `other` in a broken state, 51 | so use this only if they are deleted or replaced shortly thereafter. 52 | Despite this danger, this is often necessary becaause this operation will 53 | likely be near the peak memory usage eg. 
@functools.cache
def get_smear_test_inputs() -> tuple[torch.Tensor, float, float, float]:
    """Return shared (eig, mu, sigma, deig) inputs for the smearing tests.

    Eigenvalues form a uniform grid spanning mu +/- 100*sigma with spacing
    0.01*sigma; cached so every parametrized test reuses the same tensors.
    """
    sigma = 0.005
    mu = -0.157
    deig = 0.01 * sigma
    half_width = 100 * sigma
    eig = torch.arange(mu - half_width, mu + half_width, deig, device=rc.device)
    return eig, mu, sigma, deig
class PlusU(TreeNode):
    """DFT+U correction."""

    U_values: dict[tuple[str, str], float]  #: map specie, orbital -> U value

    def __init__(
        self, *, checkpoint_in: CheckpointPath = CheckpointPath(), **U_values: float
    ) -> None:
        """Initialize from components and/or dictionary of options.

        Parameters
        ----------
        U_values
            :yaml:`Dictionary of U values by species and orbital names.`
            For example, to add U to Cu d and O s and p, the yaml input would be:

            .. code-block:: yaml

                plus_U:
                    Cu d: 2.4 eV
                    O s: 0.1 eV
                    O p: 0.7 eV
        """
        super().__init__()
        self.U_values = {}
        # Each keyword key is a "specie orbital" pair, e.g. "Cu d":
        for key, U in U_values.items():
            specie, orbital = key.split()
            # TODO: validate and map orbital codes, check against Ions
            log.info(f"  +U on {specie}: {U}")
            # NOTE(review): float(U) assumes U arrives as a plain number here;
            # unit-tagged inputs like "2.4 eV" would need conversion upstream
            # — confirm the io layer converts units before this point.
            self.U_values[(specie, orbital)] = float(U)

    def _save_checkpoint(
        self, cp_path: CheckpointPath, context: CheckpointContext
    ) -> list[str]:
        # Write one attribute per correction, keyed "specie orbital":
        attrs = cp_path.attrs
        for (specie, orbital), U in self.U_values.items():
            attrs[f"{specie} {orbital}"] = U
        return list(attrs.keys())

    def __bool__(self) -> bool:
        # Truthy only when at least one U correction is specified:
        return bool(self.U_values)

    def __call__(self, C: Wavefunction) -> Wavefunction:
        """TODO."""
        raise NotImplementedError
class BGW(TreeNode):
    """Export electronic-structure data in BerkeleyGW's HDF5 format.

    (Previous docstring was copy-pasted from geometry `Fixed` and described
    fixed-geometry optimization, which is unrelated to this exporter.)
    """

    filename: str  #: BGW-format HDF5 file to output

    def __init__(
        self,
        *,
        system: dft.System,
        filename: str,
        checkpoint_in: CheckpointPath = CheckpointPath(),
    ) -> None:
        """Export data for BerkeleyGW.

        Parameters
        ----------
        system
            Overall electronic DFT system to export data for.
        filename
            :yaml:`Filename for BerkeleyGW output.`
        """
        super().__init__()
        self.filename = filename

    def export(self, system: dft.System) -> None:
        """Export BGW HDF5 file."""
        log.info("Will do BGW export here.")
        raise NotImplementedError  # TODO: implement actual BGW export
class Exporter(Protocol):
    """Class requirements to use as an exporter within `Export`.

    (Previous docstring said "geometry action" — copy-pasted from the
    geometry module; this protocol is for export formats.)
    """

    def export(self, system: dft.System) -> None:
        ...


class Export(TreeNode):
    """Export data for other codes."""

    bgw: BGW  #: BerkeleyGW exporter (set when `bgw` is specified)
    exporters: list[Exporter]  #: all active exporters, invoked in order

    def __init__(
        self,
        *,
        system: dft.System,
        checkpoint_in: CheckpointPath = CheckpointPath(),
        bgw: Optional[Union[dict, BGW]] = None,
    ) -> None:
        """Specify one or more export formats.

        Parameters
        ----------
        bgw
            :yaml:`BerkeleyGW export.`
        """
        super().__init__()
        self.exporters = []

        if bgw is not None:
            self.add_child("bgw", BGW, bgw, checkpoint_in, system=system)
            self.exporters.append(self.bgw)

    def __call__(self, system: dft.System):
        """Run all configured exporters on `system`."""
        for exporter in self.exporters:
            exporter.export(system)

    def _save_checkpoint(
        self, cp_path: CheckpointPath, context: CheckpointContext
    ) -> list[str]:
        attrs = cp_path.attrs
        # TODO: add attributes for each exporter
        return list(attrs.keys())
class Fixed(Relax):
    """Fixed geometry, i.e. only optimize electronic degrees of freedom."""

    def __init__(
        self,
        *,
        comm: MPI.Comm,
        lattice: Lattice,
        checkpoint_in: CheckpointPath = CheckpointPath(),
    ) -> None:
        # Implemented as a zero-iteration relaxation so that all of Relax's
        # energy/force machinery is reused without taking any geometry steps:
        super().__init__(
            n_iterations=0,
            save_history=False,
            comm=comm,
            lattice=lattice,
            checkpoint_in=checkpoint_in,
        )

    def run(self, system: dft.System) -> None:
        """Solve electronic degrees of freedom at fixed ionic geometry."""
        if system.electrons.fixed_H:
            # Bypass stepper and force calculations for non-SCF calculations:
            system.electrons.run(system)
            # Relax.run would normally write the checkpoint; since it is
            # bypassed here, save it explicitly at the end:
            if system.checkpoint_out:
                with Checkpoint(system.checkpoint_out, writable=True) as cp:
                    system.save_checkpoint(CheckpointPath(cp), CheckpointContext("end"))
        else:
            Relax.run(self, system)

    def _save_checkpoint(
        self, cp_path: CheckpointPath, context: CheckpointContext
    ) -> list[str]:
        return []  # just need to bypass setting incompatible attributes from Relax
@dataclass
class Gradient:
    """Geometry gradient used for relaxation / dynamics.

    The ionic component is always present; lattice / thermostat / barostat
    components are optional and participate in arithmetic only when set.
    All binary operations assume both operands have the same optional
    components set (enforced by assertions).
    """

    ions: torch.Tensor  #: ionic gradient (forces)
    lattice: Optional[torch.Tensor] = None  #: lattice gradient (stress)
    thermostat: Optional[torch.Tensor] = None  #: thermostat gradient (e.g. Nose-Hoover)
    barostat: Optional[torch.Tensor] = None  #: barostat gradient (e.g. Nose-Hoover)
    OPTIONAL_ATTRIBUTE_NAMES: ClassVar[set[str]] = {"lattice", "thermostat", "barostat"}

    def _optional_set(self):
        """Yield (name, tensor) for each optional component set on self."""
        for name in Gradient.OPTIONAL_ATTRIBUTE_NAMES:
            if (attribute := getattr(self, name)) is not None:
                yield name, attribute

    def _optional_pairs(self, other: "Gradient"):
        """Yield (name, self_tensor, other_tensor) for each optional component
        set on self, asserting `other` has the same component set.
        Factors out the pairing loop previously duplicated in 6 operators."""
        for name, self_attribute in self._optional_set():
            other_attribute = getattr(other, name)
            assert other_attribute is not None
            yield name, self_attribute, other_attribute

    def clone(self) -> "Gradient":
        """Return an independent (detached, deep-copied) gradient."""
        result = Gradient(ions=self.ions.clone().detach())
        for name, attribute in self._optional_set():
            setattr(result, name, attribute.clone().detach())
        return result

    def __add__(self, other: "Gradient") -> "Gradient":
        result = Gradient(ions=(self.ions + other.ions))
        for name, a, b in self._optional_pairs(other):
            setattr(result, name, a + b)
        return result

    def __iadd__(self, other: "Gradient") -> "Gradient":
        self.ions += other.ions
        for _, a, b in self._optional_pairs(other):
            a += b  # in-place tensor update
        return self

    def __sub__(self, other: "Gradient") -> "Gradient":
        result = Gradient(ions=(self.ions - other.ions))
        for name, a, b in self._optional_pairs(other):
            setattr(result, name, a - b)
        return result

    def __isub__(self, other: "Gradient") -> "Gradient":
        self.ions -= other.ions
        for _, a, b in self._optional_pairs(other):
            a -= b  # in-place tensor update
        return self

    def __mul__(self, other: float) -> "Gradient":
        result = Gradient(ions=(self.ions * other))
        for name, attribute in self._optional_set():
            setattr(result, name, attribute * other)
        return result

    __rmul__ = __mul__

    def __imul__(self, other: float) -> "Gradient":
        self.ions *= other
        for _, attribute in self._optional_set():
            attribute *= other  # in-place tensor update
        return self

    def vdot(self, other: "Gradient") -> float:
        """Return overall dot product, summed over all set components."""
        result = self.ions.flatten() @ other.ions.flatten()
        for _, a, b in self._optional_pairs(other):
            result += a.flatten() @ b.flatten()
        return float(result.item())
iteration for which history available 19 | save_map: dict[str, np.ndarray] # Names and data for quantities to save 20 | 21 | def __init__( 22 | self, 23 | *, 24 | comm: MPI.Comm, 25 | n_max: int, 26 | i_iter: int = 0, 27 | checkpoint_in: CheckpointPath = CheckpointPath(), 28 | ) -> None: 29 | super().__init__() 30 | self.comm = comm 31 | self.iter_division = TaskDivision( 32 | n_tot=n_max, n_procs=comm.size, i_proc=comm.rank 33 | ) 34 | self.i_iter = i_iter 35 | self.save_map = {} 36 | 37 | if checkpoint_in: 38 | checkpoint, path = checkpoint_in 39 | assert checkpoint is not None 40 | group = checkpoint[path] 41 | i_start = self.iter_division.i_start 42 | i_stop = min(self.iter_division.i_stop, self.i_iter + 1) 43 | n_in = i_stop - i_start # number of iterations to be read at this process 44 | for name in group.keys(): 45 | dset = group[name] # version in file 46 | data = np.empty( 47 | (self.iter_division.n_mine,) + dset.shape[1:], dtype=dset.dtype 48 | ) # for version in memory, split over MPI 49 | if n_in > 0: 50 | data[:n_in] = dset[i_start:i_stop] 51 | self.save_map[name] = data 52 | 53 | def add(self, name: str, value: Union[float, torch.Tensor]) -> None: 54 | """Add current `value` for variable `name` to history.""" 55 | data = np.array(value) if isinstance(value, float) else value.to(rc.cpu).numpy() 56 | if name not in self.save_map: 57 | assert self.i_iter == 0 # if not, previous history must have been read in 58 | self.save_map[name] = np.empty( 59 | (self.iter_division.n_mine,) + data.shape, dtype=data.dtype 60 | ) 61 | if self.iter_division.is_mine(self.i_iter): 62 | i_out = self.i_iter - self.iter_division.i_start # local index 63 | self.save_map[name][i_out] = data 64 | 65 | def _save_checkpoint( 66 | self, cp_path: CheckpointPath, context: CheckpointContext 67 | ) -> list[str]: 68 | cp_path.attrs["i_iter"] = self.i_iter 69 | saved_list = [] 70 | for name, data in self.save_map.items(): 71 | self._save(cp_path.relative(name), data) 72 | 
saved_list.append(name) 73 | return saved_list 74 | 75 | def _save(self, cp_path: CheckpointPath, data: np.ndarray) -> None: 76 | """Save history `data` up to `i_iter`'th iteration to `cp_path`.""" 77 | checkpoint, path = cp_path 78 | assert checkpoint is not None 79 | dset = checkpoint.create_dataset( 80 | path, shape=(self.i_iter + 1,) + data.shape[1:], dtype=data.dtype 81 | ) 82 | i_start = self.iter_division.i_start 83 | i_stop = min(self.iter_division.i_stop, self.i_iter + 1) 84 | n_out = i_stop - i_start 85 | if n_out > 0: 86 | dset[i_start:i_stop] = data[:n_out] 87 | -------------------------------------------------------------------------------- /src/qimpy/dft/ions/README.md: -------------------------------------------------------------------------------- 1 | See [API documentation](https://qimpy.org/en/latest/api/qimpy.dft.ions.html) for an overview of package `qimpy.dft.ions`. 2 | -------------------------------------------------------------------------------- /src/qimpy/dft/ions/__init__.py: -------------------------------------------------------------------------------- 1 | """Ionic sub-system""" 2 | # List exported symbols for doc generation 3 | __all__ = [ 4 | "symbols", 5 | "PseudoQuantumNumbers", 6 | "Pseudopotential", 7 | "Ions", 8 | "Lowdin", 9 | ] 10 | 11 | from . import symbols 12 | from ._pseudo_quantum_numbers import PseudoQuantumNumbers 13 | from ._pseudopotential import Pseudopotential 14 | from ._ions import Ions 15 | from ._lowdin import Lowdin 16 | -------------------------------------------------------------------------------- /src/qimpy/dft/ions/test_pseudopotential.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import matplotlib.pyplot as plt 4 | import pytest 5 | 6 | from qimpy import rc 7 | from qimpy.io import log_config 8 | from . 
def plot_local(ps: Pseudopotential) -> None:
    """Plot local potential and atomic/core densities of pseudopotential `ps`."""
    r = ps.r.to(rc.cpu)
    plt.title(f"{ps.element} density/potential")
    plt.plot(r, ps.rho_atom.f.to(rc.cpu)[0], label=r"$\rho_{\mathrm{atom}}(r)$")
    # Core density is optional in the pseudopotential. BUG FIX: the guard
    # previously checked hasattr(ps, "nCore") while plotting ps.n_core, so the
    # check could never match the attribute actually used:
    if hasattr(ps, "n_core"):
        plt.plot(r, ps.n_core.f.to(rc.cpu)[0], label=r"$n_{\mathrm{core}}(r)$")
    # Plot r * Vloc (r-scaled local potential), matching the plot label:
    plt.plot(r, r * ps.Vloc.f.to(rc.cpu)[0], label=r"$r V_{\mathrm{loc}}(r)$")
    plt.xlabel(r"$r$")
    plt.xlim(0, 10.0)
    plt.legend()
def main():
    """Plot pseudopotentials selected via environment variables.

    Reads QIMPY_PSEUDOPOTENTIAL_TEST_PATH / QIMPY_PSEUDOPOTENTIAL_TEST_NAMES
    (parsed at module load into `ps_filenames`) and shows the plots from the
    head process only.
    """
    log_config()
    rc.init()
    if not ps_filenames:
        # Nothing selected: print usage hint (loop below then does nothing).
        print(
            """
    Specify environment variables QIMPY_PSEUDOPOTENTIAL_TEST_PATH
    and QIMPY_PSEUDOPOTENTIAL_TEST_NAMES to select pseudos to plot.
    """
        )
    # Only the head process renders plots (avoids duplicate windows under MPI):
    if rc.is_head:
        for ps_filename in ps_filenames:
            plot_pseudopotential(ps_filename)
        plt.show()
import coulomb 19 | -------------------------------------------------------------------------------- /src/qimpy/grid/coulomb/__init__.py: -------------------------------------------------------------------------------- 1 | """Coulomb interactions with support for partial periodicity""" 2 | # List exported symbols for doc generation 3 | __all__ = ( 4 | "Coulomb", 5 | "Kernel", 6 | "Ewald", 7 | "N_SIGMAS_PER_WIDTH", 8 | ) 9 | 10 | from ._coulomb import Coulomb, Kernel, Ewald 11 | import numpy as np 12 | 13 | N_SIGMAS_PER_WIDTH: float = 1.0 + np.sqrt(-2.0 * np.log(np.finfo(float).eps)) 14 | """Gaussian negligible after this many standard deviations. 15 | Evaluated at double precision with 1 extra standard deviation for margin.""" 16 | -------------------------------------------------------------------------------- /src/qimpy/grid/coulomb/_wire.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | import torch 4 | 5 | from qimpy.lattice import Lattice 6 | from qimpy.grid import Grid, FieldH, coulomb 7 | 8 | 9 | class KernelWire: 10 | """Coulomb interactions between fields with 1D periodicity: Wigner-Seitz version.""" 11 | 12 | grid: Grid 13 | i_dir: int #: Periodic direction (wire axis) 14 | _kernel: torch.Tensor # Coulomb kernel 15 | 16 | def __init__(self, coul: coulomb.Coulomb, i_dir: int) -> None: 17 | self.grid = coul.grid 18 | self.i_dir = i_dir 19 | raise NotImplementedError 20 | 21 | def __call__(self, rho: FieldH, correct_G0_width: bool = False) -> FieldH: 22 | assert self.grid is rho.grid 23 | raise NotImplementedError 24 | 25 | def stress(self, rho1: FieldH, rho2: FieldH) -> torch.Tensor: 26 | raise NotImplementedError 27 | 28 | 29 | class KernelCylindrical: 30 | """Coulomb interactions between fields with 1D periodicity: analytic version.""" 31 | 32 | grid: Grid 33 | i_dir: int #: Periodic direction (cylinder axis) 34 | radius: float #: Cylinder radius 35 | _kernel: torch.Tensor 
#: Coulomb kernel 36 | 37 | def __init__(self, coul: coulomb.Coulomb, i_dir: int) -> None: 38 | self.grid = coul.grid 39 | self.i_dir = i_dir 40 | if coul.radius: 41 | self.radius = coul.radius 42 | else: 43 | raise NotImplementedError # TODO: determine in-radius 44 | raise NotImplementedError 45 | 46 | def __call__(self, rho: FieldH, correct_G0_width: bool = False) -> FieldH: 47 | assert self.grid is rho.grid 48 | raise NotImplementedError 49 | 50 | def stress(self, rho1: FieldH, rho2: FieldH) -> torch.Tensor: 51 | raise NotImplementedError 52 | 53 | 54 | class EwaldWire: 55 | """Coulomb interactions between point charges with 1D periodicity.""" 56 | 57 | lattice: Lattice 58 | i_dir: int 59 | 60 | def __init__(self, lattice: Lattice, i_dir: int) -> None: 61 | self.lattice = lattice 62 | self.i_dir = i_dir 63 | raise NotImplementedError 64 | 65 | def __call__(self, positions: torch.Tensor, Z: torch.Tensor) -> float: 66 | raise NotImplementedError 67 | -------------------------------------------------------------------------------- /src/qimpy/grid/coulomb/test_coulomb.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | from qimpy import rc 5 | from qimpy.io import log_config 6 | from qimpy.grid.test_common import get_sequential_grid, get_reference_field 7 | from qimpy.grid import FieldH 8 | from . 
import Coulomb


def test_energy():
    """Smoke test: apply the Coulomb kernel to a reproducible reference field."""
    grid = get_sequential_grid((10, 10, 10))  # need to add 'periodic' selection here
    coulomb = Coulomb(grid=grid)  # currently only fully periodic
    fieldh = get_reference_field(FieldH, grid)
    result = coulomb.kernel(fieldh)
    assert result  # TODO: make this an actual test by checking a known result


def plot_nyquist():
    """Plot dependence of Nyquist frequency with broadening
    to inform heuristic for best ion width selection"""
    dx = 0.2  # Typical grid spacing at 100 Eh plane-wave spacing
    rTest = 3.0  # Worst-case ion-fluid spacing (H in H3O+, NonlinearPCM)
    N = 128  # number of 1D grid points
    x = dx * np.arange(N)
    L = dx * N  # box length of the 1D test grid
    # FFT frequencies in standard order (non-negative first, then negative):
    G = (2 * np.pi / L) * np.concatenate((np.arange(N // 2), np.arange(N // 2 - N, 0)))
    sigma_by_dx = np.arange(1.0, 4.0, 0.1)  # candidate widths, in units of dx
    sigma = dx * sigma_by_dx
    # Reciprocal-space Gaussians, one row per candidate width:
    fTilde = np.exp(-0.5 * (sigma[:, None] * G[None, :]) ** 2) * (1.0 / L)
    f = np.fft.fft(fTilde, axis=-1).real  # corresponding real-space profiles

    # Visualize f near test radius:
    plt.figure()
    for i, f_i in enumerate(f):
        plt.plot(x, np.abs(f_i), label=r"$\sigma$/dx=" + f"{sigma_by_dx[i]:.1f}")
    plt.axvline(rTest, color="k", ls="dotted", lw=1)
    plt.xlim(0, 2 * rTest)
    plt.ylim(1e-14, 1.0)
    plt.yscale("log")
    plt.legend(bbox_to_anchor=(1, 1), loc="upper left")

    # MAE between rTest and 2*rTest vs sigma:
    # NOTE: np.where returns a 1-tuple of indices, so f[:, sel] carries an
    # extra singleton axis; mean(axis=-1) still averages over the selected
    # radii and matplotlib accepts the (n_sigma, 1) result.
    sel = np.where(np.logical_and(x >= rTest, x <= 2 * rTest))
    mae = np.abs(f[:, sel]).mean(axis=-1)
    plt.figure()
    plt.plot(sigma_by_dx, mae)
    plt.yscale("log")
    plt.ylabel("MAE")
    plt.xlabel(r"$\sigma$/dx")
    plt.show()


def main():
    """Entry point: initialize run config and plot from the head process only."""
    log_config()
    rc.init()
    if rc.is_head:  # plotting is serial; other MPI ranks do nothing
        plot_nyquist()


if __name__ == "__main__":
    main()
--------------------------------------------------------------------------------
/src/qimpy/grid/test_common.py:
--------------------------------------------------------------------------------
from typing import Type, Sequence
from functools import cache

from qimpy import rc
from qimpy.io import Unit
from qimpy.lattice import Lattice
from qimpy.symmetries import Symmetries
from . import Grid
from ._field import FieldType


@cache
def get_sequential_grid(shape: Sequence[int]) -> Grid:
    """Cached serial grid (comm=None) of given `shape` on the dummy test lattice."""
    lattice, symmetries = get_grid_inputs()
    return Grid(lattice=lattice, symmetries=symmetries, shape=shape, comm=None)


@cache
def get_parallel_grid(shape: Sequence[int]) -> Grid:
    """Cached MPI-parallel grid (comm=rc.comm) of given `shape`, matching the
    lattice/symmetries of :func:`get_sequential_grid` for comparison tests."""
    lattice, symmetries = get_grid_inputs()
    return Grid(lattice=lattice, symmetries=symmetries, shape=shape, comm=rc.comm)


@cache
def get_reference_field(
    cls: Type[FieldType], grid: Grid, shape_batch: tuple[int, ...] = (2, 3)
) -> FieldType:
    """MPI-reproducible field of specified type on given `grid`."""
    # NOTE(review): @cache returns the same field object on repeated calls with
    # the same arguments; callers must not mutate it in place, or subsequent
    # tests would see the modified data — confirm callers treat it read-only.
    result = cls(grid, shape_batch=shape_batch)  # all zeroes
    result.randomize(seed=0)  # fixed seed => identical data across MPI layouts
    return result


@cache
def get_grid_inputs() -> tuple[Lattice, Symmetries]:
    """Get dummy lattice etc. needed to create grid."""
    lattice = Lattice(
        system=dict(
            name="triclinic",
            a=2.1,
            b=2.2,
            c=2.3,
            alpha=75 * Unit.MAP["deg"],
            beta=80 * Unit.MAP["deg"],
            gamma=85 * Unit.MAP["deg"],
        )
    )  # pick one with no symmetries
    return lattice, Symmetries(lattice=lattice)
--------------------------------------------------------------------------------
/src/qimpy/grid/test_fft.py:
--------------------------------------------------------------------------------
from typing import Sequence, Type

import pytest

from qimpy import rc, log
from qimpy.io import log_config
from qimpy.profiler import StopWatch
from .
import FieldType, FieldR, FieldH, FieldC, FieldG 9 | from .test_common import get_sequential_grid, get_parallel_grid, get_reference_field 10 | 11 | 12 | def get_shape_batch_field_combinations( 13 | include_tilde: bool, 14 | ) -> Sequence[tuple[Sequence[int], Sequence[int], Type]]: 15 | shapes = ((48, 64, 96), (64, 72, 128)) 16 | n_batches = ((2, 3), tuple[int, ...]()) 17 | field_types = [FieldR, FieldC] 18 | if include_tilde: 19 | field_types += [FieldH, FieldG] 20 | return [ 21 | (shape, n_batch, field_type) 22 | for shape, n_batch in zip(shapes, n_batches) 23 | for field_type in field_types 24 | ] 25 | 26 | 27 | @pytest.mark.mpi 28 | @pytest.mark.parametrize( 29 | "shape, n_batch, cls", get_shape_batch_field_combinations(include_tilde=True) 30 | ) 31 | def test_fft( 32 | shape: Sequence[int], n_batch: Sequence[int], cls: Type[FieldType], n_repeat=0 33 | ) -> None: 34 | """Check parallel FFT against serial version.""" 35 | # Create sequential and parallel grids of same shape: 36 | grid_s = get_sequential_grid(shape) 37 | grid_p = get_parallel_grid(shape) 38 | # Create fields that are supposed to be identical on both grids: 39 | field_s = get_reference_field(cls, grid_s, n_batch) 40 | field_p = get_reference_field(cls, grid_p, n_batch) 41 | # Check that serial and parallel versions match: 42 | field_s_tilde = ~field_s 43 | tol = 1e-8 * field_s_tilde.norm().max() 44 | assert (field_s_tilde.to(grid_p) - (~field_p)).norm().max().item() < tol 45 | # Time repetitions if needed: 46 | if n_repeat: 47 | for field, name in ((field_s, "seq"), (field_p, "par")): 48 | for i_repeat in range(n_repeat): 49 | watch = StopWatch(f"{cls.__name__}.fft({name})") 50 | field_tilde = ~field 51 | watch.stop() 52 | log.info(f"Rep: {i_repeat} norm: {field_tilde.norm().max().item()}") 53 | 54 | 55 | def main(): 56 | log_config() 57 | rc.init() 58 | for shape, n_batch, field_type in get_shape_batch_field_combinations( 59 | include_tilde=True 60 | ): 61 | test_fft(shape, n_batch, field_type, 
n_repeat=10) 62 | StopWatch.print_stats() 63 | 64 | 65 | if __name__ == "__main__": 66 | main() 67 | -------------------------------------------------------------------------------- /src/qimpy/grid/test_field.py: -------------------------------------------------------------------------------- 1 | from typing import Sequence, Type 2 | 3 | import pytest 4 | import torch 5 | 6 | from . import FieldType 7 | from .test_common import get_parallel_grid, get_reference_field 8 | from .test_fft import get_shape_batch_field_combinations 9 | 10 | 11 | @pytest.mark.parametrize( 12 | "shape, n_batch, cls", get_shape_batch_field_combinations(include_tilde=False) 13 | ) 14 | def test_parseval( 15 | shape: Sequence[int], n_batch: Sequence[int], cls: Type[FieldType], n_repeat=0 16 | ) -> None: 17 | grid = get_parallel_grid(shape) 18 | field = get_reference_field(cls, grid, n_batch) 19 | field_tilde = ~field 20 | result = field ^ field 21 | result_tilde = field_tilde ^ field_tilde 22 | assert (result - result_tilde).norm() < 1e-8 * result.norm() 23 | 24 | 25 | @pytest.mark.parametrize( 26 | "shape, n_batch, cls", get_shape_batch_field_combinations(include_tilde=False) 27 | ) 28 | def test_integral( 29 | shape: Sequence[int], n_batch: Sequence[int], cls: Type[FieldType], n_repeat=0 30 | ) -> None: 31 | grid = get_parallel_grid(shape) 32 | field = get_reference_field(cls, grid, n_batch) 33 | # Test integral against dot product with 1s 34 | ones = cls(grid, data=torch.ones_like(field.data)) 35 | integral_ref = ones ^ field 36 | tol = 1e-8 * integral_ref.norm() 37 | assert (field.integral() - integral_ref).norm() < tol 38 | assert ((~field).integral() - integral_ref).norm() < tol 39 | -------------------------------------------------------------------------------- /src/qimpy/interfaces/README.md: -------------------------------------------------------------------------------- 1 | See [API documentation](https://qimpy.org/en/latest/api/qimpy.interfaces.html) for an overview of package 
`qimpy.interfaces`. 2 | -------------------------------------------------------------------------------- /src/qimpy/interfaces/__init__.py: -------------------------------------------------------------------------------- 1 | """Interfaces to other codes.""" 2 | -------------------------------------------------------------------------------- /src/qimpy/interfaces/ase.py: -------------------------------------------------------------------------------- 1 | from ase.calculators.calculator import Calculator 2 | 3 | from qimpy import rc, dft 4 | from qimpy.io import Unit 5 | 6 | 7 | class QimPy(Calculator): 8 | 9 | implemented_properties = ["energy", "forces"] 10 | 11 | def __init__(self, pseudopotentials: str, **kwargs) -> None: 12 | """Qimpy calculator for ASE 13 | 14 | restart: str 15 | Prefix for restart file. May contain a directory. Default 16 | is None: don't restart. 17 | ignore_bad_restart_file: bool 18 | Deprecated, please do not use. 19 | Passing more than one positional argument to Calculator() 20 | is deprecated and will stop working in the future. 21 | Ignore broken or missing restart file. By default, it is an 22 | error if the restart file is missing or broken. 23 | directory: str or PurePath 24 | Working directory in which to read and write files and 25 | perform calculations. 26 | label: str 27 | Name used for all files. Not supported by all calculators. 28 | May contain a directory, but please use the directory parameter 29 | for that instead. 30 | atoms: Atoms object 31 | Optional Atoms object to which the calculator will be 32 | attached. When restarting, atoms will get its positions and 33 | unit-cell updated from file. 
34 | """ 35 | Calculator.__init__(self, **kwargs) 36 | self.pseudopotentials = pseudopotentials 37 | 38 | def calculate(self, atoms=None, properties=["energy"], system_changes=[]) -> None: 39 | # Necessary units (to match ASE) 40 | angstrom = Unit.MAP["Angstrom"] 41 | eV = Unit.MAP["eV"] 42 | 43 | # Obtain lattice parameters and structure 44 | 45 | # Get lattice vectors (3x3 array): 46 | lattice = atoms.get_cell()[:] * angstrom 47 | 48 | # Get atomic positions 49 | positions = atoms.get_scaled_positions() 50 | 51 | # Get symbols 52 | symbols = atoms.get_chemical_symbols() 53 | 54 | lattice_dict = { 55 | "vector1": lattice[0].tolist(), 56 | "vector2": lattice[1].tolist(), 57 | "vector3": lattice[2].tolist(), 58 | "movable": False, 59 | } 60 | 61 | # Horrible hardcode but default pseudopotentials need to be specified... 62 | coordinates = [ 63 | [symbol] + position.tolist() for symbol, position in zip(symbols, positions) 64 | ] 65 | ions = { 66 | "coordinates": coordinates, 67 | "fractional": True, 68 | "pseudopotentials": self.pseudopotentials, 69 | } 70 | 71 | rc.init() 72 | system = dft.System(lattice=lattice_dict, ions=ions) 73 | system.run() 74 | 75 | self.results = {"energy": float(system.energy) * eV} 76 | -------------------------------------------------------------------------------- /src/qimpy/interfaces/cif.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import yaml 3 | 4 | import numpy as np 5 | from ase.io import read 6 | 7 | from qimpy.io import Unit 8 | 9 | 10 | def _parse_cif(cif_file) -> tuple[dict, dict]: 11 | 12 | cif_data = read(cif_file, format="cif") 13 | 14 | latt_vec = cif_data.get_cell()[:] * Unit.MAP["Angstrom"] 15 | coords = cif_data.get_scaled_positions() 16 | species = cif_data.get_chemical_symbols() 17 | pbc = cif_data.get_pbc() 18 | 19 | lattice = { 20 | "vector1": latt_vec[0, :].tolist(), 21 | "vector2": latt_vec[1, :].tolist(), 22 | "vector3": latt_vec[2, :].tolist(), 23 | 
"periodic": np.array(pbc).tolist(), 24 | } # list(pbc) returns something weird but putting it to an array and back doesn't 25 | 26 | coordinates = [ 27 | [symbol] + position.tolist() for symbol, position in zip(species, coords) 28 | ] 29 | 30 | ions = {"ions": {"coordinates": coordinates, "fractional": True}} 31 | lattice = {"lattice": lattice} 32 | 33 | return lattice, ions 34 | 35 | 36 | def write_yaml(cif_file: str, yaml_file: str) -> None: 37 | """Write YAML file from CIF file. 38 | 39 | Parameters 40 | ---------- 41 | cif_file 42 | CIF file to be converted 43 | yaml_file 44 | Output file in YAML format. 45 | 46 | Usage 47 | ----- 48 | :code:`python -m qimpy.interfaces.cif [-h] -f FILE [-y FILE]` 49 | 50 | Command-line parameters (obtained using :code:`python -m qimpy.interfaces.cif -h`): 51 | 52 | .. code-block:: bash 53 | 54 | python -m qimpy.interfaces.cif [-h] -f FILE [-y FILE] 55 | 56 | write YAML file from CIF file 57 | 58 | options: 59 | -h, --help show this help message and exit 60 | -f FILE, --cif-file FILE 61 | checkpoint file in HDF5 format 62 | -y FILE, --yaml-file FILE 63 | output file in XSF format (in.yaml if unspecified) 64 | """ 65 | 66 | lattice, ions = _parse_cif(cif_file) 67 | 68 | with open("in.yaml", "w") as f: 69 | yaml.dump(lattice, f, default_flow_style=None, allow_unicode=True) 70 | yaml.dump(ions, f, default_flow_style=None, allow_unicode=True) 71 | 72 | 73 | def main() -> None: 74 | command_parser = argparse.ArgumentParser( 75 | prog="python -m qimpy.interfaces.cif", 76 | description="Parse CIF file to YAML input", 77 | ) 78 | group = command_parser.add_mutually_exclusive_group(required=True) 79 | group.add_argument("-f", "--cif-file", metavar="FILE", help="CIF file to be parsed") 80 | command_parser.add_argument( 81 | "-y", 82 | "--yaml-file", 83 | default="in.yaml", 84 | metavar="FILE", 85 | help="output file in YAML format (in.yaml if unspecified)", 86 | ) 87 | 88 | args = command_parser.parse_args() 89 | write_yaml(args.cif_file, 
args.yaml_file) 90 | 91 | 92 | if __name__ == "__main__": 93 | main() 94 | -------------------------------------------------------------------------------- /src/qimpy/io/README.md: -------------------------------------------------------------------------------- 1 | See [API documentation](https://qimpy.org/en/latest/api/qimpy.io.html) for an overview of package `qimpy.io`. 2 | -------------------------------------------------------------------------------- /src/qimpy/io/__init__.py: -------------------------------------------------------------------------------- 1 | """I/O functionality including checkpoints and logging.""" 2 | # List exported symbols for doc generation 3 | __all__ = ( 4 | "log_config", 5 | "fmt", 6 | "Default", 7 | "WithDefault", 8 | "cast_default", 9 | "CheckpointOverrideException", 10 | "InvalidInputException", 11 | "check_only_one_specified", 12 | "TensorCompatible", 13 | "cast_tensor", 14 | "Unit", 15 | "UnitOrFloat", 16 | "dict", 17 | "yaml", 18 | "Checkpoint", 19 | "CheckpointPath", 20 | "CheckpointContext", 21 | ) 22 | 23 | from ._log_config import log_config, fmt 24 | from ._default import Default, WithDefault, cast_default 25 | from ._error import ( 26 | CheckpointOverrideException, 27 | InvalidInputException, 28 | check_only_one_specified, 29 | ) 30 | from ._tensor import TensorCompatible, cast_tensor 31 | from ._unit import Unit, UnitOrFloat 32 | from . import dict, yaml 33 | from ._checkpoint import Checkpoint, CheckpointPath, CheckpointContext 34 | -------------------------------------------------------------------------------- /src/qimpy/io/_default.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import Generic, Union, TypeVar 3 | from dataclasses import dataclass 4 | 5 | 6 | T = TypeVar("T") 7 | 8 | 9 | @dataclass(frozen=True) 10 | class Default(Generic[T]): 11 | """Typed default value for a function argument. 
12 | Use as a sentinel to specify a default value, instead of None. 13 | This allows passing in a default value, and keeping track of whether 14 | the argument was explicitly passed in or a default within the function. 15 | """ 16 | 17 | value: T #: The underlying default value 18 | 19 | def __repr__(self) -> str: 20 | return f"Default({self.value})" 21 | 22 | 23 | WithDefault = Union[T, Default[T]] #: Type alias for a type or its default value 24 | 25 | 26 | def cast_default(item: WithDefault[T]) -> T: 27 | """Cast an optional default to retain only the value.""" 28 | if isinstance(item, Default): 29 | return item.value 30 | else: 31 | return item 32 | 33 | 34 | def test_default(param: WithDefault[bool] = Default(False)) -> None: 35 | is_default = isinstance(param, Default) 36 | value = cast_default(param) 37 | print(f"param = {value} was {'' if is_default else 'not '}specified as a default") 38 | -------------------------------------------------------------------------------- /src/qimpy/io/_error.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | 4 | class InvalidInputException(Exception): 5 | def __init__(self, message: str) -> None: 6 | super().__init__(message) 7 | 8 | 9 | def check_only_one_specified(**kwargs) -> None: 10 | """Check that exactly one of `kwargs` is not None. 
If not, raise an exception.""" 11 | n_specified = sum((0 if x is None else 1) for x in kwargs.values()) 12 | if n_specified != 1: 13 | names = ", ".join(kwargs.keys()) 14 | raise InvalidInputException(f"Exactly one of {names} must be specified") 15 | 16 | 17 | class CheckpointOverrideException(InvalidInputException): 18 | def __init__(self, var_name: str) -> None: 19 | super().__init__( 20 | f"Cannot override parameter '{var_name}' when reading from checkpoint" 21 | ) 22 | -------------------------------------------------------------------------------- /src/qimpy/io/_log_config.py: -------------------------------------------------------------------------------- 1 | from typing import Optional, Union 2 | import logging 3 | import sys 4 | 5 | import numpy as np 6 | import torch 7 | 8 | from qimpy import rc, log, MPI 9 | 10 | 11 | def log_config( 12 | *, 13 | output_file: Optional[str] = None, 14 | mpi_log: Optional[str] = None, 15 | mpi_comm: Optional[MPI.Comm] = None, 16 | append: bool = True, 17 | verbose: bool = False 18 | ): 19 | """Configure logging globally for the qimpy library. It should typically 20 | only be necessary to call this once during start-up. Note that the default 21 | log configuration before calling this function is to print only warnings 22 | and errors will from all processes to stdout. 23 | 24 | For further customization, directly modify the :class:`logging.Logger` 25 | object :attr:`~qimpy.log`, as required. 26 | 27 | Parameters 28 | ---------- 29 | output_file 30 | Output file to write from MPI rank 0. 31 | Default = None implies log to stdout. 32 | mpi_log 33 | Higher-rank MPI processes will log to . if given. 34 | Default = None implies log only from head (rank=0) process. 35 | mpi_comm 36 | MPI communicator whose rank determines logging behavior. 37 | Default = None implies use COMM_WORLD. 38 | append 39 | Whether log files should be appended or overwritten. 
40 | verbose 41 | Whether to log debug information including module/line numbers of code. 42 | """ 43 | 44 | # Create handler with appropriate output file and mode, if any: 45 | i_proc = (mpi_comm if mpi_comm else MPI.COMM_WORLD).Get_rank() 46 | is_head = i_proc == 0 47 | filemode = "a" if append else "w" 48 | filename = "" 49 | if is_head and output_file: 50 | filename = output_file 51 | if (not is_head) and mpi_log: 52 | filename = mpi_log + "." + str(i_proc) 53 | handler = get_handler(filename, filemode) 54 | 55 | # Set log format: 56 | handler.setFormatter( 57 | logging.Formatter( 58 | ("[%(module)s:%(lineno)d] " if verbose else "") + "%(message)s" 59 | ) 60 | ) 61 | 62 | # Set handler: 63 | log.handlers.clear() 64 | log.addHandler(handler) 65 | 66 | # Select log level: 67 | if is_head or ((not is_head) and mpi_log): 68 | log.setLevel(logging.DEBUG if verbose else logging.INFO) 69 | else: 70 | log.setLevel(logging.WARNING) 71 | 72 | 73 | def fmt(tensor: Union[torch.Tensor, np.ndarray], **kwargs) -> str: 74 | """Standardized conversion of torch tensors and numpy arrays for logging. 
75 | Keyword arguments are forwarded to `numpy.array2string`.""" 76 | # Set some defaults in formatter: 77 | kwargs.setdefault("precision", 8) 78 | kwargs.setdefault("suppress_small", True) 79 | kwargs.setdefault("separator", ", ") 80 | return np.array2string( 81 | tensor.detach().to(rc.cpu).numpy() 82 | if isinstance(tensor, torch.Tensor) 83 | else tensor, 84 | **kwargs 85 | ) 86 | 87 | 88 | def get_handler(filename: str, filemode: str) -> logging.Handler: 89 | if filename: 90 | return logging.FileHandler(filename, mode=filemode) 91 | else: 92 | return logging.StreamHandler(sys.stdout) 93 | -------------------------------------------------------------------------------- /src/qimpy/io/_tensor.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import Union, Sequence 3 | 4 | import torch 5 | import numpy as np 6 | 7 | from qimpy import rc 8 | from . import InvalidInputException 9 | 10 | 11 | TensorCompatible = Union[torch.Tensor, np.ndarray, float, Sequence[float]] 12 | 13 | 14 | def cast_tensor(t: TensorCompatible) -> torch.Tensor: 15 | """Convert `t` to a torch tensor on current device (if not already so). 
16 | Useful to handle input from yaml, checkpoint or python code on an equal footing.""" 17 | if isinstance(t, torch.Tensor): 18 | return t.to(rc.device) 19 | if isinstance(t, np.ndarray): 20 | return torch.from_numpy(t).to(rc.device) 21 | try: 22 | return torch.tensor(t, device=rc.device) 23 | except ValueError: 24 | raise InvalidInputException(f"Could not convert {t} to a tensor") 25 | -------------------------------------------------------------------------------- /src/qimpy/io/dict.py: -------------------------------------------------------------------------------- 1 | """Utilities to manipulate dictionaries used as input to constructors.""" 2 | __all__ = ["key_cleanup", "flatten", "unflatten", "merge", "remove_units"] 3 | 4 | from typing import Callable, Union 5 | 6 | from . import Unit 7 | 8 | 9 | def key_cleanup(params: dict) -> dict: 10 | """Clean-up dictionary keys for use in constructors. 11 | This is required eg. for dicts from YAML to make sure keys are compatible 12 | with passing as keyword-only arguments to constructors. Currently, this 13 | replaces hyphens (which look nicer) in keys to underscores internally, 14 | so that they become valid identifiers within the code.""" 15 | return dict((k.replace("-", "_"), v) for k, v in params.items()) 16 | 17 | 18 | def flatten(d: dict, _key_prefix: tuple = tuple()) -> dict: 19 | """Convert nested dict `d` to a flat dict with tuple keys. 20 | Input `_key_prefix` is prepended to the keys of the resulting dict, 21 | and is used internally for recursively flattening the dict.""" 22 | result = {} 23 | for key, value in d.items(): 24 | flat_key = _key_prefix + (key,) 25 | if isinstance(value, dict): 26 | result.update(flatten(value, flat_key)) 27 | else: 28 | result[flat_key] = value 29 | return result 30 | 31 | 32 | def unflatten(d: dict) -> dict: 33 | """Unpack tuple keys in `d` to a nested dictionary. 
34 | (Inverse of :func:`flatten`.)""" 35 | result: dict = {} 36 | for key_tuple, value in d.items(): 37 | assert isinstance(key_tuple, tuple) 38 | target = result # where to add value 39 | for key in key_tuple[:-1]: 40 | if key not in target: 41 | target[key] = {} 42 | target = target[key] # traverse down each key in tuple 43 | target[key_tuple[-1]] = value 44 | return result 45 | 46 | 47 | def merge(d_list: list[dict]) -> dict: 48 | """Merge a list of nested dictonaries `d_list`. 49 | The dictionaries are processed in order, with each dictionary overriding 50 | values associated with keys present in previous dictionaries.""" 51 | result = {} 52 | for d in d_list: 53 | result.update(flatten(d)) 54 | return unflatten(result) 55 | 56 | 57 | def remove_units(d: dict) -> dict: 58 | """Recursively remove any units (convert each qimpy.mpi.Unit to float).""" 59 | return { 60 | key: ( 61 | value 62 | if ((remover := units_remover.get(value.__class__)) is None) 63 | else remover(value) 64 | ) 65 | for key, value in d.items() 66 | } 67 | 68 | 69 | LT = Union[list, tuple] 70 | 71 | 72 | def remove_units_list(lt: LT) -> LT: 73 | """Same as `remove_units` but for `list` or `tuple` objects instead.""" 74 | return lt.__class__( 75 | ( 76 | value 77 | if ((remover := units_remover.get(value.__class__)) is None) 78 | else remover(value) 79 | ) 80 | for value in lt 81 | ) 82 | 83 | 84 | units_remover: dict[type, Callable] = { 85 | dict: remove_units, 86 | list: remove_units_list, 87 | tuple: remove_units_list, 88 | Unit: float, 89 | } 90 | -------------------------------------------------------------------------------- /src/qimpy/io/yaml.py: -------------------------------------------------------------------------------- 1 | """YAML wrappers handling includes and environment substitution.""" 2 | __all__ = ("load", "dump") 3 | 4 | import yaml 5 | import os 6 | 7 | from .dict import merge as dict_merge 8 | 9 | 10 | def load(filename: str, already_included: tuple = tuple()) -> dict: 11 
| """Load input from `filename` in YAML format to a nested dict. 12 | Handles environment substitution and processes `include` keys. 13 | Keep track of `already_included` filenames to prevent cyclic includes, 14 | when recursively processing include directives.""" 15 | with open(filename) as f: 16 | result = yaml.safe_load(os.path.expandvars(f.read())) 17 | return _process_includes(result, already_included + (filename,)) 18 | 19 | 20 | def dump(d: dict) -> str: 21 | """Convert nested dictionary to YAML-format string.""" 22 | return yaml.dump(d, default_flow_style=None, allow_unicode=True) 23 | 24 | 25 | def _process_includes(d: dict, already_included: tuple) -> dict: 26 | """Recursively process `include` directives in nested dictionary.""" 27 | # Process any includes in inner dictionaries recursively: 28 | for key, value in d.items(): 29 | if isinstance(value, dict): 30 | d[key] = _process_includes(value, already_included) 31 | # Process include at current level: 32 | include_names = d.pop("include", []) 33 | if include_names: 34 | if isinstance(include_names, str): 35 | include_names = [include_names] # convert single str to list[str] 36 | d_list = [] 37 | for include_name in include_names: 38 | if include_name in already_included: 39 | raise RecursionError( 40 | "Cyclic include " 41 | f'{" > ".join(already_included)}' 42 | f" > {include_name}" 43 | ) 44 | d_list.append(load(include_name, already_included)) 45 | d_list.append(d) # current dict is last (highest priority) 46 | d = dict_merge(d_list) 47 | return d 48 | -------------------------------------------------------------------------------- /src/qimpy/lattice/README.md: -------------------------------------------------------------------------------- 1 | See [API documentation](https://qimpy.org/en/latest/api/qimpy.lattice.html) for an overview of package `qimpy.lattice`. 
from typing import Optional

import torch
import numpy as np


def get_Rbasis(*, name: str, **kwargs) -> torch.Tensor:
    """Create lattice vectors from lattice system and modification"""
    constructors = {
        "cubic": _get_Rbasis_cubic,
        "tetragonal": _get_Rbasis_tetragonal,
        "orthorhombic": _get_Rbasis_orthorhombic,
        "hexagonal": _get_Rbasis_hexagonal,
        "rhombohedral": _get_Rbasis_rhombohedral,
        "monoclinic": _get_Rbasis_monoclinic,
        "triclinic": _get_Rbasis_triclinic,
    }
    constructor = constructors[name.lower()]
    return constructor(**kwargs)  # type: ignore


def _centering_body() -> torch.Tensor:
    """Transformation from simple to body-centered lattice vectors.
    (Constructed per call so it picks up the current torch default dtype.)"""
    return 0.5 * torch.tensor([[-1, 1, 1], [1, -1, 1], [1, 1, -1]])


def _centering_base() -> torch.Tensor:
    """Transformation from simple to base-centered lattice vectors."""
    return 0.5 * torch.tensor([[1, -1, 0], [1, 1, 0], [0, 0, 2]])


def _centering_face() -> torch.Tensor:
    """Transformation from simple to face-centered lattice vectors."""
    return 0.5 * torch.tensor([[0, 1, 1], [1, 0, 1], [1, 1, 0]])


def _get_Rbasis_cubic(*, a: float, modification: Optional[str] = None) -> torch.Tensor:
    """Cubic lattice: simple, or body-/face-centered via `modification`."""
    Rbasis = _get_Rbasis_lengths_angles(a, a, a)
    if modification is None:
        return Rbasis
    centering = {
        "body-centered": _centering_body(),
        "face-centered": _centering_face(),
    }
    return Rbasis @ centering[modification.lower()]


def _get_Rbasis_tetragonal(
    *, a: float, c: float, modification: Optional[str] = None
) -> torch.Tensor:
    """Tetragonal lattice: simple, or body-centered via `modification`."""
    Rbasis = _get_Rbasis_lengths_angles(a, a, c)
    if modification is None:
        return Rbasis
    assert modification.lower() == "body-centered"
    return Rbasis @ _centering_body()


def _get_Rbasis_orthorhombic(
    *, a: float, b: float, c: float, modification: Optional[str] = None
) -> torch.Tensor:
    """Orthorhombic lattice: simple, body-, base- or face-centered."""
    Rbasis = _get_Rbasis_lengths_angles(a, b, c)
    if modification is None:
        return Rbasis
    centering = {
        "body-centered": _centering_body(),
        "base-centered": _centering_base(),
        "face-centered": _centering_face(),
    }
    return Rbasis @ centering[modification.lower()]


def _get_Rbasis_hexagonal(*, a: float, c: float) -> torch.Tensor:
    """Hexagonal lattice: gamma fixed at 120 degrees."""
    return _get_Rbasis_lengths_angles(a, a, c, gamma=(2 / 3 * np.pi))


def _get_Rbasis_rhombohedral(*, a: float, alpha: float) -> torch.Tensor:
    """Rhombohedral lattice: equal lengths and equal angles."""
    return _get_Rbasis_lengths_angles(a, a, a, alpha, alpha, alpha)


def _get_Rbasis_monoclinic(
    *, a: float, b: float, c: float, beta: float, modification: Optional[str] = None
) -> torch.Tensor:
    """Monoclinic lattice: simple, or base-centered via `modification`."""
    Rbasis = _get_Rbasis_lengths_angles(a, b, c, beta=beta)
    if modification is None:
        return Rbasis
    assert modification.lower() == "base-centered"
    return Rbasis @ _centering_base()


def _get_Rbasis_triclinic(
    *, a: float, b: float, c: float, alpha: float, beta: float, gamma: float
) -> torch.Tensor:
    """Triclinic lattice: fully general lengths and angles."""
    return _get_Rbasis_lengths_angles(a, b, c, alpha, beta, gamma)


def _get_Rbasis_lengths_angles(
    a: float,
    b: float,
    c: float,
    alpha: float = 0.5 * np.pi,
    beta: float = 0.5 * np.pi,
    gamma: float = 0.5 * np.pi,
) -> torch.Tensor:
    """Construct lattice vectors (columns of result) from the three lattice
    constants `a`, `b`, `c` and the angles `alpha` (b^c), `beta` (c^a) and
    `gamma` (a^b), all defaulting to right angles.
    The first vector is along x, the second in the xy-plane."""
    cos_alpha, cos_beta = np.cos(alpha), np.cos(beta)
    cos_gamma, sin_gamma = np.cos(gamma), np.sin(gamma)
    vec_a = np.array((1.0, 0.0, 0.0))
    vec_b = np.array((cos_gamma, sin_gamma, 0.0))
    # Components of third unit vector fixed by the two remaining angles:
    cy = (cos_alpha - cos_beta * cos_gamma) / sin_gamma
    vec_c = np.array((cos_beta, cy, np.sqrt(1.0 - cos_beta * cos_beta - cy * cy)))
    return torch.tensor(np.stack((a * vec_a, b * vec_b, c * vec_c), axis=1))
| f" edges and {len(ws.vertices)} vertices. (Expected {n_faces} faces," 47 | f" {n_edges} edges and {n_vertices} vertices.)" 48 | ) 49 | file_handle, x3d_filename = mkstemp(suffix=".x3d") 50 | os.close(file_handle) # WignerSeitz.write_x3d will reopen file 51 | ws.write_x3d(x3d_filename) 52 | subprocess.run([x3d_viewer, x3d_filename], capture_output=True) 53 | os.remove(x3d_filename) 54 | 55 | 56 | if __name__ == "__main__": 57 | main() 58 | -------------------------------------------------------------------------------- /src/qimpy/math/__init__.py: -------------------------------------------------------------------------------- 1 | """Math functions extending the core torch set.""" 2 | # List exported symbols for doc generation 3 | __all__ = ( 4 | "prime_factorization", 5 | "fft_suitable", 6 | "ceildiv", 7 | "cis", 8 | "abs_squared", 9 | "dagger", 10 | "accum_norm_", 11 | "accum_prod_", 12 | "ortho_matrix", 13 | "eighg", 14 | "spherical_harmonics", 15 | "spherical_bessel", 16 | "quintic_spline", 17 | "random", 18 | "RadialFunction", 19 | ) 20 | 21 | from ._integer import prime_factorization, fft_suitable, ceildiv 22 | from ._linalg import ( 23 | cis, 24 | abs_squared, 25 | dagger, 26 | accum_norm_, 27 | accum_prod_, 28 | ortho_matrix, 29 | eighg, 30 | ) 31 | from . 
def prime_factorization(N: int) -> list[int]:
    """Get list of prime factors of `N` in ascending order"""
    factors: list[int] = []
    divisor = 2
    while divisor * divisor <= N:
        quotient, remainder = divmod(N, divisor)
        if remainder:
            divisor += 1  # not a factor: try next candidate
        else:
            factors.append(divisor)  # record factor and divide it out
            N = quotient
    if N > 1:  # any left-over factor must be prime itself
        factors.append(N)
    return factors


def fft_suitable(N: int) -> bool:
    """Check whether `N` has only small prime factors. Return True if
    the prime factorization of `N` is suitable for efficient FFTs,
    that is contains only 2, 3, 5 and 7."""
    remainder = N
    for small_prime in (2, 3, 5, 7):
        while remainder % small_prime == 0:
            remainder //= small_prime
    # All suitable prime factors taken out
    # --- a suitable N should be left with just 1
    return remainder == 1
def initialize_state(state: torch.Tensor) -> None:
    """Improve initial state randomness using split-mix-64 method.
    Modifies `state` (which must be int64) in-place."""
    assert state.dtype is torch.int64
    state += -7046029254386353131  # split-mix-64 increment
    # Two xor-shift/multiply mixing rounds, then a final xor-shift:
    for shift, multiplier in ((30, -4658895280553007687), (27, -7723592293110705685)):
        state ^= state >> shift
        state *= multiplier
    state ^= state >> 31


def rand(state: torch.Tensor) -> torch.Tensor:
    """Generate real uniform random Tensor using integer tensor `state`.
    Uses a 64-bit xor-shift generator in parallel for fast generation."""
    state ^= state << 13
    state ^= state >> 7
    state ^= state << 17
    # Map full signed-int64 range linearly onto [0, 1):
    return 0.5 + state * (0.5**64)


def randn(state: torch.Tensor) -> torch.Tensor:
    """Generate complex standard-normal random Tensor using integer tensor `state`."""
    # Box-Muller: two uniform draws (note `state` advances on each rand call):
    radius = torch.sqrt(torch.log(rand(state)) * -2.0)
    angle = rand(state) * (2 * np.pi)
    return torch.polar(radius, angle)
def jl_by_xl(l_max: int, x: torch.Tensor) -> torch.Tensor:
    """Compute spherical bessel functions j_l(x)/x^l for each l <= l_max.
    This is optimized to calculate j_l up to l = 6 efficiently combining
    recursion relations and Taylor expansions to achieve both absolute
    and relative errors < 1e-15 for all x. (The errors will grow beyond
    l = 6 due to instability of the efficient recursion relation chosen,
    so do not use this routine for higher l without testing.)
    """
    # Output has one leading axis over l, matching x's shape/dtype/device:
    result = torch.empty((l_max + 1,) + x.shape, dtype=x.dtype, device=x.device)
    taylor_prefac = 1.0  # becomes 1/(2l+1)!! below: leading Taylor coefficient
    for l in range(l_max + 1):
        result_l = result[l]
        x_cut = 1.0 + 0.7 * l  # cutoff for Taylor expansion

        # Taylor series for small x:
        sel = torch.where(x <= x_cut)
        x_sel = x[sel]
        taylor_prefac /= 2 * l + 1
        term = torch.full_like(x_sel, taylor_prefac)  # first non-zero term
        series = term.clone().detach()
        x_sel_sq = x_sel * x_sel
        # Fixed term count per l; presumably sized so the series converges
        # below x_cut to the accuracy quoted in the docstring — see tests:
        for i in range(1, 9 + 2 * l):
            term *= x_sel_sq * (-0.25 / (i * (i + l + 0.5)))
            series += term
        result_l[sel] = series

        # Trigonometric formula for larger x:
        sel = torch.where(x > x_cut)
        x_sel = x[sel]
        if l == 0:
            result_l[sel] = torch.sin(x_sel) / x_sel  # j_0(x)
        elif l == 1:
            result_l[sel] = (
                result[0][sel] - torch.cos(x_sel)  # j_0(x) = sin(x)/x from before
            ) / (
                x_sel * x_sel
            )  # to j_1/x
        else:
            # Upward recursion in l (uses the two previously-filled rows):
            result_l[sel] = ((2 * l - 1) * result[l - 1][sel] - result[l - 2][sel]) / (
                x_sel * x_sel
            )  # j_l/x^l by recursion for l>1
    return result
@pytest.mark.mpi_skip
def test_interpolator():
    """Check quintic-spline interpolation accuracy of values and first derivative.

    The bounds dx**4 (values) and dx**3 (derivative) are empirical tolerances
    for this grid spacing and test function — presumably reflecting the
    spline's convergence order, but used here only as pass/fail thresholds.
    """
    dx, x_fine, y_fine, y_prime_fine, y_coeff = get_test_data()
    assert (y_fine - Interpolator(x_fine, dx, 0)(y_coeff)).norm() < dx**4
    assert (y_prime_fine - Interpolator(x_fine, dx, 1)(y_coeff)).norm() < dx**3
@pytest.mark.mpi_skip
def test_jl(plt=None):
    """Validate `jl_by_xl` against scipy's `spherical_jn` for x in [1e-3, 1e3].

    Errors are rescaled by max(x**(l+1), 1) so absolute accuracy is tested
    near x -> 0 and (approximately) relative accuracy at large x. If `plt`
    (a matplotlib.pyplot module) is given, per-l error curves are plotted.
    """
    x = np.logspace(-3, 3, 6000)
    l_max = 6
    # Reference j_l(x)/x^l values via scipy:
    jl_ref = [spherical_jn(l, x) / (x**l) for l in range(l_max + 1)]
    jl_test = jl_by_xl(l_max, torch.tensor(x, device=rc.device)).to(rc.cpu)
    err_mean_all = 0.0
    err_max_all = 0.0
    for l in range(l_max + 1):
        err_scale = np.maximum(x ** (l + 1), 1)  # to match forms near 0 and infty
        err = np.abs(jl_test[l] - jl_ref[l]) * err_scale
        err_mean = err.mean()
        err_max = err.max()
        if plt is not None:
            plt.plot(x, err, label=f"l = {l}")
        log.info(f"l: {l} ErrMean: {err_mean:.3e} ErrMax: {err_max:.3e}")
        err_mean_all += err_mean / (l_max + 1)  # average of per-l means
        err_max_all = max(err_max_all, err_max)
    log.info(f"l<={l_max} ErrMean: {err_mean_all:.3e} ErrMax: {err_max_all:.3e}")
    # Tolerances match the accuracy claimed in jl_by_xl's docstring:
    assert err_mean_all <= 1e-16
    assert err_max_all <= 2e-15


def main():
    """Invoke test_jl with plots and per-l errors."""
    import matplotlib.pyplot as plt

    log_config()
    rc.init()
    test_jl(plt)
    plt.xscale("log")
    plt.legend()
    plt.show()


if __name__ == "__main__":
    main()
@cache
def is_async_reduce_supported(is_cuda: bool) -> bool:
    """Determine whether async reduce is supported."""
    if is_cuda:
        # OpenMPI does not support Ireduce, Iallreduce etc. on GPU buffers
        # TODO: add similar checks for other MPI implementations
        return False  # "Open MPI" not in MPI.Get_library_version()
    else:
        return True  # cpu MPI likely always supports it


class Iallreduce_in_place:
    """Perform async Iallreduce in-place on `buf`.
    Provides async semantics and completes on wait() of the return value,
    even if the MPI implementation does not support Iallreduce.
    This is true of some MPI implementations on GPU buffers eg. OpenMPI."""

    def __init__(self, comm: MPI.Comm, buf: torch.Tensor, op: MPI.Op) -> None:
        """Start an in-place all-reduce of `buf` over `comm` with operation `op`.
        Call `wait` on this object to complete the reduction."""
        # Check if real async supported (or need to fake it):
        self.async_supported = is_async_reduce_supported(buf.is_cuda)
        self.local_reduce = op is MPI.SUM  # could optimize other Ops when needed
        self.local_reduce_op = torch.sum
        self.buf = buf
        if self.async_supported:
            # Initiate the MPI async operation:
            self.request = comm.Iallreduce(MPI.IN_PLACE, BufferView(buf), op)
        elif self.local_reduce:
            # Fake async sum: transpose data now (Ialltoallv), then on wait()
            # reduce locally and gather the slices back (Allgatherv).
            # Initiate an MPI transpose for subsequent local reduction:
            # Determine division for MPI transpose:
            n_procs = comm.Get_size()
            division = TaskDivision(
                n_tot=int(np.prod(buf.shape)), n_procs=n_procs, i_proc=comm.Get_rank()
            )
            send_counts = np.diff(division.n_prev)  # flat element counts per target proc
            send_offset = division.n_prev[:-1]
            recv_counts = division.n_mine  # same count received from every proc
            recv_offset = np.arange(n_procs) * recv_counts
            mpi_type = rc.mpi_type[buf.dtype]
            # Initiate MPI transpose:
            self.buf_t = torch.empty(
                (n_procs, division.n_mine), dtype=buf.dtype, device=buf.device
            )
            # NOTE(review): this send-side message spec is reused in wait() as
            # the receive spec of Allgatherv (same counts/offsets pattern):
            self.buf_view = (
                BufferView(buf),
                send_counts,
                send_offset,
                mpi_type,
            )
            self.request = comm.Ialltoallv(
                self.buf_view,
                (BufferView(self.buf_t), recv_counts, recv_offset, mpi_type),
            )
            self.n_mine = division.n_mine
            self.mpi_type = mpi_type
            self.comm = comm
        else:
            # Cannot fake async for this op: defer a blocking Allreduce to wait().
            # Remember inputs and return:
            self.comm = comm
            self.op = op

    def wait(self) -> torch.Tensor:
        """Complete the reduction and return `buf` (reduced in-place)."""
        if self.async_supported:
            # Complete the MPI async operation:
            self.request.Wait()
        elif self.local_reduce:
            # Complete MPI transpose:
            self.request.Wait()
            # Local reduction: sum this process's slice over all source procs:
            result = self.local_reduce_op(self.buf_t, dim=0)
            # Gather results:
            self.comm.Allgatherv(
                (BufferView(result), self.n_mine, 0, self.mpi_type),
                self.buf_view,  # back in original buffer
            )
        else:
            # Perform the blocking MPI operation now:
            self.comm.Allreduce(MPI.IN_PLACE, BufferView(self.buf), self.op)
        return self.buf
"BufferView", py::buffer_protocol(), "Expose buffer protocol on torch.Tensor for mpi4py.") 51 | .def(py::init(), 52 | "__init__(self, tensor: torch.Tensor)\n" 53 | "Construct the object inline, using BufferView(tensor) as an argument\n" 54 | "to MPI routines. Do not retain variables of type BufferView.") 55 | .def_buffer(&BufferView::getBuffer); 56 | } 57 | -------------------------------------------------------------------------------- /src/qimpy/mpi/_bufferview.pyi: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | # Stub for static type checking 4 | class BufferView: 5 | def __init__(self, tensor: torch.Tensor): 6 | pass 7 | -------------------------------------------------------------------------------- /src/qimpy/mpi/_sparse_matrices.py: -------------------------------------------------------------------------------- 1 | from typing import Sequence 2 | 3 | import numpy as np 4 | import scipy as sp 5 | import torch 6 | 7 | from qimpy import log, MPI, rc 8 | from qimpy.grid import Grid 9 | from qimpy.mpi import TaskDivision, BufferView 10 | from qimpy.grid._change import gather 11 | from torch import sparse_coo_tensor 12 | 13 | 14 | class SparseMatrixRight: 15 | split: TaskDivision 16 | indices: torch.Tensor 17 | values: torch.Tensor 18 | size: tuple # size of dense matrix 19 | iRow_mine: torch.Tensor 20 | iCol_mine: torch.Tensor 21 | value_mine: torch.Tensor 22 | M_mine: torch.Tensor 23 | comm: MPI.Comm #: Communicator to split matrix over 24 | n_procs: int #: Size of comm 25 | i_proc: int #: Rank within comm 26 | 27 | def __init__( 28 | self, 29 | indices: Sequence[int], 30 | values: torch.Tensor, 31 | *, 32 | comm: MPI.Comm, 33 | ) -> None: 34 | self.indices = indices 35 | self.values = values 36 | self.comm = comm 37 | self.n_procs, self.i_proc = ( 38 | (1, 0) if (comm is None) else (comm.Get_size(), comm.Get_rank()) 39 | ) 40 | iRow, iCol = indices 41 | self.size = (iRow.max() + 1, iCol.max() + 1) 42 | 
self.split = TaskDivision( 43 | n_tot=self.size[1].cpu(), n_procs=self.n_procs, i_proc=self.i_proc 44 | ) 45 | split = self.split 46 | sel = torch.nonzero( 47 | torch.logical_and(iCol >= split.i_start, iCol < split.i_stop)).flatten() 48 | 49 | self.iRow_mine = iRow[sel] 50 | self.iCol_mine = iCol[sel] - split.i_start 51 | self.value_mine = values[sel] 52 | indices_mine = torch.stack([self.iRow_mine,self.iCol_mine]) 53 | counts = np.diff(self.split.n_prev) 54 | nCols_mine = counts[self.i_proc] 55 | self.M_mine = sparse_coo_tensor(indices_mine, self.value_mine, 56 | size=(iRow.max()+1, nCols_mine), 57 | device=rc.device).to_sparse_csr() 58 | 59 | def getM(self): 60 | return sparse_coo_tensor(self.indices, self.values, 61 | device=rc.device)#.to_sparse_csr() 62 | 63 | def vecTimesMatrix(self, vec: torch.Tensor) -> torch.Tensor: 64 | if self.n_procs == 1: 65 | return vec @ self.M_mine 66 | assert len(vec.shape) == 1, "Need to pass 1D vector to vecTimesMatrix" 67 | result_mine = vec @ self.M_mine 68 | mpi_type = rc.mpi_type[self.M_mine.dtype] 69 | result = torch.empty(self.size[1], dtype=self.M_mine.dtype, device=rc.device) 70 | # casting self.split.n_prev[:-1] to np.array below was necessary on my laptop 71 | self.comm.Allgatherv( 72 | (BufferView(result_mine), result_mine.shape[0], mpi_type), 73 | (BufferView(result), np.diff(self.split.n_prev), np.array(self.split.n_prev[:-1]), mpi_type)) 74 | return result 75 | 76 | -------------------------------------------------------------------------------- /src/qimpy/mpi/_taskdivision.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | import numpy as np 4 | import torch 5 | 6 | import qimpy 7 | from qimpy import log, MPI 8 | 9 | 10 | class TaskDivision: 11 | """Division of a number of tasks over MPI.""" 12 | 13 | n_tot: int #: Total number of tasks over all processes 14 | n_procs: int #: Number of processes to split over 15 | i_proc: int #: Rank of 
class TaskDivision:
    """Division of a number of tasks over MPI."""

    n_tot: int  #: Total number of tasks over all processes
    n_procs: int  #: Number of processes to split over
    i_proc: int  #: Rank of current process
    n_each: int  #: Number of tasks on each process (till we run out)
    n_prev: np.ndarray  #: Cumulative task counts (n_procs+1 ints)
    i_start: int  #: Task start index on current process
    i_stop: int  #: Task stop index on current process
    n_mine: int  #: Number of tasks on current process

    def __init__(
        self, *, n_tot: int, n_procs: int, i_proc: int, name: Optional[str] = None
    ) -> None:
        """Divide `n_tot` tasks among `n_procs` processes.
        Report division and load balance if `name` is not None."""
        # Store inputs:
        self.n_tot = n_tot
        self.n_procs = n_procs
        self.i_proc = i_proc
        # Compute remaining attributes:
        self.n_each = qimpy.math.ceildiv(n_tot, n_procs)  # uniform block size
        # Cumulative counts, clipped at n_tot (trailing procs may get nothing):
        self.n_prev = np.minimum(n_tot, self.n_each * np.arange(n_procs + 1))
        self.i_start = self.n_prev[i_proc]
        self.i_stop = self.n_prev[i_proc + 1]
        self.n_mine = self.i_stop - self.i_start
        # Optionally report counts and imbalance:
        if name:
            # Fraction of padded capacity (n_each * n_procs) left unused:
            imbalance = 100.0 * (1.0 - n_tot / (self.n_each * n_procs))
            log.info(
                f"{name} division: n_tot: {n_tot} "
                f"n_each: {self.n_each} imbalance: {imbalance:.0f}%"
            )

    def whose(self, i: int) -> int:
        """Return process index `i_proc` responsible for task `i`"""
        return i // self.n_each  # valid for this uniform division only

    def whose_each(self, i: torch.Tensor) -> torch.Tensor:
        """Return process index `i_proc` responsible for each task in `i`"""
        return torch.div(i, self.n_each, rounding_mode="floor")

    def is_mine(self, i: int) -> bool:
        """Return whether current process is responsible for task i"""
        return self.i_start <= i < self.i_stop
def get_block_slices(n_tot: int, block_size: int) -> list[slice]:
    """Split `n_tot` tasks into blocks of size `block_size`.
    Returns a list of slices for each block.
    All blocks will have equal size (equal to `block_size`),
    except the last one that may be smaller."""
    if not n_tot:
        return []
    # One slice per block start; final stop clipped to n_tot:
    return [
        slice(start, min(start + block_size, n_tot))
        for start in range(0, n_tot, block_size)
    ]
class Waitable(Protocol[Twait]):
    """Generic protocol for objects with a `wait` method.
    Useful as a return type for asynchronous communication or compute functions.
    The function returns a `Waitable` object, with the actual results returned later
    by the `wait` method."""

    def wait(self) -> Twait:
        """Return the actual results of the asynchronous operation, once complete."""


@dataclass
class Waitless(Generic[Twait]):
    """Trivial (identity) `Waitable` for when result is immediately ready."""

    result: Twait  # precomputed result, returned unchanged by `wait`

    def wait(self) -> Twait:
        """Return the stored result immediately (nothing to wait for)."""
        return self.result
""" 5 | __all__ = ["sum", "prod", "min", "max", "all", "any"] 6 | 7 | from typing import Any 8 | 9 | import torch 10 | 11 | from qimpy import MPI 12 | 13 | 14 | def sum(v: torch.Tensor, comm: MPI.Comm) -> Any: 15 | """Global sum of tensor `v` distributed over `comm`.""" 16 | return comm.allreduce( 17 | torch.sum(v).item() if v.numel() else torch.zeros(1, dtype=v.dtype).item(), 18 | MPI.SUM, 19 | ) 20 | 21 | 22 | def prod(v: torch.Tensor, comm: MPI.Comm) -> Any: 23 | """Global product of tensor `v` distributed over `comm`.""" 24 | return comm.allreduce( 25 | torch.prod(v).item() if v.numel() else torch.ones(1, dtype=v.dtype).item(), 26 | MPI.PROD, 27 | ) 28 | 29 | 30 | def min(v: torch.Tensor, comm: MPI.Comm) -> Any: 31 | """Global minimum of tensor `v` distributed over `comm`.""" 32 | return comm.allreduce( 33 | torch.min(v).item() 34 | if v.numel() 35 | else ( 36 | torch.finfo(v.dtype).max 37 | if v.dtype.is_floating_point 38 | else torch.iinfo(v.dtype).max 39 | ), 40 | MPI.MIN, 41 | ) 42 | 43 | 44 | def max(v: torch.Tensor, comm: MPI.Comm) -> Any: 45 | """Global maximum of tensor `v` distributed over `comm`.""" 46 | return comm.allreduce( 47 | torch.max(v).item() 48 | if v.numel() 49 | else ( 50 | torch.finfo(v.dtype).min 51 | if v.dtype.is_floating_point 52 | else torch.iinfo(v.dtype).min 53 | ), 54 | MPI.MAX, 55 | ) 56 | 57 | 58 | def all(v: torch.Tensor, comm: MPI.Comm) -> Any: 59 | """Global minimum of tensor `v` distributed over `comm`.""" 60 | return comm.allreduce(torch.all(v).item() if v.numel() else True, MPI.LAND) 61 | 62 | 63 | def any(v: torch.Tensor, comm: MPI.Comm) -> Any: 64 | """Global maximum of tensor `v` distributed over `comm`.""" 65 | return comm.allreduce(torch.any(v).item() if v.numel() else False, MPI.LOR) 66 | -------------------------------------------------------------------------------- /src/qimpy/mpi/test_sparse.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | 
def main():
    """Spot-check SparseMatrixRight: compare distributed vector-matrix
    products against dense torch results (printed differences should be ~0)."""
    log_config()
    rc.init()
    torch.manual_seed(0)
    # Band-matrix test case, checked with original and transposed indices:
    indices = torch.stack([torch.arange(100), torch.arange(2, 102)])
    values = torch.randn(indices.shape[1])
    diffs = []
    for index_pair in (indices, torch.stack([indices[1], indices[0]])):
        sm = SparseMatrixRight(index_pair, values, comm=rc.comm)
        Md = sm.getM().to_dense()  # dense reference for comparison
        randvec = torch.randn(sm.size[0])
        diffs.append(sm.vecTimesMatrix(randvec) - randvec @ Md)
    if rc.is_head:
        for diff in diffs:
            print(diff)


if __name__ == "__main__":
    main()
def set_gpu_visibility(local_rank: int) -> int:
    """Update CUDA_VISIBLE_DEVICES to select one GPU based on `local_rank` of process.
    Return the device number of the selected GPU, and -1 if no GPUs specified.
    (Note that CUDA_VISIBLE_DEVICES must be set explicitly to use GPUs.)"""
    cuda_dev_str = os.environ.get("CUDA_VISIBLE_DEVICES")
    if cuda_dev_str:
        # Select one GPU round-robin by local rank and make it the only one
        # visible to torch. Tolerate whitespace / empty entries in the
        # comma-separated list (e.g. "0, 1," from hand-edited job scripts):
        cuda_devs = [int(s) for s in cuda_dev_str.split(",") if s.strip()]
        if cuda_devs:
            cuda_dev_selected = cuda_devs[local_rank % len(cuda_devs)]
            os.environ["CUDA_VISIBLE_DEVICES"] = str(cuda_dev_selected)
            return cuda_dev_selected
    # Disable GPUs unless explicitly requested:
    os.environ["CUDA_VISIBLE_DEVICES"] = ""
    return -1


# Process GPU visibility in environment BEFORE torch and MPI imports
for local_rank_key in ("OMPI_COMM_WORLD_LOCAL_RANK", "SLURM_LOCALID"):
    if local_rank_str := os.environ.get(local_rank_key):
        set_gpu_visibility(int(local_rank_str))
        break
def check_grid_shape(self: symmetries.Symmetries, shape: Sequence[int]) -> None:
    """Check whether grid dimensions are compatible with symmetries.

    Raises
    ------
    ValueError
        If incommensurate, raise ValueError with the error string
        including the reduced symmetry of specified grid shape
    """
    # Express each rotation in mesh coordinates: diag(S) . rot . diag(1/S)
    S = torch.tensor(shape, dtype=torch.double, device=rc.device)
    rot_mesh = S.view(1, 3, 1) * self.rot * (1.0 / S).view(1, 1, 3)

    # A symmetry is commensurate iff its mesh-coordinate rotation is integral:
    deviation = (rot_mesh - rot_mesh.round()).abs().sum(dim=(-2, -1))
    i_commensurate = torch.where(deviation <= self.tolerance)[0]
    if len(i_commensurate) == self.n_sym:
        return  # every symmetry survives on this grid
    raise ValueError(
        f"Grid dimensions {shape} commensurate only with a sub-group"
        f" of symmetries with indices (0-based): {i_commensurate.tolist()}"
    )
def get_grid_shape(self: symmetries.Symmetries, shape_min: np.ndarray) -> np.ndarray:
    """Smallest symmetric, FFT-suitable shape >= shape_min."""

    # Determine constraints on S due to symmetries:
    # ratios[j, k] is the gcd over all symmetry rotations of entry (j, k);
    # a non-zero value indicates dimensions j and k are coupled by symmetry.
    rot = self.rot.to(dtype=torch.int, device=rc.cpu).numpy()
    ratios = np.gcd.reduce(rot, axis=0)

    # Recursive function to set grid shapes compatible with one dimension
    def process(Sb, j):
        """Given an integer vector Sb where Sb[j] is known to be non-zero,
        return smallest integer vector that would be commensurate with
        symmetries, by setting connected dimensions to j as appropriate."""
        # Check dimensions constrained to j:
        k_linked = np.logical_or(ratios[j, :], ratios[:, j])
        k_linked[j] = False  # no need to check j against j
        for k in np.where(k_linked)[0]:
            if Sb[k]:  # pre-existing entry
                # Both divisibility conditions must hold for commensurability:
                if ((ratios[j, k] * Sb[j]) % Sb[k]) or ((ratios[k, j] * Sb[k]) % Sb[j]):
                    # Sb violates constraints between j and k
                    log.info(
                        "could not find anisotropic shape"
                        " commensurate with symmetries"
                    )
                    return np.ones(3, dtype=int)  # fall-back solution
            else:  # add k to this basis entry
                if ratios[k, j]:
                    # scale remaining dimensions
                    Sb_k_new = Sb[j] * np.maximum(1, ratios[j, k])
                    Sb *= ratios[k, j]
                    Sb[k] = Sb_k_new
                else:  # ratios[j, k] must be non-zero (since j, k linked)
                    Sb[k] = ratios[j, k] * Sb[j]
                Sb = process(Sb, k)  # recursively process now non-zero dim k
        return Sb

    # Expand symmetry-compatible grid dimension-by-dimension:
    shape = np.zeros(3, dtype=int)
    for j in range(3):
        if not shape[j]:  # skip dims already fixed by an earlier basis vector
            # start with a unit basis vector along j:
            Sb = np.zeros(3, dtype=int)
            Sb[j] = 1
            # make it symmetry-compatible:
            Sb = process(Sb, j)
            Sb //= np.gcd.reduce(Sb)  # remove common factors
            # check FFT suitability of Sb:
            i_nz = np.where(Sb)[0]
            if not np.logical_and.reduce([fft_suitable(s) for s in Sb[i_nz]]):
                log.info(
                    "could not find anisotropic shape with" " FFT-suitable factors"
                )
                Sb[i_nz] = 1  # fall back: uniform entries on the linked dims
            # determine smallest fft-suitable scale factor to reach shape_min
            scale_Sb = 2 * ceildiv(shape_min[i_nz], 2 * Sb[i_nz]).max()
            while not fft_suitable(scale_Sb):
                scale_Sb += 2  # move through even numbers
            shape += scale_Sb * Sb
    return shape
def get_lattice_point_group(
    Rbasis: torch.Tensor, periodic: tuple[bool, ...], tolerance: float
) -> torch.Tensor:
    """Return point group (n_sym x 3 x 3 tensor in lattice coordinates),
    given lattice vectors Rbasis (3 x 3 tensor) and if each of them is `periodic`."""

    # Reduce lattice vectors (shortest equivalent basis for robust detection):
    T = reduce_matrix33(Rbasis, tolerance)
    Rreduced = Rbasis @ T

    # Construct all possible matrices with entries from (-1, 0, 1):
    # 3^9 = 19683 candidate integer matrices, built by a 9-way meshgrid.
    entries = torch.tensor([-1, 0, 1], device=T.device, dtype=torch.double)
    matrices = torch.stack(torch.meshgrid([entries] * 9, indexing="ij"))
    matrices = matrices.reshape((9, -1)).T.reshape((-1, 3, 3))

    # Drop any matrices that couple periodic and non-periodic directions:
    # (checking each cyclically-adjacent pair covers all unequal pairs)
    for i_dir in range(3):
        j_dir = (i_dir + 1) % 3
        if periodic[i_dir] != periodic[j_dir]:
            not_coupled_ij = torch.logical_and(
                matrices[:, i_dir, j_dir] == 0.0, matrices[:, j_dir, i_dir] == 0.0
            )
            matrices = matrices[torch.where(not_coupled_ij)[0]]

    # Find matrices that preserve reduced metric:
    # a candidate M is a symmetry iff M^T g M == g within tolerance.
    metric = Rreduced.T @ Rreduced
    metric_new = matrices.transpose(-2, -1) @ (metric @ matrices)
    metric_err = ((metric_new - metric) ** 2).sum(dim=(1, 2))
    metric_err_limit = (tolerance**2) * (metric**2).sum()
    sym = matrices[torch.where(metric_err < metric_err_limit)[0]]

    # Transform to original (unreduced) coordinates:
    return (T @ sym) @ torch.linalg.inv(T)
def reduce_matrix33(M: torch.Tensor, tolerance: float) -> torch.Tensor:
    """Find integer T that minimizes norm(M @ T).
    All tensors are 3 x 3, and accuracy of minimum is set by tolerance."""
    assert M.shape == (3, 3)

    # Candidate elementary transformations: add +/-1 multiples of two
    # basis vectors to the remaining one, over all direction combinations.
    def make_candidate(k0: int, k1: int, k2: int, off1: int, off2: int) -> torch.Tensor:
        candidate = torch.eye(3)
        candidate[k1, k0] = off1
        candidate[k2, k0] = off2
        return candidate

    D = torch.stack(
        [
            make_candidate(k0, k1, k2, off1, off2)
            for k0, k1, k2 in ((0, 1, 2), (1, 2, 0), (2, 0, 1))
            for off1 in (-1, 0, 1)
            for off2 in (-1, 0, 1)
            if (off1, off2) != (0, 0)
        ]
    ).to(M.device)

    # Greedily apply the best candidate until the norm stops decreasing:
    T = torch.eye(3, device=M.device)
    MT = M.clone().detach()
    norm = (M**2).sum()
    while True:
        MT_new = MT @ D
        norm_new = (MT_new**2).sum(dim=(1, 2))
        i_best = norm_new.argmin()
        if norm_new[i_best] >= norm * (1.0 - tolerance):
            return T  # converged
        T = T @ D[i_best]
        MT = MT_new[i_best]
        norm = norm_new[i_best]


def symmetrize_lattice(
    self: symmetries.Symmetries, Rbasis: torch.Tensor
) -> torch.Tensor:
    """Symmetrize lattice vectors `Rbasis` (3 x 3)."""
    # Average the metric over all point-group rotations:
    metric = Rbasis.T @ Rbasis
    metric_sym = (self.rot.transpose(-2, -1) @ (metric @ self.rot)).mean(dim=0)
    # Map Rbasis through metric^(-1/2), then metric_sym^(1/2),
    # constructed via eigen-decompositions of the two metrics:
    E, V = torch.linalg.eigh(metric)
    E_sym, V_sym = torch.linalg.eigh(metric_sym)
    metric_inv_sqrt = V @ ((1.0 / E.sqrt()).diag_embed() @ V.T)
    metric_sym_sqrt = V_sym @ (E_sym.sqrt().diag_embed() @ V_sym.T)
    return Rbasis @ metric_inv_sqrt @ metric_sym_sqrt


def symmetrize_matrix(self: symmetries.Symmetries, mat: torch.Tensor) -> torch.Tensor:
    """Symmetrize Cartesian matrix `mat` (3 x 3).
    Suitable to symmetrize, e.g., stress tensors.
    """
    # Average mat transformed by every Cartesian rotation of the group:
    rot_cart = self.rot_cart
    transformed = rot_cart @ mat @ rot_cart.transpose(-2, -1)
    return transformed.mean(dim=0)
class Advect(torch.nn.Module):
    """Upwind advection derivative operator with minmod slope limiting,
    applied along one axis of a batched density tensor."""

    def __init__(self, slope_lim_theta: float = 2.0, cent_diff_deriv: bool = False):
        # Initialize convolution that computes slopes using 3 difference formulae.
        # Here, `slope_lim_theta` controls the scaling of the forward/backward
        # difference formulae relative to the central difference one.
        # This convolution takes input Nbatch x 1 x N and produces output with
        # dimensions Nbatch x 3 x (N-2), containing backward, central and forward
        # difference computations of the slope.
        super().__init__()
        weight_data = torch.tensor(
            [
                [-slope_lim_theta, slope_lim_theta, 0.0],
                [-0.5, 0.0, 0.5],
                [0.0, -slope_lim_theta, slope_lim_theta],
            ],
            device=rc.device,
        )
        if cent_diff_deriv:
            # Central-difference-only variant: single output channel.
            # NOTE(review): Conv1d is declared with kernel size 1 but the
            # weight assigned below is 1 x 1 x 3, so the functional conv uses
            # kernel size 3 -- relies on direct `.data` assignment; confirm.
            self.slope_conv = torch.nn.Conv1d(1, 1, 1, bias=False)
            weight_data = weight_data[1]
        else:
            self.slope_conv = torch.nn.Conv1d(1, 3, 3, bias=False)
        self.slope_conv.weight.data = weight_data.view(-1, 1, 3)  # add in_channels dim
        self.slope_conv.weight.requires_grad = False  # fixed stencil, not trainable

    def slope_minmod(self, f: torch.Tensor) -> torch.Tensor:
        """Compute slope of `f` along its last axis with a minmod limiter."""
        # Flatten all but last axis into a single batch dimension:
        batch_shape = f.shape[:-1]
        f = f.flatten(0, -2)[:, None]  # n_batch x 1 x n_axis
        # Compute slopes by convolution and apply minmod filter:
        slope = minmod(self.slope_conv(f), axis=1)  # n_batch x n_axis
        return slope.unflatten(0, batch_shape)  # restore dimensions

    def forward(self, rho: torch.Tensor, v: torch.Tensor, axis: int) -> torch.Tensor:
        """Compute v * d`rho`/dx, with velocity `v` along `axis`."""
        # Bring active axis to end
        rho = rho.swapaxes(axis, -1)
        v = v.swapaxes(axis, -1)

        # Reconstruction
        half_slope = 0.5 * self.slope_minmod(rho)

        # Central difference from half points & Riemann selection based on velocity:
        # (selects the upwind-side difference depending on the sign of v)
        rho_diff = rho[..., 1:-1].diff(dim=-1)
        half_slope_diff = half_slope.diff(dim=-1)
        result_minus = (rho_diff - half_slope_diff)[..., 1:]
        result_plus = (rho_diff + half_slope_diff)[..., :-1]
        delta_rho = torch.where(v < 0.0, result_minus, result_plus)
        # NOTE(review): returns -(v * delta_rho), i.e. the advection
        # contribution to d rho/dt rather than +v*drho/dx -- confirm sign
        # convention against callers.
        return -(v * delta_rho).swapaxes(axis, -1)  # original axis order; overall sign


def minmod(f: torch.Tensor, axis: int) -> torch.Tensor:
    """Return min|`f`| along `axis` when all same sign, and 0 otherwise."""
    fmin, fmax = torch.aminmax(f, dim=axis)
    return torch.where(
        fmin < 0.0,
        torch.clamp(fmax, max=0.0),  # fmin < 0, so fmax if also < 0, else 0.
        fmin,  # fmin >= 0, so this is the min mod
    )
class TensorList:
    """List of tensors supporting scalar multiplication and pairwise addition."""

    data: list[torch.Tensor]  # underlying list of tensors

    def __init__(self, data: Iterator[torch.Tensor]):
        self.data = list(data)

    def __iter__(self):
        yield from self.data

    def __getitem__(self, index: int) -> torch.Tensor:
        return self.data[index]

    def __mul__(self, scale: float) -> TensorList:
        # Scale each contained tensor independently:
        return TensorList(scale * tensor for tensor in self.data)

    def __rmul__(self, scale: float) -> TensorList:
        return self.__mul__(scale)

    def __add__(self, other: TensorList) -> TensorList:
        # Element-wise sum of corresponding tensors:
        return TensorList(t1 + t2 for t1, t2 in zip(self.data, other.data))
class PackedHermitian:
    """Packed real representation of Hermitian matrices."""

    def __init__(self, N: int) -> None:
        """Initialize representation for N x N hermitian matrices."""
        # Flattened row/column index pairs over the full N x N matrix:
        i = np.arange(N)
        i, j = (x.flatten() for x in np.meshgrid(i, i, indexing="ij"))
        i1 = i * N + j  # direct entry
        i2 = j * N + i  # swapped entry

        # Separate real / imaginary part from complex:
        # lower triangle (i >= j) stores the real part of entry (i, j);
        # upper triangle stores the imaginary part of the swapped entry.
        self.pack_index = torch.from_numpy(+np.where(i >= j, 2 * i1, 2 * i2 + 1)).to(
            device=rc.device
        )

        # Reconstruct complex hermitian version:
        # for each complex entry, pick where its real and imaginary parts were
        # stored in the packed layout, with a sign flip below the diagonal.
        unpack_real = np.where(i >= j, i1, i2)
        unpack_imag = np.where(i >= j, i2, i1)
        unpack_imag_sign = np.sign(i - j)  # zero on diagonal => purely real
        self.unpack_index = torch.from_numpy(
            np.stack((unpack_real, unpack_imag), axis=-1).flatten()
        ).to(device=rc.device)
        self.unpack_sign = torch.from_numpy(
            np.stack((np.ones_like(i1), unpack_imag_sign), axis=-1).flatten()
        ).to(device=rc.device)

        # Construct matrices for transforming super-operators to packed form:
        # R maps packed to complex flattened entries; Rinv is its inverse.
        R = np.zeros((N * N, N * N), dtype=complex)
        Rinv = np.zeros_like(R)
        # --- diagonal terms:
        diag = np.where(i == j)[0]
        d1 = d2 = i1[diag]
        R[d1, d2] = 1.0
        Rinv[d1, d2] = 1.0
        # --- off-diagonal terms
        offdiag = np.where(i > j)[0]
        o1 = i1[offdiag]
        o2 = i2[offdiag]
        R[o1, o1] = 1.0
        R[o1, o2] = 1.0j
        R[o2, o1] = 1.0
        R[o2, o2] = -1.0j
        Rinv[o1, o1] = +0.5
        Rinv[o1, o2] = +0.5
        Rinv[o2, o1] = -0.5j
        Rinv[o2, o2] = +0.5j
        self.R = torch.from_numpy(R).to(rc.device)
        self.Rinv = torch.from_numpy(Rinv).to(rc.device)
        self.N = N
        # Weights counting each off-diagonal entry twice (it represents a
        # conjugate pair in the Hermitian matrix):
        self.w_overlap = torch.from_numpy(
            np.where(i == j, 1.0, 2.0).reshape((N, N))
        ).to(rc.device)

    def pack(self, m: torch.Tensor) -> torch.Tensor:
        """Pack ... x N x N complex hermitian tensor to real version."""
        # View real/imag parts as a trailing axis, then gather packed layout:
        buf = torch.view_as_real(m).flatten(-3, -1)
        return buf[..., self.pack_index].view(m.shape)

    def unpack(self, m: torch.Tensor) -> torch.Tensor:
        """Unpack ... x N x N real tensor to complex hermitian version."""
        result = m.flatten(-2, -1)[..., self.unpack_index] * self.unpack_sign
        return torch.view_as_complex(result.view(m.shape + (2,)))

    def apply_packed(self, op: torch.Tensor, vec: torch.Tensor) -> torch.Tensor:
        """Apply packed super-operator `op` to Hermitian matrix `vec`:
        pack, multiply flattened, and unpack back to complex form."""
        vec_packed = self.pack(vec).flatten()
        return self.unpack((op @ vec_packed).view(vec.shape))