├── .coveragerc ├── .github └── workflows │ ├── cd.yml │ ├── ci.yml │ └── validate_release_tag.py ├── .gitignore ├── .pre-commit-config.yaml ├── .style.yapf ├── AUTHORS.md ├── CHANGELOG.md ├── LICENSE.txt ├── MANIFEST.in ├── README.md ├── codecov.yml ├── parsevasp ├── __init__.py ├── base.py ├── chgcar.py ├── constants.py ├── doscar.py ├── eigenval.py ├── incar.py ├── incar.yml ├── kpoints.py ├── outcar.py ├── poscar.py ├── potcar.py ├── stream.py ├── stream.yml ├── utils.py └── vasprun.py ├── pyproject.toml └── tests ├── CHGCAR ├── CHGCAR.ncl ├── CHGCAR.spin ├── DOSCAR ├── DOSCAR.ncl ├── DOSCAR.nopdos ├── DOSCAR.spin ├── DOSCAR.spin_pdos ├── EIGENVAL ├── INCAR ├── KPOINTS ├── KPOINTSEXP ├── KPOINTSGRG ├── KPOINTSLINE ├── OUTCAR ├── OUTCAR.converged ├── OUTCAR.crashed ├── OUTCAR.nelm-breach-consistent ├── OUTCAR.nelm-breach-partial ├── OUTCAR.not-converged ├── OUTCAR.unfinished ├── OUTCAR_MAG ├── OUTCAR_MAG_SINGLE ├── POSCAR ├── POSCARNAMES ├── POSCARVEL ├── POTCAR ├── __init__.py ├── basic.xml ├── basicpartial.xml ├── basicrelax.xml ├── basicspin.xml ├── dielectrics.xml ├── disp.xml ├── disp_details.xml ├── gw.xml ├── localfield.xml ├── magmom.xml ├── overflow.xml ├── specific.xml ├── stdout ├── stdout_ZBRENT ├── stdout_nostart ├── test_chgcar.py ├── test_doscar.py ├── test_eigenval.py ├── test_incar.py ├── test_kpoints.py ├── test_outcar.py ├── test_poscar.py ├── test_potcar.py ├── test_stream.py ├── test_xml_event.py ├── test_xml_regular.py ├── utils.py └── velocities.xml /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | source = parsevasp 3 | parallel = True 4 | 5 | [report] 6 | omit = 7 | *test* 8 | 9 | [html] 10 | directory = coverage/html 11 | 12 | [paths] 13 | source = 14 | .tox/*/lib/python*/site-packages/parsevasp 15 | -------------------------------------------------------------------------------- /.github/workflows/cd.yml: 
-------------------------------------------------------------------------------- 1 | name: cd 2 | 3 | on: 4 | push: 5 | tags: 6 | - v[0-9]+.[0-9]+.[0-9]+* 7 | 8 | jobs: 9 | validate-release-tag: 10 | if: github.repository == 'aiida-vasp/parsevasp' && github.event_name == 'push' && startsWith(github.event.ref, 'refs/tags') 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout source 14 | uses: actions/checkout@v4 15 | - name: Set up Python 16 | uses: actions/setup-python@v5 17 | with: 18 | python-version: '3.10' 19 | - name: Validate the tag version against the package version 20 | run: python .github/workflows/validate_release_tag.py $GITHUB_REF 21 | 22 | publish: 23 | name: Publish to PyPI 24 | runs-on: ubuntu-latest 25 | needs: [validate-release-tag] 26 | steps: 27 | - name: Checkout source 28 | uses: actions/checkout@v4 29 | - name: Set up Python 30 | uses: actions/setup-python@v5 31 | with: 32 | python-version: '3.10' 33 | - name: Install flit 34 | run: pip install flit~=3.4 35 | - name: Build and publish 36 | run: flit publish 37 | env: 38 | FLIT_USERNAME: __token__ 39 | FLIT_PASSWORD: ${{ secrets.PYPI_KEY }} 40 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: parsevasp 2 | 3 | on: [push, pull_request] 4 | 5 | jobs: 6 | tests: 7 | runs-on: ubuntu-latest 8 | timeout-minutes: 30 9 | strategy: 10 | matrix: 11 | python: ['3.9', '3.10', '3.11', '3.12'] 12 | steps: 13 | - uses: actions/checkout@v4 14 | - name: Set up Python 15 | uses: actions/setup-python@v5 16 | with: 17 | python-version: ${{ matrix.python }} 18 | - name: Install pytset 19 | run: pip install pytest pytest-cov 20 | - name: Install parsevasp 21 | run: pip install -e . 
-vvv 22 | - name: Run pytest 23 | run: pytest --cov=./ --cov-report=xml tests 24 | - name: Upload coverage to Codecov 25 | uses: codecov/codecov-action@v3 26 | with: 27 | verbose: true 28 | -------------------------------------------------------------------------------- /.github/workflows/validate_release_tag.py: -------------------------------------------------------------------------------- 1 | """Validate that the version in the tag label matches the version of the package.""" 2 | 3 | import argparse 4 | import ast 5 | from pathlib import Path 6 | 7 | 8 | def get_version_from_module(content: str) -> str: 9 | """Get the ``__version__`` attribute from a module. 10 | 11 | .. note:: This has been adapted from :mod:`setuptools.config`. 12 | """ 13 | try: 14 | module = ast.parse(content) 15 | except SyntaxError as exception: 16 | raise IOError('Unable to parse module.') from exception 17 | 18 | try: 19 | return next( 20 | ast.literal_eval(statement.value) 21 | for statement in module.body 22 | if isinstance(statement, ast.Assign) 23 | for target in statement.targets 24 | if isinstance(target, ast.Name) and target.id == '__version__' 25 | ) 26 | except StopIteration as exception: 27 | raise IOError('Unable to find the `__version__` attribute in the module.') from exception 28 | 29 | 30 | if __name__ == '__main__': 31 | parser = argparse.ArgumentParser() 32 | parser.add_argument('GITHUB_REF', help='The GITHUB_REF environmental variable') 33 | args = parser.parse_args() 34 | TAG_PREFIX = 'refs/tags/v' 35 | assert args.GITHUB_REF.startswith(TAG_PREFIX), f'GITHUB_REF should start with "{TAG_PREFIX}": {args.GITHUB_REF}' 36 | tag_version = args.GITHUB_REF[len(TAG_PREFIX) :] 37 | package_version = get_version_from_module(Path('parsevasp/__init__.py').read_text(encoding='utf-8')) 38 | error_message = f'The tag version `{tag_version}` is different from the package version `{package_version}`' 39 | assert tag_version == package_version, error_message 40 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .log 2 | .ps.pdf 3 | *.pyc 4 | build/ 5 | dist/ 6 | *.egg-info/ 7 | *MOD 8 | .cache/ 9 | .pytest_cache/ 10 | .tox/ 11 | __pycache__ 12 | .coverage_ 13 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | repos: 3 | - repo: https://github.com/pre-commit/pre-commit-hooks 4 | rev: v5.0.0 5 | hooks: 6 | - id: double-quote-string-fixer 7 | - id: end-of-file-fixer 8 | - id: fix-encoding-pragma 9 | args: ['--remove'] 10 | - id: mixed-line-ending 11 | - id: trailing-whitespace 12 | - id: check-yaml 13 | - id: check-added-large-files 14 | - id: forbid-new-submodules 15 | 16 | - repo: https://github.com/astral-sh/ruff-pre-commit 17 | # Ruff version. 18 | rev: v0.11.11 19 | hooks: 20 | - id: ruff 21 | args: [ "--fix", "--show-fixes" ] 22 | - id: ruff-format 23 | -------------------------------------------------------------------------------- /.style.yapf: -------------------------------------------------------------------------------- 1 | [style] 2 | based_on_style = google 3 | align_closing_bracket_with_visual_indent = true 4 | coalesce_brackets = true 5 | column_limit = 120 6 | dedent_closing_brackets = true 7 | indent_dictionary_value = false 8 | split_arguments_when_comma_terminated = true 9 | -------------------------------------------------------------------------------- /AUTHORS.md: -------------------------------------------------------------------------------- 1 | # Authors 2 | 3 | Parsevasp is maintained by Espen Flage-Larsen, Sigma2, Norway. 
4 | 5 | 6 | ## Development Team 7 | * Espen Flage-Larsen, Sigma2 (maintainer) 8 | * Bonan Zhu, University College London 9 | * Atsushi Togo, National Institute for Materials Science 10 | * Jonathan Chico, Sandvik Coromant 11 | 12 | ## Acknowledgements 13 | Special thanks go to the following individuals and organizations that helps or helped in the development. 14 | 15 | * Kohei Shinohara, fixed messages for OUTCAR parser and partial DOS mapping for the DOSCAR parser. 16 | * Ilya Fedorov, updates to the POSCAR parser to include scaling. 17 | * Adair Nicolson, bugfix of the partial density of states. 18 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 4 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 5 | 6 | ## [3.2.1] - 2023-06-29 7 | 8 | ### Changed 9 | - More docstring about the code from `pymatgen` code for the `POTCAR` metadata parser. 10 | - Changed from `node` 12 to 16 for the Github Actions. 11 | 12 | ## [3.2.0] - 2023-06-05 13 | 14 | ### Added 15 | - Added a way to parse generalized k-point grid, where the generators are typically used. 16 | - `POTCAR` metadata parser from `pymatgen` to not having to install `pymatgen` as dependency. 17 | 18 | ### Changed 19 | - Bugfix in the partial density of states parsing from `DOSCAR`. 20 | - Improved detection of a successful VASP start (not full execution). 21 | - Moved to a `toml` description of the package. 22 | - Changed contact info for maintainer. 23 | - Dependencies of the `tests` and `pre-commit` extras follow versions of `aiida-vasp` and `aiida-core`. Except for this release where we use a more recent `pylint>=2.15`. 24 | 25 | ### Removed 26 | - Future and past dependency. 
Old backwards compatibility for Python 2 that was dormant in the code. 27 | 28 | ## [3.1.0] - 2022-05-27 29 | 30 | ### Added 31 | - Added NBANDS to `run_status`. 32 | - Added entry `bandocc` to the stream parser to be notified if the topmost band is occupied. 33 | 34 | ## [3.0.0] - 2022-05-24 35 | 36 | ### Added 37 | - Added CHGCAR parser which dumps data in numpy arrays. 38 | - Added EIGENVAL and DOSCAR parser. 39 | 40 | ### Changed 41 | - Removed symmetry output. What remains is the number of space group operations, original cell type and symmetrized cell type. 42 | - Fixed parsing of wildcard or N/A containing entries. 43 | - Various other bugfixes. 44 | 45 | ## [2.0.1] - 2021-01-23 46 | 47 | ### Changed 48 | - Fixed the check for the truncated xml file that contained a bug. 49 | 50 | ## [2.0.0] - 2021-01-04 51 | 52 | ### Added 53 | - Posibility to return multiple total energy types. 54 | - Posibility to parse the total energies for each electronic step. Since the total energy array for ionic and electronic steps is staggered,, we flatten it and supply an additional array with key `electronic_steps` where each entry indicates how many electronic steps was performed for each ionic step. Using this, the staggered array can be rebuilt. 55 | 56 | ### Changed 57 | - The return of `get_energies` is now a dict in order to be able to return multiple energy types. 58 | - `final` key for the ionic steps was changed to `last`. 59 | - For static runs, there is now only one ionic entry that is returned. 60 | 61 | ## [1.2.0] - 2020-12-29 62 | 63 | ### Added 64 | - Included additional regex errors for the stream parser 65 | - Posibility to write cartesian coordinates to the POSCAR 66 | - Added parsing of additional total energies, all electronic steps and the final which should be similar to the final electronic step without corrections. 67 | - Parsing of number of electronic steps. 68 | - Parsing of timing data. 69 | - Parsing of VASP version. 
70 | 71 | ## [1.1.2] - 2020-10-28 72 | 73 | ### Changed 74 | - Bugfix in the history flag for the stream parser. 75 | 76 | ### Added 77 | - Property function for the stream parser to return a bool if the xml file was truncated. 78 | 79 | ## [1.1.1] - 2020-09-07 80 | 81 | ### Changed 82 | - Fixed docstring 83 | 84 | ## [1.1.0] - 2020-08-25 85 | 86 | ### Added 87 | - Parser for the standard stream of VASP. 88 | 89 | ## [1.0.0] - 2020-06-10 90 | 91 | Considered the first stable release. 92 | 93 | ### Added 94 | - Magnetization from [@JPchico](https://github.com/JPchico) 95 | - Enabled GitHub Actions 96 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Espen Flage-Larsen 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include setup.json 2 | include parsevasp/incar.yml 3 | include parsevasp/stream.yml 4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Parsevasp - a parser for VASP 2 | 3 | [![PyPI](https://img.shields.io/pypi/dm/parsevasp.svg?maxAge=2592000)](https://pypi.org/project/parsevasp) 4 | [![PyPI](https://img.shields.io/pypi/pyversions/parsevasp)](https://pypi.org/project/parsevasp) 5 | [![codecov](https://codecov.io/gh/aiida-vasp/parsevasp/branch/develop/graph/badge.svg)](https://codecov.io/gh/aiida-vasp/parsevasp) 6 | [![GitHub Actions](https://github.com/aiida-vasp/parsevasp/workflows/parsevasp/badge.svg)](https://github.com/aiida-vasp/parsevasp/actions) 7 | 8 | A parser for VASP using lxml or fallback elementTree. 
9 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | codecov: 2 | notify: 3 | wait_for_ci: yes # default 4 | require_ci_to_pass: yes # default 5 | 6 | coverage: 7 | precision: 2 # default 8 | round: up 9 | range: "70...100" # default 10 | status: 11 | project: 12 | default: 13 | threshold: 1.0% 14 | patch: 15 | default: 16 | target: 50% 17 | -------------------------------------------------------------------------------- /parsevasp/__init__.py: -------------------------------------------------------------------------------- 1 | """A parser for VASP.""" 2 | 3 | __version__ = '3.3.0' 4 | 5 | # import parsevasp.incar 6 | # import parsevasp.kpoints 7 | # import parsevasp.outcar 8 | # import parsevasp.poscar 9 | # import parsevasp.stream 10 | # import parsevasp.vasprun 11 | -------------------------------------------------------------------------------- /parsevasp/base.py: -------------------------------------------------------------------------------- 1 | """Base class to handle VASP files.""" 2 | 3 | import logging 4 | import os 5 | import sys 6 | from abc import ABC, abstractmethod 7 | 8 | 9 | class BaseParser(ABC): 10 | """Base class to handle VASP files.""" 11 | 12 | ERROR_USE_ONE_ARGUMENT = 10 13 | ERROR_NO_ENTRIES = 11 14 | ERROR_NO_KEY = 12 15 | ERROR_KEY_INVALID_TYPE = 13 16 | ERROR_FILE_NOT_FOUND = 14 17 | ERROR_EMPTY_HANDLER = 15 18 | ERROR_EMPTY_FILE_PATH = 16 19 | ERROR_MESSAGES = { 20 | ERROR_USE_ONE_ARGUMENT: 'Supply only one argument when initializing the parser class.', 21 | ERROR_NO_ENTRIES: "There is no 'entries' class attribute.", 22 | ERROR_NO_KEY: "The correct key in 'entries' is missing.", 23 | ERROR_KEY_INVALID_TYPE: 'The key has a wrong type.', 24 | ERROR_FILE_NOT_FOUND: 'The path did not contain a file.', 25 | ERROR_EMPTY_HANDLER: 'The supplied file handler is empty.', 26 | ERROR_EMPTY_FILE_PATH: 'The supplied file 
path is empty.', 27 | } 28 | 29 | def __init__(self, file_path=None, file_handler=None, logger=None): 30 | """Initialize a general parser object. Used as a base class for the specific parser classes." 31 | 32 | Parameters 33 | ---------- 34 | file_path : string, optional 35 | The file path in which the INCAR is read. 36 | file_hander: object 37 | A valid file handler object. 38 | logger : object, optional 39 | A standard Python logger object. 40 | 41 | """ 42 | 43 | self._file_path = file_path 44 | self._file_handler = file_handler 45 | 46 | # set logger 47 | if logger is not None: 48 | self._logger = logger 49 | else: 50 | self._logger = self._setup_logger(logging.DEBUG) 51 | 52 | def _setup_logger(self, level): 53 | """Setup a logger for this class""" 54 | logger = logging.getLogger(self.__module__ + '.' + self.__class__.__name__) 55 | logger.setLevel(level) 56 | if not logger.handlers: 57 | handler = logging.StreamHandler() 58 | handler.setLevel(level) 59 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 60 | handler.setFormatter(formatter) 61 | logger.addHandler(handler) 62 | return logger 63 | 64 | def write(self, **kwargs): 65 | """Write respective content as files using a path or handler. 66 | 67 | Parameters 68 | ---------- 69 | file_path : str, optional 70 | A string containing the file path to the file that is going to be parsed. 71 | file_handler : object, optional 72 | A file like object that acts as a handler for the content to be parsed. 73 | other : optional 74 | Any other argument than file path or handler is passed to the specific 75 | `_write` function. 76 | 77 | One has to provide either a file path or a file handler. 78 | 79 | """ 80 | 81 | # Check that we only supply either or of path and handler. 
82 | if ('file_path' in kwargs and 'file_handler' in kwargs) or ( 83 | 'file_path' not in kwargs and 'file_handler' not in kwargs 84 | ): 85 | self._logger.error(self.ERROR_MESSAGES[self.ERROR_USE_ONE_ARGUMENT]) 86 | sys.exit(self.ERROR_USE_ONE_ARGUMENT) 87 | 88 | file_path = kwargs.pop('file_path', '') 89 | file_handler = kwargs.pop('file_handler', '') 90 | if file_path: 91 | # Open file 92 | file_handler = open_close_file_handler(file_path, status='w', logger=self._logger) 93 | 94 | # Do the write for each specific content parser _write function using handler, also 95 | # bring any extra arguments. 96 | self._write(file_handler, **kwargs) 97 | 98 | if file_path: 99 | # Close file 100 | open_close_file_handler(file_handler=file_handler, logger=self._logger) 101 | 102 | def _check_file(self, file_path=None): 103 | """ 104 | Check if a file exists 105 | 106 | Parameters 107 | ---------- 108 | file_path : string, optional 109 | The path of the file to be checked. If not supplied, the file path set will be used. 110 | 111 | Returns 112 | ------- 113 | None 114 | 115 | """ 116 | if file_path is None: 117 | file_path = self._file_path 118 | 119 | if not os.path.isfile(file_path): 120 | self._logger.error( 121 | f'{self.ERROR_MESSAGES[self.ERROR_FILE_NOT_FOUND]} The file requested from ' 122 | f'path {file_path} was not found.' 123 | ) 124 | sys.exit(self.ERROR_FILE_NOT_FOUND) 125 | 126 | @abstractmethod 127 | def _write(self, file_handler, **kwargs): 128 | pass 129 | 130 | 131 | def open_close_file_handler(file_name='', file_handler=None, status=None, encoding='utf8', logger=None): 132 | """ 133 | Open and close files. 134 | 135 | Parameters 136 | ---------- 137 | file_name : str, optional 138 | The name of the file to be handled (defaults to ''). 139 | file_handler : object, optional 140 | An existing `file` object. If not supplied a file is 141 | created. Needed for file close, otherwise not. 
142 | status : str, optional 143 | The string containing the status to write, read, append etc. 144 | If not supplied, assume file close and `file_handler` need 145 | to be supplied. 146 | encoding : str, optional 147 | Specify the encoding. Defaults to utf8. 148 | logger : object, optional 149 | A logger object to use. 150 | 151 | Returns 152 | ------- 153 | file_handler : object 154 | If `status` is supplied 155 | A `file` object 156 | 157 | """ 158 | 159 | if logger is None: 160 | logger = logging.getLogger(sys._getframe().f_code.co_name) 161 | 162 | if status is None: 163 | if file_handler is None: 164 | logger.error(BaseParser.ERROR_MESSAGES[BaseParser.ERROR_EMPTY_HANDLER]) 165 | sys.exit(BaseParser.ERROR_EMPTY_HANDLER) 166 | file_handler.close() 167 | else: 168 | try: 169 | file_handler = open(file_name, status, encoding=encoding) 170 | return file_handler 171 | except IOError: 172 | logger.error( 173 | f'{BaseParser.ERROR_MESSAGES[BaseParser.ERROR_FILE_NOT_FOUND]} The file in question is: {file_name}' 174 | ) 175 | sys.exit(BaseParser.ERROR_FILE_NOT_FOUND) 176 | -------------------------------------------------------------------------------- /parsevasp/chgcar.py: -------------------------------------------------------------------------------- 1 | """Handle CHGCAR.""" 2 | 3 | import sys 4 | 5 | import numpy as np 6 | 7 | from parsevasp import utils 8 | from parsevasp.base import BaseParser 9 | 10 | 11 | class Chgcar(BaseParser): 12 | """Class to handle CHGCAR.""" 13 | 14 | def __init__(self, file_path=None, file_handler=None, logger=None): 15 | """ 16 | Initialize an CHGCAR object and set content as a dictionary. 17 | 18 | file_path : string, optional 19 | A string containing the file path to the file that is going to be parsed. 20 | file_handler : object, optional 21 | A file like object that acts as a handler for the content to be parsed. 22 | density : bool, optional 23 | If True, we divide the read CHGCAR data by the unit cell volume. (not implemented.) 
24 | logger : object, optional 25 | A logger object if you would like to use an external logger for messages 26 | ejected inside this parser. 27 | 28 | """ 29 | 30 | super().__init__(file_path=file_path, file_handler=file_handler, logger=logger) 31 | 32 | # check that at least one is supplied 33 | if self._file_path is None and self._file_handler is None: 34 | self._logger.error(self.ERROR_MESSAGES[self.ERROR_USE_ONE_ARGUMENT]) 35 | sys.exit(self.ERROR_USE_ONE_ARGUMENT) 36 | 37 | if self._file_path is None and self._file_handler is None: 38 | self._logger.error(self.ERROR_MESSAGES[self.ERROR_USE_ONE_ARGUMENT]) 39 | sys.exit(self.ERROR_USE_ONE_ARGUMENT) 40 | 41 | self._data = { 42 | 'total': None, 43 | 'magnetization': None, 44 | } 45 | 46 | # parse parse parse 47 | self._parse() 48 | 49 | def _write(self, *args, **kwargs): 50 | """Write not supported for CHGCAR.""" 51 | raise NotImplementedError('Writing CHGCAR files is not supported.') 52 | 53 | def _parse(self): 54 | """Perform the actual parsing by parsing from file like content.""" 55 | self._from_file() 56 | 57 | def _from_file(self): 58 | """ 59 | Load CHGCAR into NumPy arrays. 60 | 61 | This method is presently not optimized to use as little memory as possible. 
62 | 63 | """ 64 | content = utils.read_from_file(self._file_path, self._file_handler, lines=False) 65 | # Extract header 66 | temp = content.split('\n\n', 1) 67 | header = temp[0] 68 | content = temp[1] 69 | header = header.split('\n') 70 | # comment = header[0] 71 | scaling = float(header[1]) 72 | lattice_vectors = np.zeros((3, 3)) 73 | for i in range(3): 74 | # Read and scale lattice vectors 75 | lattice_vectors[i] = scaling * np.array([float(item) for item in header[i + 2].split()]) 76 | # Calculate volume for later scaling 77 | volume = 1.0 78 | if volume: 79 | volume = np.dot(lattice_vectors[0], np.cross(lattice_vectors[1], lattice_vectors[2])) 80 | # First line of content should now be NGXF, NGYF, NGZF 81 | temp = content.split('\n', 1) 82 | ngf_string = temp[0] 83 | content = temp[1] 84 | ngf = [int(item) for item in ngf_string.split()] 85 | # Need to reverse as CHGCAR is x fastest, while we want 86 | # to comply with z fastest (C order). 87 | ngf.reverse() 88 | # Check how many datasets we have 89 | content = content.split(ngf_string) 90 | num_datasets = len(content) 91 | # First dataset is always there 92 | self._data['total'] = ( 93 | np.fromstring(content[0].split('augmentation occupancies')[0], dtype=float, sep=' ').reshape(ngf) / volume 94 | ) 95 | if num_datasets == 2: 96 | # Collinear spin 97 | self._data['magnetization'] = ( 98 | np.fromstring(content[1].split('augmentation occupancies')[0], dtype=float, sep=' ').reshape(ngf) 99 | / volume 100 | ) 101 | elif num_datasets == 4: 102 | # Non-collinear spin 103 | self._data['magnetization'] = {} 104 | self._data['magnetization']['x'] = ( 105 | np.fromstring(content[1].split('augmentation occupancies')[0], dtype=float, sep=' ').reshape(ngf) 106 | / volume 107 | ) 108 | self._data['magnetization']['y'] = ( 109 | np.fromstring(content[2].split('augmentation occupancies')[0], dtype=float, sep=' ').reshape(ngf) 110 | / volume 111 | ) 112 | self._data['magnetization']['z'] = ( 113 | 
np.fromstring(content[3].split('augmentation occupancies')[0], dtype=float, sep=' ').reshape(ngf) 114 | / volume 115 | ) 116 | 117 | @property 118 | def charge_density(self): 119 | """Return the charge density.""" 120 | return self._data['total'] 121 | 122 | @property 123 | def magnetization_density(self): 124 | """Return the magnetization density.""" 125 | return self._data['magnetization'] 126 | -------------------------------------------------------------------------------- /parsevasp/constants.py: -------------------------------------------------------------------------------- 1 | """Constants.""" 2 | 3 | import os 4 | 5 | import yaml 6 | 7 | elements = { 8 | 'x': 0, 9 | 'h': 1, 10 | 'he': 2, 11 | 'li': 3, 12 | 'be': 4, 13 | 'b': 5, 14 | 'c': 6, 15 | 'n': 7, 16 | 'o': 8, 17 | 'f': 9, 18 | 'ne': 10, 19 | 'na': 11, 20 | 'mg': 12, 21 | 'al': 13, 22 | 'si': 14, 23 | 'p': 15, 24 | 's': 16, 25 | 'cl': 17, 26 | 'ar': 18, 27 | 'k': 19, 28 | 'ca': 20, 29 | 'sc': 21, 30 | 'ti': 22, 31 | 'v': 23, 32 | 'cr': 24, 33 | 'mn': 25, 34 | 'fe': 26, 35 | 'co': 27, 36 | 'ni': 28, 37 | 'cu': 29, 38 | 'zn': 30, 39 | 'ga': 31, 40 | 'ge': 32, 41 | 'as': 33, 42 | 'se': 34, 43 | 'br': 35, 44 | 'kr': 36, 45 | 'rb': 37, 46 | 'sr': 38, 47 | 'y': 39, 48 | 'zr': 40, 49 | 'nb': 41, 50 | 'mo': 42, 51 | 'tc': 43, 52 | 'ru': 44, 53 | 'rh': 45, 54 | 'pd': 46, 55 | 'ag': 47, 56 | 'cd': 48, 57 | 'in': 49, 58 | 'sn': 50, 59 | 'sb': 51, 60 | 'te': 52, 61 | 'i': 53, 62 | 'xe': 54, 63 | 'cs': 55, 64 | 'ba': 56, 65 | 'la': 57, 66 | 'ce': 58, 67 | 'pr': 59, 68 | 'nd': 60, 69 | 'pm': 61, 70 | 'sm': 62, 71 | 'eu': 63, 72 | 'gd': 64, 73 | 'tb': 65, 74 | 'dy': 66, 75 | 'ho': 67, 76 | 'er': 68, 77 | 'tm': 69, 78 | 'yb': 70, 79 | 'lu': 71, 80 | 'hf': 72, 81 | 'ta': 73, 82 | 'w': 74, 83 | 're': 75, 84 | 'os': 76, 85 | 'ir': 77, 86 | 'pt': 78, 87 | 'au': 79, 88 | 'hg': 80, 89 | 'tl': 81, 90 | 'pb': 82, 91 | 'bi': 83, 92 | 'po': 84, 93 | 'at': 85, 94 | 'rn': 86, 95 | 'fr': 87, 96 | 'ra': 88, 97 | 'ac': 89, 98 | 
'th': 90, 99 | 'pa': 91, 100 | 'u': 92, 101 | 'np': 93, 102 | 'pu': 94, 103 | 'am': 95, 104 | 'cm': 96, 105 | 'bk': 97, 106 | 'cf': 98, 107 | 'es': 99, 108 | 'fm': 100, 109 | 'md': 101, 110 | 'no': 102, 111 | 'lr': 103, 112 | 'rf': 104, 113 | 'db': 105, 114 | 'sg': 106, 115 | 'bh': 107, 116 | 'hs': 108, 117 | 'mt': 109, 118 | 'ds': 110, 119 | 'rg': 111, 120 | 'cn': 112, 121 | 'uut': 113, 122 | 'uuq': 114, 123 | 'uup': 115, 124 | 'uuh': 116, 125 | 'uus': 117, 126 | 'uuo': 118, 127 | } 128 | 129 | file_path = os.path.dirname(os.path.abspath(__file__)) 130 | incar_tags_file_path = os.path.join(file_path, 'incar.yml') 131 | with open(incar_tags_file_path, 'r', encoding='utf8') as stream: 132 | incar_tags = yaml.safe_load(stream) 133 | -------------------------------------------------------------------------------- /parsevasp/doscar.py: -------------------------------------------------------------------------------- 1 | """Handle DOSCAR.""" 2 | 3 | import sys 4 | 5 | import numpy as np 6 | 7 | from parsevasp import utils 8 | from parsevasp.base import BaseParser 9 | 10 | # Map from number of columns in DOSCAR to dtype for the total density of states. 11 | DTYPES_DOS = { 12 | 3: np.dtype([('energy', float), ('total', float), ('integrated', float)]), 13 | 5: np.dtype([('energy', float), ('total', float, (2,)), ('integrated', float, (2,))]), 14 | } 15 | 16 | # Map from the number of columns in DOSCAR to dtype for the partial density of states. 
17 | DTYPES_PDOS_COLLINEAR = { 18 | # l-decomposed 19 | 4: np.dtype([('energy', float), ('s', float), ('p', float), ('d', float)]), 20 | 7: np.dtype([('energy', float), ('s', float, (2,)), ('p', float, (2,)), ('d', float, (2,))]), 21 | 5: np.dtype([('energy', float), ('s', float), ('p', float), ('d', float), ('f', float)]), 22 | 9: np.dtype([('energy', float), ('s', float, (2,)), ('p', float, (2,)), ('d', float, (2,)), ('f', float, (2,))]), 23 | # lm-decomposed 24 | 10: np.dtype( 25 | [ 26 | ('energy', float), 27 | ('s', float), 28 | ('py', float), 29 | ('px', float), 30 | ('pz', float), 31 | ('dxy', float), 32 | ('dyz', float), 33 | ('dz2', float), 34 | ('dxz', float), 35 | ('dx2-y2', float), 36 | ] 37 | ), 38 | 17: np.dtype( 39 | [ 40 | ('energy', float), 41 | ('s', float), 42 | ('py', float), 43 | ('px', float), 44 | ('pz', float), 45 | ('dxy', float), 46 | ('dyz', float), 47 | ('dz2', float), 48 | ('dxz', float), 49 | ('dx2-y2', float), 50 | ('fy(3x2-y2)', float), 51 | ('fxyz', float), 52 | ('fyz2', float), 53 | ('fz3', float), 54 | ('fxz2', float), 55 | ('fz(x2-y2)', float), 56 | ('fx(x2-3y2)', float), 57 | ] 58 | ), 59 | 19: np.dtype( 60 | [ 61 | ('energy', float), 62 | ('s', float, (2,)), 63 | ('py', float, (2,)), 64 | ('px', float, (2,)), 65 | ('pz', float, (2,)), 66 | ('dxy', float, (2,)), 67 | ('dyz', float, (2,)), 68 | ('dz2', float, (2,)), 69 | ('dxz', float, (2,)), 70 | ('dx2-y2', float, (2,)), 71 | ] 72 | ), 73 | 33: np.dtype( 74 | [ 75 | ('energy', float), 76 | ('s', float, (2,)), 77 | ('py', float, (2,)), 78 | ('px', float, (2,)), 79 | ('pz', float, (2,)), 80 | ('dxy', float, (2,)), 81 | ('dyz', float, (2,)), 82 | ('dz2', float, (2,)), 83 | ('dxz', float, (2,)), 84 | ('dx2-y2', float, (2,)), 85 | ('fy(3x2-y2)', float, (2,)), 86 | ('fxyz', float, (2,)), 87 | ('fyz2', float, (2,)), 88 | ('fz3', float, (2,)), 89 | ('fxz2', float, (2,)), 90 | ('fz(x2-y2)', float, (2,)), 91 | ('fx(x2-3y2)', float, (2,)), 92 | ] 93 | ), 94 | } 95 | 96 | 
DTYPES_PDOS_NONCOLLINEAR = { 97 | # l-decomposed 98 | 13: np.dtype([('energy', float), ('s', float, (4,)), ('p', float, (4,)), ('d', float, (4,))]), 99 | 17: np.dtype([('energy', float), ('s', float, (4,)), ('p', float, (4,)), ('d', float, (4,)), ('f', float, (4,))]), 100 | # lm-decomposed 101 | 37: np.dtype( 102 | [ 103 | ('energy', float), 104 | ('s', float, (4,)), 105 | ('py', float, (4,)), 106 | ('px', float, (4,)), 107 | ('pz', float, (4,)), 108 | ('dxy', float, (4,)), 109 | ('dyz', float, (4,)), 110 | ('dz2', float, (4,)), 111 | ('dxz', float, (4,)), 112 | ('x2-y2', float, (4,)), 113 | ] 114 | ), 115 | 65: np.dtype( 116 | [ 117 | ('energy', float), 118 | ('s', float, (4,)), 119 | ('py', float, (4,)), 120 | ('px', float, (4,)), 121 | ('pz', float, (4,)), 122 | ('dxy', float, (4,)), 123 | ('dyz', float, (4,)), 124 | ('dz2', float, (4,)), 125 | ('dxz', float, (4,)), 126 | ('dx2-y2', float, (4,)), 127 | ('fy(3x2-y2)', float, (4,)), 128 | ('fxyz', float, (4,)), 129 | ('fyz2', float, (4,)), 130 | ('fz3', float, (4,)), 131 | ('fxz2', float, (4,)), 132 | ('fz(x2-y2)', float, (4,)), 133 | ('fx(x2-3y2)', float, (4,)), 134 | ] 135 | ), 136 | } 137 | 138 | # Mapping between the number of columns to the number of spins. 
139 | COLSPIN_MAP_COLLINEAR = { 140 | 7: 2, 141 | 9: 2, 142 | 19: 2, 143 | 33: 2, 144 | 4: 1, 145 | 5: 1, 146 | 10: 1, 147 | 17: 1, 148 | } 149 | 150 | COLSPIN_MAP_NONCOLLINEAR = { 151 | 13: 4, 152 | 17: 4, 153 | 37: 4, 154 | 65: 4, 155 | } 156 | 157 | 158 | def _get_num_spin(count: int, non_collinear: bool) -> int: 159 | num_spin = None 160 | if non_collinear: 161 | num_spin = COLSPIN_MAP_NONCOLLINEAR.get(count) 162 | else: 163 | num_spin = COLSPIN_MAP_COLLINEAR.get(count) 164 | 165 | if num_spin is None: 166 | raise ValueError(f'Unkown column count: {count} in DOSCAR') 167 | return num_spin 168 | 169 | 170 | def _get_dtype_pdos(count: int, non_collinear: bool) -> np.dtype: 171 | dtype_pdos = None 172 | if non_collinear: 173 | dtype_pdos = DTYPES_PDOS_NONCOLLINEAR.get(count) 174 | else: 175 | dtype_pdos = DTYPES_PDOS_COLLINEAR.get(count) 176 | 177 | if dtype_pdos is None: 178 | raise ValueError(f'Unkown column count: {count} in DOSCAR') 179 | return dtype_pdos 180 | 181 | 182 | class Doscar(BaseParser): 183 | """Class to handle DOSCAR.""" 184 | 185 | def __init__(self, file_path=None, file_handler=None, logger=None, non_collinear=False): 186 | """ 187 | Initialize an DOSCAR object and set content as a dictionary. 188 | 189 | file_path : string 190 | A string containing the file path to the file that is going to be parsed. 191 | file_handler : object 192 | A file like object that acts as a handler for the content to be parsed. 193 | logger : object 194 | A logger object if you would like to use an external logger for messages 195 | ejected inside this parser. 196 | non_collinear: boolean 197 | If non-collinear calculation is performed, set this flag True. 
198 | """ 199 | 200 | super().__init__(file_path=file_path, file_handler=file_handler, logger=logger) 201 | 202 | # check that at least one is supplied 203 | if self._file_path is None and self._file_handler is None: 204 | self._logger.error(self.ERROR_MESSAGES[self.ERROR_USE_ONE_ARGUMENT]) 205 | sys.exit(self.ERROR_USE_ONE_ARGUMENT) 206 | 207 | if self._file_path is None and self._file_handler is None: 208 | self._logger.error(self.ERROR_MESSAGES[self.ERROR_USE_ONE_ARGUMENT]) 209 | sys.exit(self.ERROR_USE_ONE_ARGUMENT) 210 | 211 | self._non_collinear = non_collinear 212 | 213 | self._data = {'dos': None, 'pdos': None, 'metadata': None} 214 | 215 | # parse parse parse 216 | self._parse() 217 | 218 | def _write(self, *args, **kwargs): 219 | """Write not supported for DOSCAR.""" 220 | raise NotImplementedError('Writing DOSCAR files is not supported.') 221 | 222 | def _parse(self): 223 | """Perform the actual parsing by parsing from file like content.""" 224 | self._from_file() 225 | 226 | def _from_file(self): 227 | """ 228 | Create a dictionary of entries from a 229 | file and store them in the this instance's data dictionary. 230 | 231 | """ 232 | 233 | doscar = utils.read_from_file(self._file_path, self._file_handler, encoding='utf8') 234 | self._from_list(doscar) 235 | 236 | def _from_list(self, doscar): 237 | """ 238 | Go through the list and extract total and partial density of states 239 | and some metadata. 240 | 241 | Parameters 242 | ---------- 243 | doscar : list 244 | A list of strings containing each line in the DOSCAR file. 
245 | 246 | """ 247 | 248 | # Set some metadata 249 | num_ions, num_atoms, part, ncdij = utils.line_to_type(doscar[0], int) 250 | 251 | # Figure out if we have a partial density of states 252 | # partial = bool(int(part)) 253 | 254 | # Volume of cell (AA^3), length of basis vectors (meters) and POTIMS 255 | # line_0 = utils.line_to_type(doscar[1], float) 256 | 257 | # The initial temperature 258 | # line_1 = utils.line_to_type(doscar[2], float) 259 | 260 | # Fetch coordinates used 261 | coord_type = utils.line_to_type(doscar[3]) 262 | 263 | # Name of system 264 | system = utils.line_to_type(doscar[4], no_split=True) 265 | 266 | # Energy min, energy max, number of points between, fermi level, weight 267 | line_2 = utils.line_to_type(doscar[5], float) 268 | emax, emin, ndos, efermi, weight = line_2 269 | ndos = int(ndos) 270 | 271 | # The rest of the file is density of states data, convert to float 272 | data = [utils.line_to_type(line, d_type=float) for line in doscar[6:]] 273 | 274 | # Get the number of columns for the total dos section to figure out 275 | # if data is spin decomposed 276 | count = len(data[ndos - 1]) 277 | 278 | num_spin = 1 279 | if count == 5: 280 | num_spin = 2 281 | 282 | # Total density of states 283 | dos_data = np.array(data[:ndos]) 284 | dos = np.zeros((dos_data.shape[0]), DTYPES_DOS[count]) 285 | dos['energy'] = dos_data[:, 0] 286 | for i, name in enumerate(DTYPES_DOS[count].names[1:]): 287 | if num_spin == 1: 288 | dos[name] = np.squeeze(dos_data[:, i + 1 : i + 1 + num_spin], axis=1) 289 | else: 290 | dos[name] = dos_data[:, i + 1 : i + 1 + num_spin] 291 | 292 | # Partial density of states 293 | pdos_items = [] 294 | pdos = np.array([]) # Partial dos is empty by default 295 | if line_2 in data: 296 | start = data.index(line_2) + 1 297 | for _ in range(num_ions): 298 | pdos_items += [data[start : start + ndos]] 299 | start += ndos + 1 300 | 301 | # Get the number of columns for the pdos section. 
302 | count = len(pdos_items[-1][-1]) 303 | pdos_data = np.array(pdos_items) 304 | 305 | # Adjust the spin according to the column definitions 306 | num_spin = _get_num_spin(count, self._non_collinear) 307 | 308 | dtype_pdos = _get_dtype_pdos(count, self._non_collinear) 309 | pdos = np.zeros((pdos_data.shape[0], pdos_data.shape[1]), dtype_pdos) 310 | pdos['energy'] = pdos_data[:, :, 0] 311 | for i, name in enumerate(dtype_pdos.names[1:]): 312 | if num_spin == 1: # Only squeeze if there is only one spin component 313 | pdos[name] = np.squeeze(pdos_data[:, :, i + 1 : i + 1 + num_spin], axis=2) 314 | else: 315 | pdos[name] = pdos_data[:, :, i + 1 : i + 1 + num_spin] 316 | 317 | metadata = {} 318 | metadata['n_ions'] = num_ions 319 | metadata['n_atoms'] = num_atoms 320 | metadata['cartesian'] = coord_type.startswith(('c', 'C')) 321 | metadata['name'] = system 322 | metadata['emax'] = emax 323 | metadata['emin'] = emin 324 | metadata['n_dos'] = ndos 325 | metadata['efermi'] = efermi 326 | metadata['weight'] = weight 327 | 328 | # Store 329 | self._data['metadata'] = metadata 330 | self._data['pdos'] = pdos 331 | self._data['dos'] = dos 332 | 333 | def get_metadata(self): 334 | """ 335 | Return the metadata. 336 | 337 | Parameters 338 | ---------- 339 | None 340 | 341 | Returns 342 | ------- 343 | metadata : dict 344 | A dictionary containing the number of number of atoms, ions, spin flag, 345 | coordinates etc. 346 | 347 | """ 348 | 349 | metadata = self._data['metadata'] 350 | return metadata 351 | 352 | def get_dos(self): 353 | """ 354 | Return the total density of states. 355 | 356 | Parameters 357 | ---------- 358 | None 359 | 360 | Returns 361 | ------- 362 | dos : nparray 363 | A numpy array containing the total density of states. First index is the 364 | energy samples, while the last index if composed of the energy sample, total 365 | density of states and integrated density of states at that energy sample, respectively. 
366 | 367 | """ 368 | 369 | dos = self._data['dos'] 370 | return dos 371 | 372 | def get_pdos(self): 373 | """ 374 | Return the partial density of states. 375 | 376 | Parameters 377 | ---------- 378 | None 379 | 380 | Returns 381 | ------- 382 | pdos : nparray 383 | 384 | """ 385 | 386 | pdos = self._data['pdos'] 387 | return pdos 388 | -------------------------------------------------------------------------------- /parsevasp/eigenval.py: -------------------------------------------------------------------------------- 1 | """Handle EIGENVAL.""" 2 | 3 | import re 4 | import sys 5 | 6 | import numpy as np 7 | 8 | from parsevasp import utils 9 | from parsevasp.base import BaseParser 10 | 11 | 12 | class Eigenval(BaseParser): 13 | """Class to handle ENGENVAL.""" 14 | 15 | def __init__(self, file_path=None, file_handler=None, logger=None): 16 | """ 17 | Initialize an EIGENVAL object and set content as a dictionary. 18 | 19 | Parameters 20 | ---------- 21 | file_path : string 22 | A string containing the file path to the file that is going to be parsed. 23 | file_handler : object 24 | A file like object that acts as a handler for the content to be parsed. 25 | logger : object 26 | A logger object if you would like to use an external logger for messages 27 | ejected inside this parser. 
28 | 29 | """ 30 | 31 | super().__init__(file_path=file_path, file_handler=file_handler, logger=logger) 32 | 33 | # check that at least one is supplied 34 | if self._file_path is None and self._file_handler is None: 35 | self._logger.error(self.ERROR_MESSAGES[self.ERROR_USE_ONE_ARGUMENT]) 36 | sys.exit(self.ERROR_USE_ONE_ARGUMENT) 37 | 38 | if self._file_path is None and self._file_handler is None: 39 | self._logger.error(self.ERROR_MESSAGES[self.ERROR_USE_ONE_ARGUMENT]) 40 | sys.exit(self.ERROR_USE_ONE_ARGUMENT) 41 | 42 | self._data = {'eigenvalues': None, 'kpoints': None, 'metadata': None} 43 | 44 | # parse parse parse 45 | self._parse() 46 | 47 | def _write(self, *args, **kwargs): 48 | """Write not supported for EIGENVAL.""" 49 | raise NotImplementedError('Writing EIGENVAL files is not supported.') 50 | 51 | def _parse(self): 52 | """Perform the actual parsing.""" 53 | 54 | if self._file_path is None and self._file_handler is None: 55 | return 56 | 57 | # create dictionary from a file 58 | self._from_file() 59 | 60 | def _from_file(self): 61 | """ 62 | Create a dictionary of entries from a 63 | file and store them in the this instance's data dictionary. 64 | 65 | """ 66 | 67 | eigenval = utils.read_from_file(self._file_path, self._file_handler, encoding='utf8') 68 | self._from_list(eigenval) 69 | 70 | def _from_list(self, eigenval): 71 | """ 72 | Go through the list and extract eigenvalues, kpoints and metadata. 73 | 74 | Parameters 75 | ---------- 76 | eigenval : list 77 | A list of strings containing each line in the EIGENVAL file. 
78 | 79 | """ 80 | 81 | # Read metadata 82 | line_0 = utils.line_to_type(eigenval[0], int) 83 | line_1 = utils.line_to_type(eigenval[1], float) 84 | line_2 = utils.line_to_type(eigenval[2], float) 85 | coord_type = utils.line_to_type(eigenval[3]) 86 | 87 | # Read system 88 | system = utils.line_to_type(eigenval[4], no_split=True) 89 | 90 | # Read number of kpoints and bands 91 | param_0, num_kp, num_bands = utils.line_to_type(eigenval[5], int) 92 | 93 | # Read the rest of the data 94 | # Here we convert back to a string since the remainder of this parser 95 | # segment was taken from an older parser that worked on the string. 96 | data = ''.join(eigenval[6:]) 97 | 98 | # Set rest of metadata 99 | num_ions, num_atoms, p00, num_spins = line_0 100 | 101 | # Datablocks 102 | data = re.split(utils.empty_line, data) 103 | data = [[line.split() for line in block.splitlines()] for block in data] 104 | kpoints = np.zeros((num_kp, 4)) 105 | eigenvalues = np.zeros((num_spins, num_kp, num_bands)) 106 | # Iterate over blocks, pr. 
k-point 107 | for k, field in enumerate(data): 108 | # Remove empty lines 109 | kpbs = [x for x in field if x] 110 | # First line in the data block is the kpoint coordinates and weight 111 | kpi = [float(x) for x in kpbs.pop(0)] 112 | kpoints[k] = kpi 113 | # The rest is the band energies 114 | for point in kpbs: 115 | eigenvalues[:, k, int(point[0]) - 1] = point[1 : num_spins + 1] 116 | 117 | # Create the metadata dict 118 | metadata = {} 119 | metadata[0] = line_0 120 | metadata[1] = line_1 121 | metadata[2] = line_2 122 | metadata['n_ions'] = num_ions 123 | metadata['n_atoms'] = num_atoms 124 | metadata['p00'] = p00 125 | metadata['nspin'] = num_spins 126 | metadata['cartesian'] = coord_type.startswith(('c', 'C')) 127 | metadata['name'] = system 128 | metadata['some_num'] = param_0 129 | metadata['n_bands'] = num_bands 130 | metadata['n_kp'] = num_kp 131 | 132 | # Store 133 | self._data['metadata'] = metadata 134 | self._data['eigenvalues'] = eigenvalues 135 | self._data['kpoints'] = kpoints 136 | 137 | def get_metadata(self): 138 | """ 139 | Return the metadata. 140 | 141 | Parameters 142 | ---------- 143 | None 144 | 145 | Returns 146 | ------- 147 | metadata : dict 148 | A dictionary containing the number of number of atoms, ions, spin flag, 149 | coordinates etc. 150 | 151 | """ 152 | 153 | metadata = self._data['metadata'] 154 | return metadata 155 | 156 | def get_eigenvalues(self): 157 | """ 158 | Return the eigenvalues. 159 | 160 | Parameters 161 | ---------- 162 | None 163 | 164 | Returns 165 | ------- 166 | elastic : nparray 167 | A numpy array containing the eigenvalues. First index is spin, second k-points and the last, 168 | the band index. 169 | 170 | """ 171 | 172 | eigenvalues = self._data['eigenvalues'] 173 | return eigenvalues 174 | 175 | def get_kpoints(self): 176 | """ 177 | Return the kpoints. 
178 | 179 | Parameters 180 | ---------- 181 | None 182 | 183 | Returns 184 | ------- 185 | kpoints : nparray 186 | A NumPy array containing the kpoints. First index is the k-point number, last the direction plus the 187 | weight (four in total, last is the weight). 188 | 189 | """ 190 | 191 | kpoints = self._data['kpoints'] 192 | return kpoints 193 | -------------------------------------------------------------------------------- /parsevasp/outcar.py: -------------------------------------------------------------------------------- 1 | """Handle OUTCAR.""" 2 | 3 | import re 4 | import sys 5 | 6 | import numpy as np 7 | 8 | from parsevasp import utils 9 | from parsevasp.base import BaseParser 10 | 11 | 12 | class Outcar(BaseParser): 13 | """Class to handle OUTCAR.""" 14 | 15 | ERROR_NO_ITERATIONS = 600 16 | BaseParser.ERROR_MESSAGES.update({ERROR_NO_ITERATIONS: 'A crash detected before the first SCF step.'}) 17 | ERROR_MESSAGES = BaseParser.ERROR_MESSAGES 18 | 19 | def __init__(self, file_path=None, file_handler=None, logger=None): 20 | """Initialize an OUTCAR object and set content as a dictionary. 21 | 22 | Parameters 23 | ---------- 24 | file_path : string 25 | A string containing the file path to the file that is going to be parsed. 26 | file_handler : object 27 | A file like object that acts as a handler for the content to be parsed. 28 | logger : object 29 | A logger object if you would like to use an external logger for messages 30 | ejected inside this parser. 
31 | 32 | """ 33 | 34 | super().__init__(file_path=file_path, file_handler=file_handler, logger=logger) 35 | 36 | # check that at least one is supplied 37 | if self._file_path is None and self._file_handler is None: 38 | self._logger.error(self.ERROR_MESSAGES[self.ERROR_USE_ONE_ARGUMENT]) 39 | sys.exit(self.ERROR_USE_ONE_ARGUMENT) 40 | 41 | if self._file_path is None and self._file_handler is None: 42 | self._logger.error(self.ERROR_MESSAGES[self.ERROR_USE_ONE_ARGUMENT]) 43 | sys.exit(self.ERROR_USE_ONE_ARGUMENT) 44 | 45 | self._data = { 46 | 'elastic_moduli': {'non-symmetrized': None, 'symmetrized': None, 'total': None}, 47 | 'symmetry': { 48 | 'num_space_group_operations': {'static': [], 'dynamic': []}, 49 | 'original_cell_type': {'static': [], 'dynamic': []}, 50 | 'symmetrized_cell_type': {'static': [], 'dynamic': []}, 51 | }, 52 | 'magnetization': { 53 | 'sphere': { 54 | 'x': {'site_moment': {}, 'total_magnetization': {}}, 55 | 'y': {'site_moment': {}, 'total_magnetization': {}}, 56 | 'z': {'site_moment': {}, 'total_magnetization': {}}, 57 | }, 58 | 'full_cell': {}, 59 | }, 60 | 'run_stats': {}, 61 | 'run_status': { 62 | 'nelm': None, 63 | 'nsw': None, 64 | 'last_iteration_index': None, 65 | 'finished': False, 66 | 'ionic_converged': False, 67 | 'electronic_converged': False, 68 | 'consistent_nelm_breach': False, 69 | 'contains_nelm_breach': False, 70 | }, 71 | } 72 | 73 | # parse parse parse 74 | self._parse() 75 | 76 | def _write(self, *args, **kwargs): 77 | """Write not supported for OUTCAR.""" 78 | raise NotImplementedError('Writing OUTCAR files is not supported.') 79 | 80 | def _parse(self): 81 | """Perform the actual parsing.""" 82 | # Create dictionary from a file 83 | self._from_file() 84 | 85 | def _from_file(self): 86 | """ 87 | Create a dictionary of entries from a 88 | file and store them in the this instance's data dictionary. 
89 | """ 90 | 91 | outcar = utils.read_from_file(self._file_path, self._file_handler, encoding='utf8') 92 | self._from_list(outcar) 93 | 94 | def _from_list(self, outcar): 95 | """ 96 | Go through the list and extract what is not present in the XML file. 97 | 98 | Parameters 99 | ---------- 100 | outcar : list 101 | A list of strings containing each line in the OUTCAR file. 102 | 103 | """ 104 | config = '' 105 | s_orb = {0: 's', 1: 'p', 2: 'd', 3: 'f'} 106 | params = {'ibrion': -1} 107 | finished = False 108 | iter_counter = None 109 | nelec_steps = {} 110 | 111 | for index, line in enumerate(outcar): 112 | # Check the iteration counter 113 | match = re.search(r'Iteration *(\d+)\( *(\d+)\)', line) 114 | if match: 115 | iter_counter = [int(match.group(1)), int(match.group(2))] 116 | # Increment the counter 117 | if iter_counter[0] in nelec_steps: 118 | nelec_steps[iter_counter[0]] += 1 119 | else: 120 | nelec_steps[iter_counter[0]] = 1 121 | continue 122 | # Record the NELM / NSW requested 123 | utils.match_integer_param(self._data['run_status'], 'NSW', line) 124 | utils.match_integer_param(params, 'IBRION', line) 125 | utils.match_integer_param(self._data['run_status'], 'NELM', line) 126 | if 'NBANDS=' in line: 127 | self._data['run_status']['nbands'] = int(line.split('NBANDS=')[1].strip()) 128 | # Test if the end of execution has reached 129 | if 'timing and accounting informations' in line: 130 | self._data['run_status']['finished'] = True 131 | # Fetch the symmetry 132 | if line.strip().startswith('Analysis of symmetry for initial positions (statically)'): 133 | config = 'static' 134 | if line.strip().startswith('Analysis of symmetry for dynamics'): 135 | config = 'dynamic' 136 | if config: 137 | if line.strip().startswith('Subroutine PRICEL returns'): 138 | text = outcar[index + 1].strip().lower() 139 | if text: 140 | self._data['symmetry']['original_cell_type'][config].append('primitive cell') 141 | if 'primitive cells build up your supercell' in line: 142 | 
text = f'{line.strip().split()} primitive cells' 143 | self._data['symmetry']['original_cell_type'][config].append(text) 144 | if line.strip().startswith('Routine SETGRP: Setting up the symmetry group for a'): 145 | self._data['symmetry']['symmetrized_cell_type'][config].append(outcar[index + 1].strip().lower()) 146 | if line.strip().startswith('Subroutine GETGRP returns'): 147 | self._data['symmetry']['num_space_group_operations'][config].append(int(line.strip().split()[4])) 148 | 149 | # then the elastic tensors etc. in kBar 150 | if line.strip().startswith('ELASTIC MODULI (kBar)'): 151 | tensor = [] 152 | for idx in range(3, 9): 153 | tensor.append([float(item) for item in outcar[index + idx].strip().split()[1:]]) 154 | self._data['elastic_moduli']['non_symmetrized'] = np.asarray(tensor) 155 | if line.strip().startswith('SYMMETRIZED ELASTIC MODULI'): 156 | tensor = [] 157 | for idx in range(3, 9): 158 | tensor.append([float(item) for item in outcar[index + idx].strip().split()[1:]]) 159 | self._data['elastic_moduli']['symmetrized'] = np.asarray(tensor) 160 | if line.strip().startswith('TOTAL ELASTIC MODULI'): 161 | tensor = [] 162 | for idx in range(3, 9): 163 | tensor.append([float(item) for item in outcar[index + idx].strip().split()[1:]]) 164 | self._data['elastic_moduli']['total'] = np.asarray(tensor) 165 | for _proj in ['x', 'y', 'z']: 166 | if line.strip().startswith(f'magnetization ({_proj})'): 167 | _counter = 0 168 | mag_found = False 169 | while not mag_found: 170 | if outcar[index + 4 + _counter].strip().split(): 171 | if not outcar[index + 4 + _counter].strip().startswith('-') and not outcar[ 172 | index + 4 + _counter 173 | ].strip().startswith('tot'): 174 | mag_line = outcar[index + 4 + _counter].split() 175 | self._data['magnetization']['sphere'][f'{_proj}']['site_moment'][int(mag_line[0])] = {} 176 | for _count, orb in enumerate(mag_line[1:-1]): 177 | self._data['magnetization']['sphere'][f'{_proj}']['site_moment'][int(mag_line[0])][ 178 | 
s_orb[_count] 179 | ] = float(orb) 180 | self._data['magnetization']['sphere'][f'{_proj}']['site_moment'][int(mag_line[0])][ 181 | 'tot' 182 | ] = float(mag_line[-1]) 183 | if outcar[index + 4 + _counter].strip().startswith('tot'): 184 | mag_line = outcar[index + 4 + _counter].split() 185 | self._data['magnetization']['sphere'][f'{_proj}']['total_magnetization'] = {} 186 | for _count, orb in enumerate(mag_line[1:-1]): 187 | self._data['magnetization']['sphere'][f'{_proj}']['total_magnetization'][ 188 | s_orb[_count] 189 | ] = float(orb) 190 | self._data['magnetization']['sphere'][f'{_proj}']['total_magnetization']['tot'] = float( 191 | mag_line[-1] 192 | ) 193 | mag_found = True 194 | else: 195 | self._data['magnetization']['sphere'][f'{_proj}']['total_magnetization'] = {} 196 | self._data['magnetization']['sphere'][f'{_proj}']['total_magnetization'] = self._data[ 197 | 'magnetization' 198 | ]['sphere'][f'{_proj}']['site_moment'][ 199 | next(iter(self._data['magnetization']['sphere'][f'{_proj}']['site_moment'].keys())) 200 | ] 201 | mag_found = True 202 | _counter = _counter + 1 203 | if line.strip().startswith('number of electron'): 204 | # Only take the last value 205 | self._data['magnetization']['full_cell'] = [float(_val) for _val in line.strip().split()[5:]] 206 | 207 | # Check if SCF iterations are contained in the file 208 | if iter_counter is None: 209 | self._logger.error(self.ERROR_MESSAGES[self.ERROR_NO_ITERATIONS]) 210 | sys.exit(self.ERROR_NO_ITERATIONS) 211 | 212 | # Work out if the ionic relaxation and electronic steps are to be considered converged 213 | run_status = self._data['run_status'] 214 | run_status['last_iteration_index'] = iter_counter 215 | nsw = run_status['nsw'] 216 | nelm = run_status['nelm'] 217 | finished = run_status['finished'] 218 | ibrion = params['ibrion'] 219 | if finished is True: 220 | if ibrion > 0: 221 | # There are fewer number of ionic iterations than the set number of maximum number of 222 | # ionic iterations (NSW), 
thus the relaxation is considered converged. 223 | # Only relevant to check ionic relaxation convergence when IBRION is larger than zero 224 | if iter_counter[0] < nsw: 225 | # Fewer iterations than requested - ionic relaxation is considered converged 226 | # Note that this may include runs that has been interrupted 227 | # by STOPCAR - this is a limitation of VASP 228 | run_status['ionic_converged'] = True 229 | elif iter_counter[0] == nsw and nsw > 1: 230 | # Reached the requested iterations - ionic relaxation is considered not converged 231 | run_status['ionic_converged'] = False 232 | elif nsw <= 1: 233 | # Sometimes we have no or only one ionic step, which makes it difficult to determine if the 234 | # ionic relaxation is to be considered converged 235 | self._logger.warning( 236 | f'IBRION = {ibrion} but NSW is {nsw}' 237 | ' - cannot deterimine if the relaxation structure is converged!' 238 | ) 239 | run_status['ionic_converged'] = None 240 | else: 241 | # No ionic relaxation performed 242 | run_status['ionic_converged'] = None 243 | 244 | if iter_counter[1] < nelm: 245 | # There are fewer number of electronic steps in the last ionic iteration than the set maximum 246 | # number of electronic steps, thus the electronic self consistent cycle is considered converged 247 | run_status['electronic_converged'] = True 248 | 249 | # Check for consistent electronic convergence problems. VASP will not break when NELM is reached during 250 | # the relaxation, it will simply consider it converged. We need to detect this, which is done 251 | # by checking if there are any single run that have reached NELM in the history or if NELM 252 | # has been consistently reached. 253 | mask = [value >= nelm for sc_idx, value in sorted(nelec_steps.items(), key=lambda x: x[0])] 254 | if (finished and all(mask)) or (not finished and all(mask[:-1]) and iter_counter[0] > 1): 255 | # We have consistently reached NELM. 
Excluded the last iteration, 256 | # as the calculation may not be finished 257 | run_status['consistent_nelm_breach'] = True 258 | if any(mask): 259 | # We have at least one ionic step where NELM was reached. 260 | run_status['contains_nelm_breach'] = True 261 | 262 | self._data['run_stats'] = self._parse_timings_memory(outcar[-50:]) 263 | 264 | def get_symmetry(self): 265 | """ 266 | Return the symmetry. 267 | 268 | Parameters 269 | ---------- 270 | None 271 | 272 | Returns 273 | ------- 274 | symmetry : dict 275 | A dictionary containing the symmetry information. 276 | 277 | """ 278 | 279 | symmetry = self._data['symmetry'] 280 | return symmetry 281 | 282 | def get_elastic_moduli(self): 283 | """ 284 | Return the elastic moduli in kBar. 285 | 286 | Parameters 287 | ---------- 288 | None 289 | 290 | Returns 291 | ------- 292 | elastic : dict 293 | A dictionary containing the elastic moduli. 294 | 295 | """ 296 | 297 | elastic = self._data['elastic_moduli'] 298 | return elastic 299 | 300 | def get_magnetization(self): 301 | """ 302 | Return the magnetization of the cell. 303 | 304 | Parameters 305 | ---------- 306 | None 307 | 308 | Returns 309 | ------- 310 | magnetic : dict 311 | A dictionary containing the magnetization of the cell. 312 | 313 | """ 314 | 315 | magnetic = self._data['magnetization'] 316 | return magnetic 317 | 318 | def get_run_stats(self): 319 | """ 320 | Return the run time statistics information. 321 | 322 | The existence of timing and memory information also signals the calculation terminate 323 | gracefully. 324 | 325 | Parameters 326 | ---------- 327 | None 328 | 329 | Returns 330 | ------- 331 | stats : dict 332 | A dictionary containing timing and memory consumption information 333 | that are parsed from the end of the OUTCAR file. The key names are 334 | mostly preserved, except for the memory which is prefixed with `mem_usage_`. 335 | Units are preserved from OUTCAR and there are some differences between 336 | VASP 5 and 6. 
337 | 338 | """ 339 | 340 | stats = self._data['run_stats'] 341 | return stats 342 | 343 | def get_run_status(self): 344 | """ 345 | Return the status of the run. 346 | 347 | Contains information of the convergence of the ionic relaxation and electronics, 348 | in addition to information if the run has finished. 349 | 350 | Parameters 351 | ---------- 352 | None 353 | 354 | Returns 355 | ------- 356 | status : dict 357 | A dictionary containing the keys `finished`, which is True if the VASP calculation 358 | contain timing information in the end of the OUTCAR. The key `ionic_converged` is 359 | True if the number of ionic steps detected is smaller than the supplied NSW. 360 | The key `electronic_converged` is True if the number of electronic steps is smaller than 361 | NELM (defaults to 60 in VASP). It is also possible to check if all the ionic steps 362 | did reached NELM and thus did not converged if the key `consistent_nelm_breach` is True, 363 | while `contains_nelm_breach` is True if one or more ionic steps reached NELM and thus 364 | did not converge electronically. 365 | 366 | """ 367 | 368 | status = self._data['run_status'] 369 | return status 370 | 371 | @staticmethod 372 | def _parse_timings_memory(timing_lines): 373 | """ 374 | Parse timing information. 375 | 376 | Parameters 377 | ---------- 378 | timing_lines : list 379 | A list of lines containing the timing information. 380 | 381 | Returns 382 | ------- 383 | info : dict 384 | A dictionary containing the timing information. 
385 | """ 386 | info = {} 387 | time_mem_pattern = re.compile(r'\((sec|kb)\)') 388 | mem_pattern = re.compile(r':.*kBytes$') 389 | for _, line in enumerate(timing_lines): 390 | if time_mem_pattern.search(line): 391 | tokens = line.strip().split(':') 392 | item_name = '_'.join(tmp.lower() for tmp in tokens[0].strip().split()[:-1]) 393 | # The entry can be empty (VASP6) 394 | try: 395 | info[item_name] = float(tokens[1].strip()) 396 | except ValueError: 397 | info[item_name] = None 398 | 399 | elif mem_pattern.search(line): 400 | tokens = re.split(r'[: ]+', line.strip()) 401 | try: 402 | info['mem_usage_' + tokens[0]] = float(tokens[-2]) 403 | except ValueError: 404 | info['mem_usage_' + tokens[0]] = None 405 | 406 | return info 407 | -------------------------------------------------------------------------------- /parsevasp/potcar.py: -------------------------------------------------------------------------------- 1 | """Handle POTCAR 2 | 3 | In order to not have pymatgen as a dependency, this parser takes parts of the 4 | `pymatgen parser`_ 5 | for POTCAR here. 6 | 7 | Copyright info from their MIT license: 8 | 9 | Copyright (c) 2011-2012 MIT & The Regents of the University of California, 10 | through Lawrence Berkeley National Laboratory 11 | 12 | pymatgen also uses the MIT license so see this plugins LICENSE file for its copy. 
13 | """ 14 | 15 | import re 16 | 17 | from parsevasp import utils 18 | from parsevasp.base import BaseParser 19 | 20 | 21 | class Potcar(BaseParser): 22 | """Class to handle the POTCAR""" 23 | 24 | _functional_tags = { 25 | 'pe': {'name': 'PBE', 'class': 'GGA'}, 26 | '91': {'name': 'PW91', 'class': 'GGA'}, 27 | 'rp': {'name': 'revPBE', 'class': 'GGA'}, 28 | 'am': {'name': 'AM05', 'class': 'GGA'}, 29 | 'ps': {'name': 'PBEsol', 'class': 'GGA'}, 30 | 'pw': {'name': 'PW86', 'class': 'GGA'}, 31 | 'lm': {'name': 'Langreth-Mehl-Hu', 'class': 'GGA'}, 32 | 'pb': {'name': 'Perdew-Becke', 'class': 'GGA'}, 33 | 'ca': {'name': 'Perdew-Zunger81', 'class': 'LDA'}, 34 | 'hl': {'name': 'Hedin-Lundquist', 'class': 'LDA'}, 35 | 'wi': {'name': 'Wigner Interpoloation', 'class': 'LDA'}, 36 | } 37 | 38 | def __init__(self, file_path=None, file_handler=None, logger=None): 39 | super().__init__(file_path=file_path, file_handler=file_handler, logger=logger) 40 | 41 | self.metadata = None 42 | self._symbol = None 43 | self._element = None 44 | 45 | if self._file_path is not None or self._file_handler is not None: 46 | self._from_file() 47 | if self._file_path is None and self._file_handler is None: 48 | raise ValueError('Either "file_path" or "file_handler" should be passed') 49 | 50 | def _from_file(self): 51 | """Create rudimentary dictionary of entries from a 52 | file. 
53 | 54 | """ 55 | 56 | potcar = utils.read_from_file(self._file_path, self._file_handler, encoding='utf8', lines=False) 57 | 58 | return self._generate_metadata(potcar) 59 | 60 | def _generate_metadata(self, potcar_contents: str): 61 | """Get the metadata from a POTCAR file 62 | 63 | Parameters 64 | ---------- 65 | potcar_contents: string 66 | The contents of the POTCAR file as a string 67 | 68 | Returns 69 | ------- 70 | metadata: dictionary 71 | A dictionary containing the metadata associated with the POTCAR 72 | """ 73 | _parameters_to_parse = { 74 | 'VRHFIN': lambda val: val.strip(), 75 | 'LEXCH': lambda val: val.strip(), 76 | 'TITEL': lambda val: val.strip(), 77 | 'LULTRA': lambda val: re.match(r'^\.?([TFtf])[A-Za-z]*\.?', val).group(1).lower() in ['t'], 78 | 'LCOR': lambda val: re.match(r'^\.?([TFtf])[A-Za-z]*\.?', val).group(1).lower() in ['t'], 79 | 'LPAW': lambda val: re.match(r'^\.?([TFtf])[A-Za-z]*\.?', val).group(1).lower() in ['t'], 80 | 'IUNSCR': lambda val: int(re.match(r'^-?[0-9]+', val).group(0)), 81 | 'NDATA': lambda val: int(re.match(r'^-?[0-9]+', val).group(0)), 82 | 'ICORE': lambda val: int(re.match(r'^-?[0-9]+', val).group(0)), 83 | 'EATOM': lambda val: float(re.search(r'^-?\d*\.?\d*[eE]?-?\d*', val).group(0)), 84 | 'RPACOR': lambda val: float(re.search(r'^-?\d*\.?\d*[eE]?-?\d*', val).group(0)), 85 | 'POMASS': lambda val: float(re.search(r'^-?\d*\.?\d*[eE]?-?\d*', val).group(0)), 86 | 'ZVAL': lambda val: float(re.search(r'^-?\d*\.?\d*[eE]?-?\d*', val).group(0)), 87 | 'RCORE': lambda val: float(re.search(r'^-?\d*\.?\d*[eE]?-?\d*', val).group(0)), 88 | 'RWIGS': lambda val: float(re.search(r'^-?\d*\.?\d*[eE]?-?\d*', val).group(0)), 89 | 'ENMAX': lambda val: float(re.search(r'^-?\d*\.?\d*[eE]?-?\d*', val).group(0)), 90 | 'ENMIN': lambda val: float(re.search(r'^-?\d*\.?\d*[eE]?-?\d*', val).group(0)), 91 | 'RCLOC': lambda val: float(re.search(r'^-?\d*\.?\d*[eE]?-?\d*', val).group(0)), 92 | 'EAUG': lambda val: 
float(re.search(r'^-?\d*\.?\d*[eE]?-?\d*', val).group(0)), 93 | 'DEXC': lambda val: float(re.search(r'^-?\d*\.?\d*[eE]?-?\d*', val).group(0)), 94 | 'RMAX': lambda val: float(re.search(r'^-?\d*\.?\d*[eE]?-?\d*', val).group(0)), 95 | 'RAUG': lambda val: float(re.search(r'^-?\d*\.?\d*[eE]?-?\d*', val).group(0)), 96 | 'RDEP': lambda val: float(re.search(r'^-?\d*\.?\d*[eE]?-?\d*', val).group(0)), 97 | 'RDEPT': lambda val: float(re.search(r'^-?\d*\.?\d*[eE]?-?\d*', val).group(0)), 98 | 'STEP': lambda val: [float(y) for y in re.split(r'\s+', val.strip()) if not y.isalpha()], 99 | } 100 | search_lines = re.search( 101 | r'(?s)(parameters from PSCTR are:' r'.*?END of PSCTR-controll parameters)', 102 | potcar_contents, 103 | ).group(1) 104 | 105 | self.metadata = {} 106 | for key, val in re.findall(r'(\S+)\s*=\s*(.*?)(?=;|$)', search_lines, flags=re.MULTILINE): 107 | if key in _parameters_to_parse: 108 | self.metadata[key] = _parameters_to_parse[key](val) 109 | 110 | try: 111 | self._symbol = self.metadata['TITEL'].split(' ')[1].strip() 112 | except IndexError: 113 | self._symbol = self.metadata['TITEL'].strip() 114 | 115 | self._element = self._symbol.split('_')[0] 116 | 117 | @property 118 | def symbol(self): 119 | """ 120 | Get the symbol associated with this POTCAR 121 | 122 | Returns 123 | ------- 124 | symbol: string 125 | The POTCAR symbol, e.g. W_pv 126 | """ 127 | return self._symbol 128 | 129 | @property 130 | def element(self): 131 | """ 132 | Get the symbol of the element associated with this POTCAR 133 | 134 | Returns 135 | ------- 136 | element: string 137 | The POTCAR element, e.g. W 138 | """ 139 | return self._element 140 | 141 | @property 142 | def functional(self): 143 | """ 144 | Get the functional associated with this POTCAR 145 | 146 | Returns 147 | ------- 148 | functional: string 149 | Functional associated with the current POTCAR file. 
150 | """ 151 | return self._functional_tags.get(self.metadata.get('LEXCH').lower(), {}).get('name') 152 | 153 | @property 154 | def functional_class(self): 155 | """ 156 | Get the functional class associated with this POTCAR 157 | 158 | Returns 159 | -------- 160 | functional_class: string 161 | Functional class associated with the current POTCAR file. 162 | """ 163 | return self._functional_tags.get(self.metadata.get('LEXCH').lower(), {}).get('class') 164 | 165 | def __getattr__(self, attribute): 166 | """ 167 | Delegates attributes to keywords. For example, you can use 168 | potcar.enmax to get the ENMAX of the POTCAR. 169 | For float type properties, they are converted to the correct float. By 170 | default, all energies in eV and all length scales are in Angstroms. 171 | """ 172 | try: 173 | return self.metadata[attribute.upper()] 174 | except Exception as exc: 175 | raise AttributeError(attribute) from exc 176 | 177 | def _write(self, file_handler, **kwargs): 178 | pass 179 | -------------------------------------------------------------------------------- /parsevasp/stream.py: -------------------------------------------------------------------------------- 1 | """ 2 | Standard stream parser for VASP. 3 | 4 | -------------------------------- 5 | Contains parsers for the standard stream that originates from VASP. It fills a set with streams (e.g. 6 | errors and warnings as defined in the streams.yml file). 7 | """ 8 | 9 | import re 10 | import sys 11 | from pathlib import Path 12 | 13 | import yaml 14 | 15 | from parsevasp import utils 16 | from parsevasp.base import BaseParser 17 | 18 | 19 | class Stream(BaseParser): 20 | """Class to handle standard stream.""" 21 | 22 | def __init__(self, file_path=None, file_handler=None, logger=None, history=False, config=None): 23 | """Initialize an object that contain a standard stream composed of e.g. the standard output and error. 
24 | 25 | Parameters 26 | ---------- 27 | file_path : string, optional 28 | The file path that contains the standard stream. 29 | file_handler : object 30 | The file handler object. 31 | logger: object 32 | A logger object. 33 | stream : string, optional 34 | A string determining if a stdout, stderr or a combined stream is supplied. Not implemented. 35 | history : bool, optional 36 | If True, keep track of all the stream elements in appearing order. 37 | config : dict, optional 38 | A dictionary containing the override configuration of the recognized errors and warnings. 39 | Setting this will override the supplied error and warning configuration, or add new entries. 40 | 41 | """ 42 | 43 | super().__init__(file_path=file_path, file_handler=file_handler, logger=logger) 44 | 45 | self._file_path = file_path 46 | self._file_handler = file_handler 47 | self._history = history 48 | self._streams = [] 49 | self._inverse_streams = [] 50 | self._config = config 51 | 52 | # Check that at least file path or file handler is supplied 53 | if self._file_path is None and self._file_handler is None: 54 | self._logger.error(self.ERROR_MESSAGES[self.ERROR_USE_ONE_ARGUMENT]) 55 | sys.exit(self.ERROR_USE_ONE_ARGUMENT) 56 | 57 | if self._file_path is None and self._file_handler is None: 58 | self._logger.error(self.ERROR_MESSAGES[self.ERROR_USE_ONE_ARGUMENT]) 59 | sys.exit(self.ERROR_USE_ONE_ARGUMENT) 60 | 61 | # Load stream configuration from the supplied config or the standard config file 62 | self._stream_config = self._load_config() 63 | 64 | # Now investigate the kinds of streams present in the config 65 | self._stream_kinds = self._set_streams() 66 | 67 | # Build list of error and warning objects and store the these as stream triggers on which 68 | # we will react if detected in the stream. 
69 | self._stream_triggers, self._inverse_stream_triggers = self._build_stream_triggers() 70 | 71 | # Parse parse parse 72 | self._parse() 73 | 74 | # Add inverse triggers not detected during parsing 75 | self._add_inverse_triggers() 76 | 77 | def __repr__(self): 78 | """Define representation to list number of streams found.""" 79 | return f'StreamScanner found {len(self._streams)} streams' 80 | 81 | def __str__(self): 82 | """Define a string representation for the class which can be used for reporting purposes.""" 83 | return f"We detected {self.number_of_entries} unique {', '.join(self._stream_kinds)}'s" 84 | 85 | def _write(self, *args, **kwargs): 86 | """Write not supported for standard streams.""" 87 | raise NotImplementedError('Writing VASP standard streams files is not supported.') 88 | 89 | @property 90 | def configured_streams(self): 91 | """Return the configured streams.""" 92 | return self._stream_triggers 93 | 94 | @property 95 | def kinds(self): 96 | """Return a list containing the different kinds.""" 97 | return self._stream_kinds 98 | 99 | @property 100 | def entries(self): 101 | """Return the found streams after parsing as a list.""" 102 | return self._streams 103 | 104 | @property 105 | def has_entries(self): 106 | """True if there are streams present after parsing.""" 107 | return bool(self._streams) 108 | 109 | @property 110 | def number_of_entries(self): 111 | """Return a dict containing the number of unique streams detected.""" 112 | return len(self._streams) 113 | 114 | def _load_config(self): 115 | """Load the configuration of the stream.""" 116 | 117 | # First load the standard entries from file 118 | stream_config = self._load_config_from_file() 119 | # Then override or add new entries with the supplied entries. 
120 | if self._config is not None: 121 | stream_config.update(self._config) 122 | 123 | return stream_config 124 | 125 | def _load_config_from_file(self): 126 | """Read the configuration of the errors and warnings from a yaml file and save it as the class method.""" 127 | 128 | stream_config = None 129 | fname = Path(__file__).parent / 'stream.yml' 130 | # Read the config file 131 | with open(fname, 'r', encoding='utf8') as file_handler: 132 | stream_config = yaml.safe_load(file_handler) 133 | 134 | return stream_config 135 | 136 | def _set_streams(self): 137 | """Check the kinds of streams present in the config files.""" 138 | stream_kinds = [] 139 | for _, value in self._stream_config.items(): 140 | kind = value['kind'] 141 | if isinstance(kind, str): 142 | if kind.upper() in VaspStream._ALLOWED_STREAMS: 143 | if kind not in stream_kinds: 144 | stream_kinds.append(kind) 145 | else: 146 | raise ValueError('One of the kind entries is not a string.') 147 | return stream_kinds 148 | 149 | def _build_stream_triggers(self): 150 | """Here we use the stream configs to initialize the triggers""" 151 | 152 | # Define container for the triggers 153 | triggers = {} 154 | inverse_triggers = {} 155 | for stream in self._stream_kinds: 156 | triggers[''.join([stream.lower(), 's'])] = [] 157 | inverse_triggers[''.join([stream.lower(), 's'])] = [] 158 | 159 | for stream in self._stream_kinds: 160 | for shortname, config in self._stream_config.items(): 161 | if config['kind'] == stream: 162 | try: 163 | inverse = config['inverse'] 164 | except KeyError: 165 | inverse = False 166 | if not inverse: 167 | triggers[''.join([stream.lower(), 's'])].append(VaspStream(shortname=shortname, **config)) 168 | else: 169 | inverse_triggers[''.join([stream.lower(), 's'])].append( 170 | VaspStream(shortname=shortname, **config) 171 | ) 172 | 173 | return triggers, inverse_triggers 174 | 175 | def _parse(self): 176 | """Perform the actual parsing.""" 177 | 178 | if self._file_path is None and 
self._file_handler is None: 179 | return 180 | 181 | # Create dictionary from a file 182 | self._from_file() 183 | 184 | def _from_file(self): 185 | """Create a dictionary of entries from a 186 | file and store them in the this instance's data dictionary. 187 | 188 | """ 189 | 190 | stream = utils.read_from_file(self._file_path, self._file_handler, encoding='utf8') 191 | self._from_list(stream) 192 | 193 | def _from_list(self, stream): 194 | """Go through the list and extract any recognized entries. 195 | 196 | Parameters 197 | ---------- 198 | stream : list 199 | A list of strings containing each line in the standard stream. 200 | 201 | """ 202 | # Make sure we access all triggers 203 | stream_triggers = {} 204 | for kind in self._stream_kinds: 205 | key = ''.join([kind.lower(), 's']) 206 | stream_triggers[key] = self._stream_triggers[key] 207 | stream_triggers[key].extend(self._inverse_stream_triggers[key]) 208 | 209 | # Ignore list to avoid multiple occurrences if not requested (default) 210 | ignore = {} 211 | for kind, _ in stream_triggers.items(): 212 | ignore[kind] = [] 213 | for _, line in enumerate(stream): 214 | # Go though all entries in the stream triggers 215 | for kind, triggers in stream_triggers.items(): 216 | # Not check all the triggers of the given kind 217 | for index, trigger in enumerate(triggers): 218 | if index not in ignore[kind]: 219 | trigger_record = trigger.check_line(line) 220 | if trigger_record is not None: 221 | if not trigger_record.inverse: 222 | # Store the streams that we trigger on and want to store. 223 | self._streams.append(trigger_record) 224 | else: 225 | # Keep track of streams detected that we trigger on, but do not want stored. 226 | self._inverse_streams.append(trigger_record.shortname) 227 | if not self._history: 228 | # Add index to avoid storing same trigger multiple times if we do not want the 229 | # full history of streams (e.g. 
multiple stream occurrences recorded) 230 | ignore[kind].append(index) 231 | 232 | def _add_inverse_triggers(self): 233 | """Adds the triggers that are marked as inverse and are not detected, meaning they should be 234 | included in the returned streams. 235 | 236 | """ 237 | 238 | for _, triggers in self._inverse_stream_triggers.items(): 239 | for trigger in triggers: 240 | if trigger.shortname not in self._inverse_streams: 241 | # Only add inverse triggers if they was not detected during parsing. 242 | self._streams.append( 243 | VaspStream( 244 | trigger.shortname, 245 | trigger.kind, 246 | trigger.regex, 247 | trigger.message, 248 | trigger.suggestion, 249 | trigger.location, 250 | trigger.recover, 251 | trigger.inverse, 252 | ) 253 | ) 254 | 255 | 256 | class VaspStream: 257 | """Class representing stream elements given by VASP that we want to trigger on.""" 258 | 259 | _ALLOWED_STREAMS = ['ERROR', 'WARNING'] 260 | _ALLOWED_LOCATIONS = ['STDOUT', 'STDERR'] 261 | 262 | def __init__( 263 | self, shortname, kind, regex, message, suggestion=None, location='STDOUT', recover=False, inverse=False 264 | ): 265 | """ 266 | Initialise a VaspStream object. 267 | 268 | Paramters 269 | --------- 270 | shortname : string 271 | A short and unique string that identifies the stream 272 | kind : string 273 | The type of stream regex. 274 | regex : string 275 | Regex used for scanning. 276 | message : string 277 | Message to the user. 278 | suggestion : string, optional 279 | String containing a suggestion on how to address the stream message. Defaults to None. 280 | location : string, optional 281 | The location of the stream (typically STDOUT or STDERR). Defaults to STDOUT. 282 | recover : bool, optional 283 | True if the stream indicates that we are able to recover using some measures. Defaults to False. 284 | inverse : bool, optional 285 | If True, the stream should only be triggered on if we do not detect it. 
286 | 287 | """ 288 | self.shortname = shortname 289 | self.kind = kind 290 | if isinstance(regex, str): 291 | self.regex = re.compile(regex) 292 | else: 293 | self.regex = regex 294 | self.message = message 295 | self.suggestion = suggestion 296 | self.location = location 297 | self.recover = recover 298 | self.inverse = inverse 299 | 300 | def __repr__(self): 301 | """Set the representation.""" 302 | return ( 303 | f'VaspStream(kind={self.kind}, re={self.regex}, message={self.message}, ' 304 | f'recover={self.recover}, inverse={self.inverse})' 305 | ) 306 | 307 | def __str__(self): 308 | """Set string representation of the stream entry that can be used in a human readable report.""" 309 | return f'({self.kind}) {self.shortname}: {self.message}' 310 | 311 | @property 312 | def shortname(self): 313 | """Return the shortname of the stream.""" 314 | return self._shortname 315 | 316 | @shortname.setter 317 | def shortname(self, shrt): 318 | """Setter for the shortname that validates that it is a string.""" 319 | 320 | if not isinstance(shrt, str): 321 | raise ValueError('The supplied shortname is not of type string.') 322 | self._shortname = shrt 323 | 324 | @property 325 | def kind(self): 326 | """Return the kind.""" 327 | return self._kind 328 | 329 | @kind.setter 330 | def kind(self, knd): 331 | """Setter for kind that validates if the entries are supported.""" 332 | if isinstance(knd, str): 333 | if knd.upper() in self._ALLOWED_STREAMS: 334 | self._kind = knd 335 | else: 336 | raise ValueError( 337 | f'The type of kind for {self._shortname} is not supported. ' 338 | f'Currently we support {self._ALLOWED_STREAMS}.' 
339 | ) 340 | else: 341 | raise ValueError(f'The kind for {self._shortname} is not of type string.') 342 | 343 | @property 344 | def regex(self): 345 | """Return the regex.""" 346 | return self._regex 347 | 348 | @regex.setter 349 | def regex(self, reg): 350 | """Setter for regex that validates and compiles if necessary.""" 351 | if isinstance(reg, str): 352 | self._regex = re.compile(reg) 353 | else: 354 | self._regex = reg 355 | 356 | @property 357 | def message(self): 358 | """Return the message.""" 359 | return self._message 360 | 361 | @message.setter 362 | def message(self, mes): 363 | """Setter for message that validates if it is a string.""" 364 | if not isinstance(mes, str): 365 | raise ValueError('The message needs to be a string.') 366 | self._message = mes 367 | 368 | @property 369 | def suggestion(self): 370 | """Return the suggestion.""" 371 | return self._suggestion 372 | 373 | @suggestion.setter 374 | def suggestion(self, sug): 375 | """Setter for the suggestion which validated if it is a string.""" 376 | if sug is not None: 377 | # Allow None 378 | if not isinstance(sug, str): 379 | raise ValueError(f'The suggestion entry {sug} is not of type string.') 380 | self._suggestion = sug 381 | 382 | @property 383 | def location(self): 384 | """Return the location of the stream.""" 385 | return self._location 386 | 387 | @location.setter 388 | def location(self, loc): 389 | """Setter for the location that validates if it is an allowed value.""" 390 | if loc not in self._ALLOWED_LOCATIONS: 391 | raise ValueError(f'The location entry {loc} is not one of the allowed values {self._ALLOWED_LOCATIONS}') 392 | self._location = loc 393 | 394 | @property 395 | def recover(self): 396 | """Return the recover status.""" 397 | return self._recover 398 | 399 | @recover.setter 400 | def recover(self, rec): 401 | """Setter for the recover that validates if it is a boolean.""" 402 | if not isinstance(rec, bool): 403 | raise ValueError(f'The recover entry {rec} is not of 
type bool.') 404 | self._recover = rec 405 | 406 | @property 407 | def recoverable(self): 408 | """True if the stream is marked as recoverable.""" 409 | return self._recover 410 | 411 | @property 412 | def inverse(self): 413 | """Return the inverse status.""" 414 | return self._inverse 415 | 416 | @inverse.setter 417 | def inverse(self, inv): 418 | """Setter for the inverse that validates if it is a boolean.""" 419 | if not isinstance(inv, bool): 420 | raise ValueError(f'The recover entry {inv} is not of type bool.') 421 | self._inverse = inv 422 | 423 | def check_line(self, line): 424 | """Check the stream in a line, return True the stream is found""" 425 | mch = self.regex.search(line) 426 | if mch: 427 | # Make a new instance for this particular error (in case we want 428 | # to save each and every error) 429 | return VaspStream( 430 | self.shortname, 431 | self.kind, 432 | self.regex, 433 | self.message, 434 | self.suggestion, 435 | self.location, 436 | self.recover, 437 | self.inverse, 438 | ) 439 | 440 | return None 441 | -------------------------------------------------------------------------------- /parsevasp/stream.yml: -------------------------------------------------------------------------------- 1 | # File containing the definition of different streams that VASP generate. 2 | # Each key is an unique short name of the error 3 | # kind: What kind of stream is it (ERROR/WARNING) 4 | # regex: The string printed by VASP (what we search for when we parse the stream line by line) 5 | # message: A human readable interpretation of the regex 6 | # suggestion: A human readable suggestion on how to act 7 | # location: Can be found in which stream (STDOUT/STDERR) 8 | # recover: If False, always break when this error appears, there is no point in recovering 9 | 10 | # This is a special one we use to detect if VASP have not been started 11 | nostart: 12 | kind: ERROR 13 | location: STDOUT 14 | message: "VASP has not been started." 
15 | recover: false 16 | regex: "vasp." 17 | inverse: True 18 | suggestion: "" 19 | # There can be multiple IBZKPT errors, some are actually warnings but all printed as "internal error...." 20 | # Here we let recover=True since if it is the fatal ones VASP do stop execution. 21 | ibzkpt: 22 | kind: ERROR 23 | location: STDOUT 24 | message: "Error with the k-points." 25 | recover: true 26 | regex: "internal error in subroutine IBZKPT" 27 | suggestion: "" 28 | brmix: 29 | kind: ERROR 30 | location: STDOUT 31 | message: "Error in BRMIX." 32 | recover: false 33 | regex: "BRMIX: very serious problems" 34 | suggestion: "" 35 | cnormn: 36 | kind: ERROR 37 | location: STDOUT 38 | message: "Error in CNORMN." 39 | recover: false 40 | regex: "WARNING: CNORMN" 41 | suggestion: "" 42 | denmp: 43 | kind: ERROR 44 | location: STDOUT 45 | message: "Error in DENMP." 46 | recover: false 47 | regex: "WARNING: DENMP: can't reach specified precision" 48 | suggestion: "" 49 | dentet: 50 | kind: ERROR 51 | location: STDOUT 52 | message: "Error with DENTET." 53 | recover: false 54 | regex: "WARNING: DENTET" 55 | suggestion: "" 56 | edddav_zhegv: 57 | kind: ERROR 58 | location: STDOUT 59 | message: "Error in ZHEGV from EDDAV." 60 | recover: false 61 | regex: "Error EDDDAV: Call to ZHEGV failed" 62 | suggestion: "" 63 | eddrmm_zhegv: 64 | kind: ERROR 65 | location: STDOUT 66 | message: "Error in EDDRMM." 67 | recover: false 68 | regex: "WARNING in EDDRMM: call to ZHEGV failed" 69 | suggestion: "" 70 | edwav: 71 | kind: ERROR 72 | location: STDOUT 73 | message: "Error in EDWAV." 74 | recover: false 75 | regex: "EDWAV: internal error" 76 | suggestion: "" 77 | fexcp: 78 | kind: ERROR 79 | location: STDOUT 80 | message: "Error in FEXCP." 81 | recover: false 82 | regex: "ERROR FEXCP: supplied Exchange" 83 | suggestion: "" 84 | fock_acc: 85 | kind: ERROR 86 | location: STDERR 87 | message: "Error in FOCK_ACC." 
88 | recover: false 89 | regex: "internal error in FOCK_ACC" 90 | suggestion: "" 91 | invgrp: 92 | kind: ERROR 93 | location: STDOUT 94 | message: "Error in INVGRP." 95 | recover: false 96 | regex: "internal error in subroutine INVGRP" 97 | suggestion: "" 98 | kpoints_trans: 99 | kind: ERROR 100 | location: STDERR 101 | message: "Error in GENERATE_KPOINTS_TRANS." 102 | recover: false 103 | regex: "internal error in GENERATE_KPOINTS_TRANS" 104 | suggestion: "" 105 | non_collinear: 106 | kind: ERROR 107 | location: STDOUT 108 | message: "Using a collinear spin executable for a non-colinear calculation." 109 | recover: false 110 | regex: "ERROR: non collinear calculations require" 111 | suggestion: Please make sure to use the VASP executable that has been compiled with the non-colinear functionality (ncl flavor, consult build instructions) 112 | not_hermitian: 113 | kind: ERROR 114 | location: STDOUT 115 | message: "Sub-space matrix not Hermitian in DAV." 116 | recover: false 117 | regex: "not Hermitian in DAV" 118 | suggestion: "" 119 | psmaxn: 120 | kind: ERROR 121 | location: STDOUT 122 | message: "Error in PSMAXN." 123 | recover: false 124 | regex: "PSMAXN for non-local potential too small" 125 | suggestion: "" 126 | pssyevx: 127 | kind: ERROR 128 | location: STDOUT 129 | message: "Error in PSSYEVX." 130 | recover: false 131 | regex: "Error in subspace rotation PSSYEVX" 132 | suggestion: "" 133 | pzstein: 134 | kind: ERROR 135 | location: STDOUT 136 | message: "Error in PZSETIN" 137 | recover: false 138 | regex: "PZSTEIN parameter number had an illegal value" 139 | suggestion: "" 140 | real_optlay: 141 | kind: ERROR 142 | location: STDOUT 143 | message: "Error in REAL_OPTLAY." 
144 | recover: false 145 | regex: "REAL_OPTLAY: internal error" 146 | suggestion: "" 147 | rhosyg: 148 | kind: ERROR 149 | location: STDOUT 150 | message: "Error in RHOSYG" 151 | recover: false 152 | regex: "RHOSYG: internal error" 153 | suggestion: "" 154 | rspher: 155 | kind: ERROR 156 | location: STDOUT 157 | message: Error in RSPHER 158 | recover: false 159 | regex: "Internal ERROR RSPHER." 160 | suggestion: "" 161 | set_indpw_full: 162 | kind: ERROR 163 | location: STDOUT 164 | message: "Error in INDPW: insufficient memory." 165 | recover: false 166 | regex: "internal error in SET_INDPW_FULL: insufficient" 167 | suggestion: "" 168 | sgrcon: 169 | kind: ERROR 170 | location: STDOUT 171 | message: "Error in SGRCON" 172 | recover: false 173 | regex: "internal error in subroutine SGRCON" 174 | suggestion: "" 175 | zbrent: 176 | kind: ERROR 177 | location: STDOUT 178 | message: "Error in ZBRENT" 179 | recover: false 180 | regex: "ZBRENT: fatal error in bracketing" 181 | suggestion: "" 182 | no_potim: 183 | kind: ERROR 184 | location: STDOUT 185 | message: "NSW specified, but no POTIM or IBRION set." 186 | recover: false 187 | regex: "Fatal error! IBRION=0, but no entry for POTIM on file INCAR. MUST be specified!!" 188 | suggestion: "" 189 | magmom: 190 | kind: ERROR 191 | location: STDOUT 192 | message: "MAGMON is specified, but ISPIN is set to one." 193 | recover: false 194 | regex: "Error reading item 'MAGMOM' from file INCAR" 195 | suggestion: "" 196 | bandocc: 197 | kind: ERROR 198 | location: STDOUT 199 | message: "The topmost band is occupied." 
200 | recover: false 201 | regex: "TOO FEW BANDS" 202 | suggestion: "" 203 | -------------------------------------------------------------------------------- /parsevasp/utils.py: -------------------------------------------------------------------------------- 1 | """Utiles.""" 2 | 3 | import logging 4 | import math 5 | import os 6 | import re 7 | import sys 8 | 9 | import numpy as np 10 | 11 | from parsevasp.base import open_close_file_handler 12 | 13 | 14 | def read_from_file(file_name, input_file_handler, contains=None, lines=True, encoding='utf8', logger=None): 15 | """ 16 | Read a file and return the whole file or specific lines. 17 | 18 | Parameters 19 | ---------- 20 | file_name : str 21 | The location and file name to be read. 22 | file_handler : object 23 | A valid file handler. If both file name and file_handler is set, 24 | the file handler takes presence. 25 | contains : list of str 26 | A list of string of identifiers for the lines that is to be 27 | returned. If None, the whole file is returned. 28 | lines : bool, optional 29 | If set to False, this method will just return the read() from supplied path or handler. 30 | Defaults to True. 31 | encoding : str, optional 32 | Specify the encoding. Defaults to utf8. 33 | logger : object, optional 34 | A logger object to use. 35 | 36 | Returns 37 | ------- 38 | parsed : list of str or a str 39 | If `lines` is True, the list of strings containing the whole or specific 40 | lines from a file is returned. If `lines` is False, a string of all the content 41 | is returned. 42 | 43 | """ 44 | 45 | if logger is None: 46 | logger = logging.getLogger(sys._getframe().f_code.co_name) 47 | 48 | if input_file_handler is not None: 49 | inputfile = input_file_handler 50 | if not lines: 51 | # Only want a string of file content, return. 
52 | return inputfile.read() 53 | file_data = inputfile.readlines() 54 | else: 55 | inputfile = open_close_file_handler(file_name=file_name, status='r', encoding=encoding, logger=logger) 56 | if not lines: 57 | # Only want a string of file content, return. 58 | file_data = inputfile.read() 59 | else: 60 | file_data = inputfile.readlines() 61 | open_close_file_handler(logger, file_handler=inputfile, logger=logger) 62 | if not lines: 63 | return file_data 64 | 65 | parsed = [] 66 | 67 | # first check if contains is a list 68 | is_list = is_sequence(contains) 69 | 70 | if contains is not None: 71 | # this can be a bit faster (comprehension), but do not care for this 72 | # now 73 | for _, line in enumerate(file_data): 74 | if is_list: 75 | for element in contains: 76 | if element in line: 77 | parsed.append(line) 78 | elif contains in line: 79 | parsed = line 80 | else: 81 | parsed = file_data 82 | 83 | return parsed 84 | 85 | 86 | def file_exists(file_path, logger=None): 87 | """ 88 | Check if the file exists. 89 | 90 | Parameters 91 | ---------- 92 | file_path : string 93 | The file path to be checked. 94 | logger : object, optional 95 | A logger object to use. 96 | 97 | Returns 98 | ------- 99 | status : bool 100 | If file does not exists or `file_path` empty, else False. 101 | """ 102 | from parsevasp.base import BaseParser 103 | 104 | if logger is None: 105 | logger = logging.getLogger(sys._getframe().f_code.co_name) 106 | 107 | if not file_path: 108 | logger.error(BaseParser.ERROR_MESSAGES[BaseParser.ERROR_EMPTY_FILE_PATH]) 109 | sys.exit(BaseParser.ERROR_EMPTY_FILE_PATH) 110 | 111 | status = True 112 | try: 113 | os.stat(file_path) 114 | except OSError: 115 | logger.error( 116 | f'{BaseParser.ERROR_MESSAGES[BaseParser.ERROR_FILE_NOT_FOUND]} The file in question is: {file_path}' 117 | ) 118 | status = False 119 | 120 | return status 121 | 122 | 123 | def is_sequence(arg): 124 | """ 125 | Checks to see if something is a sequence (list). 
126 | 127 | Parameters 128 | ---------- 129 | arg : str 130 | The string to be examined. 131 | 132 | Returns 133 | ------- 134 | sequence : bool 135 | Is True if `arg` is a list. 136 | 137 | """ 138 | 139 | if not hasattr(arg, 'strip') and hasattr(arg, '__getitem__'): 140 | return True 141 | if hasattr(arg, '__iter__'): 142 | return True 143 | return False 144 | 145 | 146 | def test_string_content(string): 147 | """ 148 | Detects if string is integer, float or string. 149 | 150 | Parameters 151 | ---------- 152 | string : string 153 | An input string to be tested. 154 | 155 | Returns 156 | ------- 157 | string 158 | A string with value 'int' if input is an integer, 159 | 'float' if the input is a float and 'string' if it 160 | is just a regular string. 161 | 162 | """ 163 | try: 164 | float(string) 165 | return 'int' if ((string.count('.') == 0) and ('e' not in string) and ('E' not in string)) else 'float' 166 | except ValueError: 167 | return 'string' 168 | 169 | 170 | def is_numbers(string, splitter=' '): 171 | """ 172 | Check if a string only contains numbers. 173 | 174 | Parameters 175 | ---------- 176 | s: str 177 | The input string 178 | splitter : string, optional 179 | The splitting character to be used, defaults to blank spaces. 180 | 181 | Returns 182 | ------- 183 | is_nums: bool 184 | Is True if all entries in the input string is a numbers, 185 | otherwise False. 186 | 187 | """ 188 | 189 | entries = string.split(splitter) 190 | is_nums = True 191 | for entry in entries: 192 | if not is_number(entry): 193 | is_nums = False 194 | return is_nums 195 | 196 | return is_nums 197 | 198 | 199 | def is_number(string): 200 | """ 201 | Check if a string is a number. 
def remove_newline(fobj, num_newlines=1):
    """
    Remove the newline(s) at the end of a file.

    Useful to run after a loop that writes a newline character
    at each step. Other solutions cannot handle very large files.

    Parameters
    ----------
    fobj : object
        A file object. It must support ``tell`` and ``truncate`` and its
        position must be at the end of the file.
    num_newlines : int, optional
        The number of newlines to remove. Defaults to 1.

    """

    # The newline representation differs between operating systems
    # (e.g. '\n' versus '\r\n'), hence the use of os.linesep.
    # NOTE(review): for num_newlines > 1 this removes
    # len(os.linesep) + num_newlines - 1 characters, which only equals
    # num_newlines * len(os.linesep) on single-character newline
    # platforms - confirm intent for text-mode files on Windows.
    remove_chars = len(os.linesep) + num_newlines - 1
    fobj.truncate(fobj.tell() - remove_chars)


def dir_to_cart(vector, lattice):
    """
    Convert direct coordinates to cartesian.

    Parameters
    ----------
    vector : ndarray
        | Dimension: (3)

        The direct vector to be converted.
    lattice : ndarray
        | Dimension: (3,3)

        The crystal lattice, where the first lattice vector is
        [0,:], the second, [1,:] etc.

    Returns
    -------
    cart : ndarray
        | Dimension: (3)

        The cartesian vector.

    """

    cart = np.dot(vector, lattice)

    return cart


def cart_to_dir(vector, lattice):
    """
    Convert cartesian coordinates to direct.

    Parameters
    ----------
    vector : ndarray
        | Dimension: (3)

        The cartesian vector.
    lattice : ndarray
        | Dimension: (3,3)

        The crystal lattice, where the first lattice vector is
        (0,:), the second, (1,:) etc.

    Returns
    -------
    direct : ndarray
        | Dimension: (3)

        The direct vector.

    """

    direct = np.dot(vector, np.linalg.inv(lattice))

    return direct


def lat_to_reclat(lattice):
    r"""
    Convert the lattice to the reciprocal lattice.

    Parameters
    ----------
    lattice : ndarray
        | Dimension: (3,3)

        The crystal lattice, where the first lattice vector is
        (0,:), the second, (1,:) etc.

    Returns
    -------
    lattice_rec : ndarray
        | Dimension: (3,3)

        Reciprocal lattice including the :math:`2\pi` factor,
        see `lattice` for layout.

    Notes
    -----
    In general, `lattice_rec` = :math:`2\pi` * (lattice.T)^-1

    """

    lattice_trans = np.transpose(lattice)
    lattice_rec = 2 * math.pi * np.linalg.inv(lattice_trans)

    return lattice_rec


def match_integer_param(inputs, key, string):
    """
    Search a string for a given parameter and set its value, assuming integer.

    If `string` contains an assignment of the form ``key = <integer>``
    (preceded by at least one space), the entry ``key.lower()`` in
    `inputs` is updated in place with that integer. Otherwise `inputs`
    is left untouched.

    Parameters
    ----------
    inputs : dict
        A dictionary containing the parameters we want to set in
        lowercase and default values. Modified in place on a match.
    key : string
        A string containing the matching parameter we are looking for.
    string : string
        A string to be searched for key.

    """

    # Escape the key so that parameter names containing regex
    # metacharacters cannot corrupt the pattern.
    match = re.match(r'^ +' + re.escape(key) + r' *= *([-0-9]+)', string)
    if match:
        inputs[key.lower()] = int(match.group(1))


def line_to_type(fobject_or_string, d_type=str, no_split=False):
    """
    Grab a line from a file like object or string and convert it to d_type (default: str).

    Parameters
    ----------
    fobject_or_string : object
        A file like object or a string containing something that is to be
        converted to a specified type.
    d_type : object
        The type one wants to convert to. The standard Python types are
        supported. Defaults to ``str``.
    no_split : bool
        If True do not split a string. Useful for comments etc. We still strip.

    Returns
    -------
    result : object or list of object
        The stripped string if `no_split` is True, otherwise a list of
        items converted to `d_type`. A single-item result is unwrapped
        and returned as a scalar.

    """
    if isinstance(fobject_or_string, str):
        line = fobject_or_string
    else:
        line = fobject_or_string.readline()
    # Previously this was map instead of list comprehension
    if not no_split:
        result = [d_type(item) for item in line.split()]
    else:
        result = line.strip()
    if len(result) == 1:
        return result[0]
    return result


# Matches a blank (possibly whitespace-only) line between two newlines.
empty_line = re.compile(r'[\r\n]\s*[\r\n]')
:: Scientific/Engineering :: Physics", 26 | "Topic :: Scientific/Engineering :: Chemistry" 27 | ] 28 | keywords = ["vasp", "parser"] 29 | requires-python = ">=3.9" 30 | dependencies = [ 31 | "numpy", 32 | "lxml", 33 | "pyyaml" 34 | ] 35 | 36 | 37 | [project.urls] 38 | Source = "https://github.com/aiida-vasp/parsevasp" 39 | 40 | [project.optional-dependencies] 41 | tests = [ 42 | "pytest~=7.0", 43 | "pytest-cov~=2.7,<2.11", 44 | "coverage~=6.0" 45 | ] 46 | pre-commit = [ 47 | "pre-commit~=2.2", 48 | ] 49 | 50 | [tool.flit.module] 51 | name = "parsevasp" 52 | 53 | [tool.coverage.run] 54 | # Configuration of [coverage.py](https://coverage.readthedocs.io) 55 | # reporting which lines of your plugin are covered by tests 56 | source=["parsevasp"] 57 | 58 | 59 | [tool.ruff] 60 | exclude = [ 61 | "cookiecutters", 62 | "tutorials", 63 | ".bzr", 64 | ".direnv", 65 | ".eggs", 66 | ".git", 67 | ".git-rewrite", 68 | ".hg", 69 | ".ipynb_checkpoints", 70 | ".mypy_cache", 71 | ".nox", 72 | ".pants.d", 73 | ".pyenv", 74 | ".pytest_cache", 75 | ".pytype", 76 | ".ruff_cache", 77 | ".svn", 78 | ".tox", 79 | ".venv", 80 | ".vscode", 81 | "__pypackages__", 82 | "_build", 83 | "buck-out", 84 | "build", 85 | "dist", 86 | "node_modules", 87 | "site-packages", 88 | "venv", 89 | ] 90 | 91 | line-length = 120 92 | 93 | 94 | [tool.ruff.format] 95 | quote-style = 'single' 96 | 97 | [tool.ruff.lint] 98 | ignore = [ 99 | 'F403', # Star imports unable to detect undefined names 100 | 'F405', # Import may be undefined or defined from star imports 101 | 'PLR0911', # Too many return statements 102 | 'PLR0912', # Too many branches 103 | 'PLR0913', # Too many arguments in function definition 104 | 'PLR0915', # Too many statements 105 | 'PLR2004', # Magic value used in comparison 106 | 'RUF005', # Consider iterable unpacking instead of concatenation 107 | 'RUF012', # Mutable class attributes should be annotated with `typing.ClassVar` 108 | 'N802', # invalid-function-name 109 | ] 110 | select = [ 111 | 
'E', # pydocstyle 112 | 'W', # pydocstyle 113 | 'F', # pyflakes 114 | 'I', # isort 115 | 'N', # pep8-naming 116 | 'PLC', # pylint-convention 117 | 'PLE', # pylint-error 118 | 'PLR', # pylint-refactor 119 | 'PLW', # pylint-warning 120 | 'RUF' # ruff 121 | ] 122 | 123 | 124 | [tool.ruff.lint.per-file-ignores] 125 | "**/tests/*" = ["F403", "N803"] 126 | -------------------------------------------------------------------------------- /tests/CHGCAR: -------------------------------------------------------------------------------- 1 | Dummy CHGCAR no spin 2 | 3.5000000000000000 3 | 0.500000 0.500000 0.000000 4 | 0.000000 0.500000 0.500000 5 | 0.500000 0.000000 0.500000 6 | Ni 7 | 1 8 | Direct 9 | 0.000000 0.000000 0.000000 10 | 11 | 3 4 5 12 | 1 2 3 4 5 13 | 6 7 8 9 10 14 | 11 12 13 14 15 15 | 16 17 18 19 20 16 | 21 22 23 24 25 17 | 26 27 28 29 30 18 | 31 32 33 34 35 19 | 36 37 38 39 40 20 | 41 42 43 44 45 21 | 46 47 48 49 50 22 | 51 52 53 54 55 23 | 56 57 58 59 60 24 | augmentation occupancies 25 | 1 1 1 1 26 | -------------------------------------------------------------------------------- /tests/CHGCAR.ncl: -------------------------------------------------------------------------------- 1 | Dummy CHGCAR non-collinear spin 2 | 3.5000000000000000 3 | 0.500000 0.500000 0.000000 4 | 0.000000 0.500000 0.500000 5 | 0.500000 0.000000 0.500000 6 | Ni 7 | 1 8 | Direct 9 | 0.000000 0.000000 0.000000 10 | 11 | 3 4 5 12 | 1 2 3 4 5 13 | 6 7 8 9 10 14 | 11 12 13 14 15 15 | 16 17 18 19 20 16 | 21 22 23 24 25 17 | 26 27 28 29 30 18 | 31 32 33 34 35 19 | 36 37 38 39 40 20 | 41 42 43 44 45 21 | 46 47 48 49 50 22 | 51 52 53 54 55 23 | 56 57 58 59 60 24 | augmentation occupancies 25 | 1 1 1 1 26 | 3 4 5 27 | 1 2 3 4 5 28 | 6 7 8 9 10 29 | 11 12 13 14 15 30 | 16 17 18 19 20 31 | 21 22 23 24 25 32 | 26 27 28 29 30 33 | 31 32 33 34 35 34 | 36 37 38 39 40 35 | 41 42 43 44 45 36 | 46 47 48 49 50 37 | 51 52 53 54 55 38 | 56 57 58 59 60 39 | augmentation occupancies 40 | 1 1 1 1 41 | 3 4 5 42 | 
1 2 3 4 5 43 | 6 7 8 9 10 44 | 11 12 13 14 15 45 | 16 17 18 19 20 46 | 21 22 23 24 25 47 | 26 27 28 29 30 48 | 31 32 33 34 35 49 | 36 37 38 39 40 50 | 41 42 43 44 45 51 | 46 47 48 49 50 52 | 51 52 53 54 55 53 | 56 57 58 59 60 54 | augmentation occupancies 55 | 1 1 1 1 56 | 3 4 5 57 | 1 2 3 4 5 58 | 6 7 8 9 10 59 | 11 12 13 14 15 60 | 16 17 18 19 20 61 | 21 22 23 24 25 62 | 26 27 28 29 30 63 | 31 32 33 34 35 64 | 36 37 38 39 40 65 | 41 42 43 44 45 66 | 46 47 48 49 50 67 | 51 52 53 54 55 68 | 56 57 58 59 60 69 | augmentation occupancies 70 | 1 1 1 1 71 | -------------------------------------------------------------------------------- /tests/CHGCAR.spin: -------------------------------------------------------------------------------- 1 | Dummy CHGCAR collinear spin 2 | 3.5000000000000000 3 | 0.500000 0.500000 0.000000 4 | 0.000000 0.500000 0.500000 5 | 0.500000 0.000000 0.500000 6 | Ni 7 | 1 8 | Direct 9 | 0.000000 0.000000 0.000000 10 | 11 | 3 4 5 12 | 1 2 3 4 5 13 | 6 7 8 9 10 14 | 11 12 13 14 15 15 | 16 17 18 19 20 16 | 21 22 23 24 25 17 | 26 27 28 29 30 18 | 31 32 33 34 35 19 | 36 37 38 39 40 20 | 41 42 43 44 45 21 | 46 47 48 49 50 22 | 51 52 53 54 55 23 | 56 57 58 59 60 24 | augmentation occupancies 25 | 1 1 1 1 26 | 3 4 5 27 | 1 2 3 4 5 28 | 6 7 8 9 10 29 | 11 12 13 14 15 30 | 16 17 18 19 20 31 | 21 22 23 24 25 32 | 26 27 28 29 30 33 | 31 32 33 34 35 34 | 36 37 38 39 40 35 | 41 42 43 44 45 36 | 46 47 48 49 50 37 | 51 52 53 54 55 38 | 56 57 58 59 60 39 | augmentation occupancies 40 | 1 1 11 41 | -------------------------------------------------------------------------------- /tests/DOSCAR: -------------------------------------------------------------------------------- 1 | 4 4 1 0 2 | 0.1648482E+02 0.4040000E-09 0.4040000E-09 0.4040000E-09 0.1000000E-15 3 | 1.000000000000000E-004 4 | CAR 5 | unknown system 6 | 13.67039808 -3.43982524 10 7.29482275 1.00000000 7 | -3.440 -0.1104E-42 -0.2099E-42 8 | -1.539 0.1400E+00 0.2661E+00 9 | 0.362 -0.3624E-72 0.2000E+01 10 | 
2.264 -0.1338E-04 0.2000E+01 11 | 4.165 0.3156E+01 0.8000E+01 12 | 6.066 -0.2412E-14 0.8000E+01 13 | 7.967 0.3156E+01 0.1400E+02 14 | 9.868 -0.1381E-26 0.1400E+02 15 | 11.769 0.2901E+01 0.1952E+02 16 | 13.670 0.0000E+00 0.2000E+02 17 | 13.67039808 -3.43982524 10 7.29482275 1.00000000 18 | -3.440 -0.6543E-44 -0.3040E-45 -0.3040E-45 -0.3040E-45 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 19 | -1.539 0.8295E-02 0.3853E-03 0.3853E-03 0.3853E-03 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 20 | 0.362 -0.1336E-73 -0.3400E-74 -0.3400E-74 -0.3400E-74 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 21 | 2.264 -0.4932E-06 -0.1255E-06 -0.1255E-06 -0.1255E-06 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 22 | 4.165 0.1163E+00 0.2961E-01 0.2961E-01 0.2961E-01 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 23 | 6.066 -0.5870E-16 -0.2973E-16 -0.2973E-16 -0.2973E-16 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 24 | 7.967 0.7681E-01 0.3890E-01 0.3890E-01 0.3890E-01 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 25 | 9.868 -0.1988E-28 -0.1936E-28 -0.1923E-28 -0.1944E-28 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 26 | 11.769 0.5486E-01 0.3117E-01 0.3938E-01 0.3827E-01 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 27 | 13.670 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 28 | 13.67039808 -3.43982524 10 7.29482275 1.00000000 29 | -3.440 -0.6543E-44 -0.3040E-45 -0.3040E-45 -0.3040E-45 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 30 | -1.539 0.8295E-02 0.3853E-03 0.3853E-03 0.3853E-03 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 31 | 0.362 -0.1336E-73 -0.3400E-74 -0.3400E-74 -0.3400E-74 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 32 | 2.264 -0.4932E-06 -0.1255E-06 -0.1255E-06 -0.1255E-06 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 33 | 4.165 0.1163E+00 0.2961E-01 0.2961E-01 0.2961E-01 0.0000E+00 0.0000E+00 
0.0000E+00 0.0000E+00 0.0000E+00 34 | 6.066 -0.5870E-16 -0.2973E-16 -0.2973E-16 -0.2973E-16 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 35 | 7.967 0.7681E-01 0.3890E-01 0.3890E-01 0.3890E-01 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 36 | 9.868 -0.2042E-28 -0.1886E-28 -0.1898E-28 -0.1932E-28 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 37 | 11.769 0.4011E-01 0.2855E-01 0.3605E-01 0.3375E-01 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 38 | 13.670 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 39 | 13.67039808 -3.43982524 10 7.29482275 1.00000000 40 | -3.440 -0.6543E-44 -0.3040E-45 -0.3040E-45 -0.3040E-45 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 41 | -1.539 0.8295E-02 0.3853E-03 0.3853E-03 0.3853E-03 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 42 | 0.362 -0.1336E-73 -0.3400E-74 -0.3400E-74 -0.3400E-74 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 43 | 2.264 -0.4932E-06 -0.1255E-06 -0.1255E-06 -0.1255E-06 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 44 | 4.165 0.1163E+00 0.2961E-01 0.2961E-01 0.2961E-01 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 45 | 6.066 -0.5870E-16 -0.2973E-16 -0.2973E-16 -0.2973E-16 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 46 | 7.967 0.7681E-01 0.3890E-01 0.3890E-01 0.3890E-01 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 47 | 9.868 -0.2061E-28 -0.1918E-28 -0.1894E-28 -0.1872E-28 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 48 | 11.769 0.7493E-01 0.3673E-01 0.4614E-01 0.3762E-01 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 49 | 13.670 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 50 | 13.67039808 -3.43982524 10 7.29482275 1.00000000 51 | -3.440 -0.6543E-44 -0.3040E-45 -0.3040E-45 -0.3040E-45 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 52 | -1.539 0.8295E-02 0.3853E-03 0.3853E-03 0.3853E-03 
0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 53 | 0.362 -0.1336E-73 -0.3400E-74 -0.3400E-74 -0.3400E-74 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 54 | 2.264 -0.4932E-06 -0.1255E-06 -0.1255E-06 -0.1255E-06 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 55 | 4.165 0.1163E+00 0.2961E-01 0.2961E-01 0.2961E-01 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 56 | 6.066 -0.5870E-16 -0.2973E-16 -0.2973E-16 -0.2973E-16 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 57 | 7.967 0.7681E-01 0.3890E-01 0.3890E-01 0.3890E-01 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 58 | 9.868 -0.2089E-28 -0.1874E-28 -0.1898E-28 -0.1865E-28 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 59 | 11.769 0.7120E-01 0.3744E-01 0.4373E-01 0.3642E-01 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 60 | 13.670 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 0.0000E+00 61 | -------------------------------------------------------------------------------- /tests/DOSCAR.nopdos: -------------------------------------------------------------------------------- 1 | 4 4 1 0 2 | 0.1648482E+02 0.4040000E-09 0.4040000E-09 0.4040000E-09 0.1000000E-15 3 | 1.000000000000000E-004 4 | CAR 5 | unknown system 6 | 13.67039808 -3.43982524 10 7.29482275 1.00000000 7 | -3.440 -0.1104E-42 -0.2099E-42 8 | -1.539 0.1400E+00 0.2661E+00 9 | 0.362 -0.3624E-72 0.2000E+01 10 | 2.264 -0.1338E-04 0.2000E+01 11 | 4.165 0.3156E+01 0.8000E+01 12 | 6.066 -0.2412E-14 0.8000E+01 13 | 7.967 0.3156E+01 0.1400E+02 14 | 9.868 -0.1381E-26 0.1400E+02 15 | 11.769 0.2901E+01 0.1952E+02 16 | 13.670 0.0000E+00 0.2000E+02 17 | -------------------------------------------------------------------------------- /tests/EIGENVAL: -------------------------------------------------------------------------------- 1 | 4 4 1 1 2 | 0.1648482E+02 0.4040000E-09 0.4040000E-09 0.4040000E-09 0.1000000E-15 3 | 1.000000000000000E-004 4 | CAR 5 | unknown system 6 | 
12 1 10 7 | 8 | 0.2500000E+00 0.2500000E+00 0.2500000E+00 0.1000000E+01 9 | 1 -1.439825 1.000000 10 | 2 2.964373 1.000000 11 | 3 2.964373 1.000000 12 | 4 2.964373 1.000000 13 | 5 7.254542 0.666667 14 | 6 7.254542 0.666667 15 | 7 7.254542 0.666667 16 | 8 11.451811 -0.000000 17 | 9 11.670398 -0.000000 18 | 10 11.670398 -0.000000 19 | -------------------------------------------------------------------------------- /tests/INCAR: -------------------------------------------------------------------------------- 1 | #LVEL = .TRUE. 2 | ALGO = V #TEST 3 | PREC = A 4 | #KINTER = 1 5 | ISMEAR = -5 6 | NEDOS = 100000 7 | EMIN = 5.5 8 | EMAX = 7.5 9 | ENCUT = 350 10 | LOPTICS = .TRUE. 11 | #KPAR = 10 12 | DIPOL = 1 2 2 13 | -------------------------------------------------------------------------------- /tests/KPOINTS: -------------------------------------------------------------------------------- 1 | Example file 2 | 0 3 | G 4 | 4 4 4 5 | -------------------------------------------------------------------------------- /tests/KPOINTSEXP: -------------------------------------------------------------------------------- 1 | Example file 2 | 4 3 | D 4 | 0.0 0.0 0.0 1. 5 | 0.0 0.0 0.5 1. 6 | 0.0 0.5 0.5 2. 7 | 0.5 0.5 0.5 4. 8 | Tetrahedra 9 | 1 0.183333333333333 10 | 6 1 2 3 4 11 | -------------------------------------------------------------------------------- /tests/KPOINTSGRG: -------------------------------------------------------------------------------- 1 | Example file 2 | 0 3 | Reciprocal 4 | 0.25 0.00 0.00 5 | 0.00 0.25 0.00 6 | 0.00 0.00 0.25 7 | 0.50 0.50 0.50 8 | -------------------------------------------------------------------------------- /tests/KPOINTSLINE: -------------------------------------------------------------------------------- 1 | k-points along high symmetry lines 2 | 40 ! 40 intersections 3 | Line-mode 4 | rec 5 | 0 0 0 ! gamma 6 | 0.5 0.5 0 ! X 7 | 8 | 0.5 0.5 0 ! X 9 | 0.5 0.75 0.25 ! W 10 | 11 | 0.5 0.75 0.25 ! W 12 | 0 0 0 ! 
gamma 13 | -------------------------------------------------------------------------------- /tests/OUTCAR.crashed: -------------------------------------------------------------------------------- 1 | vasp.5.4.4.18Apr17-6-g9f103f2a35 (build Jun 11 2017 21:30:58) complex 2 | 3 | executed on LinuxIFC date 2021.12.05 12:01:41 4 | running on 16 total cores 5 | distrk: each k-point on 16 cores, 1 groups 6 | distr: one band on NCORES_PER_BAND= 1 cores, 16 groups 7 | 8 | 9 | -------------------------------------------------------------------------------------------------------- 10 | 11 | 12 | INCAR: 13 | POTCAR: PAW_PBE Si 05Jan2001 14 | 15 | ----------------------------------------------------------------------------- 16 | | | 17 | | W W AA RRRRR N N II N N GGGG !!! | 18 | | W W A A R R NN N II NN N G G !!! | 19 | | W W A A R R N N N II N N N G !!! | 20 | | W WW W AAAAAA RRRRR N N N II N N N G GGG ! | 21 | | WW WW A A R R N NN II N NN G G | 22 | | W W A A R R N N II N N GGGG !!! | 23 | | | 24 | | For optimal performance we recommend to set | 25 | | NCORE= 4 - approx SQRT( number of cores) | 26 | | NCORE specifies how many cores store one orbital (NPAR=cpu/NCORE). | 27 | | This setting can greatly improve the performance of VASP for DFT. | 28 | | The default, NCORE=1 might be grossly inefficient | 29 | | on modern multi-core architectures or massively parallel machines. | 30 | | Do your own testing !!!! | 31 | | Unfortunately you need to use the default for GW and RPA calculations. | 32 | | (for HF NCORE is supported but not extensively tested yet) | 33 | | | 34 | ----------------------------------------------------------------------------- 35 | 36 | POTCAR: PAW_PBE Si 05Jan2001 37 | VRHFIN =Si: s2p2 38 | LEXCH = PE 39 | EATOM = 103.0669 eV, 7.5752 Ry 40 | 41 | TITEL = PAW_PBE Si 05Jan2001 42 | LULTRA = F use ultrasoft PP ? 
43 | IUNSCR = 1 unscreen: 0-lin 1-nonlin 2-no 44 | RPACOR = 1.500 partial core radius 45 | POMASS = 28.085; ZVAL = 4.000 mass and valenz 46 | RCORE = 1.900 outmost cutoff radius 47 | RWIGS = 2.480; RWIGS = 1.312 wigner-seitz radius (au A) 48 | ENMAX = 245.345; ENMIN = 184.009 eV 49 | ICORE = 2 local potential 50 | LCOR = T correct aug charges 51 | LPAW = T paw PP 52 | EAUG = 322.069 53 | DEXC = 0.000 54 | RMAX = 1.950 core radius for proj-oper 55 | RAUG = 1.300 factor for augmentation sphere 56 | RDEP = 1.993 radius for radial grids 57 | RDEPT = 1.837 core radius for aug-charge 58 | 59 | Atomic configuration 60 | 6 entries 61 | n l j E occ. 62 | 1 0 0.50 -1785.8828 2.0000 63 | 2 0 0.50 -139.4969 2.0000 64 | 2 1 1.50 -95.5546 6.0000 65 | 3 0 0.50 -10.8127 2.0000 66 | 3 1 0.50 -4.0811 2.0000 67 | 3 2 1.50 -4.0817 0.0000 68 | Description 69 | l E TYP RCUT TYP RCUT 70 | 0 -10.8127223 23 1.900 71 | 0 -7.6451159 23 1.900 72 | 1 -4.0811372 23 1.900 73 | 1 2.4879257 23 1.900 74 | 2 -4.0817478 7 1.900 75 | local pseudopotential read in 76 | partial core-charges read in 77 | partial kinetic energy density read in 78 | atomic valenz-charges read in 79 | non local Contribution for L= 0 read in 80 | real space projection operators read in 81 | non local Contribution for L= 0 read in 82 | real space projection operators read in 83 | non local Contribution for L= 1 read in 84 | real space projection operators read in 85 | non local Contribution for L= 1 read in 86 | real space projection operators read in 87 | PAW grid and wavefunctions read in 88 | 89 | number of l-projection operators is LMAX = 4 90 | number of lm-projection operators is LMMAX = 8 91 | 92 | PAW_PBE Si 05Jan2001 : 93 | energy of atom 1 EATOM= -103.0669 94 | kinetic energy error for atom= 0.0107 (will be added to EATOM!!) 95 | 96 | 97 | POSCAR: # Compound: Si2. 
Old comment: Si2 98 | positions in direct lattice 99 | No initial velocities read in 100 | exchange correlation table for LEXCH = 8 101 | RHO(1)= 0.500 N(1) = 2000 102 | RHO(2)= 100.500 N(2) = 4000 103 | 104 | 105 | 106 | -------------------------------------------------------------------------------------------------------- 107 | 108 | 109 | ion position nearest neighbor table 110 | 1 0.875 0.875 0.875- 2 2.35 2 2.35 2 2.35 2 2.35 111 | 2 0.125 0.125 0.125- 1 2.35 1 2.35 1 2.35 1 2.35 112 | 113 | LATTYP: Found a face centered cubic cell. 114 | ALAT = 5.4310000000 115 | 116 | Lattice vectors: 117 | 118 | A1 = ( 2.7155000000, 0.0000000000, 2.7155000000) 119 | A2 = ( 2.7155000000, 2.7155000000, 0.0000000000) 120 | A3 = ( 0.0000000000, 2.7155000000, 2.7155000000) 121 | 122 | 123 | Analysis of symmetry for initial positions (statically): 124 | ===================================================================== 125 | Subroutine PRICEL returns: 126 | Original cell was already a primitive cell. 127 | 128 | 129 | Routine SETGRP: Setting up the symmetry group for a 130 | face centered cubic supercell. 131 | 132 | 133 | Subroutine GETGRP returns: Found 48 space group operations 134 | (whereof 12 operations were pure point group operations) 135 | out of a pool of 48 trial point group operations. 136 | 137 | 138 | The static configuration has the point symmetry D_3d. 139 | The point group associated with its full space group is O_h . 140 | 141 | 142 | Analysis of symmetry for dynamics (positions and initial velocities): 143 | ===================================================================== 144 | Subroutine PRICEL returns: 145 | Original cell was already a primitive cell. 146 | 147 | 148 | Routine SETGRP: Setting up the symmetry group for a 149 | face centered cubic supercell. 150 | 151 | 152 | Subroutine GETGRP returns: Found 48 space group operations 153 | (whereof 12 operations were pure point group operations) 154 | out of a pool of 48 trial point group operations. 
155 | 156 | 157 | The dynamic configuration has the point symmetry D_3d. 158 | The point group associated with its full space group is O_h . 159 | 160 | 161 | Subroutine INISYM returns: Found 48 space group operations 162 | (whereof 12 operations are pure point group operations), 163 | and found 1 'primitive' translations 164 | 165 | 166 | 167 | KPOINTS: # No comment 168 | k-points in reciprocal lattice 169 | Space group operators: 170 | irot det(A) alpha n_x n_y n_z tau_x tau_y tau_z 171 | 1 1.000000 0.000001 1.000000 0.000000 0.000000 0.000000 0.000000 0.000000 172 | 2 -1.000000 0.000001 1.000000 0.000000 0.000000 0.000000 0.000000 0.000000 173 | 3 1.000000 120.000000 -0.577350 -0.577350 -0.577350 0.000000 0.000000 0.000000 174 | 4 -1.000000 120.000000 -0.577350 -0.577350 -0.577350 0.000000 0.000000 0.000000 175 | 5 1.000000 120.000000 0.577350 0.577350 0.577350 0.000000 0.000000 0.000000 176 | 6 -1.000000 120.000000 0.577350 0.577350 0.577350 0.000000 0.000000 0.000000 177 | 7 1.000000 180.000000 0.707107 0.000000 -0.707107 0.000000 0.000000 0.000000 178 | 8 -1.000000 180.000000 0.707107 0.000000 -0.707107 0.000000 0.000000 0.000000 179 | 9 1.000000 180.000000 0.000000 -0.707107 0.707107 0.000000 0.000000 0.000000 180 | 10 -1.000000 180.000000 0.000000 -0.707107 0.707107 0.000000 0.000000 0.000000 181 | 11 1.000000 180.000000 0.707107 -0.707107 0.000000 0.000000 0.000000 0.000000 182 | 12 -1.000000 180.000000 0.707107 -0.707107 0.000000 0.000000 0.000000 0.000000 183 | 13 1.000000 90.000000 -1.000000 0.000000 0.000000 0.500000 0.000000 0.000000 184 | 14 -1.000000 90.000000 -1.000000 0.000000 0.000000 0.500000 0.000000 0.000000 185 | 15 1.000000 180.000000 0.707107 0.000000 0.707107 0.500000 0.000000 0.000000 186 | 16 -1.000000 180.000000 0.707107 0.000000 0.707107 0.500000 0.000000 0.000000 187 | 17 1.000000 90.000000 0.000000 0.000000 1.000000 0.500000 0.000000 0.000000 188 | 18 -1.000000 90.000000 0.000000 0.000000 1.000000 0.500000 0.000000 0.000000 189 | 19 
1.000000 180.000000 1.000000 0.000000 0.000000 0.000000 0.000000 0.500000 190 | 20 -1.000000 180.000000 1.000000 0.000000 0.000000 0.000000 0.000000 0.500000 191 | 21 1.000000 120.000000 0.577350 -0.577350 0.577350 0.000000 0.000000 0.500000 192 | 22 -1.000000 120.000000 0.577350 -0.577350 0.577350 0.000000 0.000000 0.500000 193 | 23 1.000000 120.000000 -0.577350 -0.577350 0.577350 0.000000 0.000000 0.500000 194 | 24 -1.000000 120.000000 -0.577350 -0.577350 0.577350 0.000000 0.000000 0.500000 195 | 25 1.000000 90.000000 1.000000 0.000000 0.000000 0.000000 0.500000 0.000000 196 | 26 -1.000000 90.000000 1.000000 0.000000 0.000000 0.000000 0.500000 0.000000 197 | 27 1.000000 90.000000 0.000000 -1.000000 0.000000 0.000000 0.500000 0.000000 198 | 28 -1.000000 90.000000 0.000000 -1.000000 0.000000 0.000000 0.500000 0.000000 199 | 29 1.000000 180.000000 0.707107 0.707107 0.000000 0.000000 0.500000 0.000000 200 | 30 -1.000000 180.000000 0.707107 0.707107 0.000000 0.000000 0.500000 0.000000 201 | 31 1.000000 120.000000 0.577350 0.577350 -0.577350 0.500000 0.000000 0.000000 202 | 32 -1.000000 120.000000 0.577350 0.577350 -0.577350 0.500000 0.000000 0.000000 203 | 33 1.000000 120.000000 0.577350 -0.577350 -0.577350 0.500000 0.000000 0.000000 204 | 34 -1.000000 120.000000 0.577350 -0.577350 -0.577350 0.500000 0.000000 0.000000 205 | 35 1.000000 180.000000 0.000000 1.000000 0.000000 0.500000 0.000000 0.000000 206 | 36 -1.000000 180.000000 0.000000 1.000000 0.000000 0.500000 0.000000 0.000000 207 | 37 1.000000 90.000000 0.000000 0.000000 -1.000000 0.000000 0.000000 0.500000 208 | 38 -1.000000 90.000000 0.000000 0.000000 -1.000000 0.000000 0.000000 0.500000 209 | 39 1.000000 180.000000 0.000000 0.707107 0.707107 0.000000 0.000000 0.500000 210 | 40 -1.000000 180.000000 0.000000 0.707107 0.707107 0.000000 0.000000 0.500000 211 | 41 1.000000 90.000000 0.000000 1.000000 0.000000 0.000000 0.000000 0.500000 212 | 42 -1.000000 90.000000 0.000000 1.000000 0.000000 0.000000 0.000000 
0.500000 213 | 43 1.000000 120.000000 -0.577350 0.577350 -0.577350 0.000000 0.500000 0.000000 214 | 44 -1.000000 120.000000 -0.577350 0.577350 -0.577350 0.000000 0.500000 0.000000 215 | 45 1.000000 180.000000 0.000000 0.000000 1.000000 0.000000 0.500000 0.000000 216 | 46 -1.000000 180.000000 0.000000 0.000000 1.000000 0.000000 0.500000 0.000000 217 | 47 1.000000 120.000000 -0.577350 0.577350 0.577350 0.000000 0.500000 0.000000 218 | 48 -1.000000 120.000000 -0.577350 0.577350 0.577350 0.000000 0.500000 0.000000 219 | 220 | ----------------------------------------------------------------------------- 221 | | | 222 | | EEEEEEE RRRRRR RRRRRR OOOOOOO RRRRRR ### ### ### | 223 | | E R R R R O O R R ### ### ### | 224 | | E R R R R O O R R ### ### ### | 225 | | EEEEE RRRRRR RRRRRR O O RRRRRR # # # | 226 | | E R R R R O O R R | 227 | | E R R R R O O R R ### ### ### | 228 | | EEEEEEE R R R R OOOOOOO R R ### ### ### | 229 | | | 230 | | | 231 | ----------------------------------------------------------------------------- 232 | -------------------------------------------------------------------------------- /tests/POSCAR: -------------------------------------------------------------------------------- 1 | # Compound: Co7Sb24. 
2 | 1.00000000000000 3 | 9.0164589999999993 0.0000000000000000 0.0000000000000000 4 | 0.0000000000000000 9.0164589999999993 0.0000000000000000 5 | 0.0000000000000000 0.0000000000000000 9.0164589999999993 6 | Co Sb 7 | 8 24 8 | Direct 9 | 0.2499994731856461 0.2499994731856461 0.2499994731856461 10 | 0.7499995286397905 0.7499995286397905 0.7499995286397905 11 | 0.7499995286397905 0.7499995286397905 0.2499994731856461 12 | 0.2499994731856461 0.2499994731856461 0.7499995286397905 13 | 0.7499995286397905 0.2499994731856461 0.7499995286397905 14 | 0.2499994731856461 0.7499995286397905 0.2499994731856461 15 | 0.2499994731856461 0.7499995286397905 0.7499995286397905 16 | 0.7499995286397905 0.2499994731856461 0.2499994731856461 17 | 0.0000000000000000 0.3351005089692052 0.1580498508339048 18 | 0.0000000000000000 0.6648996019390765 0.8419502600743840 19 | 0.0000000000000000 0.6648996019390765 0.1580498508339048 20 | 0.0000000000000000 0.3351005089692052 0.8419502600743840 21 | 0.1580498508339048 0.0000000000000000 0.3351005089692052 22 | 0.8419502600743840 0.0000000000000000 0.6648996019390765 23 | 0.1580498508339048 0.0000000000000000 0.6648996019390765 24 | 0.8419502600743840 0.0000000000000000 0.3351005089692052 25 | 0.3351005089692052 0.1580498508339048 0.0000000000000000 26 | 0.6648996019390765 0.8419502600743840 0.0000000000000000 27 | 0.6648996019390765 0.1580498508339048 0.0000000000000000 28 | 0.3351005089692052 0.8419502600743840 0.0000000000000000 29 | 0.5000000554541444 0.8351005644233496 0.6580499062880421 30 | 0.5000000554541444 0.1648995464849321 0.3419502046202396 31 | 0.5000000554541444 0.1648995464849321 0.6580499062880421 32 | 0.5000000554541444 0.8351005644233496 0.3419502046202396 33 | 0.6580499062880421 0.5000000554541444 0.8351005644233496 34 | 0.3419502046202396 0.5000000554541444 0.1648995464849321 35 | 0.6580499062880421 0.5000000554541444 0.1648995464849321 36 | 0.3419502046202396 0.5000000554541444 0.8351005644233496 37 | 0.8351005644233496 
0.6580499062880421 0.5000000554541444 38 | 0.1648995464849321 0.3419502046202396 0.5000000554541444 39 | 0.1648995464849321 0.6580499062880421 0.5000000554541444 40 | 0.8351005644233496 0.3419502046202396 0.5000000554541444 41 | 42 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 43 | 1.00000000E+00 0.00000000E+00 0.00000000E+00 44 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 45 | 3.00000000E+00 0.00000000E+00 0.00000000E+00 46 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 47 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 48 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 49 | 8.00000000E+00 0.00000000E+00 0.00000000E+00 50 | 0.00000000E+00 9.00000000E+00 0.00000000E+00 51 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 52 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 53 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 54 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 55 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 56 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 57 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 58 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 59 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 60 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 61 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 62 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 63 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 64 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 65 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 66 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 67 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 68 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 69 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 70 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 71 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 72 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 73 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 74 | -------------------------------------------------------------------------------- /tests/POSCARNAMES: 
-------------------------------------------------------------------------------- 1 | # Compound: Co7Sb24. 2 | 1.00000000000000 3 | 9.0164589999999993 0.0000000000000000 0.0000000000000000 4 | 0.0000000000000000 9.0164589999999993 0.0000000000000000 5 | 0.0000000000000000 0.0000000000000000 9.0164589999999993 6 | Hamburger Pizza 7 | 8 24 8 | Direct 9 | 0.2499994731856461 0.2499994731856461 0.2499994731856461 10 | 0.7499995286397905 0.7499995286397905 0.7499995286397905 11 | 0.7499995286397905 0.7499995286397905 0.2499994731856461 12 | 0.2499994731856461 0.2499994731856461 0.7499995286397905 13 | 0.7499995286397905 0.2499994731856461 0.7499995286397905 14 | 0.2499994731856461 0.7499995286397905 0.2499994731856461 15 | 0.2499994731856461 0.7499995286397905 0.7499995286397905 16 | 0.7499995286397905 0.2499994731856461 0.2499994731856461 17 | 0.0000000000000000 0.3351005089692052 0.1580498508339048 18 | 0.0000000000000000 0.6648996019390765 0.8419502600743840 19 | 0.0000000000000000 0.6648996019390765 0.1580498508339048 20 | 0.0000000000000000 0.3351005089692052 0.8419502600743840 21 | 0.1580498508339048 0.0000000000000000 0.3351005089692052 22 | 0.8419502600743840 0.0000000000000000 0.6648996019390765 23 | 0.1580498508339048 0.0000000000000000 0.6648996019390765 24 | 0.8419502600743840 0.0000000000000000 0.3351005089692052 25 | 0.3351005089692052 0.1580498508339048 0.0000000000000000 26 | 0.6648996019390765 0.8419502600743840 0.0000000000000000 27 | 0.6648996019390765 0.1580498508339048 0.0000000000000000 28 | 0.3351005089692052 0.8419502600743840 0.0000000000000000 29 | 0.5000000554541444 0.8351005644233496 0.6580499062880421 30 | 0.5000000554541444 0.1648995464849321 0.3419502046202396 31 | 0.5000000554541444 0.1648995464849321 0.6580499062880421 32 | 0.5000000554541444 0.8351005644233496 0.3419502046202396 33 | 0.6580499062880421 0.5000000554541444 0.8351005644233496 34 | 0.3419502046202396 0.5000000554541444 0.1648995464849321 35 | 0.6580499062880421 
0.5000000554541444 0.1648995464849321 36 | 0.3419502046202396 0.5000000554541444 0.8351005644233496 37 | 0.8351005644233496 0.6580499062880421 0.5000000554541444 38 | 0.1648995464849321 0.3419502046202396 0.5000000554541444 39 | 0.1648995464849321 0.6580499062880421 0.5000000554541444 40 | 0.8351005644233496 0.3419502046202396 0.5000000554541444 41 | 42 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 43 | 1.00000000E+00 0.00000000E+00 0.00000000E+00 44 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 45 | 3.00000000E+00 0.00000000E+00 0.00000000E+00 46 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 47 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 48 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 49 | 8.00000000E+00 0.00000000E+00 0.00000000E+00 50 | 0.00000000E+00 9.00000000E+00 0.00000000E+00 51 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 52 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 53 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 54 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 55 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 56 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 57 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 58 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 59 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 60 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 61 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 62 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 63 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 64 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 65 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 66 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 67 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 68 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 69 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 70 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 71 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 72 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 73 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 74 | 
-------------------------------------------------------------------------------- /tests/POSCARVEL: -------------------------------------------------------------------------------- 1 | # Compound: Co7Sb24. 2 | 1.00000000000000 3 | 9.0164589999999993 0.0000000000000000 0.0000000000000000 4 | 0.0000000000000000 9.0164589999999993 0.0000000000000000 5 | 0.0000000000000000 0.0000000000000000 9.0164589999999993 6 | Co Sb 7 | 8 24 8 | Direct 9 | 0.2499994731856461 0.2499994731856461 0.2499994731856461 10 | 0.7499995286397905 0.7499995286397905 0.7499995286397905 11 | 0.7499995286397905 0.7499995286397905 0.2499994731856461 12 | 0.2499994731856461 0.2499994731856461 0.7499995286397905 13 | 0.7499995286397905 0.2499994731856461 0.7499995286397905 14 | 0.2499994731856461 0.7499995286397905 0.2499994731856461 15 | 0.2499994731856461 0.7499995286397905 0.7499995286397905 16 | 0.7499995286397905 0.2499994731856461 0.2499994731856461 17 | 0.0000000000000000 0.3351005089692052 0.1580498508339048 18 | 0.0000000000000000 0.6648996019390765 0.8419502600743840 19 | 0.0000000000000000 0.6648996019390765 0.1580498508339048 20 | 0.0000000000000000 0.3351005089692052 0.8419502600743840 21 | 0.1580498508339048 0.0000000000000000 0.3351005089692052 22 | 0.8419502600743840 0.0000000000000000 0.6648996019390765 23 | 0.1580498508339048 0.0000000000000000 0.6648996019390765 24 | 0.8419502600743840 0.0000000000000000 0.3351005089692052 25 | 0.3351005089692052 0.1580498508339048 0.0000000000000000 26 | 0.6648996019390765 0.8419502600743840 0.0000000000000000 27 | 0.6648996019390765 0.1580498508339048 0.0000000000000000 28 | 0.3351005089692052 0.8419502600743840 0.0000000000000000 29 | 0.5000000554541444 0.8351005644233496 0.6580499062880421 30 | 0.5000000554541444 0.1648995464849321 0.3419502046202396 31 | 0.5000000554541444 0.1648995464849321 0.6580499062880421 32 | 0.5000000554541444 0.8351005644233496 0.3419502046202396 33 | 0.6580499062880421 0.5000000554541444 0.8351005644233496 34 | 
0.3419502046202396 0.5000000554541444 0.1648995464849321 35 | 0.6580499062880421 0.5000000554541444 0.1648995464849321 36 | 0.3419502046202396 0.5000000554541444 0.8351005644233496 37 | 0.8351005644233496 0.6580499062880421 0.5000000554541444 38 | 0.1648995464849321 0.3419502046202396 0.5000000554541444 39 | 0.1648995464849321 0.6580499062880421 0.5000000554541444 40 | 0.8351005644233496 0.3419502046202396 0.5000000554541444 41 | Direct 42 | 1.00000000E+00 0.00000000E+00 0.00000000E+00 43 | 2.00000000E+00 4.00000000E+00 0.00000000E+00 44 | 3.00000000E+00 0.00000000E+00 0.00000000E+00 45 | 5.00000000E+00 0.00000000E+00 0.00000000E+00 46 | 0.00000000E+00 0.00000000E+00 10.00000000E+00 47 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 48 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 49 | 10.00000000E+00 1.00000000E+00 0.00000000E+00 50 | 0.00000000E+00 0.00000000E+00 14.00000000E+00 51 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 52 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 53 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 54 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 55 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 56 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 57 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 58 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 59 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 60 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 61 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 62 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 63 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 64 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 65 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 66 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 67 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 68 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 69 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 70 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 71 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 72 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 73 | 
0.00000000E+00 0.00000000E+00 0.00000000E+00 74 | 75 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 76 | 1.00000000E+00 0.00000000E+00 0.00000000E+00 77 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 78 | 3.00000000E+00 0.00000000E+00 0.00000000E+00 79 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 80 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 81 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 82 | 8.00000000E+00 0.00000000E+00 0.00000000E+00 83 | 0.00000000E+00 9.00000000E+00 0.00000000E+00 84 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 85 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 86 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 87 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 88 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 89 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 90 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 91 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 92 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 93 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 94 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 95 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 96 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 97 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 98 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 99 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 100 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 101 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 102 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 103 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 104 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 105 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 106 | 0.00000000E+00 0.00000000E+00 0.00000000E+00 107 | -------------------------------------------------------------------------------- /tests/POTCAR: -------------------------------------------------------------------------------- 1 | PAW In_sv 11Feb1111 2 | 11.1111111111111 3 | parameters from PSCTR are: 4 | VRHFIN =In: s1p1 5 | LEXCH = CA 6 | EATOM = 1111.1111 eV, 111.1111 Ry 7 | 8 | 
TITEL = PAW In_d 11Feb1111 9 | LULTRA = F use ultrasoft PP ? 10 | IUNSCR = 1 unscreen: 1-lin 1-nonlin 1-no 11 | RPACOR = 1.111 partial core radius 12 | POMASS = 111.111; ZVAL = 11.111 mass and valenz 13 | RCORE = 1.111 outmost cutoff radius 14 | RWIGS = 1.111; RWIGS = 1.111 wigner-seitz radius (au A) 15 | ENMAX = 111.111; ENMIN = 111.111 eV 16 | ICORE = 1 local potential 17 | LCOR = T correct aug charges 18 | LPAW = T paw PP 19 | EAUG = 111.111 20 | DEXC = 1.111 21 | RMAX = 1.111 core radius for proj-oper 22 | RAUG = 1.111 factor for augmentation sphere 23 | RDEP = 1.111 radius for radial grids 24 | RDEPT = 1.111 core radius for aug-charge 25 | 26 | Atomic configuration 27 | 11 entries 28 | n l j E occ. 29 | 1 1 1.1 1.1 1.1 30 | 1 1 1.1 1.1 1.1 31 | 1 1 1.1 1.1 1.1 32 | 1 1 1.1 1.1 1.1 33 | 1 1 1.1 1.1 1.1 34 | 1 1 1.1 1.1 1.1 35 | 1 1 1.1 1.1 1.1 36 | 1 1 1.1 1.1 1.1 37 | 1 1 1.1 1.1 1.1 38 | 1 1 1.1 1.1 1.1 39 | Description 40 | l E TYP RCUT TYP RCUT 41 | 1 1.1 11 1.1 42 | 1 1.1 11 1.1 43 | 1 1.1 11 1.1 44 | 1 1.1 11 1.1 45 | 1 1.1 11 1.1 46 | 1 1.1 11 1.1 47 | Error from kinetic energy argument (eV) 48 | NDATA = 111 49 | STEP = 11.1 1.1 50 | 11.1 11.1 11.1 11.1 11.1 11.1 11.1 11.1 51 | 11.1 11.1 11.1 11.1 11.1 1.1 1.1 1.1 52 | 1.1 1.1 1.1 1.1 1.1 1.1 1.1 1.1 53 | 1.1 1.1 1.1 1.1 1.1 1.1 1.1 1.1 54 | 1.1 1.1 1.1 1.1 1.1 1.1 1.1 1.1 55 | 1.1 1.1 1.1 1.1 1.1 1.1 1.1 1.1 56 | 1.1 1.1 1.1 1.1 1.1 1.1 1.1 1.1 57 | 1.1 1.1 1.1 1.1 1.1 1.1 1.1 1.1 58 | 1.1 1.1 1.1 1.1 1.1 1.1 1.1 1.1 59 | 1.1 1.1 1.1 1.1 1.1 1.1 1.1 1.1 60 | 1.1 1.1 1.1 1.1 1.1 1.1 1.1 1.1 61 | 1.1 1.1 1.1 1.1 1.1 1.1 1.1 1.1 62 | 1.1 1.1 1.1 1.1 63 | END of PSCTR-controll parameters 64 | 65 | ... DATA ... 
66 | 67 | End of Dataset 68 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiida-vasp/parsevasp/8f43b671440eadb14b71c2eaf0676d955b2a1f15/tests/__init__.py -------------------------------------------------------------------------------- /tests/stdout: -------------------------------------------------------------------------------- 1 | running on 1 total cores 2 | distrk: each k-point on 1 cores, 1 groups 3 | distr: one band on 1 cores, 1 groups 4 | using from now: INCAR 5 | vasp.5.4.4.18Apr17-6-g9f103f2a35 (build Aug 3 2020 11:46:08) complex 6 | POSCAR found : 1 types and 2 ions 7 | scaLAPACK will be used 8 | LDA part: xc-table for Pade appr. of Perdew 9 | 10 | 11 | VERY BAD NEWS! internal error in subroutine IBZKPT: 12 | Tetrahedron method fails for NKPT<4. NKPT = 1 13 | -------------------------------------------------------------------------------- /tests/stdout_ZBRENT: -------------------------------------------------------------------------------- 1 | ZBRENT: fatal error in bracketing 2 | please rerun with smaller EDIFF, or copy CONTCAR 3 | to POSCAR and continue 4 | -------------------------------------------------------------------------------- /tests/stdout_nostart: -------------------------------------------------------------------------------- 1 | failed to locate mpirun! 
2 | -------------------------------------------------------------------------------- /tests/test_chgcar.py: -------------------------------------------------------------------------------- 1 | """Test chgcar.""" 2 | 3 | import os 4 | 5 | import numpy as np 6 | import pytest 7 | 8 | from parsevasp.chgcar import Chgcar 9 | 10 | compare_charge_density = np.array( 11 | [ 12 | [ 13 | [0.09329446, 0.18658892, 0.27988338], 14 | [0.37317784, 0.4664723, 0.55976676], 15 | [0.65306122, 0.74635569, 0.83965015], 16 | [0.93294461, 1.02623907, 1.11953353], 17 | ], 18 | [ 19 | [1.21282799, 1.30612245, 1.39941691], 20 | [1.49271137, 1.58600583, 1.67930029], 21 | [1.77259475, 1.86588921, 1.95918367], 22 | [2.05247813, 2.14577259, 2.23906706], 23 | ], 24 | [ 25 | [2.33236152, 2.42565598, 2.51895044], 26 | [2.6122449, 2.70553936, 2.79883382], 27 | [2.89212828, 2.98542274, 3.0787172], 28 | [3.17201166, 3.26530612, 3.35860058], 29 | ], 30 | [ 31 | [3.45189504, 3.5451895, 3.63848397], 32 | [3.73177843, 3.82507289, 3.91836735], 33 | [4.01166181, 4.10495627, 4.19825073], 34 | [4.29154519, 4.38483965, 4.47813411], 35 | ], 36 | [ 37 | [4.57142857, 4.66472303, 4.75801749], 38 | [4.85131195, 4.94460641, 5.03790087], 39 | [5.13119534, 5.2244898, 5.31778426], 40 | [5.41107872, 5.50437318, 5.59766764], 41 | ], 42 | ] 43 | ) 44 | 45 | 46 | @pytest.fixture 47 | def chgcar_parser(request): 48 | """Load CHGCAR file.""" 49 | try: 50 | name = request.param[0] 51 | handler = request.param[1] 52 | except AttributeError: 53 | # Test not parametrized 54 | name = 'CHGCAR' 55 | handler = True 56 | testdir = os.path.dirname(__file__) 57 | chgcarfile = testdir + '/' + name 58 | chgcar = None 59 | if handler: 60 | with open(chgcarfile) as file_handler: 61 | chgcar = Chgcar(file_handler=file_handler) 62 | else: 63 | chgcar = Chgcar(file_path=chgcarfile) 64 | 65 | return chgcar 66 | 67 | 68 | @pytest.mark.parametrize('chgcar_parser', [('CHGCAR', True), ('CHGCAR', False)], indirect=True) 69 | def 
test_charge_density(chgcar_parser): 70 | """Test that the content returned by the CHGCAR parser returns the correct charge density.""" 71 | charge_density = chgcar_parser.charge_density 72 | assert np.allclose(charge_density, compare_charge_density) 73 | 74 | 75 | @pytest.mark.parametrize('chgcar_parser', [('CHGCAR.spin', True)], indirect=True) 76 | def test_magnetization_density(chgcar_parser): 77 | """Test that the content returned by the CHGCAR parser 78 | returns the correct charge and magnetization density. 79 | 80 | """ 81 | charge_density = chgcar_parser.charge_density 82 | magnetization_density = chgcar_parser.magnetization_density 83 | assert np.allclose(charge_density, compare_charge_density) 84 | assert np.allclose(magnetization_density, compare_charge_density) 85 | 86 | 87 | @pytest.mark.parametrize('chgcar_parser', [('CHGCAR.ncl', True)], indirect=True) 88 | def test_magnetization_density_ncl(chgcar_parser): 89 | """Test that the content returned by the CHGCAR parser 90 | returns the correct charge and magnetization density for non-collinear calculations. 
91 | 92 | """ 93 | charge_density = chgcar_parser.charge_density 94 | magnetization_density = chgcar_parser.magnetization_density 95 | assert np.allclose(charge_density, compare_charge_density) 96 | assert set(['x', 'y', 'z']) == set(magnetization_density.keys()) 97 | for key, item in magnetization_density.items(): 98 | assert np.allclose(item, compare_charge_density) 99 | -------------------------------------------------------------------------------- /tests/test_doscar.py: -------------------------------------------------------------------------------- 1 | """Test doscar.""" 2 | 3 | import os 4 | 5 | import numpy as np 6 | import pytest 7 | 8 | from parsevasp.doscar import Doscar 9 | 10 | compare_total_dos = np.array([(-3.44, -1.10400000e-43, -2.09900000e-43), 11 | (-1.539, 1.40000000e-01, 2.66100000e-01), 12 | (0.362, -3.62400000e-73, 2.00000000e+00), 13 | (2.264, -1.33800000e-05, 2.00000000e+00), 14 | (4.165, 3.15600000e+00, 8.00000000e+00), 15 | (6.066, -2.41200000e-15, 8.00000000e+00), 16 | (7.967, 3.15600000e+00, 1.40000000e+01), 17 | (9.868, -1.38100000e-27, 1.40000000e+01), 18 | (11.769, 2.90100000e+00, 1.95200000e+01), 19 | (13.67, 0.00000000e+00, 2.00000000e+01)], 20 | dtype=[('energy', ' 0 155 | 156 | 157 | def test_doscar_partial(doscar_parser): 158 | """Test that the content returned by the DOSCAR parser returns the partial density of states.""" 159 | pdos = doscar_parser.get_pdos() 160 | for item in compare_partial_dos.dtype.names: 161 | assert np.allclose(pdos[item], compare_partial_dos[item]) 162 | 163 | 164 | def test_doscar_header(doscar_parser): 165 | """Test that the header of the DOSCAR parser returns correct keys and values.""" 166 | assert compare_metadata == doscar_parser.get_metadata() 167 | 168 | 169 | @pytest.mark.parametrize('doscar_parser', [('DOSCAR.spin', False)], indirect=['doscar_parser']) 170 | def test_doscar_spin(doscar_parser): 171 | """ 172 | Test that the content returned by the DOSCAR parser returns 173 | the correct 
dimensions for spin density of states 174 | """ 175 | dos = doscar_parser.get_dos() 176 | assert len(dos.dtype) == 3 177 | assert dos['total'].shape == (301, 2) 178 | 179 | pdos = doscar_parser.get_pdos() 180 | assert len(pdos.dtype) == 10 181 | assert pdos[0]['px'].shape == (301, 2) 182 | 183 | 184 | @pytest.mark.parametrize('doscar_parser', [('DOSCAR.spin_pdos', False)], indirect=['doscar_parser']) 185 | def test_doscar_spin_pdos(doscar_parser): 186 | """ 187 | Test that the content returned by the DOSCAR parser returns 188 | the correct dimensions for spin density of states 189 | """ 190 | dos = doscar_parser.get_dos() 191 | assert len(dos.dtype) == 3 192 | assert dos['total'].shape == (301, 2) 193 | 194 | pdos = doscar_parser.get_pdos() 195 | assert len(pdos.dtype) == 17 196 | assert pdos[0]['px'].shape == (301, 2) 197 | 198 | 199 | @pytest.mark.parametrize('doscar_parser', [('DOSCAR.ncl', True)], indirect=['doscar_parser']) 200 | def test_doscar_non_collinear(doscar_parser): 201 | """ 202 | Test that the content returned by the DOSCAR parser returns 203 | the correct dimensions for density of states for non-collinear calculations. 
204 | """ 205 | dos = doscar_parser.get_dos() 206 | assert len(dos.dtype) == 3 207 | assert dos['total'].shape == (301,) 208 | 209 | pdos = doscar_parser.get_pdos() 210 | assert len(pdos.dtype) == 10 211 | assert pdos[0]['px'].shape == (301, 4) 212 | 213 | 214 | def test_dtypes_pdos(): 215 | from parsevasp.doscar import DTYPES_PDOS_COLLINEAR, DTYPES_PDOS_NONCOLLINEAR 216 | 217 | def _check(dtype_pdos): 218 | count = 0 219 | for name in dtype_pdos.names: 220 | shape = dtype_pdos[name].shape 221 | if shape is tuple(): 222 | count += 1 223 | else: 224 | count += shape[0] 225 | return count 226 | 227 | for key, dtype_pdos in DTYPES_PDOS_COLLINEAR.items(): 228 | assert _check(dtype_pdos) == key 229 | for key, dtype_pdos in DTYPES_PDOS_NONCOLLINEAR.items(): 230 | assert _check(dtype_pdos) == key 231 | -------------------------------------------------------------------------------- /tests/test_eigenval.py: -------------------------------------------------------------------------------- 1 | """Test eigenval.""" 2 | 3 | import os 4 | 5 | import numpy as np 6 | import pytest 7 | 8 | from parsevasp.eigenval import Eigenval 9 | 10 | compare_eigenvalues = np.array( 11 | [[[-1.439825, 2.964373, 2.964373, 2.964373, 7.254542, 7.254542, 7.254542, 11.451811, 11.670398, 11.670398]]] 12 | ) 13 | compare_kpoints = np.array([[0.25, 0.25, 0.25, 1.0]]) 14 | compare_metadata = { 15 | 0: [4, 4, 1, 1], 16 | 1: [16.48482, 4.04e-10, 4.04e-10, 4.04e-10, 1e-16], 17 | 2: 0.0001, 18 | 'n_ions': 4, 19 | 'n_atoms': 4, 20 | 'p00': 1, 21 | 'nspin': 1, 22 | 'cartesian': True, 23 | 'name': 'unknown system', 24 | 'some_num': 12, 25 | 'n_bands': 10, 26 | 'n_kp': 1, 27 | } 28 | 29 | 30 | @pytest.fixture 31 | def eigenval_parser(request): 32 | """Load EIGENVAL file.""" 33 | try: 34 | name = request.param 35 | except AttributeError: 36 | # Test not parametrized 37 | name = 'EIGENVAL' 38 | testdir = os.path.dirname(__file__) 39 | eigenvalfile = testdir + '/' + name 40 | eigenval = 
Eigenval(file_path=eigenvalfile) 41 | 42 | return eigenval 43 | 44 | 45 | @pytest.fixture 46 | def eigenval_parser_file_object(request): 47 | """Load EIGENVAL file from a file object.""" 48 | try: 49 | name = request.param 50 | except AttributeError: 51 | # Test not parametrized 52 | name = 'EIGENVAL' 53 | testdir = os.path.dirname(__file__) 54 | eigenvalfile = testdir + '/' + name 55 | eigenval = None 56 | with open(eigenvalfile) as file_handler: 57 | eigenval = Eigenval(file_handler=file_handler) 58 | 59 | return eigenval 60 | 61 | 62 | def test_eigenval(eigenval_parser): 63 | """Test that the content returned by the EIGENVAL parser returns correct eigenvalues, kpoints and metadata.""" 64 | eigenvalues = eigenval_parser.get_eigenvalues() 65 | kpoints = eigenval_parser.get_kpoints() 66 | metadata = eigenval_parser.get_metadata() 67 | assert np.allclose(eigenvalues, compare_eigenvalues) 68 | assert np.allclose(kpoints, compare_kpoints) 69 | assert metadata == compare_metadata 70 | -------------------------------------------------------------------------------- /tests/test_incar.py: -------------------------------------------------------------------------------- 1 | """Test incar.""" 2 | 3 | import os 4 | 5 | import pytest 6 | 7 | from parsevasp.incar import Incar, IncarItem 8 | 9 | 10 | @pytest.fixture() 11 | def incar_dict(): 12 | """Create a dictionary of valid INCAR items.""" 13 | 14 | incar_dict = {'encut': 350, 'Sigma': '.5e-1 #comment', 'lreal': False, 'PREC': 'Accurate'} 15 | return incar_dict 16 | 17 | 18 | @pytest.fixture(scope='module', params=[0]) 19 | def incar_parser(request, tmpdir_factory): 20 | """Load INCAR file.""" 21 | testdir = os.path.dirname(__file__) 22 | incarfile = testdir + '/INCAR' 23 | tmpfile = str(tmpdir_factory.mktemp('data').join('INCAR')) 24 | incar_truncate(request.param, incarfile, tmpfile) 25 | incar = Incar(file_path=tmpfile) 26 | 27 | return incar 28 | 29 | 30 | @pytest.fixture(scope='module', params=[0]) 31 | def 
incar_parser_file_object(request, tmpdir_factory): 32 | """Load INCAR file using a file object.""" 33 | testdir = os.path.dirname(__file__) 34 | incarfile = testdir + '/INCAR' 35 | tmpfile = str(tmpdir_factory.mktemp('data').join('INCAR')) 36 | incar_truncate(request.param, incarfile, tmpfile) 37 | incar = None 38 | with open(tmpfile) as file_handler: 39 | incar = Incar(file_handler=file_handler) 40 | 41 | return incar 42 | 43 | 44 | def incar_truncate(index, original, tmp): 45 | """Truncate the INCAR file.""" 46 | 47 | with open(original, 'r') as incarfile: 48 | content = incarfile.read().splitlines() 49 | truncated_content = '\n'.join(content[: -index or None]) 50 | with open(tmp, 'w') as incarfile: 51 | incarfile.write(str(truncated_content)) 52 | 53 | 54 | def test_incar_parser_read(incar_parser): 55 | """Check if incar_parser exists.""" 56 | 57 | assert incar_parser.get_dict() 58 | 59 | 60 | def test_incar_parser_parameters(incar_parser): 61 | """Check parameters of the INCAR.""" 62 | 63 | dictionary = incar_parser.get_dict() 64 | assert dictionary['emin'] == 5.5 65 | assert dictionary['emax'] == 7.5 66 | assert dictionary['nedos'] == 100000 67 | assert dictionary['prec'] == 'A' 68 | assert dictionary['loptics'] is True 69 | assert dictionary['encut'] == 350 70 | assert dictionary['dipol'] == [1, 2, 2] 71 | assert dictionary['ismear'] == -5 72 | assert dictionary['algo'] == 'V' 73 | 74 | 75 | def test_incar_parser_write(incar_parser, tmp_path): 76 | """Check the write functions for both file paths and file objects.""" 77 | incar = incar_parser.get_dict() 78 | # Write the content 79 | incar_write_path = tmp_path / 'INCAR' 80 | incar_parser.write(file_path=incar_write_path) 81 | # Then reload and compare 82 | incar_reloaded = Incar(file_path=incar_write_path).get_dict() 83 | assert incar == incar_reloaded 84 | # Write again with file object 85 | with open(incar_write_path, 'w') as handler: 86 | incar_parser.write(file_handler=handler) 87 | # Then reload again 
and compare 88 | with open(incar_write_path, 'r') as handler: 89 | incar_reloaded = Incar(file_handler=handler).get_dict() 90 | assert incar == incar_reloaded 91 | 92 | 93 | def test_incar_parser_parameters_file_object(incar_parser_file_object): 94 | """Check parameters of the INCAR using a file object""" 95 | dictionary = incar_parser_file_object.get_dict() 96 | assert dictionary['emin'] == 5.5 97 | assert dictionary['emax'] == 7.5 98 | assert dictionary['nedos'] == 100000 99 | assert dictionary['prec'] == 'A' 100 | assert dictionary['loptics'] is True 101 | assert dictionary['encut'] == 350 102 | assert dictionary['dipol'] == [1, 2, 2] 103 | assert dictionary['ismear'] == -5 104 | assert dictionary['algo'] == 'V' 105 | 106 | 107 | def test_incar_from_dict(incar_dict): 108 | """Test passing a dictionary.""" 109 | incar_io = Incar(incar_dict=incar_dict) 110 | comp_dict = {'encut': 350, 'sigma': 0.05, 'lreal': False, 'prec': 'Accurate'} 111 | assert str(sorted(incar_io.get_dict())) == str(sorted(comp_dict)) 112 | 113 | 114 | def test_incar_parser_from_string(): 115 | """Test passing a string.""" 116 | 117 | test_str = 'LOPTICS = .True.\nAddgrid=.false.' 118 | incar_io = Incar(incar_string=test_str) 119 | incar_dict = incar_io.get_dict() 120 | assert incar_dict.pop('loptics') is True 121 | assert incar_dict.pop('addgrid') is False 122 | assert not incar_dict 123 | 124 | 125 | def test_incar_parser_from_string_complexr(): 126 | """Test passing a more complex string.""" 127 | 128 | test_string = """LOPTICS = .True. 129 | EVENONLY = .False. 
# this is a comment; FLOAT\t=\t1.45e-03 130 | ISMEAR = THIS ; SIGMA = THAT 131 | NBANDS = 45 # endline comment; may contain '#' and ';' NOPARAM = this is not a parameter 132 | DIPOL = 1 2 -33 5 133 | """ 134 | parsed = Incar(incar_string=test_string) 135 | incar_dict = parsed.get_dict() 136 | assert incar_dict['loptics'] is True 137 | assert incar_dict['evenonly'] is False 138 | assert incar_dict['ismear'] == 'THIS' 139 | assert incar_dict['sigma'] == 'THAT' 140 | assert incar_dict['dipol'] == [1, 2, -33, 5] 141 | assert incar_dict['nbands'] == 45 142 | assert 'noparam' not in incar_dict 143 | assert 'float' not in incar_dict 144 | 145 | 146 | def test_incar_parser_invalid_tag(): 147 | """Test passing a tag that is not recognized.""" 148 | 149 | test_string = """SOMEINVALIDTAG = .TRUE.""" 150 | with pytest.raises(SystemExit): 151 | _ = Incar(incar_string=test_string) 152 | 153 | 154 | def test_incar_parser_invalid_tag_and_override(): 155 | """Test passing a tag that is not recognized and its override.""" 156 | 157 | test_string = """SOMEINVALIDTAG = .TRUE.""" 158 | parsed = Incar(incar_string=test_string, validate_tags=False) 159 | incar_dict = parsed.get_dict() 160 | assert next(iter(incar_dict.keys())) == 'someinvalidtag' 161 | 162 | 163 | def test_incar_item(): 164 | """Test the incar item class.""" 165 | 166 | item = IncarItem(tag='encut', value=350, comment=' test comment ') 167 | assert item.get_tag() == 'encut' 168 | assert item.get_value() == 350 169 | assert item.get_comment() == 'test comment' 170 | -------------------------------------------------------------------------------- /tests/test_kpoints.py: -------------------------------------------------------------------------------- 1 | """Test kpoints.""" 2 | 3 | import math 4 | import os 5 | 6 | import numpy as np 7 | import pytest 8 | 9 | from parsevasp.kpoints import Kpoint, Kpoints 10 | 11 | 12 | @pytest.fixture(scope='module') 13 | def kpoints_parser_auto(): 14 | """Load KPOINTS file.""" 15 | 16 | 
testdir = os.path.dirname(__file__) 17 | kpointsfile = testdir + '/KPOINTS' 18 | kpoints = Kpoints(file_path=kpointsfile) 19 | 20 | return kpoints 21 | 22 | 23 | @pytest.fixture(scope='module') 24 | def kpoints_parser_auto_file_object(): 25 | """Load KPOINTS file.""" 26 | 27 | testdir = os.path.dirname(__file__) 28 | kpointsfile = testdir + '/KPOINTS' 29 | kpoints = None 30 | with open(kpointsfile) as file_handler: 31 | kpoints = Kpoints(file_handler=file_handler) 32 | 33 | return kpoints 34 | 35 | 36 | @pytest.fixture(scope='module') 37 | def kpoints_parser_explicit(): 38 | """Load KPOINTS file.""" 39 | 40 | testdir = os.path.dirname(__file__) 41 | kpointsfile = testdir + '/KPOINTSEXP' 42 | kpoints = Kpoints(file_path=kpointsfile) 43 | 44 | return kpoints 45 | 46 | 47 | @pytest.fixture(scope='module') 48 | def kpoints_parser_line(): 49 | """Load KPOINTS file.""" 50 | 51 | testdir = os.path.dirname(__file__) 52 | kpointsfile = testdir + '/KPOINTSLINE' 53 | kpoints = Kpoints(file_path=kpointsfile) 54 | 55 | return kpoints 56 | 57 | 58 | @pytest.fixture(scope='module') 59 | def kpoints_parser_GRG(): 60 | """Load KPOINTS file of generalized regular grid (Reciprocal).""" 61 | 62 | testdir = os.path.dirname(__file__) 63 | kpointsfile = testdir + '/KPOINTSGRG' 64 | kpoints = Kpoints(file_path=kpointsfile) 65 | 66 | return kpoints 67 | 68 | 69 | def test_kpoints_exist(kpoints_parser_auto): 70 | """Check if kpoints_parser exists.""" 71 | 72 | assert kpoints_parser_auto.get_dict() 73 | 74 | 75 | def test_kpoints_params_auto(kpoints_parser_auto): 76 | """Check parameters in KPOINTS for automatic generation.""" 77 | 78 | kpoints = kpoints_parser_auto.get_dict() 79 | assert kpoints['mode'] == 'automatic' 80 | assert kpoints['comment'] == 'Example file' 81 | assert kpoints['divisions'] == [4, 4, 4] 82 | assert kpoints['shifts'] is None 83 | assert kpoints['points'] is None 84 | assert kpoints['centering'] == 'Gamma' 85 | assert kpoints['tetra'] is None 86 | assert 
kpoints['tetra_volume'] is None 87 | assert kpoints['num_kpoints'] == 0 88 | 89 | 90 | def test_kpoints_params_auto_file_object(kpoints_parser_auto_file_object): 91 | """Check parameters in KPOINTS for automatic generation using a file object.""" 92 | 93 | kpoints = kpoints_parser_auto_file_object.get_dict() 94 | assert kpoints['mode'] == 'automatic' 95 | assert kpoints['comment'] == 'Example file' 96 | assert kpoints['divisions'] == [4, 4, 4] 97 | assert kpoints['shifts'] is None 98 | assert kpoints['points'] is None 99 | assert kpoints['centering'] == 'Gamma' 100 | assert kpoints['tetra'] is None 101 | assert kpoints['tetra_volume'] is None 102 | assert kpoints['num_kpoints'] == 0 103 | 104 | 105 | def test_kpoints_params_explicit(kpoints_parser_explicit): 106 | """Check parameters in KPOINTS for explicit generation.""" 107 | 108 | kpoints = kpoints_parser_explicit.get_dict() 109 | assert kpoints['mode'] == 'explicit' 110 | assert kpoints['comment'] == 'Example file' 111 | assert kpoints['divisions'] is None 112 | assert kpoints['shifts'] is None 113 | assert kpoints['centering'] is None 114 | points = kpoints['points'] 115 | assert len(points) == 4 116 | np.testing.assert_allclose(points[0][0], np.array([0.0, 0.0, 0.0])) 117 | np.testing.assert_allclose(points[1][0], np.array([0.0, 0.0, 0.5])) 118 | np.testing.assert_allclose(points[2][0], np.array([0.0, 0.5, 0.5])) 119 | np.testing.assert_allclose(points[3][0], np.array([0.5, 0.5, 0.5])) 120 | assert math.isclose(points[0][1], 1.0, rel_tol=1e-07) 121 | assert math.isclose(points[1][1], 1.0, rel_tol=1e-07) 122 | assert math.isclose(points[2][1], 2.0, rel_tol=1e-07) 123 | assert math.isclose(points[3][1], 4.0, rel_tol=1e-07) 124 | assert points[0][2] 125 | assert points[1][2] 126 | assert points[2][2] 127 | assert points[3][2] 128 | assert kpoints['tetra'] == [[6, 1, 2, 3, 4]] 129 | assert math.isclose(kpoints['tetra_volume'], 0.183333333333333, rel_tol=1e-07) 130 | 131 | 132 | def 
test_kpoints_params_line(kpoints_parser_line): 133 | """Check parameters in KPOINTS for line generation.""" 134 | 135 | kpoints = kpoints_parser_line.get_dict() 136 | assert kpoints['mode'] == 'line' 137 | assert kpoints['comment'] == 'k-points along high symmetry lines' 138 | assert kpoints['divisions'] is None 139 | assert kpoints['shifts'] is None 140 | assert kpoints['centering'] is None 141 | assert kpoints['num_kpoints'] == 40 142 | points = kpoints['points'] 143 | np.testing.assert_allclose(points[0][0], np.array([0.0, 0.0, 0.0])) 144 | np.testing.assert_allclose(points[1][0], np.array([0.5, 0.5, 0.0])) 145 | np.testing.assert_allclose(points[2][0], np.array([0.5, 0.5, 0.0])) 146 | np.testing.assert_allclose(points[3][0], np.array([0.5, 0.75, 0.25])) 147 | np.testing.assert_allclose(points[4][0], np.array([0.5, 0.75, 0.25])) 148 | np.testing.assert_allclose(points[5][0], np.array([0.0, 0.0, 0.0])) 149 | assert math.isclose(points[0][1], 1.0, rel_tol=1e-07) 150 | assert math.isclose(points[1][1], 1.0, rel_tol=1e-07) 151 | assert math.isclose(points[2][1], 1.0, rel_tol=1e-07) 152 | assert math.isclose(points[3][1], 1.0, rel_tol=1e-07) 153 | assert math.isclose(points[4][1], 1.0, rel_tol=1e-07) 154 | assert math.isclose(points[5][1], 1.0, rel_tol=1e-07) 155 | assert points[0][2] 156 | assert points[1][2] 157 | assert points[2][2] 158 | assert points[3][2] 159 | assert points[4][2] 160 | assert points[5][2] 161 | 162 | 163 | def test_kpoints_params_GRG(kpoints_parser_GRG): 164 | """Check parameters in KPOINTS for generalized regular grid (Reciprocal)""" 165 | 166 | kpoints = kpoints_parser_GRG.get_dict() 167 | assert kpoints['mode'] == 'automatic' 168 | assert kpoints['comment'] == 'Example file' 169 | np.testing.assert_allclose(kpoints['generating_vectors'], [[0.25, 0.0, 0.0], [0.0, 0.25, 0.0], [0.0, 0.0, 0.25]]) 170 | np.testing.assert_allclose(kpoints['shifts'], [0.5, 0.5, 0.5]) 171 | assert kpoints['points'] is None 172 | assert kpoints['centering'] == 
'Reciprocal' 173 | assert kpoints['tetra'] is None 174 | assert kpoints['tetra_volume'] is None 175 | assert kpoints['num_kpoints'] == 0 176 | 177 | 178 | def test_kpoints_write_auto(kpoints_parser_auto, tmpdir): 179 | """Test read, write and read KPOINTS in auto mode. 180 | 181 | Here we also test that the write using file handler works properly. 182 | 183 | """ 184 | 185 | temp_file = str(tmpdir.join('KPOINTS')) 186 | kpoints_parser_auto.write(file_path=temp_file) 187 | kpoints_parser_auto_temp = Kpoints(file_path=temp_file) 188 | kpoints_temp = kpoints_parser_auto_temp.get_dict() 189 | verify_kpoints_content(kpoints_temp) 190 | with open(temp_file, 'w') as handler: 191 | kpoints_parser_auto.write(file_handler=handler) 192 | with open(temp_file, 'r') as handler: 193 | kpoints_parser_auto_temp = Kpoints(file_handler=handler) 194 | kpoints_temp = kpoints_parser_auto_temp.get_dict() 195 | verify_kpoints_content(kpoints_temp) 196 | 197 | 198 | def test_kpoints_write_explicit(kpoints_parser_explicit, tmpdir): 199 | """Test read, write and read KPOINTS in explicit mode.""" 200 | 201 | temp_file = str(tmpdir.join('KPOINTSEXP')) 202 | kpoints_parser_explicit.write(file_path=temp_file) 203 | kpoints_parser_explicit_temp = Kpoints(file_path=temp_file) 204 | kpoints_temp = kpoints_parser_explicit_temp.get_dict() 205 | assert kpoints_temp['mode'] == 'explicit' 206 | assert kpoints_temp['comment'] == 'Example file' 207 | assert kpoints_temp['divisions'] is None 208 | assert kpoints_temp['shifts'] is None 209 | assert kpoints_temp['centering'] is None 210 | points = kpoints_temp['points'] 211 | assert len(points) == 4 212 | np.testing.assert_allclose(points[0][0], np.array([0.0, 0.0, 0.0])) 213 | np.testing.assert_allclose(points[1][0], np.array([0.0, 0.0, 0.5])) 214 | np.testing.assert_allclose(points[2][0], np.array([0.0, 0.5, 0.5])) 215 | np.testing.assert_allclose(points[3][0], np.array([0.5, 0.5, 0.5])) 216 | assert math.isclose(points[0][1], 1.0, rel_tol=1e-07) 217 | 
assert math.isclose(points[1][1], 1.0, rel_tol=1e-07) 218 | assert math.isclose(points[2][1], 2.0, rel_tol=1e-07) 219 | assert math.isclose(points[3][1], 4.0, rel_tol=1e-07) 220 | assert points[0][2] 221 | assert points[1][2] 222 | assert points[2][2] 223 | assert points[3][2] 224 | assert kpoints_temp['tetra'] == [[6, 1, 2, 3, 4]] 225 | assert math.isclose(kpoints_temp['tetra_volume'], 0.183333333333333, rel_tol=1e-07) 226 | 227 | 228 | def test_kpoints_write_line(kpoints_parser_line, tmpdir): 229 | """Test read, write and read KPOINTS in line mode.""" 230 | 231 | temp_file = str(tmpdir.join('KPOINTSLINE')) 232 | kpoints_parser_line.write(file_path=temp_file) 233 | kpoints_parser_line_temp = Kpoints(file_path=temp_file) 234 | kpoints_temp = kpoints_parser_line_temp.get_dict() 235 | assert kpoints_temp['mode'] == 'line' 236 | assert kpoints_temp['comment'] == 'k-points along high symmetry lines' 237 | assert kpoints_temp['divisions'] is None 238 | assert kpoints_temp['shifts'] is None 239 | assert kpoints_temp['centering'] is None 240 | assert kpoints_temp['num_kpoints'] == 40 241 | points = kpoints_temp['points'] 242 | np.testing.assert_allclose(points[0][0], np.array([0.0, 0.0, 0.0])) 243 | np.testing.assert_allclose(points[1][0], np.array([0.5, 0.5, 0.0])) 244 | np.testing.assert_allclose(points[2][0], np.array([0.5, 0.5, 0.0])) 245 | np.testing.assert_allclose(points[3][0], np.array([0.5, 0.75, 0.25])) 246 | np.testing.assert_allclose(points[4][0], np.array([0.5, 0.75, 0.25])) 247 | np.testing.assert_allclose(points[5][0], np.array([0.0, 0.0, 0.0])) 248 | assert math.isclose(points[0][1], 1.0, rel_tol=1e-07) 249 | assert math.isclose(points[1][1], 1.0, rel_tol=1e-07) 250 | assert math.isclose(points[2][1], 1.0, rel_tol=1e-07) 251 | assert math.isclose(points[3][1], 1.0, rel_tol=1e-07) 252 | assert math.isclose(points[4][1], 1.0, rel_tol=1e-07) 253 | assert math.isclose(points[5][1], 1.0, rel_tol=1e-07) 254 | assert points[0][2] 255 | assert points[1][2] 256 | 
assert points[2][2] 257 | assert points[3][2] 258 | assert points[4][2] 259 | assert points[5][2] 260 | 261 | 262 | def test_kpoints_modify_auto(kpoints_parser_auto, tmpdir): 263 | """Test read, modify, write and read KPOINTS in auto mode.""" 264 | 265 | kpoints = kpoints_parser_auto.get_dict() 266 | assert kpoints['comment'] == 'Example file' 267 | assert kpoints['divisions'] == [4, 4, 4] 268 | kpoints_parser_auto.modify('comment', 'No comment') 269 | kpoints_parser_auto.modify('divisions', [5, 5, 5]) 270 | temp_file = str(tmpdir.join('KPOINTS')) 271 | kpoints_parser_auto.write(file_path=temp_file) 272 | kpoints_parser_auto_temp = Kpoints(file_path=temp_file) 273 | kpoints_temp = kpoints_parser_auto_temp.get_dict() 274 | assert kpoints_temp['comment'] == 'No comment' 275 | assert kpoints_temp['divisions'] == [5, 5, 5] 276 | 277 | 278 | def test_kpoints_modify_explicit(kpoints_parser_explicit, tmpdir): 279 | """Test read, modify, write and read KPOINTS in explicit mode.""" 280 | 281 | kpoints = kpoints_parser_explicit.get_dict() 282 | assert kpoints['comment'] == 'Example file' 283 | points = kpoints['points'] 284 | assert len(points) == 4 285 | np.testing.assert_allclose(points[0][0], np.array([0.0, 0.0, 0.0])) 286 | np.testing.assert_allclose(points[1][0], np.array([0.0, 0.0, 0.5])) 287 | np.testing.assert_allclose(points[2][0], np.array([0.0, 0.5, 0.5])) 288 | np.testing.assert_allclose(points[3][0], np.array([0.5, 0.5, 0.5])) 289 | assert math.isclose(points[0][1], 1.0, rel_tol=1e-07) 290 | assert math.isclose(points[1][1], 1.0, rel_tol=1e-07) 291 | assert math.isclose(points[2][1], 2.0, rel_tol=1e-07) 292 | assert math.isclose(points[3][1], 4.0, rel_tol=1e-07) 293 | assert points[0][2] 294 | assert points[1][2] 295 | assert points[2][2] 296 | assert points[3][2] 297 | kpoints_parser_explicit.modify('comment', 'Nada comment') 298 | point = Kpoint(np.array([0.0, 0.0, 0.0]), 1.0) 299 | kpoints_parser_explicit.modify('points', point, point_number=3) 300 | 
temp_file = str(tmpdir.join('KPOINTSEXP')) 301 | kpoints_parser_explicit.write(file_path=temp_file) 302 | kpoints_parser_explicit_temp = Kpoints(file_path=temp_file) 303 | kpoints_temp = kpoints_parser_explicit_temp.get_dict() 304 | assert kpoints_temp['comment'] == 'Nada comment' 305 | points = kpoints_temp['points'] 306 | assert len(points) == 4 307 | np.testing.assert_allclose(points[0][0], np.array([0.0, 0.0, 0.0])) 308 | np.testing.assert_allclose(points[1][0], np.array([0.0, 0.0, 0.5])) 309 | np.testing.assert_allclose(points[2][0], np.array([0.0, 0.5, 0.5])) 310 | np.testing.assert_allclose(points[3][0], np.array([0.0, 0.0, 0.0])) 311 | assert math.isclose(points[0][1], 1.0, rel_tol=1e-07) 312 | assert math.isclose(points[1][1], 1.0, rel_tol=1e-07) 313 | assert math.isclose(points[2][1], 2.0, rel_tol=1e-07) 314 | assert math.isclose(points[3][1], 1.0, rel_tol=1e-07) 315 | assert points[0][2] 316 | assert points[1][2] 317 | assert points[2][2] 318 | assert points[3][2] 319 | 320 | 321 | def test_kpoints_modify_line(kpoints_parser_line, tmpdir): 322 | """Test read, modify, write and read KPOINTS in line mode.""" 323 | 324 | kpoints = kpoints_parser_line.get_dict() 325 | assert kpoints['comment'] == 'k-points along high symmetry lines' 326 | kpoints_parser_line.modify('comment', 'No comment') 327 | point = Kpoint(np.array([0.5, 0.5, 0.25]), 1.0) 328 | kpoints_parser_line.modify('points', point, point_number=3) 329 | kpoints_parser_line.modify('points', point, point_number=4) 330 | temp_file = str(tmpdir.join('KPOINTSLINE')) 331 | kpoints_parser_line.write(file_path=temp_file) 332 | kpoints_parser_line_temp = Kpoints(file_path=temp_file) 333 | kpoints_temp = kpoints_parser_line_temp.get_dict() 334 | assert kpoints_temp['comment'] == 'No comment' 335 | points = kpoints_temp['points'] 336 | np.testing.assert_allclose(points[0][0], np.array([0.0, 0.0, 0.0])) 337 | np.testing.assert_allclose(points[1][0], np.array([0.5, 0.5, 0.0])) 338 | 
np.testing.assert_allclose(points[2][0], np.array([0.5, 0.5, 0.0])) 339 | np.testing.assert_allclose(points[3][0], np.array([0.5, 0.5, 0.25])) 340 | np.testing.assert_allclose(points[4][0], np.array([0.5, 0.5, 0.25])) 341 | np.testing.assert_allclose(points[5][0], np.array([0.0, 0.0, 0.0])) 342 | assert math.isclose(points[0][1], 1.0, rel_tol=1e-07) 343 | assert math.isclose(points[1][1], 1.0, rel_tol=1e-07) 344 | assert math.isclose(points[2][1], 1.0, rel_tol=1e-07) 345 | assert math.isclose(points[3][1], 1.0, rel_tol=1e-07) 346 | assert math.isclose(points[4][1], 1.0, rel_tol=1e-07) 347 | assert math.isclose(points[5][1], 1.0, rel_tol=1e-07) 348 | assert points[0][2] 349 | assert points[1][2] 350 | assert points[2][2] 351 | assert points[3][2] 352 | assert points[4][2] 353 | assert points[5][2] 354 | 355 | 356 | def test_kpoints_string(tmpdir): 357 | """Test to initialize KPOINTS in auto mode using string.""" 358 | 359 | kpoints_str = '# Example file\n0\nG\n4 4 4\n' 360 | kpoints_parser_auto_temp = Kpoints(kpoints_string=kpoints_str) 361 | kpoints_temp = kpoints_parser_auto_temp.get_dict() 362 | assert kpoints_temp['mode'] == 'automatic' 363 | assert kpoints_temp['comment'] == 'Example file' 364 | assert kpoints_temp['divisions'] == [4, 4, 4] 365 | assert kpoints_temp['shifts'] is None 366 | assert kpoints_temp['points'] is None 367 | assert kpoints_temp['centering'] == 'Gamma' 368 | assert kpoints_temp['tetra'] is None 369 | assert kpoints_temp['tetra_volume'] is None 370 | assert kpoints_temp['num_kpoints'] == 0 371 | 372 | 373 | def test_kpoints_dict(tmpdir): 374 | """Test to initialize KPOINTS in auto mode using dictionary.""" 375 | 376 | kpoints_dict = { 377 | 'comment': 'Example file', 378 | 'divisions': [5, 5, 5], 379 | 'mode': 'automatic', 380 | 'shifts': None, 381 | 'points': None, 382 | 'centering': 'Gamma', 383 | 'tetra': None, 384 | 'tetra_volume': None, 385 | 'num_kpoints': 0, 386 | 'generating_vectors': None, 387 | } 388 | kpoints_parser_auto_temp = 
Kpoints(kpoints_dict=kpoints_dict) 389 | kpoints_temp = kpoints_parser_auto_temp.get_dict() 390 | assert kpoints_temp['mode'] == 'automatic' 391 | assert kpoints_temp['comment'] == 'Example file' 392 | assert kpoints_temp['divisions'] == [5, 5, 5] 393 | assert kpoints_temp['shifts'] is None 394 | assert kpoints_temp['points'] is None 395 | assert kpoints_temp['centering'] == 'Gamma' 396 | assert kpoints_temp['tetra'] is None 397 | assert kpoints_temp['tetra_volume'] is None 398 | assert kpoints_temp['num_kpoints'] == 0 399 | 400 | 401 | def verify_kpoints_content(kpoints): 402 | """Verify that the content of kpoints is correct.""" 403 | assert kpoints['mode'] == 'automatic' 404 | assert kpoints['comment'] == 'Example file' 405 | assert kpoints['divisions'] == [4, 4, 4] 406 | assert kpoints['shifts'] == [0.0, 0.0, 0.0] 407 | assert kpoints['points'] is None 408 | assert kpoints['centering'] == 'Gamma' 409 | assert kpoints['tetra'] is None 410 | assert kpoints['tetra_volume'] is None 411 | assert kpoints['num_kpoints'] == 0 412 | -------------------------------------------------------------------------------- /tests/test_outcar.py: -------------------------------------------------------------------------------- 1 | """Test outcar.""" 2 | 3 | import os 4 | 5 | import numpy as np 6 | import pytest 7 | 8 | from parsevasp.outcar import Outcar 9 | 10 | 11 | @pytest.fixture 12 | def outcar_parser(request): 13 | """A fixture that loads OUTCAR.""" 14 | try: 15 | name = request.param 16 | except AttributeError: 17 | # Test not parametrized 18 | name = 'OUTCAR' 19 | testdir = os.path.dirname(__file__) 20 | outcarfile = testdir + '/' + name 21 | outcar = Outcar(file_path=outcarfile) 22 | 23 | return outcar 24 | 25 | 26 | @pytest.fixture 27 | def outcar_parser_file_objects(request): 28 | """A fixture that loads OUTCAR using file object.""" 29 | try: 30 | name = request.param 31 | except AttributeError: 32 | # Test not parametrized 33 | name = 'OUTCAR' 34 | testdir = 
os.path.dirname(__file__) 35 | outcarfile = testdir + '/' + name 36 | outcar = None 37 | with open(outcarfile) as file_handler: 38 | outcar = Outcar(file_handler=file_handler) 39 | 40 | return outcar 41 | 42 | 43 | def test_outcar_symmetry(outcar_parser): 44 | """Check if parser returns correct symmetry entries.""" 45 | 46 | symmetry = outcar_parser.get_symmetry() 47 | 48 | test = [48, 16, 16, 16, 16, 16, 16, 4, 4, 4, 4, 4, 4, 8, 8, 48] 49 | assert symmetry['num_space_group_operations']['static'] == test 50 | assert symmetry['num_space_group_operations']['dynamic'] == test 51 | test = [ 52 | 'primitive cell', 53 | 'primitive cell', 54 | 'primitive cell', 55 | 'primitive cell', 56 | 'primitive cell', 57 | 'primitive cell', 58 | 'primitive cell', 59 | 'primitive cell', 60 | 'primitive cell', 61 | 'primitive cell', 62 | 'primitive cell', 63 | 'primitive cell', 64 | 'primitive cell', 65 | 'primitive cell', 66 | 'primitive cell', 67 | 'primitive cell', 68 | ] 69 | assert symmetry['original_cell_type']['static'] == test 70 | assert symmetry['original_cell_type']['dynamic'] == test 71 | test = [ 72 | 'face centered cubic supercell.', 73 | 'body centered tetragonal supercell.', 74 | 'body centered tetragonal supercell.', 75 | 'body centered tetragonal supercell.', 76 | 'body centered tetragonal supercell.', 77 | 'body centered tetragonal supercell.', 78 | 'body centered tetragonal supercell.', 79 | 'base centered monoclinic supercell.', 80 | 'base centered monoclinic supercell.', 81 | 'base centered monoclinic supercell.', 82 | 'base centered monoclinic supercell.', 83 | 'base centered monoclinic supercell.', 84 | 'base centered monoclinic supercell.', 85 | 'face centered cubic supercell.', 86 | 'face centered cubic supercell.', 87 | 'face centered cubic supercell.', 88 | ] 89 | assert symmetry['symmetrized_cell_type']['static'] == test 90 | assert symmetry['symmetrized_cell_type']['dynamic'] == test 91 | 92 | 93 | def test_outcar_elastic(outcar_parser): 94 | """Check if 
parser returns correct elastic moduli entries.""" 95 | 96 | elastic = outcar_parser.get_elastic_moduli() 97 | test = np.array( 98 | [ 99 | [1.6740702e03, 7.0419980e02, 7.0419980e02, -0.0000000e00, 0.0000000e00, 0.0000000e00], 100 | [7.0502380e02, 1.6748491e03, 7.0502380e02, -0.0000000e00, -0.0000000e00, 0.0000000e00], 101 | [7.0499350e02, 7.0499350e02, 1.6748165e03, 0.0000000e00, -0.0000000e00, 0.0000000e00], 102 | [8.2260000e-01, 8.7980000e-01, 1.2896000e00, 1.1225901e03, -0.0000000e00, 0.0000000e00], 103 | [-7.8000000e-03, -4.9500000e-02, 1.4700000e-02, 0.0000000e00, 1.1230829e03, -0.0000000e00], 104 | [-2.9200000e-02, -5.3200000e-02, -2.1970000e-01, -0.0000000e00, 0.0000000e00, 1.1223147e03], 105 | ] 106 | ) 107 | np.testing.assert_allclose(elastic['non_symmetrized'], test) 108 | test = np.array( 109 | [ 110 | [1674.5786, 704.739, 704.739, -0.0, 0.0, 0.0], 111 | [704.739, 1674.5786, 704.739, -0.0, 0.0, 0.0], 112 | [704.739, 704.739, 1674.5786, -0.0, -0.0, 0.0], 113 | [-0.0, -0.0, -0.0, 1122.6622, 0.0, -0.0], 114 | [0.0, 0.0, -0.0, 0.0, 1122.6622, -0.0], 115 | [0.0, 0.0, 0.0, -0.0, -0.0, 1122.6622], 116 | ] 117 | ) 118 | np.testing.assert_allclose(elastic['symmetrized'], test) 119 | test = np.array( 120 | [ 121 | [1674.5786, 704.739, 704.739, -0.0, 0.0, 0.0], 122 | [704.739, 1674.5786, 704.739, -0.0, 0.0, 0.0], 123 | [704.739, 704.739, 1674.5786, -0.0, -0.0, 0.0], 124 | [-0.0, -0.0, -0.0, 775.8054, 0.0, -0.0], 125 | [0.0, 0.0, -0.0, 0.0, 775.8054, -0.0], 126 | [0.0, 0.0, 0.0, -0.0, -0.0, 775.8054], 127 | ] 128 | ) 129 | np.testing.assert_allclose(elastic['total'], test) 130 | 131 | 132 | def test_outcar_elastic_file_object(outcar_parser_file_objects): 133 | """Check if parser returns correct elastic moduli entries using the file object.""" 134 | 135 | elastic = outcar_parser_file_objects.get_elastic_moduli() 136 | test = np.array( 137 | [ 138 | [1.6740702e03, 7.0419980e02, 7.0419980e02, -0.0000000e00, 0.0000000e00, 0.0000000e00], 139 | [7.0502380e02, 
1.6748491e03, 7.0502380e02, -0.0000000e00, -0.0000000e00, 0.0000000e00], 140 | [7.0499350e02, 7.0499350e02, 1.6748165e03, 0.0000000e00, -0.0000000e00, 0.0000000e00], 141 | [8.2260000e-01, 8.7980000e-01, 1.2896000e00, 1.1225901e03, -0.0000000e00, 0.0000000e00], 142 | [-7.8000000e-03, -4.9500000e-02, 1.4700000e-02, 0.0000000e00, 1.1230829e03, -0.0000000e00], 143 | [-2.9200000e-02, -5.3200000e-02, -2.1970000e-01, -0.0000000e00, 0.0000000e00, 1.1223147e03], 144 | ] 145 | ) 146 | np.testing.assert_allclose(elastic['non_symmetrized'], test) 147 | test = np.array( 148 | [ 149 | [1674.5786, 704.739, 704.739, -0.0, 0.0, 0.0], 150 | [704.739, 1674.5786, 704.739, -0.0, 0.0, 0.0], 151 | [704.739, 704.739, 1674.5786, -0.0, -0.0, 0.0], 152 | [-0.0, -0.0, -0.0, 1122.6622, 0.0, -0.0], 153 | [0.0, 0.0, -0.0, 0.0, 1122.6622, -0.0], 154 | [0.0, 0.0, 0.0, -0.0, -0.0, 1122.6622], 155 | ] 156 | ) 157 | np.testing.assert_allclose(elastic['symmetrized'], test) 158 | test = np.array( 159 | [ 160 | [1674.5786, 704.739, 704.739, -0.0, 0.0, 0.0], 161 | [704.739, 1674.5786, 704.739, -0.0, 0.0, 0.0], 162 | [704.739, 704.739, 1674.5786, -0.0, -0.0, 0.0], 163 | [-0.0, -0.0, -0.0, 775.8054, 0.0, -0.0], 164 | [0.0, 0.0, -0.0, 0.0, 775.8054, -0.0], 165 | [0.0, 0.0, 0.0, -0.0, -0.0, 775.8054], 166 | ] 167 | ) 168 | np.testing.assert_allclose(elastic['total'], test) 169 | 170 | 171 | @pytest.mark.parametrize('outcar_parser', (['OUTCAR_MAG']), indirect=['outcar_parser']) 172 | def test_outcar_magnetization(outcar_parser): 173 | """Check if the magnetization parser returns the correct magnetization.""" 174 | 175 | magnetization = outcar_parser.get_magnetization() 176 | test = { 177 | 'sphere': { 178 | 'x': { 179 | 'site_moment': { 180 | 1: {'s': -0.014, 'p': -0.051, 'd': 1.687, 'tot': 1.621}, 181 | 2: {'s': -0.015, 'p': -0.052, 'd': 1.686, 'tot': 1.619}, 182 | 3: {'s': -0.014, 'p': -0.053, 'd': 1.708, 'tot': 1.64}, 183 | 4: {'s': -0.014, 'p': -0.053, 'd': 1.708, 'tot': 1.64}, 184 | }, 185 | 
'total_magnetization': {'s': -0.057, 'p': -0.21, 'd': 6.788, 'tot': 6.521}, 186 | }, 187 | 'y': {'site_moment': {}, 'total_magnetization': {}}, 188 | 'z': {'site_moment': {}, 'total_magnetization': {}}, 189 | }, 190 | 'full_cell': np.asarray([6.4424922]), 191 | } 192 | 193 | for _proj in ['x', 'y', 'z']: 194 | for _key, _val in test['sphere'][_proj]['site_moment'].items(): 195 | _test = np.asarray(list(_val.values())) 196 | _mag = np.asarray(list(magnetization['sphere'][_proj]['site_moment'][_key].values())) 197 | np.testing.assert_allclose(_mag, _test) 198 | 199 | _test = np.asarray(list(test['sphere'][_proj]['total_magnetization'].values())) 200 | _mag = np.asarray(list(magnetization['sphere'][_proj]['total_magnetization'].values())) 201 | np.testing.assert_allclose(_mag, _test) 202 | _mag = np.asarray(list(magnetization['full_cell'])) 203 | _test = np.asarray(list(test['full_cell'])) 204 | np.testing.assert_allclose(_mag, _test) 205 | 206 | 207 | @pytest.mark.parametrize('outcar_parser', ['OUTCAR_MAG_SINGLE'], indirect=['outcar_parser']) 208 | def test_outcar_magnetization_single(outcar_parser): 209 | """Check if the magnetization parser returns the correct magnetization 210 | for a single atom in the unit cell. 
211 | 212 | """ 213 | 214 | magnetization = outcar_parser.get_magnetization() 215 | 216 | test = { 217 | 'sphere': { 218 | 'x': { 219 | 'site_moment': { 220 | 1: {'s': -0.012, 'p': -0.043, 'd': 2.49, 'tot': 2.434}, 221 | }, 222 | 'total_magnetization': {'s': -0.012, 'p': -0.043, 'd': 2.49, 'tot': 2.434}, 223 | }, 224 | 'y': {'site_moment': {}, 'total_magnetization': {}}, 225 | 'z': {'site_moment': {}, 'total_magnetization': {}}, 226 | }, 227 | 'full_cell': np.asarray([2.4077611]), 228 | } 229 | 230 | for _proj in ['x', 'y', 'z']: 231 | for _key, _val in test['sphere'][_proj]['site_moment'].items(): 232 | _test = np.asarray(list(_val.values())) 233 | _mag = np.asarray(list(magnetization['sphere'][_proj]['site_moment'][_key].values())) 234 | np.testing.assert_allclose(_mag, _test) 235 | 236 | _test = np.asarray(list(test['sphere'][_proj]['total_magnetization'].values())) 237 | _mag = np.asarray(list(magnetization['sphere'][_proj]['total_magnetization'].values())) 238 | np.testing.assert_allclose(_mag, _test) 239 | _mag = np.asarray(list(magnetization['full_cell'])) 240 | _test = np.asarray(list(test['full_cell'])) 241 | np.testing.assert_allclose(_mag, _test) 242 | 243 | 244 | def test_outcar_timing_information(outcar_parser_file_objects): 245 | """Check if outcar_parser returns correct timing information.""" 246 | 247 | timings = outcar_parser_file_objects.get_run_stats() 248 | assert timings['total_cpu_time_used'] == 89.795 249 | assert timings['user_time'] == 60.247 250 | assert timings['elapsed_time'] == 90.990 251 | assert timings['system_time'] == 29.549 252 | assert timings['maximum_memory_used'] == 81612.0 253 | assert timings['average_memory_used'] == 0.0 254 | 255 | assert timings['mem_usage_base'] == 30000.0 256 | assert timings['mem_usage_nonl-proj'] == 2198.0 257 | assert timings['mem_usage_fftplans'] == 304.0 258 | assert timings['mem_usage_grid'] == 903.0 259 | assert timings['mem_usage_one-center'] == 6.0 260 | assert timings['mem_usage_wavefun'] == 
559.0 261 | 262 | 263 | def test_run_stats(outcar_parser): 264 | """Test that the output stats is correct.""" 265 | 266 | run_stats = outcar_parser.get_run_stats() 267 | compare_dict = { 268 | 'mem_usage_base': 30000.0, 269 | 'mem_usage_nonl-proj': 2198.0, 270 | 'mem_usage_fftplans': 304.0, 271 | 'mem_usage_grid': 903.0, 272 | 'mem_usage_one-center': 6.0, 273 | 'mem_usage_wavefun': 559.0, 274 | 'total_cpu_time_used': 89.795, 275 | 'user_time': 60.247, 276 | 'system_time': 29.549, 277 | 'elapsed_time': 90.99, 278 | 'maximum_memory_used': 81612.0, 279 | 'average_memory_used': 0.0, 280 | } 281 | assert run_stats == compare_dict 282 | 283 | 284 | _TEST_DATA = [ 285 | ('OUTCAR.converged', [True, True, True, False, False]), 286 | ('OUTCAR.nelm-breach-consistent', [True, False, False, True, True]), 287 | ('OUTCAR.nelm-breach-partial', [True, False, True, False, True]), 288 | ('OUTCAR.unfinished', [False, False, False, False, False]), 289 | ('OUTCAR.not-converged', [True, False, True, False, False]), 290 | ] 291 | 292 | 293 | @pytest.mark.parametrize('outcar_parser,expected', _TEST_DATA, indirect=['outcar_parser']) 294 | def test_run_status(outcar_parser, expected): 295 | """Test that the status of the run is correct.""" 296 | 297 | run_status = outcar_parser.get_run_status() 298 | assert run_status['finished'] is expected[0] 299 | assert run_status['ionic_converged'] is expected[1] 300 | assert run_status['electronic_converged'] is expected[2] 301 | assert run_status['consistent_nelm_breach'] is expected[3] 302 | assert run_status['contains_nelm_breach'] is expected[4] 303 | 304 | 305 | def test_crashed_outcar(outcar_parser): 306 | """Test incomplete OUTCAR""" 307 | testdir = os.path.dirname(__file__) 308 | outcarfile = os.path.join(testdir, 'OUTCAR.crashed') 309 | with pytest.raises(SystemExit): 310 | _ = Outcar(file_path=outcarfile) 311 | -------------------------------------------------------------------------------- /tests/test_potcar.py: 
-------------------------------------------------------------------------------- 1 | """Test potcar.""" 2 | 3 | import os 4 | 5 | import pytest 6 | 7 | from parsevasp.potcar import Potcar 8 | 9 | 10 | @pytest.fixture(scope='module') 11 | def potcar_reference_metadata(): 12 | """Reference metadata for a dummy POTCAR""" 13 | metadata = { 14 | 'VRHFIN': 'In: s1p1', 15 | 'LEXCH': 'CA', 16 | 'EATOM': 1111.1111, 17 | 'TITEL': 'PAW In_d 11Feb1111', 18 | 'LULTRA': False, 19 | 'IUNSCR': 1, 20 | 'RPACOR': 1.111, 21 | 'POMASS': 111.111, 22 | 'ZVAL': 11.111, 23 | 'RCORE': 1.111, 24 | 'RWIGS': 1.111, 25 | 'ENMAX': 111.111, 26 | 'ENMIN': 111.111, 27 | 'ICORE': 1, 28 | 'LCOR': True, 29 | 'LPAW': True, 30 | 'EAUG': 111.111, 31 | 'DEXC': 1.111, 32 | 'RMAX': 1.111, 33 | 'RAUG': 1.111, 34 | 'RDEP': 1.111, 35 | 'RDEPT': 1.111, 36 | } 37 | 38 | return metadata 39 | 40 | 41 | @pytest.fixture(scope='module') 42 | def potcar_parser(request): 43 | """Load POTCAR file.""" 44 | 45 | testdir = os.path.dirname(__file__) 46 | potcarfile = os.path.join(testdir, 'POTCAR') 47 | potcar = Potcar(file_path=potcarfile) 48 | 49 | return potcar 50 | 51 | 52 | @pytest.fixture(scope='module') 53 | def potcar_parser_file_object(): 54 | """Load POTCAR file using a file object.""" 55 | 56 | testdir = os.path.dirname(__file__) 57 | potcarfile = os.path.join(testdir, 'POTCAR') 58 | potcar = None 59 | with open(potcarfile, 'r', encoding='utf8') as file_handler: 60 | potcar = Potcar(file_handler=file_handler) 61 | 62 | return potcar 63 | 64 | 65 | @pytest.mark.parametrize( 66 | 'potcar_object,reference_values', 67 | [ 68 | ('potcar_parser', 'potcar_reference_metadata'), 69 | ('potcar_parser_file_object', 'potcar_reference_metadata'), 70 | ], 71 | ) 72 | def test_potcar_metadata(potcar_object, reference_values, request): 73 | """Test if the metadata produced matches the expected one""" 74 | potcar_object = request.getfixturevalue(potcar_object) 75 | reference_values = request.getfixturevalue(reference_values) 76 
| 77 | for key, value in reference_values.items(): 78 | assert key in potcar_object.metadata, f'key "{key}" not in the metadata' 79 | assert value == potcar_object.metadata[key], ( 80 | f'referance value "{value}" does not match to found value {potcar_object.metadata[key]} for key "{key}"' 81 | ) 82 | 83 | 84 | @pytest.mark.parametrize( 85 | 'potcar_object,reference_values', 86 | [ 87 | ('potcar_parser', 'potcar_reference_metadata'), 88 | ('potcar_parser_file_object', 'potcar_reference_metadata'), 89 | ], 90 | ) 91 | def test_potcar_attributes(potcar_object, reference_values, request): 92 | """Test if the attributes are correctly setup""" 93 | potcar_object = request.getfixturevalue(potcar_object) 94 | reference_values = request.getfixturevalue(reference_values) 95 | 96 | extra_values = {'symbol': 'In_d', 'element': 'In', 'functional': 'Perdew-Zunger81', 'functional_class': 'LDA'} 97 | 98 | reference_values.update(extra_values) 99 | 100 | for key, value in reference_values.items(): 101 | assert hasattr(potcar_object, key.lower()), f'attribute {key} not found in potcar' 102 | assert getattr(potcar_object, key.lower()) == value, ( 103 | f'value of attribute {key} {getattr(potcar_object, key.lower())} does not match reference {value}' 104 | ) 105 | -------------------------------------------------------------------------------- /tests/test_stream.py: -------------------------------------------------------------------------------- 1 | """Test stream.""" 2 | 3 | import os 4 | import pathlib 5 | 6 | import pytest 7 | 8 | from parsevasp.stream import Stream 9 | 10 | cwd = pathlib.Path(__file__).parent 11 | 12 | 13 | @pytest.fixture() 14 | def stream_parser(request, tmpdir_factory): 15 | """Load a stream.""" 16 | try: 17 | name = request.param 18 | except AttributeError: 19 | # Test not parametrized 20 | name = 'stdout' 21 | testdir = os.path.dirname(__file__) 22 | stream_file = testdir + '/' + name 23 | stream = Stream(file_path=stream_file) 24 | 25 | return stream 26 | 
@pytest.fixture()
def stream_parser_file_objects(request):
    """Load stream file from a file object."""
    try:
        name = request.param
    except AttributeError:
        # Test not parametrized
        name = 'stdout'
    testdir = os.path.dirname(__file__)
    stream_file = os.path.join(testdir, name)
    with open(stream_file) as file_handler:
        stream = Stream(file_handler=file_handler)

    return stream


def test_stream(stream_parser):
    """Check if stream_parser returns expected results."""

    entries = stream_parser.entries
    assert stream_parser.configured_streams
    assert stream_parser.number_of_entries == 1
    assert stream_parser.has_entries
    assert str(entries[0]) == '(ERROR) ibzkpt: Error with the k-points.'


def test_stream_objects(stream_parser_file_objects):
    """Check if stream_parser_file_objects returns expected results passing an object"""

    entries = stream_parser_file_objects.entries
    assert stream_parser_file_objects.configured_streams
    assert stream_parser_file_objects.number_of_entries == 1
    assert stream_parser_file_objects.has_entries
    assert str(entries[0]) == '(ERROR) ibzkpt: Error with the k-points.'
64 | 65 | 66 | @pytest.mark.parametrize('stream_parser', (['stdout_nostart']), indirect=['stream_parser']) 67 | def test_executed(stream_parser): 68 | """Check if stream_parser returns expected results for execution checks.""" 69 | 70 | entries = stream_parser.entries 71 | assert entries[0].shortname == 'nostart' 72 | 73 | 74 | def test_stream_override(stream_parser): 75 | """Check that the stream override works.""" 76 | import re 77 | 78 | testdir = os.path.dirname(__file__) 79 | stream_file = testdir + '/stdout' 80 | stream = Stream( 81 | file_path=stream_file, 82 | config={ 83 | 'ibzkpt': { 84 | 'kind': 'WARNING', 85 | 'regex': 'internal error', 86 | 'message': 'some error', 87 | 'suggestion': 'none', 88 | 'location': 'STDOUT', 89 | 'recover': True, 90 | } 91 | }, 92 | ) 93 | assert len(stream.entries) == 1 94 | assert stream.entries[0].kind == 'WARNING' 95 | assert stream.entries[0].regex == re.compile('internal error') 96 | assert stream.entries[0].message == 'some error' 97 | assert stream.entries[0].suggestion == 'none' 98 | assert stream.entries[0].location == stream_parser.entries[0].location 99 | assert stream.entries[0].recover == stream_parser.entries[0].recover 100 | 101 | 102 | def test_stream_zbrent(): 103 | """Check parsing ZBRENT error.""" 104 | stream_file = cwd / 'stdout_ZBRENT' 105 | stream = Stream(file_path=stream_file) 106 | assert stream.entries[0].kind == 'ERROR' 107 | assert stream.entries[0].message == 'Error in ZBRENT' 108 | -------------------------------------------------------------------------------- /tests/utils.py: -------------------------------------------------------------------------------- 1 | def isclose(a, b, rel_tol=1e-07, abs_tol=0.0): 2 | return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) 3 | --------------------------------------------------------------------------------