├── .bumpversion.cfg
├── .github
└── workflows
│ ├── deploy.yml
│ ├── fair-software.yml
│ └── testing.yml
├── .gitignore
├── .readthedocs.yml
├── CITATION.cff
├── LICENSE
├── MANIFEST.in
├── README.md
├── _config.yml
├── _toc.yml
├── atomrdf
├── __init__.py
├── data
│ ├── asmo.owl
│ ├── cdco.owl
│ ├── cmso.owl
│ ├── dft_template.yml
│ ├── element.yml
│ ├── ldo.owl
│ ├── md_template.yml
│ ├── pldo.owl
│ ├── podo.owl
│ ├── prov.rdf
│ └── rdfs.owl
├── encoder.py
├── graph.py
├── io.py
├── json_io.py
├── mp.py
├── namespace.py
├── ontology.py
├── properties.py
├── sample.py
├── stores.py
├── structure.py
├── visualize.py
└── workflow
│ ├── __init__.py
│ ├── jobdict.yml
│ ├── pyiron
│ ├── __init__.py
│ ├── calphy.py
│ ├── lammps.py
│ ├── murnaghan.py
│ ├── pyiron.py
│ ├── quasiharmonic.py
│ └── vasp.py
│ ├── qe
│ ├── __init__.py
│ └── qe.py
│ └── workflow.py
├── codecov.yml
├── docs
├── acknowledgements.md
├── api.rst
├── examples.md
├── extending.md
├── gettingstarted.md
├── license.md
└── source
│ └── _static
│ └── logo.png
├── environment-docs.yml
├── environment-workflows.yml
├── environment.yml
├── examples
├── 01_getting_started.ipynb
├── 02_grain_boundaries.ipynb
├── 03_point_defects.ipynb
├── 04_substitution.ipynb
├── 05_interstitials.ipynb
├── 06_read_in.ipynb
├── 07_dislocation.ipynb
├── 08_write_qe.ipynb
├── 09_structure_modification.ipynb
├── 10_sparql_queries.ipynb
├── 11_dislocations.ipynb
├── conf.dump
├── dataset.tar.gz
├── qe_ref.in
└── workflow_examples
│ ├── 01_lammps_pyiron.ipynb
│ ├── 02_vasp.ipynb
│ ├── 03_murnaghan_pyiron.ipynb
│ ├── 04_vacancy_formation_lammps.ipynb
│ ├── 05_quasiharmonic.ipynb
│ ├── 06_linking_calculations.ipynb
│ ├── 07_quantum_espresso.ipynb
│ └── qe_run
│ ├── Si.pbe-n-rrkjus_psl.1.0.0.UPF
│ └── pw.si.scf.ref
├── index.ipynb
├── logo.png
├── notebooks
├── create_onto.ipynb
├── data_read.ipynb
├── data_set.ipynb
├── example-db.ipynb
├── example.ipynb
├── example_gui.ipynb
├── funowl.ipynb
├── generate_sample_data
│ └── generate_data.ipynb
├── io_poscar.ipynb
├── mem_usage.png
├── memory_profiling.ipynb
├── memscript.py
├── metadata_schema.ipynb
├── ontology_parsing.ipynb
├── ontology_path.ipynb
├── p3_testing.ipynb
├── prototype_test.ipynb
├── rdflib_db.ipynb
├── read_cif.ipynb
├── schema.yml
├── semantic_operations.ipynb
├── speed_testing.ipynb
├── test_network.ipynb
└── wrap_creation.ipynb
├── requirements.txt
├── setup.py
└── tests
├── __init__.py
├── al_data
├── .gitkeep
├── Al.cif
├── Al.dump
├── Al.json
├── Al.poscar
├── Al.prismatic
└── Al_sym.cif
├── conf.dump
├── qe_data
├── pw.si.scf.in
└── pw.si.scf.out
├── test_encoder_and_write.py
├── test_graph.py
├── test_network.py
├── test_qe.py
├── test_rotate.py
├── test_store.py
├── test_structure.py
├── test_structuregraph.py
├── test_visualise.py
└── test_workflow.py
/.bumpversion.cfg:
--------------------------------------------------------------------------------
1 | [bumpversion]
2 | current_version = 0.10.2
3 | commit = True
4 | tag = False
5 |
6 | [bumpversion:file:setup.py]
7 |
8 | [bumpversion:file:CITATION.cff]
9 |
--------------------------------------------------------------------------------
/.github/workflows/deploy.yml:
--------------------------------------------------------------------------------
1 | name: PyPi Release
2 |
3 | on:
4 | push:
5 | pull_request:
6 | workflow_dispatch:
7 |
8 | # based on https://github.com/pypa/gh-action-pypi-publish
9 | jobs:
10 | build:
11 | runs-on: ubuntu-latest
12 |
13 | steps:
14 | - uses: actions/checkout@v2
15 | - uses: actions/setup-python@v2
16 | with:
17 | python-version: "3.10"
18 |
19 | - name: Install dependencies
20 | run: >-
21 | python -m pip install --user --upgrade setuptools wheel
22 | - name: Convert dependencies
23 | run: >-
24 | sed -i 's/==/>=/g' setup.py; cat setup.py
25 | - name: Build
26 | run: >-
27 | python setup.py sdist bdist_wheel
28 | - name: Publish distribution 📦 to PyPI
29 | if: startsWith(github.event.ref, 'refs/tags') || github.event_name == 'release'
30 | uses: pypa/gh-action-pypi-publish@master
31 | with:
32 | user: __token__
33 | password: ${{ secrets.PYPI_API_TOKEN }}
34 |
--------------------------------------------------------------------------------
/.github/workflows/fair-software.yml:
--------------------------------------------------------------------------------
1 | name: fair-software
2 |
3 | on: push
4 |
5 | jobs:
6 | verify:
7 | name: "fair-software"
8 | runs-on: ubuntu-latest
9 | steps:
10 | - uses: fair-software/howfairis-github-action@0.2.1
11 | name: Measure compliance with fair-software.eu recommendations
12 | env:
13 | PYCHARM_HOSTED: "Trick colorama into displaying colored output"
14 | with:
15 | MY_REPO_URL: "https://github.com/${{ github.repository }}"
16 |
--------------------------------------------------------------------------------
/.github/workflows/testing.yml:
--------------------------------------------------------------------------------
1 | name: testing
2 |
3 | on:
4 | push:
5 | branches: [ main ]
6 | pull_request:
7 | branches: [ main ]
8 |
9 | jobs:
10 | build:
11 | strategy:
12 | matrix:
13 | os: [ubuntu-latest,]
14 | python-version: ['3.11',]
15 |
16 | runs-on: ${{ matrix.os }}
17 | steps:
18 | - uses: actions/checkout@v2
19 | - uses: conda-incubator/setup-miniconda@v3
20 | with:
21 | mamba-version: "*"
22 | activate-environment: rdf
23 | environment-file: environment.yml
24 | python-version: ${{ matrix.python-version }}
25 |
26 | - name: run tests
27 | shell: bash -l {0}
28 | run: |
29 | pip install -e .
30 | pip install pytest
31 | pip install pytest-cov
32 | #pytest tests/
33 | pytest --cov-report=xml --cov=atomrdf tests/
34 |
35 | - name: Upload coverage reports to Codecov
36 | uses: codecov/codecov-action@v4.0.1
37 | with:
38 | token: ${{ secrets.CODECOV_TOKEN }}
39 | slug: pyscal/atomRDF
40 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | #custom
2 | rdf_structure_store/
3 | *.json
4 | *.ttl
5 | *.db
6 | wf*
7 | *.data
8 | *.ipynb
9 | *.dump
10 | *POSCAR*
11 | *.tar.gz
12 | *.in
13 | *.dot
14 | *.png
15 | examples/workflow_examples/t*
16 | examples/workflow_examples/y*
17 |
18 | # Byte-compiled / optimized / DLL files
19 | __pycache__/
20 | *.py[cod]
21 | *$py.class
22 |
23 | # C extensions
24 | *.so
25 |
26 | # Distribution / packaging
27 | .Python
28 | build/
29 | develop-eggs/
30 | dist/
31 | downloads/
32 | eggs/
33 | .eggs/
34 | lib/
35 | lib64/
36 | parts/
37 | sdist/
38 | var/
39 | wheels/
40 | pip-wheel-metadata/
41 | share/python-wheels/
42 | *.egg-info/
43 | .installed.cfg
44 | *.egg
45 | MANIFEST
46 |
47 | # PyInstaller
48 | # Usually these files are written by a python script from a template
49 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
50 | *.manifest
51 | *.spec
52 |
53 | # Installer logs
54 | pip-log.txt
55 | pip-delete-this-directory.txt
56 |
57 | # Unit test / coverage reports
58 | htmlcov/
59 | .tox/
60 | .nox/
61 | .coverage
62 | .coverage.*
63 | .cache
64 | nosetests.xml
65 | coverage.xml
66 | *.cover
67 | *.py,cover
68 | .hypothesis/
69 | .pytest_cache/
70 |
71 | # Translations
72 | *.mo
73 | *.pot
74 |
75 | # Django stuff:
76 | *.log
77 | local_settings.py
78 | db.sqlite3
79 | db.sqlite3-journal
80 |
81 | # Flask stuff:
82 | instance/
83 | .webassets-cache
84 |
85 | # Scrapy stuff:
86 | .scrapy
87 |
88 | # Sphinx documentation
89 | docs/_build/
90 |
91 | # PyBuilder
92 | target/
93 |
94 | # Jupyter Notebook
95 | .ipynb_checkpoints
96 |
97 | # IPython
98 | profile_default/
99 | ipython_config.py
100 |
101 | # pyenv
102 | .python-version
103 |
104 | # pipenv
105 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
106 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
107 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
108 | # install all needed dependencies.
109 | #Pipfile.lock
110 |
111 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
112 | __pypackages__/
113 |
114 | # Celery stuff
115 | celerybeat-schedule
116 | celerybeat.pid
117 |
118 | # SageMath parsed files
119 | *.sage.py
120 |
121 | # Environments
122 | .env
123 | .venv
124 | env/
125 | venv/
126 | ENV/
127 | env.bak/
128 | venv.bak/
129 |
130 | # Spyder project settings
131 | .spyderproject
132 | .spyproject
133 |
134 | # Rope project settings
135 | .ropeproject
136 |
137 | # mkdocs documentation
138 | /site
139 |
140 | # mypy
141 | .mypy_cache/
142 | .dmypy.json
143 | dmypy.json
144 |
145 | # Pyre type checker
146 | .pyre/
147 |
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | build:
4 | os: ubuntu-22.04
5 | tools:
6 | python: "mambaforge-4.10"
7 | jobs:
8 | pre_build:
9 | # Generate the Sphinx configuration for this Jupyter Book so it builds.
10 | - "jupyter-book config sphinx ."
11 |
12 | conda:
13 | environment: environment-docs.yml
14 |
15 | python:
16 | install:
17 | - method: pip
18 | path: .
19 |
20 | sphinx:
21 | builder: html
22 | fail_on_warning: false
23 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
 2 | title: atomRDF, python tool for ontology-based creation, manipulation, and querying of atomic structures.
3 | message: >-
4 | If you use this software, please cite it using the
5 | metadata from this file. Funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation)
6 | under the National Research Data Infrastructure – NFDI 38/1 – project number 460247524
7 | authors:
8 | - given-names: Sarath
9 | family-names: Menon
10 | affiliation: Max-Planck-Institut für Eisenforschung GmbH
11 | orcid: 'https://orcid.org/0000-0002-6776-1213'
12 | - given-names: Abril
13 | family-names: Azocar Guzman
14 | affiliation: Institute for Advanced Simulations – Materials Data Science and Informatics (IAS-9), Forschungszentrum Jülich GmbH
15 | orcid: 'https://orcid.org/0000-0001-7564-7990'
16 | date-released: '2024-02-15'
17 | doi: 10.5281/zenodo.8146527
18 | url: 'https://atomrdf.pyscal.org'
19 | license: "MIT"
20 | repository-code: https://github.com/pyscal/atomRDF
21 | type: software
22 | version: 0.10.2
23 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 pyscal
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include atomrdf/data/*.??l
2 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # atomRDF
2 |
3 | > [!NOTE]
4 | > `atomRDF` was previously called `pyscal-rdf`.
5 |
6 | [](https://codecov.io/gh/pyscal/atomRDF)
7 | [](https://anaconda.org/conda-forge/atomrdf)
8 | 
9 | [](https://doi.org/10.5281/zenodo.10973374)
10 |
11 | `atomRDF` is a python tool for ontology-based creation, manipulation, and querying of structures. `atomRDF` uses the [Computational Material Sample Ontology (CMSO)](https://github.com/Materials-Data-Science-and-Informatics/cmso-ontology).
12 |
13 | The package is currently under active development and may be unstable.
14 |
15 | More details coming soon...
16 |
17 |
18 | ## Acknowledgements
19 | This work is supported by the [NFDI-Matwerk](https://nfdi-matwerk.de/) consortia.
20 |
21 | Funded by the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) under the National Research Data Infrastructure – NFDI 38/1 – project number 460247524
22 |
23 |
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | # Book settings
2 | # Learn more at https://jupyterbook.org/customize/config.html
3 |
4 | title: "atomrdf"
5 | #author: The Jupyter Book Community
6 | logo: docs/source/_static/logo.png
7 |
8 | # Force re-execution of notebooks on each build.
9 | # See https://jupyterbook.org/content/execute.html
10 | execute:
11 | execute_notebooks: "auto"
12 |
13 | only_build_toc_files: true
14 |
15 | # Define the name of the latex output file for PDF builds
16 | latex:
17 | latex_documents:
18 | targetname: book.tex
19 |
20 | # Information about where the book exists on the web
21 | repository:
22 | url: https://github.com/pyscal/atomrdf
23 | path_to_book: book
24 | branch: main
25 |
26 | notebook_interface : "notebook"
27 |
28 | # Add GitHub buttons to your book
29 | # See https://jupyterbook.org/customize/config.html#add-a-link-to-your-repository
30 | html:
31 | use_issues_button: false
32 | use_repository_button: true
33 |
34 | parse:
35 | myst_enable_extensions:
36 | # don't forget to list any other extensions you want enabled,
37 | # including those that are enabled by default!
38 | - html_image
39 | - amsmath
40 | - dollarmath
41 | - linkify
42 | - substitution
43 | - colon_fence
44 |
45 | sphinx:
46 | config:
47 | mathjax_path: https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js
48 | html_theme: pydata_sphinx_theme
49 | html_js_files:
50 | - https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js
51 | html_sidebars:
52 | "**": []
53 |
54 | extra_extensions:
55 | - 'sphinx.ext.autodoc'
56 | - 'sphinx.ext.napoleon'
57 | - 'sphinx.ext.viewcode'
58 | - 'sphinx.ext.autosummary'
59 |
60 |
--------------------------------------------------------------------------------
/_toc.yml:
--------------------------------------------------------------------------------
1 | # Table of contents
2 | # Learn more at https://jupyterbook.org/customize/toc.html
3 |
4 | format: jb-book
5 | root: index
6 | chapters:
7 | - file: docs/gettingstarted.md
8 | - file: docs/examples.md
9 | sections:
10 | - file: examples/01_getting_started
11 | - file: examples/02_grain_boundaries
12 | - file: docs/extending.md
13 | - file: docs/license.md
14 | - file: docs/acknowledgements.md
15 | - file: docs/api.rst
16 |
17 |
--------------------------------------------------------------------------------
/atomrdf/__init__.py:
--------------------------------------------------------------------------------
1 | from atomrdf.graph import KnowledgeGraph
2 | from atomrdf.structure import System
3 | from atomrdf.workflow.workflow import Workflow
4 |
--------------------------------------------------------------------------------
/atomrdf/data/dft_template.yml:
--------------------------------------------------------------------------------
1 | method: DensityFunctionalTheory
2 | temperature: 100
3 | pressure: 0
4 | dof:
5 | - AtomicPositions
6 | - CellVolume
7 | id: 2314
8 | xcfunctional: LDA
9 | workflow_manager:
10 | uri: xxxx
11 | label: pyiron
12 | software:
13 | - uri: xxxx
14 | label: lammps
15 | - uri: xxxx
16 | label: pyscal
17 | outputs:
18 | - label: TotalEnergy
19 | value: 2.301
20 | unit: EV
21 | associate_to_sample: True
22 | - label: TotalVolume
23 | value: 23.01
24 | unit: ANGSTROM3
25 | associate_to_sample: True
26 | inputs:
27 | - label: AnotherInput
28 | value: 0.1
29 | unit: None
30 |
--------------------------------------------------------------------------------
/atomrdf/data/element.yml:
--------------------------------------------------------------------------------
1 | Ac: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33337
2 | Ag: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:30512
3 | Al: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:28984
4 | Am: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33389
5 | As: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:27563
6 | Ar: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:49475
7 | At: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:30416
8 | Au: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:29287
9 | B: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:27560
10 | Ba: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:32594
11 | Be: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:30501
12 | Bh: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33355
13 | Bi: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33301
14 | Bk: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33391
15 | Br: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33117
16 | C: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:27594
17 | Ca: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:22984
18 | Cd: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:22977
19 | Ce: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33369
20 | Cf: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33392
21 | Cl: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:29311
22 | Cm: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33390
23 | Cn: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33517
24 | Co: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:27638
25 | Cr: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:28073
26 | Cs: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:30514
27 | Cu: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:28694
28 | Db: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33349
29 | Ds: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33367
30 | Dy: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33377
31 | Er: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33379
32 | Es: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33393
33 | Eu: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:32999
34 | F: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:30239
35 | Fe: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:18248
36 | Fl: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:194531
37 | Fm: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33394
38 | Fr: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33323
39 | Ga: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:49631
40 | Gd: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33375
41 | Ge: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:30441
42 | H: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:49637
43 | He: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:30217
44 | Hf: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33343
45 | Hg: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:25195
46 | Ho: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:49648
47 | Hs: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33357
48 | I: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33115
49 | In: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:30430
50 | Ir: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:49666
51 | K: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:26216
52 | Kr: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:49696
53 | La: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33336
54 | Li: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:30145
55 | Lr: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33397
56 | Lu: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33382
57 | Lv: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:194537
58 | Mc: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:194535
59 | Md: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33395
60 | Mg: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:25107
61 | Mn: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:18291
62 | Mo: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:28685
63 | Mt: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33361
64 | N: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:25555
65 | Na: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:26708
66 | Nb: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33344
67 | Nd: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33372
68 | Ne: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33310
69 | Nh: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:194533
70 | Ni: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:28112
71 | No: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33396
72 | Np: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33387
73 | O: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:25805
74 | Og: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:194541
75 | Os: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:30687
76 | P: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:30207
77 | Pa: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33386
78 | Pb: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:25016
79 | Pd: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33363
80 | Po: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33313
81 | Pr: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:49828
82 | Pt: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33364
83 | Pu: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33388
84 | Ra: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33325
85 | Rb: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33322
86 | Re: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:49882
87 | Rf: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33346
88 | Rg: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33368
89 | Rh: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33359
90 | Rn: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33314
91 | Ru: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:30682
92 | S: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:26833
93 | Sb: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:30513
94 | Sc: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33330
95 | Se: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:27568
96 | Sg: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33351
97 | Si: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:27573
98 | Sm: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33374
99 | Sn: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:27007
100 | Sr: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33324
101 | Ta: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33348
102 | Tb: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33376
103 | Tc: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33353
104 | Te: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:30452
105 | Th: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33385
106 | Ti: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33341
107 | Tl: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:30440
108 | Tm: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33380
109 | Ts: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:194539
110 | U: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:27214
111 | V: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:27698
112 | W: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:27998
113 | Xe: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:49957
114 | Y: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33331
115 | Yb: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33381
116 | Zn: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:27363
117 | Zr: https://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI:33342
--------------------------------------------------------------------------------
/atomrdf/data/md_template.yml:
--------------------------------------------------------------------------------
1 | method: MolecularStatics
2 | temperature: 100
3 | pressure: 0
4 | dof:
5 | - AtomicPositions
6 | - CellVolume
7 | ensemble: NPT
8 | id: 2314
9 | potential:
10 | uri: https://doi.org/xxx
11 | type: eam
12 | label: string
13 | workflow_manager:
14 | uri: xxxx
15 | label: pyiron
16 | software:
17 | - uri: xxxx
18 | label: lammps
19 | - uri: xxxx
20 | label: pyscal
21 | outputs:
22 | - label: TotalEnergy
23 | value: 2.301
24 | unit: EV
25 | associate_to_sample: True
26 | - label: TotalVolume
27 | value: 23.01
28 | unit: ANGSTROM3
29 | associate_to_sample: True
30 | inputs:
31 | - label: AnotherInput
32 | value: 0.1
33 | unit: None
34 |
35 |
--------------------------------------------------------------------------------
/atomrdf/data/rdfs.owl:
--------------------------------------------------------------------------------
1 |
2 |
9 |
10 |
11 |
12 |
13 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
--------------------------------------------------------------------------------
/atomrdf/encoder.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from json import JSONEncoder
3 |
4 |
class NumpyArrayEncoder(JSONEncoder):
    """
    JSON encoder that converts numpy types to native Python for dumping.

    Handles numpy integer and floating scalars, numpy booleans, and
    ndarrays (converted to nested lists); anything else falls back to the
    default JSONEncoder behavior (which raises TypeError).
    """

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.bool_):
            # np.bool_ is not a subclass of int/bool, so the branches above
            # miss it and plain json would raise TypeError
            return bool(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)
19 |
--------------------------------------------------------------------------------
/atomrdf/io.py:
--------------------------------------------------------------------------------
1 | import mendeleev
2 | import numpy as np
3 | from ase.io.espresso import read_fortran_namelist
4 | import os
5 | import warnings
6 |
7 | def _convert_tab_to_dict(tab):
8 | keywords = ["ATOMIC_SPECIES",
9 | "ATOMIC_POSITIONS",
10 | "K_POINTS",
11 | "CELL_PARAMETERS",
12 | "OCCUPATIONS",
13 | "CONSTRAINTS",
14 | "ATOMIC_VELOCITIES",
15 | "ATOMIC_FORCES",
16 | "ADDITIONAL_K_POINTS",
17 | "SOLVENTS",
18 | "HUBBARD"]
19 |
20 | tabdict = {}
21 | for line in tab:
22 | firstword = line.split()[0]
23 | secondword = " ".join(line.split()[1:])
24 |
25 | if firstword in keywords:
26 | tabdict[firstword] = {}
27 | tabdict[firstword]['value'] = []
28 | tabdict[firstword]['extra'] = secondword
29 | tabarr = tabdict[firstword]['value']
30 | else:
31 | tabarr.append(line.strip())
32 | return tabdict
33 |
def write_espresso(s, inputfile, copy_from=None, pseudo_files=None):
    """
    Write a structure as a Quantum ESPRESSO (pw.x) input file.

    Parameters
    ----------
    s : System
        Structure to write; ``box``, ``direct_coordinates``,
        ``atoms.species`` and ``sample`` are read from it.
    inputfile : str
        Path of the input file to create.
    copy_from : str, optional
        Existing QE input file used as a template; its namelists and cards
        are reused, with structure-specific entries overwritten.
    pseudo_files : list of str, optional
        One pseudopotential file per unique species; all files must live in
        the same directory.

    Returns
    -------
    None

    Raises
    ------
    ValueError
        If the number of pseudo files does not match the number of unique
        species, or the pseudo files are not all in one directory.
    """
    data = None
    tab = None

    if copy_from is not None and os.path.exists(copy_from):
        try:
            with open(copy_from, 'r') as fin:
                data, tab = read_fortran_namelist(fin)
        except Exception:
            # fix: was a bare `except:` that also set an unused `copy` flag;
            # fall back to writing a clean file on any parse failure
            warnings.warn(f'Error reading {copy_from}, a clean file will be written')

    if tab is not None:
        tab = _convert_tab_to_dict(tab)
    else:
        tab = {}

    if data is None:
        data = {}
    # make sure the namelists we write into always exist; a template read
    # from copy_from may lack them, which previously raised KeyError below
    data.setdefault('system', {})
    data.setdefault('control', {})

    # cell vectors are always taken from the structure, never the template
    tab['CELL_PARAMETERS'] = {}
    tab['CELL_PARAMETERS']['extra'] = 'angstrom'
    tab['CELL_PARAMETERS']['value'] = []
    for vec in s.box:
        tab['CELL_PARAMETERS']['value'].append(' '.join([str(x) for x in vec]))

    cds = s.direct_coordinates
    species = s.atoms.species

    unique_species = np.unique(species)
    if pseudo_files is not None:
        if not len(pseudo_files) == len(unique_species):
            raise ValueError('Number of pseudo files must match number of unique species')
        pseudo_dirs = [os.path.dirname(os.path.abspath(pseudo_file)) for pseudo_file in pseudo_files]
        if not len(np.unique(pseudo_dirs)) == 1:
            raise ValueError('All pseudo files must be in the same directory')
        data['control']['pseudo_dir'] = pseudo_dirs[0]
    else:
        # placeholders; the user must edit these before running pw.x
        pseudo_files = ['None' for x in range(len(unique_species))]

    tab['ATOMIC_SPECIES'] = {}
    tab['ATOMIC_SPECIES']['extra'] = ''
    tab['ATOMIC_SPECIES']['value'] = []
    for count, us in enumerate(unique_species):
        # look up the atomic weight for each species
        chem = mendeleev.element(us)
        tab['ATOMIC_SPECIES']['value'].append(f'{us} {chem.atomic_weight} {os.path.basename(pseudo_files[count])}')

    tab['ATOMIC_POSITIONS'] = {}
    tab['ATOMIC_POSITIONS']['extra'] = 'crystal'
    tab['ATOMIC_POSITIONS']['value'] = []
    for cd, sp in zip(cds, species):
        tab['ATOMIC_POSITIONS']['value'].append(f'{sp} {cd[0]} {cd[1]} {cd[2]}')

    data['system']['ibrav'] = 0
    data['system']['nat'] = len(species)
    data['system']['ntyp'] = len(unique_species)

    with open(inputfile, 'w') as fout:
        # record the sample IRI as a comment so the file can be linked back
        # to the knowledge graph
        if s.sample is not None:
            fout.write(f'! {s.sample.toPython()}\n\n')

        # namelists: &SYSTEM, &CONTROL, ...
        for key, val in data.items():
            fout.write(f'&{key.upper()}\n')
            for k, v in val.items():
                if isinstance(v, str):
                    fout.write(f' {k} = \'{v}\',\n')
                else:
                    fout.write(f' {k} = {v},\n')
            fout.write('/\n')
            fout.write('\n')

        # cards: keyword + optional unit, then one value per line
        for key, val in tab.items():
            fout.write(f'{key} {val["extra"]}\n')
            fout.write('\n')
            for v in val['value']:
                fout.write(v)
                fout.write('\n')
            fout.write('\n')
--------------------------------------------------------------------------------
/atomrdf/json_io.py:
--------------------------------------------------------------------------------
1 | import json
2 | import yaml
3 | from atomrdf.encoder import NumpyArrayEncoder
4 |
5 |
def write_file(outfile, data):
    """
    Write a given dict as json file

    Parameters
    ----------
    outfile: string
        name of output file. `.json` will be added to the given file name

    data: dict
        input data dict

    Returns
    -------
    None
    """
    target = f"{outfile}.json"
    with open(target, "w") as fout:
        json.dump(data, fout, cls=NumpyArrayEncoder)
26 |
--------------------------------------------------------------------------------
/atomrdf/mp.py:
--------------------------------------------------------------------------------
1 | """
2 | Wrapper around Materials Project to query structures and get it as a KG
3 | """
4 |
5 | from mp_api.client import MPRester
6 | import numpy as np
7 |
def query_mp(api_key, chemical_system=None, material_ids=None, is_stable=True):
    """
    Query the Materials Project summary endpoint.

    Parameters
    ----------
    api_key : str
        Materials Project API key.
    chemical_system : str, optional
        Chemical system to search for (e.g. "Si-O").
    material_ids : list, optional
        Explicit material ids to fetch; used when no chemical system is given.
    is_stable : bool, optional
        Restrict a chemical-system search to stable entries. Defaults to True.

    Returns
    -------
    list
        Raw summary documents returned by the API.

    Raises
    ------
    ValueError
        If neither a chemical system nor material ids are provided.
    """
    if chemical_system is None and material_ids is None:
        raise ValueError("Please provide either a chemical system or a list of material ids")
    settings = {
        "use_document_model": False,
        "include_user_agent": True,
        "api_key": api_key,
    }
    with MPRester(**settings) as mpr:
        if chemical_system is not None:
            return mpr.materials.summary.search(chemsys=chemical_system, is_stable=is_stable)
        return mpr.materials.summary.search(material_ids=material_ids)
--------------------------------------------------------------------------------
/atomrdf/namespace.py:
--------------------------------------------------------------------------------
1 | """
2 | This module provides the Namespace class for managing namespaces in the AtomRDF library.
3 |
4 | The Namespace class extends the rdflib.Namespace class and provides additional functionality for working with namespaces.
5 |
6 | Classes
7 | -------
8 | Namespace
9 | A class representing a namespace in the AtomRDF library.
10 | """
11 |
12 | import os
13 | import json
14 | import numpy as np
15 | from rdflib import URIRef
16 | from rdflib import Namespace as RDFLibNamespace
17 | from rdflib import Literal as RDFLibLiteral
18 | from pyscal3.atoms import AttrSetter
19 | from tools4rdf.network.network import OntologyNetwork
20 |
def Literal(value, datatype=None):
    """
    Build an rdflib Literal, JSON-encoding lists and numpy arrays.

    If an explicit datatype is given it wins; otherwise list-like values
    are serialized to a JSON string so they round-trip through the graph.
    """
    if datatype is not None:
        return RDFLibLiteral(value, datatype=datatype)
    # ndarray is checked first here; it is never an instance of list, so
    # the branch order does not change behavior
    if isinstance(value, np.ndarray):
        return RDFLibLiteral(json.dumps(value.tolist()))
    if isinstance(value, list):
        return RDFLibLiteral(json.dumps(value))
    return RDFLibLiteral(value)
30 |
class Namespace(AttrSetter, RDFLibNamespace):
    """A class representing a namespace in the AtomRDF library.

    This class extends the `rdflib.Namespace` classes.

    Parameters
    ----------
    infile : str
        The input file path.
    delimiter : str, optional
        The delimiter used in the input file. Defaults to "/".

    Attributes
    ----------
    network : OntologyNetwork
        The ontology network associated with the namespace.
    name : str
        The name of the namespace.
    """

    def __init__(self, infile, delimiter="/"):
        """
        Initialize the Namespace class.

        Parameters
        ----------
        infile : str
            The input file path (an OWL ontology file).
        delimiter : str, optional
            The delimiter used in the input file. Defaults to "/".
            NOTE(review): currently unused in this method.
        """
        AttrSetter.__init__(self)
        self.network = OntologyNetwork(infile)
        # NOTE(review): this passes the IRI string as `self`, which is a
        # no-op on the str-based rdflib Namespace; the instance's string
        # value is actually set by __new__ from `infile`. Confirm whether
        # the namespace was meant to carry base_iri as its value.
        RDFLibNamespace.__init__(self.network.onto.base_iri)
        # short name: last path segment of the ontology's base IRI
        self.name = self.network.onto.base_iri.split('/')[-1]
        mapdict = {}

        # now iterate over all attributes
        for k1 in ["class", "object_property", "data_property"]:
            for k2, val in self.network.onto.attributes[k1].items():
                #print(val.namespace, self.name)
                #if val.namespace == self.name:
                mapdict[val.name_without_prefix] = val

        # add attributes so terms are reachable as e.g. CMSO.AtomicScaleSample
        self._add_attribute(mapdict)
77 |
78 |
# location of the packaged ontology data files
file_location = os.path.dirname(__file__)

# ontology-backed namespaces built from the shipped OWL files
CMSO = Namespace(os.path.join(file_location, "data/cmso.owl"))
LDO = Namespace(os.path.join(file_location, "data/ldo.owl"))
PLDO = Namespace(os.path.join(file_location, "data/pldo.owl"))
PODO = Namespace(os.path.join(file_location, "data/podo.owl"))
ASMO = Namespace(os.path.join(file_location, "data/asmo.owl"))
# NOTE(review): MATH is built from asmo.owl, the same file as ASMO —
# confirm whether a dedicated math ontology file is missing here.
MATH = Namespace(os.path.join(file_location, "data/asmo.owl"))
CDCO = Namespace(os.path.join(file_location, "data/cdco.owl"))

# plain rdflib namespaces (no ontology network attached)
PROV = RDFLibNamespace("http://www.w3.org/ns/prov#")
MDO = RDFLibNamespace("https://w3id.org/mdo/calculation/")
UNSAFECMSO = RDFLibNamespace("http://purls.helmholtz-metadaten.de/cmso/")
UNSAFEASMO = RDFLibNamespace("http://purls.helmholtz-metadaten.de/asmo/")
93 |
--------------------------------------------------------------------------------
/atomrdf/ontology.py:
--------------------------------------------------------------------------------
1 | from tools4rdf.network.parser import OntoParser, parse_ontology
2 | from tools4rdf.network.network import OntologyNetworkBase
3 |
4 | import os
5 |
def read_ontology():
    """
    Parse the shipped OWL ontologies and combine them into a single network.

    Returns
    -------
    OntologyNetworkBase
        The combined ontology network, with widened data-property ranges
        and rdfs:label paths registered for query building.
    """
    file_location = os.path.dirname(__file__)

    # parse each ontology file shipped with the package
    cmso = parse_ontology(os.path.join(file_location, "data/cmso.owl"))
    pldo = parse_ontology(os.path.join(file_location, "data/pldo.owl"))
    podo = parse_ontology(os.path.join(file_location, "data/podo.owl"))
    asmo = parse_ontology(os.path.join(file_location, "data/asmo.owl"))
    ldo = parse_ontology(os.path.join(file_location, "data/ldo.owl"))
    cdco = parse_ontology(os.path.join(file_location, "data/cdco.owl"))


    #now sum them up
    combo = cmso + cdco + pldo + podo + asmo + ldo
    # widen value ranges so literals of these Python types can be attached
    combo.attributes['data_property']['cmso:hasSymbol'].range.append("str")
    combo.attributes['data_property']['asmo:hasValue'].range.extend(["float", "double", "int", "str"])

    #now combine the ontologies
    combo = OntologyNetworkBase(combo)

    #add string labels as needed
    combo.add_namespace("rdfs", "http://www.w3.org/2000/01/rdf-schema#")

    combo.add_term(
        "http://www.w3.org/2000/01/rdf-schema#label",
        "data_property",
        delimiter="#",
        namespace="rdfs",
        rn = ['str']
    )

    # allow rdfs:label lookups from these classes in queries
    combo.add_path(("asmo:CalculatedProperty", "rdfs:label", "string"))
    combo.add_path(("asmo:InputParameter", "rdfs:label", "string"))
    combo.add_path(("prov:SoftwareAgent", "rdfs:label", "string"))
    combo.add_path(("asmo:InteratomicPotential", "rdfs:label", "string"))

    return combo
--------------------------------------------------------------------------------
/atomrdf/stores.py:
--------------------------------------------------------------------------------
1 | from rdflib.store import NO_STORE, VALID_STORE
2 | from rdflib import plugin
3 | from rdflib import Graph
4 | from atomrdf.namespace import Literal
5 |
6 | import os
7 | import shutil
8 |
def create_store(kg, store, identifier, store_file=None, structure_store=None):
    """
    Create a store based on the given parameters.

    Parameters:
    -----------
    kg : KnowledgeGraph
        The knowledge graph object.
    store : str or Project
        The type of store to create. It can be either "Memory", "SQLAlchemy", or a pyiron Project object.
    identifier : str
        The identifier for the store.
    store_file : str, optional
        The file path to store the data (only applicable for certain store types).
    structure_store : str, optional
        The structure store to use (only applicable for certain store types).

    Raises:
    -------
    ValueError
        If an unknown store type is provided.

    """
    kg.store_file = store_file
    # dispatch on the requested backend name
    if store in ("Memory", "memory"):
        backend = store_memory
    elif store in ("SQLAlchemy", "db", "database", "sqlalchemy"):
        backend = store_alchemy
    else:
        raise ValueError("Unknown store found!")
    backend(
        kg,
        store,
        identifier,
        store_file=store_file,
        structure_store=structure_store,
    )
51 |
52 |
def store_memory(kg, store, identifier, store_file=None, structure_store=None):
    """
    Store the knowledge graph in memory.

    Parameters
    ----------
    kg : KnowledgeGraph
        The knowledge graph to be stored.
    store : str
        The type of store to use for storing the graph (unused here).
    identifier : str
        The identifier for the graph.
    store_file : str, optional
        The file to store the graph in. Defaults to None (unused here).
    structure_store : str, optional
        The structure store to use. Defaults to None.

    Returns
    -------
    None
    """
    # an in-memory rdflib graph needs no open/create step
    kg.graph = Graph(store="Memory", identifier=identifier)
    kg.structure_store = _setup_structure_store(structure_store=structure_store)
77 |
78 |
def store_alchemy(kg, store, identifier, store_file=None, structure_store=None):
    """
    Store the knowledge graph using SQLAlchemy.

    Parameters
    ----------
    kg : KnowledgeGraph
        The knowledge graph to be stored.
    store : str
        The type of store to be used (unused here).
    identifier : str
        The identifier for the graph.
    store_file : str, optional
        The file path for the sqlite database. Required.
    structure_store : str, optional
        The structure store to be used.

    Raises
    ------
    ValueError
        If store_file is None.

    Returns
    -------
    None
    """
    # fail early if the optional backend packages are missing
    _check_if_sqlalchemy_is_available()
    if store_file is None:
        raise ValueError("store file is needed if store is not memory")

    kg.graph = Graph(store="SQLAlchemy", identifier=identifier)
    # the sqlite URI is wrapped as an rdflib term before opening
    kg.graph.open(Literal(f"sqlite:///{store_file}"), create=True)
    kg.structure_store = _setup_structure_store(structure_store=structure_store)
113 |
114 | def _check_if_sqlalchemy_is_available():
115 | try:
116 | import sqlalchemy as sa
117 | except ImportError:
118 | raise RuntimeError("Please install the sqlalchemy package")
119 | try:
120 | import rdflib_sqlalchemy as rsa
121 | except ImportError:
122 | raise RuntimeError(
123 | "Please install the rdllib-sqlalchemy package. The development version is needed, please do pip install git+https://github.com/RDFLib/rdflib-sqlalchemy.git@develop"
124 | )
125 |
126 |
127 | def _setup_structure_store(structure_store=None):
128 | if structure_store is None:
129 | structure_store = os.path.join(os.getcwd(), "rdf_structure_store")
130 | if not os.path.exists(structure_store):
131 | os.mkdir(structure_store)
132 | return structure_store
133 |
def purge(store, identifier, store_file):
    """
    Wipe the backing store and return a fresh graph of the same kind.

    Raises
    ------
    ValueError
        If the store name is not recognized.
    """
    if store in ("Memory", "memory"):
        return _purge_memory(identifier, store_file)
    if store in ("SQLAlchemy", "db", "database", "sqlalchemy"):
        return _purge_alchemy(identifier, store_file)
    raise ValueError("Unknown store found!")
143 |
144 |
def _purge_memory(identifier, store_file):
    # an in-memory store has nothing on disk; just hand back a new graph
    return Graph(store="Memory", identifier=identifier)
148 |
def _purge_alchemy(identifier, store_file):
    # drop the sqlite file, then recreate an empty SQLAlchemy-backed graph
    os.remove(store_file)
    graph = Graph(store="SQLAlchemy", identifier=identifier)
    uri = Literal(f"sqlite:///{store_file}")
    graph.open(uri, create=True)
    return graph
--------------------------------------------------------------------------------
/atomrdf/visualize.py:
--------------------------------------------------------------------------------
1 | import graphviz
2 | import os
3 | from rdflib import BNode, URIRef, Namespace, Literal
4 | import uuid
5 | import json
6 |
7 |
def get_title_from_BNode(x):
    """Return the plain-Python value of a blank node for display."""
    title = x.toPython()
    return title
10 |
11 |
def get_string_from_URI(x):
    """
    Extract a presentable string from a URI.

    Parameters
    ----------
    x : rdflib.term.URIRef
        The URI object to extract the string from.

    Returns
    -------
    tuple
        A (string, type) pair, where type is "URIRef" or "BNode". The
        string is the last fragment/path segment of the URI, or a
        joined form for atomrdf-internal ids such as ``sample:...``.
    """
    raw = x.toPython()

    # fragment-style URIs: keep the part after '#'
    fragments = raw.split("#")
    if len(fragments) > 1:
        return fragments[-1], "URIRef"

    # CHEBI URIs carry the id after '=' as prefix:number
    if "CHEBI" in raw:
        pieces = raw.split("=")[-1].split(":")
        if len(pieces) > 1:
            return ".".join(pieces[-2:]), "URIRef"

    # atomrdf-internal identifiers are rendered as blank nodes with the
    # ':' separators replaced by '_'
    for marker in ("sample:", "activity:", "simulation:", "operation:", "property:"):
        if marker in raw:
            pieces = raw.split(":")
            if len(pieces) > 1:
                return "_".join(pieces), "BNode"

    # just a normal url: keep the last two path segments
    segments = raw.split("/")
    if len(segments) > 1:
        return ".".join(segments[-2:]), "URIRef"

    # none of the conditions worked, which means it's a hex string
    return raw, "BNode"
73 |
74 |
def parse_object(x):
    """
    Parse the given object and return its title and type.

    Parameters
    ----------
    x : RDF term
        The RDF term to parse (BNode, URIRef, or Literal).

    Returns
    -------
    tuple
        A tuple containing the title of the object and its type string
        ("BNode", "URIRef", or "Literal"). Any other term type falls
        through and yields None.

    """
    if isinstance(x, BNode):
        return get_title_from_BNode(x), "BNode"
    elif isinstance(x, URIRef):
        return get_string_from_URI(x)
    elif isinstance(x, Literal):
        # NOTE(review): str.title() title-cases the literal's text (e.g.
        # "total energy" -> "Total Energy"); confirm this display casing
        # is intended rather than plain str(x).
        return str(x.title()), "Literal"
96 |
97 |
# Default styling for visualize_graph. Each node class carries every
# attribute that visualize_graph reads (shape, style, color, fontsize,
# fontname); "edgecolor" is read for each edge. fontsize/fontname and
# edgecolor were previously missing, so calling visualize_graph with
# this default styledict raised KeyError.
styledict = {
    "BNode": {"color": "#ffe6ff", "shape": "box", "style": "filled", "fontsize": "8", "fontname": "Helvetica"},
    "URIRef": {"color": "#ffffcc", "shape": "box", "style": "filled", "fontsize": "8", "fontname": "Helvetica"},
    "Literal": {"color": "#e6ffcc", "shape": "ellipse", "style": "filled", "fontsize": "8", "fontname": "Helvetica"},
    "edgecolor": "#263238",
}
103 |
104 |
105 | def _switch_box(box):
106 | if box == "box":
107 | return "rectangle"
108 | # remember that only boxes will be used, circles no!
109 |
110 |
111 | def _fix_id(string1, istype1):
112 | if istype1 == "Literal":
113 | id1 = str(uuid.uuid4())
114 | else:
115 | id1 = string1
116 | return id1
117 |
118 |
def visualize_graph(
    g,
    styledict=styledict,
    rankdir="TB",
    hide_types=False,
    workflow_view=False,
    sample_view=False,
    size=None,
    layout="dot",
):
    """
    Visualizes a graph using Graphviz.

    Parameters
    ----------
    g : dict
        The graph to visualize; iterating it must yield
        (subject, predicate, object) triples.
    styledict : dict, optional
        A dictionary containing styles for different types of nodes and edges. Default is `styledict`.
        For each of "BNode"/"URIRef"/"Literal" it must provide "shape",
        "style", "color", "fontsize" and "fontname"; a top-level
        "edgecolor" entry is read for every edge.
    rankdir : str, optional
        The direction of the graph layout. Default is "TB" (top to bottom).
    hide_types : bool, optional
        Whether to hide nodes with the "type" attribute. Default is False.
    workflow_view : bool, optional
        Whether to enable the workflow view. Default is False.
    sample_view : bool, optional
        Whether to enable the sample view. Default is False.
    size : str, optional
        The size of the graph. Default is None.
    layout : str, optional
        The layout algorithm to use. Default is "dot".

    Returns
    -------
    dot : graphviz.Digraph
        The graph visualization.
    """
    dot = graphviz.Digraph()

    dot.attr(
        rankdir=rankdir,
        style="filled",
        size=size,
        layout=layout,
        overlap="false",
    )

    # each k is a (subject, predicate, object) triple
    for k in g:
        string1, istype1 = parse_object(k[0])
        string2, istype2 = parse_object(k[2])
        string3, istype = parse_object(k[1])

        plot = True

        if workflow_view:
            # we collapse sample information
            # if cmso.connector is found, only use it is it is cmso.hasCalculated
            # all sub sample props, indicated by sample_x_jsjsj will be ignored.
            green_list = ["hasCalculatedProperty", "wasCalculatedBy", "hasValue"]
            ssplit = string3.split(".")
            if len(ssplit) == 2:
                if (ssplit[0] == "cmso") and (ssplit[1] not in green_list):
                    plot = False
            if string3 == "subClassOf":
                plot = False
            ssplit = string2.split(".")
            if string3 == "type":
                if (ssplit[0] == "cmso") and (ssplit[1] not in ["CalculatedProperty"]):
                    plot = False
                if (ssplit[0] == "cmso") and (ssplit[1] in ["AtomicScaleSample"]):
                    # keep the sample node itself, but drop the type edge
                    dot.node(
                        string1,
                        label=string1,
                        shape=styledict[istype1]["shape"],
                        style=styledict[istype1]["style"],
                        color=styledict[istype1]["color"],
                        fontsize=styledict[istype1]["fontsize"],
                        fontname=styledict[istype1]["fontname"],
                    )
                    plot = False

        elif sample_view:
            # only provenance edges are drawn in sample view
            green_list = ['wasDerivedFrom', 'wasGeneratedBy']
            if string3 not in green_list:
                plot = False


        if hide_types and (string3 == "type"):
            plot = False

        if not plot:
            continue

        # literals get a fresh uuid as id so equal values do not
        # collapse into a single node
        if istype1 == "Literal":
            id1 = str(uuid.uuid4())
        else:
            id1 = string1
        dot.node(
            id1,
            label=string1,
            shape=styledict[istype1]["shape"],
            style=styledict[istype1]["style"],
            color=styledict[istype1]["color"],
            fontsize=styledict[istype1]["fontsize"],
            fontname=styledict[istype1]["fontname"],
        )

        if istype2 == "Literal":
            id2 = str(uuid.uuid4())
        else:
            id2 = string2
        dot.node(
            id2,
            label=string2,
            shape=styledict[istype2]["shape"],
            style=styledict[istype2]["style"],
            color=styledict[istype2]["color"],
            fontsize=styledict[istype2]["fontsize"],
            fontname=styledict[istype2]["fontname"],
        )

        # the predicate becomes the edge label
        dot.edge(
            id1,
            id2,
            color=styledict["edgecolor"],
            label=string3,
            fontsize=styledict[istype2]["fontsize"],
            fontname=styledict[istype2]["fontname"],
        )

    return dot
250 |
251 | def _id(item):
252 | return str(item).replace(':', '_')
253 |
def visualize_provenance(
    prov,
    rankdir="TB",
    size=None,
    layout="dot",
):
    """
    Render a provenance dictionary as a graphviz Digraph.

    Parameters
    ----------
    prov : dict
        Mapping of item id -> record. Each record has a "label"; records
        with an "inputs" dict also carry an "operation" string that
        selects how the incoming edges are drawn.
    rankdir : str, optional
        Layout direction, default "TB".
    size : str, optional
        Figure size passed to graphviz.
    layout : str, optional
        Layout engine, default "dot".

    Returns
    -------
    graphviz.Digraph
    """
    dot = graphviz.Digraph()
    dot.attr(
        rankdir=rankdir,
        style="filled",
        size=size,
        layout=layout,
        overlap="false",
    )
    #add all nodes
    for key in prov.keys():
        nid = _id(key)
        dot.node(nid, label=prov[key]['label'],
                shape='box',
                color="#C9DAF8",
                style="filled",
                fontname='Helvetica',
                fontsize='8')
    #add all edges
    for key, val in prov.items():
        if 'inputs' in val.keys():
            if val['operation'] == 'input_parameter':
                for subkey, subval in val['inputs'].items():
                    dot.edge(_id(subval), _id(key), label='input_param',
                            color="#263238",
                            fontname='Helvetica',
                            fontsize='8')
            # BUGFIX: this branch was a separate `if`, so 'input_parameter'
            # records also fell through to the final `else` and generated a
            # spurious operation node with input/output edges.
            elif val['operation'] == 'output_parameter':
                for subkey, subval in val['inputs'].items():
                    dot.edge(_id(subval), _id(key), label='output_param',
                            color="#263238",
                            fontname='Helvetica',
                            fontsize='8')
            elif val['operation'] == 'sample_for_activity':
                for subkey, subval in val['inputs'].items():
                    dot.edge(_id(subval), _id(key), label='input_sample',
                            color="#263238",
                            fontname='Helvetica',
                            fontsize='8')
            elif val['operation'] == 'sample_output':
                for subkey, subval in val['inputs'].items():
                    dot.edge(_id(subval), _id(key), label='output_sample',
                            color="#263238",
                            fontname='Helvetica',
                            fontsize='8')
            else:
                # generic operation: insert an intermediate operation node
                # between the inputs and the produced item
                operation_id = str(uuid.uuid4())
                operation = dot.node(operation_id, label=val['operation'],
                        color="#E6B8AF",
                        shape='box',
                        style='filled',
                        fontname='Helvetica',
                        fontsize='8')
                for subkey, subval in val['inputs'].items():
                    dot.edge(_id(subval), operation_id, label='input',
                            color="#263238",
                            fontname='Helvetica',
                            fontsize='8')
                dot.edge(operation_id, _id(key), label='output',
                        color="#263238",
                        fontname='Helvetica',
                        fontsize='8')
    return dot
--------------------------------------------------------------------------------
/atomrdf/workflow/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/atomrdf/workflow/jobdict.yml:
--------------------------------------------------------------------------------
1 | sample:
2 | initial: string-id
3 | final: string-id
4 | structure:
5 | initial: System obj
6 | final: System obj
7 | intermediate: False
8 | method: MolecularStatics/MolecularDynamics/DensityFunctionalTheory/EquationOfState/QuasiHarmonicModel/ThermodynamicIntegration
9 | path: job path folder
10 | dof:
11 | - AtomicPositionRelaxation
12 | - CellVolumeRelaxation
13 | - CellShapeRelaxation
14 | inputs:
15 | - label:
16 | unit:
17 | value:
18 | outputs:
19 | - label:
20 | unit:
21 | value:
22 | associate_to_sample: True
23 | workflow_manager:
24 | uri:
25 | software:
26 | - uri:
27 | label:
28 |
29 | #special keys
30 | #DFT
31 | encut:
32 | kpoint_grid:
33 | kpoint_type:
34 | xc_functional:
35 |
36 | #MD
37 | ensemble: CanonicalEnsemble/MicrocanonicalEnsemble/IsothermalIsobaricEnsemble/IsoenthalpicIsobaricEnsemble/GrandCanonicalEnsemble
38 | temperature:
39 | pressure:
40 | potential:
41 | type:
42 | uri:
43 | label:
44 |
45 |
--------------------------------------------------------------------------------
/atomrdf/workflow/pyiron/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyscal/atomRDF/b177c889afc5b1b31d043ec36681766e0dc37790/atomrdf/workflow/pyiron/__init__.py
--------------------------------------------------------------------------------
/atomrdf/workflow/pyiron/calphy.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import ast
4 | from atomrdf.structure import System
5 | import atomrdf.workflow.pyiron.lammps as lammps
6 |
def process_job(job):
    """Convert a finished calphy job into an atomrdf method dictionary."""
    method_dict = {'intermediate': False}
    # structure handling is shared with the plain LAMMPS parser
    lammps.get_structures(job, method_dict)

    identify_method(job, method_dict)
    extract_calculated_quantities(job, method_dict)
    add_software(method_dict)
    get_simulation_folder(job, method_dict)
    return method_dict
17 |
def get_simulation_folder(job, method_dict):
    """Record the job's hdf5 working directory in the method dictionary."""
    folder = f"{job.name}_hdf5"
    method_dict['path'] = os.path.join(job.project.path, folder)
20 |
21 |
def identify_method(job, method_dict):
    """
    Populate method_dict with the method details of a calphy job.

    Derives the relaxed degrees of freedom and statistical ensemble from
    the pressure input, extracts interatomic-potential metadata from the
    pyiron potential dataframe, and records pressure/temperature inputs.

    Parameters
    ----------
    job : pyiron job
        A finished calphy job.
    method_dict : dict
        The dictionary to populate (modified in place).

    Raises
    ------
    ValueError
        If the pressure input has an unsupported shape.
    """
    pressure = job.input.pressure
    if pressure is None:
        # no barostat: the cell is kept fixed
        iso = True
        fix_lattice = True
    elif np.isscalar(pressure):
        iso = True
        fix_lattice = False
    elif np.shape(pressure) in [(1,), (2,)]:
        iso = True
        fix_lattice = False
    elif np.shape(pressure) in [(1, 3), (2, 3)]:
        iso = False
        fix_lattice = False
    else:
        # previously an unexpected shape left iso/fix_lattice undefined,
        # producing a confusing NameError further down
        raise ValueError(f"Unsupported pressure input: {pressure}")

    dof = ["AtomicPositionRelaxation"]

    # BUGFIX: the ensemble assignments were swapped — a fixed lattice
    # corresponds to NVT (canonical), a relaxing cell to NPT
    # (isothermal-isobaric).
    if fix_lattice:
        ensemble = "CanonicalEnsemble"
    else:
        dof.append("CellVolumeRelaxation")
        ensemble = "IsothermalIsobaricEnsemble"

    if not iso:
        dof.append("CellShapeRelaxation")

    method_dict["dof"] = dof
    method_dict["ensemble"] = ensemble

    # now potential: metadata comes from the pyiron potential dataframe
    ps = job.potential.Config.values[0][0].strip().split('pair_style ')[-1]
    name = job.potential.Name.values[0]
    potstr = job.potential.Citations.values[0]
    # citations are stored as a bracketed dict string; strip the brackets
    potdict = ast.literal_eval(potstr[1:-1])
    url = None
    first_entry = potdict[list(potdict.keys())[0]]
    if "url" in first_entry.keys():
        url = first_entry["url"]

    method_dict["potential"] = {
        "type": ps,
        "label": name,
        # fall back to the potential name when no url is published
        "uri": url if url is not None else name,
    }
    method_dict['method'] = 'ThermodynamicIntegration'
    method_dict['inputs'] = [
        {
            "label": "Pressure",
            "value": job.input.pressure,
            "unit": "BAR",
        },
        {
            "label": "Temperature",
            "value": job.input.temperature,
            "unit": "K",
        },
    ]
89 |
def add_software(method_dict):
    """Attach workflow-manager and code metadata for a calphy run."""
    method_dict["workflow_manager"] = {
        "uri": "https://doi.org/10.1016/j.commatsci.2018.07.043",
        "label": "pyiron",
    }
    # and finally code details
    method_dict["software"] = [
        {
            "uri": "https://doi.org/10.1016/j.cpc.2021.108171",
            "label": "LAMMPS",
        },
        {
            "uri": "https://doi.org/10.5281/zenodo.10527452",
            "label": "Calphy",
        },
    ]
106 |
def extract_calculated_quantities(job, method_dict):
    """Collect calphy outputs (free energy, pressure, temperature) into method_dict."""
    # (label, hdf path, unit, rounding decimals) for each reported quantity
    quantities = (
        ("FreeEnergy", 'output/energy_free', "EV", 4),
        ("VirialPressure", 'output/pressure', "GigaPA", 4),
        ("Temperature", 'output/temperature', "K", 2),
    )
    method_dict['outputs'] = [
        {
            "label": label,
            "value": np.round(job[path], decimals=digits),
            "unit": unit,
            "associate_to_sample": True,
        }
        for (label, path, unit, digits) in quantities
    ]
--------------------------------------------------------------------------------
/atomrdf/workflow/pyiron/lammps.py:
--------------------------------------------------------------------------------
1 | """
2 | LAMMPS specific functions for parsing
3 |
4 | Use this a reference for specific implementations
5 | """
6 | import os
7 | import numpy as np
8 | import ast
9 | from atomrdf.structure import System
10 |
def process_job(job):
    """Convert a finished pyiron LAMMPS job into an atomrdf method dictionary."""
    method_dict = {'intermediate': False}
    get_structures(job, method_dict)
    identify_method(job, method_dict)
    extract_calculated_quantities(job, method_dict)
    add_software(method_dict)
    get_simulation_folder(job, method_dict)
    return method_dict
20 |
def get_simulation_folder(job, method_dict):
    """Store the path of the job's hdf5 folder in the method dictionary."""
    method_dict['path'] = os.path.join(job.project.path, job.name + '_hdf5')
23 |
def get_structures(job, method_dict):
    """
    Store the initial/final structures and the initial sample id in method_dict.

    The pyiron structures are converted to atomrdf System objects; a
    "sample_id" entry in the initial structure's info dict (present when
    the structure came through atomrdf) is carried over.
    """
    initial_pyiron_structure = job.structure
    final_pyiron_structure = job.get_structure(frame=-1)

    initial_sample_id = initial_pyiron_structure.info.get("sample_id", None)

    # convert both endpoints to atomrdf systems
    initial_pyscal_structure = System.read.ase(initial_pyiron_structure)
    final_pyscal_structure = System.read.ase(final_pyiron_structure)

    # now we do the transfer
    method_dict['structure'] = {
        'initial': initial_pyscal_structure,
        'final': final_pyscal_structure,
    }
    method_dict['sample'] = {
        'initial': initial_sample_id,
        'final': None,
    }
41 |
def identify_method(job, method_dict):
    """
    Infer method, degrees of freedom, ensemble, potential and inputs of a
    pyiron LAMMPS job, storing the results in method_dict.

    Parameters
    ----------
    job : pyiron Lammps job
        The finished job to inspect.
    method_dict : dict
        The dictionary to populate (modified in place).
    """
    # flatten the pyiron control input into a {parameter: value} mapping
    job_dict = job.input.to_dict()
    input_dict = {
        job_dict["control_inp/data_dict"]["Parameter"][x]: job_dict[
            "control_inp/data_dict"
        ]["Value"][x]
        for x in range(len(job_dict["control_inp/data_dict"]["Parameter"]))
    }
    dof = []
    temp = None
    press = None
    md_method = None
    ensemble = None

    # NOTE(review): the local `method` assigned in the branches below is
    # never used; only `md_method` is written to method_dict.
    if "min_style" in input_dict.keys():
        # a minimization run
        dof.append("AtomicPositionRelaxation")
        dof.append("CellVolumeRelaxation")
        md_method = "MolecularStatics"

    elif "nve" in input_dict["fix___ensemble"]:
        if int(input_dict["run"]) == 0:
            # zero-step NVE run: effectively a static calculation
            method = "static"
            md_method = "MolecularStatics"
            ensemble = "MicrocanonicalEnsemble"

        elif int(input_dict["run"]) > 0:
            method = "md_nve"
            dof.append("AtomicPositionRelaxation")
            md_method = "MolecularDynamics"
            ensemble = "MicrocanonicalEnsemble"

    elif "nvt" in input_dict["fix___ensemble"]:
        method = "md_nvt"
        # assumes the fix string layout puts the temperature at token 3 —
        # TODO confirm against pyiron's generated fix command
        raw = input_dict["fix___ensemble"].split()
        temp = float(raw[3])
        dof.append("AtomicPositionRelaxation")
        md_method = "MolecularDynamics"
        ensemble = "CanonicalEnsemble"

    elif "npt" in input_dict["fix___ensemble"]:
        dof.append("AtomicPositionRelaxation")
        dof.append("CellVolumeRelaxation")
        if "aniso" in input_dict["fix___ensemble"]:
            method = "md_npt_aniso"
            dof.append("CellShapeRelaxation")
        else:
            method = "md_npt_iso"
        md_method = "MolecularDynamics"
        # assumes temperature at token 3 and pressure at token 7 —
        # TODO confirm against pyiron's generated fix command
        raw = input_dict["fix___ensemble"].split()
        temp = float(raw[3])
        press = float(raw[7])
        ensemble = "IsothermalIsobaricEnsemble"

    method_dict["method"] = md_method
    method_dict["dof"] = dof
    method_dict["ensemble"] = ensemble

    # now process potential
    inpdict = job.input.to_dict()
    ps = inpdict["potential_inp/data_dict"]["Value"][0]
    name = inpdict["potential_inp/potential/Name"]
    potstr = job.input.to_dict()["potential_inp/potential/Citations"]
    # citations are stored as a bracketed dict string; strip the brackets
    # before literal_eval
    potdict = ast.literal_eval(potstr[1:-1])
    url = None
    if "url" in potdict[list(potdict.keys())[0]].keys():
        url = potdict[list(potdict.keys())[0]]["url"]

    method_dict["potential"] = {}
    method_dict["potential"]["type"] = ps
    method_dict["potential"]["label"] = name
    # fall back to the potential name when no url is published
    if url is not None:
        method_dict["potential"]["uri"] = url
    else:
        method_dict["potential"]["uri"] = name

    #add temperature and pressure as inputs
    method_dict['inputs'] = []
    if temp is not None:
        method_dict['inputs'].append(
            {
                "label": "Temperature",
                "value": temp,
                "unit": "K",
            }
        )
    if press is not None:
        method_dict['inputs'].append(
            {
                "label": "Pressure",
                "value": press,
                "unit": "GigaPA",
            }
        )
135 |
def add_software(method_dict):
    """Attach workflow-manager and code metadata for a plain LAMMPS run."""
    method_dict["workflow_manager"] = {
        "uri": "https://doi.org/10.1016/j.commatsci.2018.07.043",
        "label": "pyiron",
    }
    # and finally code details
    method_dict["software"] = [
        {
            "uri": "https://doi.org/10.1016/j.cpc.2021.108171",
            "label": "LAMMPS",
        }
    ]
147 |
def extract_calculated_quantities(job, method_dict):
    """
    Extracts calculated quantities from a job.

    Mean total/potential energy, their difference (kinetic energy) and the
    mean cell volume are stored under method_dict['outputs'].

    Parameters
    ----------
    job : pyiron.Job
        The job object containing the calculated quantities.
    method_dict : dict
        The dictionary to populate (modified in place).
    """
    mean_tot = np.mean(job.output.energy_tot)
    mean_pot = np.mean(job.output.energy_pot)
    mean_kin = mean_tot - mean_pot
    mean_vol = np.mean(job.output.volume)

    method_dict['outputs'] = [
        {
            "label": "TotalEnergy",
            "value": np.round(mean_tot, decimals=4),
            "unit": "EV",
            "associate_to_sample": True,
        },
        {
            "label": "PotentialEnergy",
            "value": np.round(mean_pot, decimals=4),
            "unit": "EV",
            "associate_to_sample": True,
        },
        {
            "label": "KineticEnergy",
            "value": np.round(mean_kin, decimals=4),
            "unit": "EV",
            "associate_to_sample": True,
        },
        {
            "label": "SimulationCellVolume",
            "value": np.round(mean_vol, decimals=4),
            "unit": "ANGSTROM3",
            "associate_to_sample": True,
            "base": "SimulationCellVolume",
        },
    ]
--------------------------------------------------------------------------------
/atomrdf/workflow/pyiron/murnaghan.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import ast
4 | from atomrdf.structure import System
5 | import atomrdf.workflow.pyiron.lammps as lammps
6 |
def process_job(job):
    """
    Process a pyiron Murnaghan job into a list of method dictionaries.

    Each child Lammps job is parsed with the regular LAMMPS parser and
    marked as intermediate; a final dictionary describing the
    equation-of-state fit itself is appended.

    Parameters
    ----------
    job : pyiron Murnaghan job
        The finished job, with its child jobs loadable via the project.

    Returns
    -------
    list of dict
        One method dictionary per child job, plus the Murnaghan summary.
    """
    #murnaghan job processing; add individual lammps jobs first
    job_dicts = []
    for jobid in job.child_ids:
        child_job = job.project.load(jobid)
        if type(child_job).__name__ == 'Lammps':
            single_job_dict = lammps.process_job(child_job)
            #note that we turn the jobs to child here
            single_job_dict['intermediate'] = True
            job_dicts.append(single_job_dict)

    #create an additional jobdict with the murnaghan job
    murnaghan_dict = {}
    lammps.get_structures(job, murnaghan_dict)
    murnaghan_dict['intermediate'] = False
    lammps.get_simulation_folder(job, murnaghan_dict)

    #add the murnaghan method
    murnaghan_dict['method'] = "EquationOfState"
    # fitted equilibrium values plus the raw energy/volume curves
    outputs = []
    outputs.append(
        {
            "label": "EquilibriumEnergy",
            "value": np.round(job['output/equilibrium_energy'], decimals=4),
            "unit": "EV",
            "associate_to_sample": True,
            "base": "TotalEnergy",
        }
    )
    outputs.append(
        {
            "label": "EquilibriumVolume",
            "value": np.round(job['output/equilibrium_volume'], decimals=4),
            "unit": "ANGSTROM3",
            "associate_to_sample": True,
            "base": "Volume",
        }
    )
    outputs.append(
        {
            "label": "TotalEnergy",
            "value": np.round(job['output/energy'], decimals=4),
            "unit": "EV",
            "associate_to_sample": True,
            "base": "TotalEnergy",
        }
    )
    outputs.append(
        {
            "label": "SimulationCellVolume",
            "value": np.round(job['output/volume'], decimals=4),
            "unit": "ANGSTROM3",
            "associate_to_sample": True,
            "base": "SimulationCellVolume",
        }
    )
    outputs.append(
        {
            "label": "BulkModulus",
            "value": np.round(job['output/equilibrium_bulk_modulus'], decimals=2),
            "unit": "GigaPA",
            "associate_to_sample": True,
        }
    )

    murnaghan_dict['outputs'] = outputs
    lammps.add_software(murnaghan_dict)
    job_dicts.append(murnaghan_dict)
    return job_dicts
--------------------------------------------------------------------------------
/atomrdf/workflow/pyiron/pyiron.py:
--------------------------------------------------------------------------------
1 | """
2 | Wrappers for pyiron jobs
3 | """
4 |
5 | import os
6 | from functools import partial, update_wrapper
7 | from pyscal3.core import structure_dict, element_dict
8 |
9 | from atomrdf.structure import _make_crystal, _make_grain_boundary
10 | import atomrdf.workflow.pyiron.lammps as lammps
11 | import atomrdf.workflow.pyiron.vasp as vasp
12 | import atomrdf.workflow.pyiron.murnaghan as murnaghan
13 | import atomrdf.workflow.pyiron.quasiharmonic as qha
14 | import atomrdf.workflow.pyiron.calphy as calphy
15 |
def process_job(job):
    """
    Check that the job is a supported pyiron job type and create the
    output dictionary (or list of dictionaries) for it by dispatching
    to the matching wrapper module.

    Parameters
    ----------
    job : pyiron.Job
        The pyiron job object to process.

    Returns
    -------
    dict or list of dict
        The method dictionary (or dictionaries) produced by the
        job-type-specific wrapper module.

    Raises
    ------
    TypeError
        If the job is not a supported pyiron job type.
    """
    job_type = type(job).__name__
    if job_type == 'Lammps':
        return lammps.process_job(job)
    elif job_type == 'Vasp':
        return vasp.process_job(job)
    elif job_type == 'Murnaghan':
        return murnaghan.process_job(job)
    elif job_type == 'QuasiHarmonicJob':
        return qha.process_job(job)
    elif job_type == 'Calphy':
        return calphy.process_job(job)
    else:
        raise TypeError("This type of pyiron Job is not currently supported")
43 |
44 |
45 |
def inform_graph(pr, kg):
    """
    Set up the workflow environment so that it is aware of the knowledge graph.

    For pyiron, this attaches the knowledge graph to the project object and
    replaces the project creator with one whose structure factory builds
    graph-annotated structures: the pyiron structures it returns carry the
    originating sample id in their ``info`` dict, which the job-processing
    wrappers later read back to link jobs to samples.

    Parameters
    ----------
    pr : pyiron.Project
        The project to update; gains a ``graph`` attribute and a new creator.
    kg : atomrdf.KnowledgeGraph
        The knowledge graph to attach to the project.

    Raises
    ------
    ImportError
        If pyiron_base or pyiron_atomistics is not installed.
    """

    try:
        from pyiron_base import Creator, PyironFactory
        from pyiron_atomistics.atomistics.structure.atoms import (
            ase_to_pyiron,
            pyiron_to_ase,
        )
        import pyiron_atomistics.atomistics.structure.factory as sf
    except ImportError:
        raise ImportError("Please install pyiron_base and pyiron_atomistics")

    class AnnotatedStructureFactory:
        # Builds structures through atomrdf so that each one is registered
        # in the knowledge graph before being converted to pyiron.
        def __init__(self, graph):
            self._graph = graph

        def bulk(
            self,
            element,
            repetitions=None,
            crystalstructure=None,
            a=None,
            covera=1.633,
            cubic=True,
            graph=None,
            label=None,
        ):
            """Create a bulk crystal of `element`, registered in the graph."""
            # fall back to the tabulated structure / lattice constant
            # for the element when not given explicitly
            if crystalstructure is None:
                crystalstructure = element_dict[element]["structure"]
            if a is None:
                a = element_dict[element]["lattice_constant"]

            struct = _make_crystal(
                crystalstructure,
                repetitions=repetitions,
                lattice_constant=a,
                ca_ratio=covera,
                element=element,
                primitive=not cubic,
                graph=self._graph,
                label=label,
            )

            # atomrdf -> ASE -> pyiron; carry the sample id with the
            # structure so job parsing can link back to the graph
            ase_structure = struct.write.ase()
            pyiron_structure = ase_to_pyiron(ase_structure)
            pyiron_structure.info["sample_id"] = struct.sample
            return pyiron_structure

        def grain_boundary(
            self,
            axis,
            sigma,
            gb_plane,
            crystalstructure=None,
            element=None,
            a=1,
            covera=1.633,
            repetitions=(1, 1, 1),
            overlap=0.0,
            gap=0.0,
            vacuum=0.0,
            delete_layer="0b0t0b0t",
            tolerance= 0.25,
            primitive=False,
            uc_a=1,
            uc_b=1,
            graph=None,
            names=False,
            label=None,
            backend='aimsgb'
        ):
            """Create a grain boundary structure, registered in the graph."""
            struct = _make_grain_boundary(
                axis,
                sigma,
                gb_plane,
                structure=crystalstructure,
                element=element,
                lattice_constant=a,
                ca_ratio=covera,
                repetitions=repetitions,
                overlap=overlap,
                gap=gap,
                vacuum=vacuum,
                delete_layer=delete_layer,
                tolerance=tolerance,
                primitive=primitive,
                uc_a=uc_a,
                uc_b=uc_b,
                graph=self._graph,
                names=names,
                label=label,
                backend=backend
            )

            # atomrdf -> ASE -> pyiron; carry the sample id with the
            # structure so job parsing can link back to the graph
            ase_structure = struct.write.ase()
            pyiron_structure = ase_to_pyiron(ase_structure)
            pyiron_structure.info["sample_id"] = struct.sample
            return pyiron_structure

    class StructureFactory(sf.StructureFactory):
        # Extends the stock pyiron structure factory with the
        # graph-annotated one.
        def __init__(self, graph):
            super().__init__()
            self._annotated_structure = AnnotatedStructureFactory(graph)

        @property
        def annotated_structure(self):
            return self._annotated_structure

    class StructureCreator(Creator):
        # Creator whose structure factory is graph-aware.
        def __init__(self, project):
            super().__init__(project)
            self._structure = StructureFactory(project.graph)

        @property
        def structure(self):
            return self._structure

    pr.graph = kg
    pr._creator = StructureCreator(pr)
173 |
174 |
175 |
--------------------------------------------------------------------------------
/atomrdf/workflow/pyiron/quasiharmonic.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import ast
4 | from atomrdf.structure import System
5 | import atomrdf.workflow.pyiron.lammps as lammps
6 |
def process_job(job):
    """
    Convert a pyiron QuasiHarmonicJob into a list of job dictionaries.

    The lammps wrapper helpers are reused for structure, simulation folder
    and software information; the quasiharmonic free energy, volume and
    temperature grids are recorded as outputs.

    Parameters
    ----------
    job : pyiron.Job
        The quasiharmonic job to process.

    Returns
    -------
    list of dict
        A single-element list containing the quasiharmonic job dictionary.
    """
    #quasiharmonic job processing
    job_dicts = []

    #create a jobdict for the quasiharmonic job
    quasi_dict = {}
    lammps.get_structures(job, quasi_dict)
    quasi_dict['intermediate'] = False
    lammps.get_simulation_folder(job, quasi_dict)

    #add the quasiharmonic method and its output quantities
    quasi_dict['method'] = "QuasiharmonicModel"
    outputs = []
    outputs.append(
        {
            "label": "QuasiharmonicFreeEnergy",
            "value": np.round(job['output/free_energy'].T, decimals=4),
            "unit": "EV",
            "associate_to_sample": True,
            "base": "FreeEnergy",
        }
    )
    outputs.append(
        {
            "label": "QuasiharmonicVolume",
            "value": np.round(job['output/volumes'].T, decimals=4),
            "unit": "ANGSTROM3",
            "associate_to_sample": True,
            "base": "SimulationCellVolume",
        }
    )
    outputs.append(
        {
            "label": "QuasiharmonicTemperature",
            "value": np.round(job['output/temperatures'][0], decimals=2),
            "unit": "K",
            "associate_to_sample": True,
            "base": "Temperature",
        }
    )
    quasi_dict['outputs'] = outputs
    lammps.add_software(quasi_dict)
    job_dicts.append(quasi_dict)
    return job_dicts
--------------------------------------------------------------------------------
/atomrdf/workflow/pyiron/vasp.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import ast
4 | from atomrdf.structure import System
5 |
def process_job(job):
    """
    Build the method dictionary for a pyiron Vasp job.

    Collects the structures, method details, calculated quantities,
    software information and the simulation path into one dictionary.
    """
    method_dict = {'intermediate': False}
    get_structures(job, method_dict)
    identify_method(job, method_dict)
    extract_calculated_quantities(job, method_dict)
    add_software(method_dict)
    method_dict['path'] = get_simulation_folder(job)
    return method_dict
15 |
def get_simulation_folder(job):
    """Return the path of the job's ``<name>_hdf5`` working directory."""
    hdf5_folder = f"{job.name}_hdf5"
    return os.path.join(job.project.path, hdf5_folder)
18 |
def get_simulation_raw_folder(job):
    """Return the raw output directory inside the job's hdf5 working directory."""
    hdf5_folder = f"{job.name}_hdf5"
    return os.path.join(job.project.path, hdf5_folder, job.name)
21 |
def get_structures(job, method_dict):
    """
    Add the initial and final structures of a Vasp job to ``method_dict``.

    The initial sample id is taken from the structure's ``info`` dict when
    present; otherwise the POSCAR file written for the job is scanned for a
    ``sample`` comment in its first line.

    Parameters
    ----------
    job : pyiron.Job
        The job whose structures are read.
    method_dict : dict
        Updated in place with ``'structure'`` and ``'sample'`` entries.
    """
    initial_pyiron_structure = job.structure
    final_pyiron_structure = job.get_structure(frame=-1)
    initial_pyscal_structure = System.read.ase(initial_pyiron_structure)

    initial_sample_id = None

    if "sample_id" in initial_pyiron_structure.info.keys():
        initial_sample_id = initial_pyiron_structure.info["sample_id"]

    # if the structure itself carries no sample id, try to recover it from
    # the first line of the POSCAR file written for the job
    if initial_sample_id is None:
        job.decompress()
        poscar_file_locations = [os.path.join(get_simulation_folder(job), 'POSCAR'),
                                 os.path.join(get_simulation_raw_folder(job), 'POSCAR')]
        for poscar_file in poscar_file_locations:
            if os.path.exists(poscar_file):
                # only the first line can hold the sample id comment
                with open(poscar_file, 'r') as f:
                    first_line = f.readline()
                # the original code broke out of the loop before this check,
                # making the sample lookup unreachable; check before breaking
                if 'sample' in first_line:
                    initial_sample_id = first_line.strip()
                    break

    # add final structure
    final_pyscal_structure = System.read.ase(final_pyiron_structure)

    # now we do the transfer
    method_dict['structure'] = {'initial': initial_pyscal_structure,
                                'final': final_pyscal_structure,}
    method_dict['sample'] = {'initial': initial_sample_id,
                             'final': None}
57 |
def identify_method(job, method_dict):
    """
    Identify the simulation method and degrees of freedom of a Vasp job and
    record them, together with the main inputs, in ``method_dict``.

    Sets the keys ``'method'``, ``'dof'``, ``'inputs'`` and
    ``'xc_functional'``.

    Parameters
    ----------
    job : pyiron.Job
        The Vasp job whose INCAR, KPOINTS and POTCAR data are parsed.
    method_dict : dict
        Dictionary updated in place.
    """
    #get dof
    # flatten the INCAR parameter/value table into a plain {key: value} dict
    indf = job.input.incar.to_dict()
    params = indf['data_dict']['Parameter']
    vals = indf['data_dict']['Value']
    mlist = []
    for p,v in zip(params, vals):
        mlist.append(p + '=' + v)
    mstring = ';'.join(mlist)
    raw = mstring.split(';')
    mdict = {}
    for r in raw:
        rsplit = r.split('=')
        if len(rsplit) == 2:
            mdict[rsplit[0].replace(' ','')] = rsplit[1].replace(' ','')
    # map the VASP ISIF tag to the relaxed degrees of freedom
    dof = []
    if 'ISIF' in mdict.keys():
        if mdict['ISIF'] in ['0', '1', '2']:
            dof.append('AtomicPositionRelaxation')
        elif mdict['ISIF'] == '3':
            dof.append('AtomicPositionRelaxation')
            dof.append('CellShapeRelaxation')
            dof.append('CellVolumeRelaxation')
        elif mdict['ISIF'] == '4':
            dof.append('AtomicPositionRelaxation')
            dof.append('CellShapeRelaxation')
        elif mdict['ISIF'] == '5':
            dof.append('CellShapeRelaxation')
        elif mdict['ISIF'] == '6':
            dof.append('CellShapeRelaxation')
            dof.append('CellVolumeRelaxation')
        elif mdict['ISIF'] == '7':
            dof.append('CellVolumeRelaxation')
        elif mdict['ISIF'] == '8':
            dof.append('AtomicPositionRelaxation')
            dof.append('CellVolumeRelaxation')
    # NSW = 0 means no ionic steps: a static run, nothing is relaxed
    if 'NSW' in mdict.keys():
        if mdict['NSW'] == '0':
            dof = []

    method = 'DensityFunctionalTheory'
    method_dict['method'] = method
    method_dict['dof'] = dof

    inputs = []

    # plane-wave energy cutoff
    # NOTE(review): assumes ENCUT is always present in the INCAR;
    # raises KeyError otherwise — confirm against callers
    encut = mdict['ENCUT']
    inputs.append(
        {
            "label": "EnergyCutoff",
            "value": encut,
            "unit": "EV",
        }
    )

    # k-point data; rows 2 and 3 of the KPOINTS table are used as the
    # scheme and the grid specification respectively
    indf = job.input.to_dict()['kpoints/data_dict']
    params = indf['Parameter']
    vals = indf['Value']

    kpoint_type = vals[2]
    kpoint_grid = vals[3]

    if kpoint_type == 'Monkhorst-pack':
        inputs.append(
            {
                "label": "MonkhorstPackKPointMesh",
                "value": kpoint_grid,
            }
        )
    elif kpoint_type == 'Gamma':
        inputs.append(
            {
                "label": "GammaCenteredKPointMesh",
                "value": kpoint_grid,
            }
        )
    elif kpoint_type in ['Reciprocal', 'Cartesian']:
        inputs.append(
            {
                "label": "ExplicitKPointMesh",
                "value": kpoint_grid,
            }
        )
    else:
        inputs.append(
            {
                "label": "KPointMesh",
                "value": kpoint_grid,
            }
        )
    method_dict['inputs'] = inputs

    # exchange-correlation functional from the first POTCAR entry
    indf = job.input.to_dict()['potcar/data_dict']
    xc = indf['Value'][0]
    method_dict['xc_functional'] = xc
153 |
def add_software(method_dict):
    """Record the workflow manager (pyiron) and the software (VASP) in ``method_dict``."""
    workflow_manager = {
        "uri": "https://doi.org/10.1016/j.commatsci.2018.07.043",
        "label": "pyiron",
    }
    method_dict["workflow_manager"] = workflow_manager
    # and finally code details
    vasp_software = {
        "uri": "https://www.vasp.at/",
        "label": "VASP",
    }
    method_dict["software"] = [vasp_software]
165 |
def extract_calculated_quantities(job, method_dict):
    """
    Extract calculated quantities from the last frame of a job and store
    them in ``method_dict``.

    Parameters
    ----------
    job : pyiron.Job
        The job object containing the calculated quantities.
    method_dict : dict
        Dictionary updated in place; the key ``'outputs'`` is set to a list
        of dictionaries, each containing the label, value, unit and
        ``associate_to_sample`` flag of a calculated quantity.

    Returns
    -------
    None
    """
    # final-frame values only; all outputs share the same dict shape
    quantity_specs = [
        ("TotalEnergy", job.output.energy_tot[-1], "EV"),
        ("PotentialEnergy", job.output.energy_pot[-1], "EV"),
        ("SimulationCellVolume", job.output.volume[-1], "ANGSTROM3"),
    ]
    outputs = [
        {
            "label": label,
            "value": np.round(value, decimals=5),
            "unit": unit,
            "associate_to_sample": True,
        }
        for label, value, unit in quantity_specs
    ]

    method_dict['outputs'] = outputs
--------------------------------------------------------------------------------
/atomrdf/workflow/qe/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyscal/atomRDF/b177c889afc5b1b31d043ec36681766e0dc37790/atomrdf/workflow/qe/__init__.py
--------------------------------------------------------------------------------
/atomrdf/workflow/qe/qe.py:
--------------------------------------------------------------------------------
1 | """
2 | Wrappers for pyiron jobs
3 | """
4 | import os
5 | import numpy as np
6 | import ast
7 |
8 | from ase.io import read
9 | from atomrdf.structure import System
10 | from ase.io.espresso import read_fortran_namelist
11 | from atomrdf.io import _convert_tab_to_dict
12 |
def _parse_inp(file):
    """
    Scan a Quantum Espresso input file for a ``sample:<id>`` comment.

    A comment is everything after ``!`` on a line. The first comment of the
    form ``sample:<id>`` is returned verbatim.

    Parameters
    ----------
    file : str
        Path to the Quantum Espresso input file.

    Returns
    -------
    str or None
        The full ``sample:<id>`` comment string, or ``None`` when no such
        comment is present.
    """
    with open(file, 'r') as fin:
        for line in fin:
            # everything after the last '!' is the comment on this line
            parts = line.strip().split('!')
            if len(parts) > 1:
                comment = parts[-1].strip()
                if comment.split(':')[0] == 'sample':
                    return comment
    return None
28 |
29 |
30 |
def inform_graph(pr, kg):
    """No-op: quantum espresso needs no extra workflow environment setup.

    Present so this module exposes the same interface as the pyiron wrapper.
    """
    pass
33 |
def process_job(job):
    """
    Build the method dictionary for a quantum espresso calculation.

    Parameters
    ----------
    job : tuple of str
        A pair ``(quantum_espresso_input_file, quantum_espresso_output_file)``.

    Returns
    -------
    dict
        The method dictionary describing the calculation.

    Raises
    ------
    ValueError
        If ``job`` does not contain exactly two items.
    """
    if len(job) != 2:
        raise ValueError('Job must be a tuple with two items: (quantum_espresso_input_file, quantum_espresso_output_file)')
    infile = job[0]

    method_dict = {}
    method_dict['intermediate'] = False
    get_structures(job, method_dict)
    identify_method(job, method_dict)
    add_software(method_dict)
    extract_calculated_quantities(job, method_dict)
    method_dict['path'] = os.path.abspath(os.path.dirname(infile))
    return method_dict
48 |
def get_structures(job, method_dict):
    """
    Read the initial and final structures of a calculation and record them,
    together with the initial sample id (if any), in ``method_dict``.
    """
    input_file = job[0]
    output_file = job[1]

    # initial structure from the espresso input file
    initial_ase = read(input_file, format='espresso-in')
    initial_structure = System.read.ase(initial_ase)

    # the sample id, when present, is stored as a comment in the input file
    initial_sample_id = _parse_inp(input_file)

    # final structure from the espresso output file
    final_ase = read(output_file, format='espresso-out')
    final_structure = System.read.ase(final_ase)

    method_dict['structure'] = {'initial': initial_structure,
                                'final': final_structure,}
    method_dict['sample'] = {'initial': initial_sample_id,
                             'final': None}
65 |
def identify_method(job, method_dict):
    """
    Identify the calculation method, degrees of freedom and main inputs of a
    quantum espresso calculation and record them in ``method_dict``.

    Sets the keys ``'method'``, ``'dof'``, ``'inputs'`` and, when it can be
    read from the output file, ``'xc_functional'``.

    Parameters
    ----------
    job : tuple of str
        A pair ``(input_file, output_file)``.
    method_dict : dict
        Dictionary updated in place.

    Raises
    ------
    ValueError
        If the calculation type in the input file is not recognized.
    """
    infile = job[0]
    outfile = job[1]

    with open(infile, 'r') as fin:
        data, tab = read_fortran_namelist(fin)

    tab = _convert_tab_to_dict(tab)
    calc_method = data['control']['calculation']

    # map the espresso calculation type to relaxed degrees of freedom
    dof = []
    if calc_method in ['scf', 'nscf']:
        pass
    elif calc_method == 'relax':
        dof.append('AtomicPositionRelaxation')
    elif calc_method == 'vc-relax':
        dof.append('AtomicPositionRelaxation')
        dof.append('CellShapeRelaxation')
        dof.append('CellVolumeRelaxation')
    else:
        raise ValueError('Unknown calculation method')

    method = 'DensityFunctionalTheory'
    method_dict['method'] = method
    method_dict['dof'] = dof

    encut = data['system']['ecutwfc']
    #convert from Rydberg to eV
    inputs = []
    inputs.append(
        {
            "label": "EnergyCutoff",
            "value": encut*13.6057039763,
            "unit": "EV",
        }
    )

    #get kpoints
    if tab['K_POINTS']['extra'] == 'automatic':
        inputs.append(
            {
                "label": "MonkhorstPackKPointMesh",
                "value": " ".join(tab['K_POINTS']['value'][0].split()[:3]),
            }
        )
    elif tab['K_POINTS']['extra'] == 'gamma':
        inputs.append(
            {
                "label": "GammaCenteredKPointMesh",
                "value": " ",
            }
        )
    else:
        inputs.append(
            {
                "label": "ExplicitKPointMesh",
                "value": " ",
            }
        )
    method_dict['inputs'] = inputs

    #get the exchange-correlation functional from the output file
    pseudo = None
    with open(outfile, 'r') as fin:
        for line in fin:
            if 'Exchange-correlation' in line:
                # the functional is listed after the '=' sign, e.g.
                # 'Exchange-correlation= SLA PW PBX PBC'; the original code
                # took split('=')[0], which is the literal key, not the value
                pseudo = line.split('=')[-1].strip()
                break

    if pseudo is not None:
        method_dict['xc_functional'] = pseudo
137 |
def add_software(method_dict):
    """Record quantum espresso as the software used for the calculation."""
    method_dict["software"] = [
        {
            "uri": "https://www.quantum-espresso.org/",
            "label": "QuantumEspresso",
        }
    ]
144 |
def extract_calculated_quantities(job, method_dict):
    """
    Read the total energy and simulation cell volume from the espresso
    output file and store them under ``method_dict['outputs']``.
    """
    output_file = job[1]

    final_atoms = read(output_file, format='espresso-out')

    quantities = [
        ("TotalEnergy", final_atoms.get_total_energy(), "EV"),
        ("SimulationCellVolume", final_atoms.get_volume(), "ANGSTROM3"),
    ]
    method_dict['outputs'] = [
        {
            "label": label,
            "value": np.round(value, decimals=5),
            "unit": unit,
            "associate_to_sample": True,
        }
        for label, value, unit in quantities
    ]
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | coverage:
2 | status:
3 | patch: off
4 |
--------------------------------------------------------------------------------
/docs/acknowledgements.md:
--------------------------------------------------------------------------------
1 | # Acknowledgements
2 |
3 | ## Developers
4 |
5 | - [Sarath Menon](http://sarathmenon.me)
6 | - [Abril Azócar Guzmán](https://www.fz-juelich.de/profile/guzman_a.azocar)
7 |
8 |
9 | ## Contributors
10 | 
11 | Please see the complete list of contributors [here](https://github.com/pyscal/atomrdf/graphs/contributors).
12 |
13 |
14 | ## Acknowledgements
15 |
16 | Funding for this publication was provided by the NFDI consortium [NFDI-MatWerk](https://nfdi-matwerk.de/) in the context of the work of the association German National Research Data Infrastructure (NFDI) e.V. NFDI is financed by the Federal Republic of Germany and the 16 federal states and funded by the Federal Ministry of Education and Research (BMBF) - funding code M532701 / the Deutsche Forschungsgemeinschaft (DFG, German Research Foundation) - [project number 460247524](https://gepris.dfg.de/gepris/projekt/460247524?language=en).
--------------------------------------------------------------------------------
/docs/api.rst:
--------------------------------------------------------------------------------
1 | API Reference
2 | =============
3 |
4 | Structure
5 | ---------
6 | .. automodule:: atomrdf.structure
7 | :members:
8 |
9 | KnowledgeGraph
10 | --------------
11 | .. automodule:: atomrdf.graph
12 | :members:
13 |
14 | Workflow
15 | --------
16 |
17 | .. automodule:: atomrdf.workflow.workflow
18 | :members:
19 |
20 |
21 | Network
22 | -------
23 |
24 | .. automodule:: atomrdf.network.network
25 | :members:
26 |
27 | Namespace
28 | ---------
29 |
30 | .. automodule:: atomrdf.namespace
31 | :members:
32 |
33 | Stores
34 | ------
35 |
36 | .. automodule:: atomrdf.stores
37 | :members:
38 |
--------------------------------------------------------------------------------
/docs/examples.md:
--------------------------------------------------------------------------------
1 | # Examples
2 |
3 | The gallery of examples below cover different ways in which atomrdf can be used.
4 |
5 | ::::{grid} 1 1 2 3
6 | :class-container: text-center
7 | :gutter: 3
8 |
9 | :::{grid-item-card}
10 | :link: ../examples/01_getting_started
11 | :link-type: doc
12 | :class-header: bg-light
13 | Getting started with atomrdf
14 | ^^^
15 | Learn the very basis, including the concepts of creating structures, and querying them.
16 | :::
17 |
18 | :::{grid-item-card}
19 | :link: ../examples/02_grain_boundaries
20 | :link-type: doc
21 | :class-header: bg-light
22 | Creating grain boundary structures
23 | ^^^
24 | Create, visualize, query, and export grain boundary structures.
25 | :::
26 |
27 | ::::
28 |
29 |
30 |
--------------------------------------------------------------------------------
/docs/extending.md:
--------------------------------------------------------------------------------
1 |
2 | # Support
3 |
4 | In case of bugs and feature improvements, you are welcome to create a
5 | new issue on the [github repo](https://github.com/pyscal/atomrdf). You
6 | are also welcome to fix a bug or implement a feature.
7 |
8 | Any other questions or suggestions are welcome, please contact
9 | [us](mailto:rdf@pyscal.org).
10 |
11 | `atomrdf` welcomes and appreciates contribution and extension to the
12 | module. Rather than local modifications, we request that the
13 | modifications be submitted through a pull request, so that the module
14 | can be continuously improved.
15 |
16 | ## Reporting and fixing bugs
17 |
18 | In case a bug is found in the module, it can be reported on the [issues
19 | page of the repository](https://github.com/pyscal/atomrdf/issues). Once a bug is reported, its status can be monitored on
20 | the issues page. Additionally, you are of course very welcome to fix any
21 | existing bugs.
22 |
23 | ## New features
24 |
25 | If you have an idea for new feature, you can submit a feature idea
26 | through the [issues page of the
27 | repository](https://github.com/pyscal/atomrdf/issues). As much
28 | information as you can provide about the new feature would be greatly
29 | helpful. Additionally, you could also work on feature requests already
30 | on the issues page. The following instructions will help you get started
31 | with local feature development.
32 |
33 | ### Setting up local environment
34 |
35 | 1. The first step is to fork `atomrdf`. A detailed tutorial on forking can
36 | be found [here](https://help.github.com/en/articles/fork-a-repo).
37 | After forking, clone the repository to your local machine.
38 | 2. We recommend creating a virtual environment to test new features or
39 | improvements to features. See this
40 | [link](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html)
41 | for help on managing environments.
42 | 3. Once the environment is set up, you can create a new branch for your
43 |    feature by `git checkout -b new_feature`.
44 | 4. Now implement the necessary feature.
45 | 5. Once done, you can reinstall `atomrdf` by `pip install .`.
46 | After that please make sure that the existing tests work by running
47 | `pytest tests/` from the main module folder.
48 | 6. If the tests work, you are almost done! If the new feature is not
49 |    covered in existing tests, you can write a new test in the tests
50 | folder. `atomrdf` uses pytest for tests. [This
51 | link](http://doc.pytest.org/en/latest/getting-started.html) will
52 | help you get started.
53 | 7. Add the necessary docstrings for the new functions implemented.
54 | `atomrdf` uses the [numpy docstring
55 | format](https://numpydoc.readthedocs.io/en/latest/format.html) for
56 | documentation.
57 | 8. Bonus task: Set up few examples that document how the feature works
58 | in the `docs` folder and link it to the examples section.
59 | 9. Final step - Submit a pull request through github. Before you
60 | submit, please make sure that the new feature is documented and has
61 | tests. Once the request is submitted, automated tests would be done.
62 |    If all tests are successful, your feature will be incorporated into `atomrdf` and your contributions
63 | will be credited.
64 |
65 | If you have trouble with any of the steps, or you need help, please
66 | [send an email](mailto:rdf@pyscal.org) and we will be happy to
67 | help!
68 |
--------------------------------------------------------------------------------
/docs/gettingstarted.md:
--------------------------------------------------------------------------------
1 | # Installation
2 |
3 | `atomrdf` can be installed on Linux and Mac OS based systems. On Windows systems, it is recommended to use [Windows subsystem for Linux](https://docs.microsoft.com/en-us/windows/wsl/install). The following instructions will help install `atomrdf`:
4 |
5 | ````{tab-set}
6 | ```{tab-item} pip
7 | `pip install atomrdf`
8 | ```
9 |
10 | ```{tab-item} conda
11 | `conda install -c conda-forge atomrdf`
12 | ```
13 |
14 | ```{tab-item} from source
15 | We strongly recommend creating a conda environment for the installation. To see how you can install conda see [here](https://docs.conda.io/projects/conda/en/latest/user-guide/install/).
16 |
17 | Once a conda distribution is available, the following steps will help set up an environment to use `atomrdf`. First step is to clone the repository.
18 |
19 | `git clone https://github.com/pyscal/atomrdf.git`
20 |
21 | After cloning, an environment can be created from the included file-
22 |
23 | `cd atomrdf`
24 | `conda env create -f environment.yml`
25 |
26 | This will install the necessary packages and create an environment called rdf. It can be activated by,
27 |
28 | `conda activate rdf`
29 |
30 | then, install `atomrdf` using,
31 |
32 | `pip install .`
33 | ```
34 | ````
35 |
--------------------------------------------------------------------------------
/docs/license.md:
--------------------------------------------------------------------------------
1 | # License
2 |
3 | atomrdf
4 |
5 | Copyright 2022 (c) Sarath Menon $^1$, Abril Azócar Guzmán $^2$
6 | $^1$: Max Planck Institut für Eisenforschung, Dusseldorf, Germany
7 | $^2$: Forschungszentrum Jülich GmbH, Jülich, Germany
8 |
9 | pyscal-rdf is published under the MIT license:
10 |
11 | MIT License
12 |
13 | Copyright (c) 2023 atomrdf
14 |
15 | Permission is hereby granted, free of charge, to any person obtaining a copy
16 | of this software and associated documentation files (the "Software"), to deal
17 | in the Software without restriction, including without limitation the rights
18 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
19 | copies of the Software, and to permit persons to whom the Software is
20 | furnished to do so, subject to the following conditions:
21 |
22 | The above copyright notice and this permission notice shall be included in all
23 | copies or substantial portions of the Software.
24 |
25 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
30 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 | SOFTWARE.
32 |
33 | For more information contact:
34 | rdf@pyscal.org
--------------------------------------------------------------------------------
/docs/source/_static/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyscal/atomRDF/b177c889afc5b1b31d043ec36681766e0dc37790/docs/source/_static/logo.png
--------------------------------------------------------------------------------
/environment-docs.yml:
--------------------------------------------------------------------------------
1 | name: rdf
2 | channels:
3 | - conda-forge
4 | dependencies:
5 | - python >3.8, <3.12
6 | - pyscal3 >=3.1.12
7 | - rdflib
8 | - tools4rdf >=0.2.0
9 | - pyyaml
10 | - jupyterlab
11 | - graphviz
12 | - python-graphviz
13 | - ase
14 | - networkx
15 | - pandas
16 | - jupyter-book
17 | - plotly
18 | - ipywidgets
19 | - atomman
20 |
21 |
--------------------------------------------------------------------------------
/environment-workflows.yml:
--------------------------------------------------------------------------------
1 | name: workflow-rdf
2 | channels:
3 | - conda-forge
4 | dependencies:
5 | - python >3.8, <3.12
6 | - pyscal3 >=3.1.12
7 | - pyscal-rdf
8 | - rdflib
9 | - pyyaml
10 | - jupyterlab
11 | - graphviz
12 | - python-graphviz
13 | - ase
14 | - networkx
15 | - pandas
16 | - plotly
17 | - ipywidgets
18 | - atomman
19 | #workflow packages
20 | - pyiron_base
21 | - pyiron_atomistics
22 | - pyiron-data
23 | - sqlalchemy
24 | - iprpy-data
25 | - sphinxdft >=2.7.0
26 | - sphinxdft-data
27 | - lammps
28 | - nglview
29 | - pip:
30 | - "git+https://github.com/RDFLib/rdflib-sqlalchemy.git@develop"
31 |
32 |
--------------------------------------------------------------------------------
/environment.yml:
--------------------------------------------------------------------------------
1 | name: rdf
2 | channels:
3 | - conda-forge
4 | dependencies:
5 | - python >3.8, <3.12
6 | - pyscal3 >=3.1.12
7 | - rdflib
8 | #- tools4rdf >=0.2.0
9 | - pyyaml
10 | - jupyterlab
11 | - graphviz
12 | - python-graphviz
13 | - ase =3.23.0
14 | - networkx
15 | - pandas
16 | - plotly
17 | - ipywidgets
18 | #- atomman
19 | - mp-api
20 | - sqlalchemy
21 | - aimsgb
22 | - pymatgen
23 | - mendeleev
24 | - pip:
25 | - "git+https://github.com/RDFLib/rdflib-sqlalchemy.git@develop"
26 | - tools4rdf
27 |
--------------------------------------------------------------------------------
/examples/08_write_qe.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "from atomrdf import KnowledgeGraph, System"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "kg = KnowledgeGraph()"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 3,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "s = System.create.element.Fe(graph = kg)"
28 | ]
29 | },
30 | {
31 | "cell_type": "code",
32 | "execution_count": 4,
33 | "metadata": {},
34 | "outputs": [],
35 | "source": [
36 | "s.write.file('qe_input', format='quantum-espresso')"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": 5,
42 | "metadata": {},
43 | "outputs": [
44 | {
45 | "name": "stdout",
46 | "output_type": "stream",
47 | "text": [
48 | "! sample:5f3309f9-4d09-4080-9219-4eef18fd2170\n",
49 | "\n",
50 | "&SYSTEM\n",
51 | " ibrav = 0,\n",
52 | " nat = 2,\n",
53 | " ntyp = 1,\n",
54 | "/\n",
55 | "\n",
56 | "CELL_PARAMETERS angstrom\n",
57 | "2.87 0.0 0.0\n",
58 | "0.0 2.87 0.0\n",
59 | "0.0 0.0 2.87\n",
60 | "\n",
61 | "ATOMIC_SPECIES\n",
62 | "Fe 55.845 None\n",
63 | "\n",
64 | "ATOMIC_POSITIONS crystal\n",
65 | "Fe 0.0 0.0 0.0\n",
66 | "Fe 0.5 0.5 0.5\n",
67 | "\n"
68 | ]
69 | }
70 | ],
71 | "source": [
72 | "! more qe_input"
73 | ]
74 | },
75 | {
76 | "cell_type": "markdown",
77 | "metadata": {},
78 | "source": [
79 | "One can also copy settings from an existing file"
80 | ]
81 | },
82 | {
83 | "cell_type": "code",
84 | "execution_count": 6,
85 | "metadata": {},
86 | "outputs": [],
87 | "source": [
88 | "s.write.file('qe_input_2', format='quantum-espresso', copy_from='qe_ref.in')"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": 7,
94 | "metadata": {},
95 | "outputs": [
96 | {
97 | "name": "stdout",
98 | "output_type": "stream",
99 | "text": [
100 | "! sample:5f3309f9-4d09-4080-9219-4eef18fd2170\n",
101 | "\n",
102 | "&CONTROL\n",
103 | " prefix = fe,\n",
104 | "/\n",
105 | "\n",
106 | "&SYSTEM\n",
107 | " ibrav = 0,\n",
108 | " nat = 2,\n",
109 | " ntyp = 1,\n",
110 | " ecutwfc = 25.0,\n",
111 | " ecutrho = 200.0,\n",
112 | " occupations = smearing,\n",
113 | " smearing = mv,\n",
114 | " degauss = 0.01,\n",
115 | " nspin = 2,\n",
116 | " starting_magnetization(1) = 0.6,\n",
117 | "/\n",
118 | "\n",
119 | "&ELECTRONS\n",
120 | "/\n",
121 | "\n",
122 | "CELL_PARAMETERS angstrom\n",
123 | "\u001b[Km--More--(71%)\u001b[m"
124 | ]
125 | }
126 | ],
127 | "source": [
128 | "! more qe_input_2"
129 | ]
130 | },
131 | {
132 | "cell_type": "markdown",
133 | "metadata": {},
134 | "source": [
135 | "This copies the input settings, but replaces the relevant ones."
136 | ]
137 | }
138 | ],
139 | "metadata": {
140 | "kernelspec": {
141 | "display_name": "Python 3 (ipykernel)",
142 | "language": "python",
143 | "name": "python3"
144 | },
145 | "language_info": {
146 | "codemirror_mode": {
147 | "name": "ipython",
148 | "version": 3
149 | },
150 | "file_extension": ".py",
151 | "mimetype": "text/x-python",
152 | "name": "python",
153 | "nbconvert_exporter": "python",
154 | "pygments_lexer": "ipython3",
155 | "version": "3.11.8"
156 | }
157 | },
158 | "nbformat": 4,
159 | "nbformat_minor": 4
160 | }
161 |
--------------------------------------------------------------------------------
/examples/conf.dump:
--------------------------------------------------------------------------------
1 | ITEM: TIMESTEP
2 | 0
3 | ITEM: NUMBER OF ATOMS
4 | 2
5 | ITEM: BOX BOUNDS pp pp pp
6 | 0.0 2.87
7 | 0.0 2.87
8 | 0.0 2.87
9 | ITEM: ATOMS x y z type id
10 | 0.0 0.0 0.0 1 1
11 | 1.435 1.435 1.435 1 2
12 |
--------------------------------------------------------------------------------
/examples/dataset.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyscal/atomRDF/b177c889afc5b1b31d043ec36681766e0dc37790/examples/dataset.tar.gz
--------------------------------------------------------------------------------
/examples/qe_ref.in:
--------------------------------------------------------------------------------
1 | &CONTROL
2 | prefix='fe',
3 |
4 | !pseudo_dir = 'directory with pseudopotentials',
5 | !outdir = 'temporary directory for large files'
6 | !verbosity = 'high',
7 | /
8 |
9 | &SYSTEM
10 | ibrav = 0,
11 | nat = 1,
12 | ntyp = 1,
13 | ecutwfc = 25.0,
14 | ecutrho = 200.0,
15 |
16 | occupations='smearing',
17 | smearing='mv',
18 | degauss=0.01,
19 |
20 | nspin = 2,
21 | starting_magnetization(1)= 0.6
22 | /
23 |
24 | &ELECTRONS
25 | /
26 |
27 | ATOMIC_SPECIES
28 | # the second field, atomic mass, is not actually used
29 | # except for MD calculations
30 | Fe 1. Fe.pbe-nd-rrkjus.UPF
31 |
32 | ATOMIC_POSITIONS crystal
33 | Fe 0.0 0.0 0.0
34 | ! this is a comment that the code will ignore
35 |
36 | K_POINTS automatic
37 | 8 8 8 1 1 1
38 |
39 | CELL_PARAMETERS angstrom
40 | 2.87 0.0 0.0
41 | 0.0 2.87 0.0
42 | 0.0 0.0 2.87
43 |
--------------------------------------------------------------------------------
/examples/workflow_examples/04_vacancy_formation_lammps.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "%config IPCompleter.evaluation='unsafe'"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 2,
15 | "metadata": {},
16 | "outputs": [
17 | {
18 | "data": {
19 | "application/vnd.jupyter.widget-view+json": {
20 | "model_id": "1e2d7928898d4f1bbc591b039a4c46f2",
21 | "version_major": 2,
22 | "version_minor": 0
23 | },
24 | "text/plain": []
25 | },
26 | "metadata": {},
27 | "output_type": "display_data"
28 | }
29 | ],
30 | "source": [
31 | "from pyiron_atomistics import Project\n",
32 | "from atomrdf import KnowledgeGraph\n",
33 | "import numpy as np"
34 | ]
35 | },
36 | {
37 | "cell_type": "code",
38 | "execution_count": 3,
39 | "metadata": {},
40 | "outputs": [],
41 | "source": [
42 | "project = 'y8'\n",
43 | "pr = Project(project)\n",
44 | "kg = KnowledgeGraph(store='db', store_file=f'{project}.db')\n",
45 | "kg.enable_workflow(pr, workflow_environment='pyiron')"
46 | ]
47 | },
48 | {
49 | "cell_type": "code",
50 | "execution_count": 4,
51 | "metadata": {},
52 | "outputs": [
53 | {
54 | "name": "stdout",
55 | "output_type": "stream",
56 | "text": [
57 | "The job bulk was saved and received the ID: 1214\n"
58 | ]
59 | }
60 | ],
61 | "source": [
62 | "structure = pr.create.structure.annotated_structure.bulk('Cu', cubic=True, label='cu md', repetitions=(3, 3, 3))\n",
63 | "job = pr.create.job.Lammps('bulk', delete_existing_job=True, delete_aborted_job=True)\n",
64 | "job.structure = structure\n",
65 | "job.potential = '2001--Mishin-Y--Cu-1--LAMMPS--ipr1'\n",
66 | "job.calc_md(pressure=0, temperature=500)\n",
67 | "job.run()\n",
68 | "kg.add_workflow(job, workflow_environment='pyiron')"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": 5,
74 | "metadata": {},
75 | "outputs": [
76 | {
77 | "data": {
78 | "text/plain": [
79 | "[cu_md, sample:b5b503c7-f5a6-4794-a1c1-042c7fa77add_from_cu_md]"
80 | ]
81 | },
82 | "execution_count": 5,
83 | "metadata": {},
84 | "output_type": "execute_result"
85 | }
86 | ],
87 | "source": [
88 | "kg.samples"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": 6,
94 | "metadata": {},
95 | "outputs": [],
96 | "source": [
97 | "sample = kg.samples[1]"
98 | ]
99 | },
100 | {
101 | "cell_type": "code",
102 | "execution_count": 7,
103 | "metadata": {},
104 | "outputs": [],
105 | "source": [
106 | "min_struct = sample.structure"
107 | ]
108 | },
109 | {
110 | "cell_type": "code",
111 | "execution_count": 8,
112 | "metadata": {},
113 | "outputs": [],
114 | "source": [
115 | "del min_struct[3]"
116 | ]
117 | },
118 | {
119 | "cell_type": "code",
120 | "execution_count": 9,
121 | "metadata": {},
122 | "outputs": [
123 | {
124 | "name": "stdout",
125 | "output_type": "stream",
126 | "text": [
127 | "The job vacancy was saved and received the ID: 1215\n"
128 | ]
129 | }
130 | ],
131 | "source": [
132 | "job = pr.create.job.Lammps('vacancy', delete_existing_job=True, delete_aborted_job=True)\n",
133 | "job.structure = min_struct.write.pyiron()\n",
134 | "job.potential = '2001--Mishin-Y--Cu-1--LAMMPS--ipr1'\n",
135 | "job.calc_static()\n",
136 | "job.run()\n",
137 | "kg.add_workflow(job, workflow_environment='pyiron')"
138 | ]
139 | },
140 | {
141 | "cell_type": "markdown",
142 | "metadata": {},
143 | "source": [
144 | "form_energy = energy_vac - ((number_atoms-1)/number_atoms)*energy_bulk"
145 | ]
146 | },
147 | {
148 | "cell_type": "code",
149 | "execution_count": 10,
150 | "metadata": {},
151 | "outputs": [
152 | {
153 | "data": {
154 | "text/plain": [
155 | "[cu_md,\n",
156 | " sample:b5b503c7-f5a6-4794-a1c1-042c7fa77add_from_cu_md,\n",
157 | " sample:7f0eed16-820c-4ff1-baa0-64cf2f08f027_from_sample:b5b503c7-f5a6-4794-a1c1-042c7fa77add_from_cu_md]"
158 | ]
159 | },
160 | "execution_count": 10,
161 | "metadata": {},
162 | "output_type": "execute_result"
163 | }
164 | ],
165 | "source": [
166 | "kg.samples"
167 | ]
168 | },
169 | {
170 | "cell_type": "code",
171 | "execution_count": 11,
172 | "metadata": {},
173 | "outputs": [],
174 | "source": [
175 | "vac_sample = kg.samples[2]\n",
176 | "bulk_sample = kg.samples[1]"
177 | ]
178 | },
179 | {
180 | "cell_type": "code",
181 | "execution_count": 12,
182 | "metadata": {},
183 | "outputs": [],
184 | "source": [
185 | "e_form = vac_sample.outputs.TotalEnergy - ((bulk_sample.properties.NumberOfAtoms - 1)/bulk_sample.properties.NumberOfAtoms)*bulk_sample.outputs.TotalEnergy"
186 | ]
187 | },
188 | {
189 | "cell_type": "code",
190 | "execution_count": 13,
191 | "metadata": {},
192 | "outputs": [
193 | {
194 | "data": {
195 | "text/plain": [
196 | "-5.688631775700969"
197 | ]
198 | },
199 | "execution_count": 13,
200 | "metadata": {},
201 | "output_type": "execute_result"
202 | }
203 | ],
204 | "source": [
205 | "e_form.value"
206 | ]
207 | },
208 | {
209 | "cell_type": "code",
210 | "execution_count": 14,
211 | "metadata": {},
212 | "outputs": [],
213 | "source": [
214 | "e_form.label = 'VacancyFormationEnergy'"
215 | ]
216 | },
217 | {
218 | "cell_type": "code",
219 | "execution_count": 15,
220 | "metadata": {},
221 | "outputs": [
222 | {
223 | "name": "stdout",
224 | "output_type": "stream",
225 | "text": [
226 | "http://purls.helmholtz-metadaten.de/asmo/wasCalculatedBy operation:ba8d62e4-8049-4d1f-b596-4d2713b91d71\n",
227 | "http://purls.helmholtz-metadaten.de/asmo/wasCalculatedBy\n"
228 | ]
229 | },
230 | {
231 | "data": {
232 | "text/plain": [
233 | "{'property:34a2146f-25c5-4498-aab9-0599937098e2': {'found': True,\n",
234 | " 'label': 'VacancyFormationEnergy',\n",
235 | " 'operation': 'output_parameter',\n",
236 | " 'inputs': {'0': 'operation:ba8d62e4-8049-4d1f-b596-4d2713b91d71'}},\n",
237 | " 'operation:ba8d62e4-8049-4d1f-b596-4d2713b91d71': {'found': True,\n",
238 | " 'label': 'operation:ba8d62e4-8049-4d1f-b596-4d2713b91d71',\n",
239 | " 'inputs': {},\n",
240 | " 'operation': 'sample_for_activity'}}"
241 | ]
242 | },
243 | "execution_count": 15,
244 | "metadata": {},
245 | "output_type": "execute_result"
246 | }
247 | ],
248 | "source": [
249 | "kg.generate_provenance(label='VacancyFormationEnergy', visualize=False)"
250 | ]
251 | },
252 | {
253 | "cell_type": "code",
254 | "execution_count": 19,
255 | "metadata": {},
256 | "outputs": [],
257 | "source": [
258 | "from rdflib import URIRef"
259 | ]
260 | },
261 | {
262 | "cell_type": "code",
263 | "execution_count": 21,
264 | "metadata": {},
265 | "outputs": [
266 | {
267 | "name": "stdout",
268 | "output_type": "stream",
269 | "text": [
270 | "(rdflib.term.URIRef('operation:412b11a7-0bfd-48d9-addb-851e244dbadf'), rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'), rdflib.term.URIRef('http://purls.helmholtz-metadaten.de/asmo/Subtraction'))\n",
271 | "(rdflib.term.URIRef('operation:412b11a7-0bfd-48d9-addb-851e244dbadf'), rdflib.term.URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#type'), rdflib.term.URIRef('http://www.w3.org/ns/prov#Activity'))\n",
272 | "(rdflib.term.URIRef('operation:412b11a7-0bfd-48d9-addb-851e244dbadf'), rdflib.term.URIRef('http://purls.helmholtz-metadaten.de/asmo/hasMinuend'), rdflib.term.URIRef('activity:b1d6751d-16b6-4ae8-9ace-55da70f803db_TotalEnergy'))\n",
273 | "(rdflib.term.URIRef('operation:412b11a7-0bfd-48d9-addb-851e244dbadf'), rdflib.term.URIRef('http://purls.helmholtz-metadaten.de/asmo/hasSubtrahend'), rdflib.term.URIRef('property:7c88abd8-93f0-450a-8739-446e78a3e565'))\n",
274 | "(rdflib.term.URIRef('operation:412b11a7-0bfd-48d9-addb-851e244dbadf'), rdflib.term.URIRef('http://purls.helmholtz-metadaten.de/asmo/hasDifference'), rdflib.term.URIRef('property:9b5d8fe1-bea9-4e06-8c5a-aa9133a50fc2'))\n"
275 | ]
276 | }
277 | ],
278 | "source": [
279 | "for x in kg.triples((URIRef('operation:412b11a7-0bfd-48d9-addb-851e244dbadf'), None, None)):\n",
280 | " print(x)"
281 | ]
282 | },
283 | {
284 | "cell_type": "code",
285 | "execution_count": null,
286 | "metadata": {},
287 | "outputs": [],
288 | "source": []
289 | }
290 | ],
291 | "metadata": {
292 | "kernelspec": {
293 | "display_name": "workflow-rdf",
294 | "language": "python",
295 | "name": "python3"
296 | },
297 | "language_info": {
298 | "codemirror_mode": {
299 | "name": "ipython",
300 | "version": 3
301 | },
302 | "file_extension": ".py",
303 | "mimetype": "text/x-python",
304 | "name": "python",
305 | "nbconvert_exporter": "python",
306 | "pygments_lexer": "ipython3",
307 | "version": "3.11.10"
308 | }
309 | },
310 | "nbformat": 4,
311 | "nbformat_minor": 4
312 | }
313 |
--------------------------------------------------------------------------------
/examples/workflow_examples/qe_run/pw.si.scf.ref:
--------------------------------------------------------------------------------
1 | &CONTROL
2 | calculation = 'vc-relax',
3 | prefix='silicon',
4 | outdir = './',
5 | /
6 | &SYSTEM
7 | ecutwfc = 12,
8 | /
9 | &ELECTRONS
10 | /
11 | &IONS
12 | /
13 | &CELL
14 | /
15 |
16 | K_POINTS automatic
17 | 4 4 4 1 1 1
18 |
--------------------------------------------------------------------------------
/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyscal/atomRDF/b177c889afc5b1b31d043ec36681766e0dc37790/logo.png
--------------------------------------------------------------------------------
/notebooks/create_onto.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 6,
6 | "id": "29eb6c7f",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "kg = KnowledgeGraph()\n",
11 | "struct_Fe = System.create.element.Fe(graph=kg)\n",
12 | "\n",
13 | "term = (kg.ontology.terms.cmso.hasVolume > 2) & (kg.ontology.terms.cmso.hasVolume < 4)\n",
14 | "term._condition"
15 | ]
16 | },
17 | {
18 | "cell_type": "code",
19 | "execution_count": 3,
20 | "id": "85d019a1",
21 | "metadata": {},
22 | "outputs": [],
23 | "source": [
24 | "from atomrdf import KnowledgeGraph, System"
25 | ]
26 | },
27 | {
28 | "cell_type": "code",
29 | "execution_count": 9,
30 | "id": "0480f3cb-a9df-4dff-89b6-95747572fb39",
31 | "metadata": {},
32 | "outputs": [],
33 | "source": [
34 | "from owlready2 import *"
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": 10,
40 | "id": "cdab09a3-b991-4c6f-ab11-3d630293e2a9",
41 | "metadata": {},
42 | "outputs": [],
43 | "source": [
44 | "onto = get_ontology(\"http://purls.helmholtz-metadaten.de/msmo/\")"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": 11,
50 | "id": "8f470001-3a9c-4cd0-817f-57fa06a10da0",
51 | "metadata": {},
52 | "outputs": [],
53 | "source": [
54 | "class Calculation(Thing):\n",
55 | " namespace = onto\n",
56 | "\n",
57 | "class hasMethod(Calculation >> ComputationalMethod):\n",
58 | " pass\n",
59 | " \n",
60 | "class ComputationalMethod(Thing):\n",
61 | " namespace = onto\n",
62 | "\n",
63 | "#Atomistic Method\n",
64 | "class AtomisticMethod(ComputationalMethod):\n",
65 | " pass\n",
66 | "\n",
67 | "class DensityFunctionalTheory(AtomisticMethod):\n",
68 | " pass\n",
69 | "\n",
70 | "class MolecularDynamics(AtomisticMethod):\n",
71 | " pass\n",
72 | "\n",
73 | "class MolecularDynamicsMinimization(MolecularDynamics):\n",
74 | " pass\n",
75 | "\n",
76 | "class MolecularDynamicsNPT(MolecularDynamics):\n",
77 | " pass\n",
78 | "\n",
79 | "class MolecularDynamicsNVT(MolecularDynamics):\n",
80 | " pass\n",
81 | "\n",
82 | "#data properties\n",
83 | "class hasPressure(MolecularDynamics >> float):\n",
84 | " pass\n",
85 | "\n",
86 | "class hasTemperature(MolecularDynamics >> float):\n",
87 | " pass\n",
88 | "\n",
89 | "class usesPotential(MolecularDynamics >> str):\n",
90 | " pass\n",
91 | "\n",
92 | "class AbinitioMolecularDynamics(AtomisticMethod):\n",
93 | " pass\n",
94 | "\n",
95 | "\n",
96 | "#Monte Carlo\n",
97 | "class MonteCarloMethod(ComputationalMethod):\n",
98 | " pass\n",
99 | "\n",
100 | "class KineticMonteCarloMethod(MonteCarloMethod):\n",
101 | " pass"
102 | ]
103 | },
104 | {
105 | "cell_type": "code",
106 | "execution_count": 12,
107 | "id": "677852fc-e05a-4922-a808-47761f25ffcb",
108 | "metadata": {},
109 | "outputs": [],
110 | "source": [
111 | "onto.save(file = \"msmo.owl\", format = \"rdfxml\")"
112 | ]
113 | },
114 | {
115 | "cell_type": "code",
116 | "execution_count": null,
117 | "id": "22b6e284-2b54-4851-a379-1a8df26f4705",
118 | "metadata": {},
119 | "outputs": [],
120 | "source": []
121 | }
122 | ],
123 | "metadata": {
124 | "kernelspec": {
125 | "display_name": "Python 3 (ipykernel)",
126 | "language": "python",
127 | "name": "python3"
128 | },
129 | "language_info": {
130 | "codemirror_mode": {
131 | "name": "ipython",
132 | "version": 3
133 | },
134 | "file_extension": ".py",
135 | "mimetype": "text/x-python",
136 | "name": "python",
137 | "nbconvert_exporter": "python",
138 | "pygments_lexer": "ipython3",
139 | "version": "3.11.8"
140 | }
141 | },
142 | "nbformat": 4,
143 | "nbformat_minor": 5
144 | }
145 |
--------------------------------------------------------------------------------
/notebooks/data_read.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "id": "bec4e0e1-2e16-4241-89e0-a7bbf8a53d8c",
6 | "metadata": {},
7 | "source": [
8 | "# DC3 dataset"
9 | ]
10 | },
11 | {
12 | "cell_type": "code",
13 | "execution_count": 2,
14 | "id": "2bd53fe2",
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "ebulk = -402.88114640\n",
19 | "evac_isif2 = -398.01662509\n",
20 | "evac_isif3 = -398.02220233"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 3,
26 | "id": "a2fba448",
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "n = 108"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": 4,
36 | "id": "3b5694ca",
37 | "metadata": {},
38 | "outputs": [
39 | {
40 | "data": {
41 | "text/plain": [
42 | "1.1341403248147799"
43 | ]
44 | },
45 | "execution_count": 4,
46 | "metadata": {},
47 | "output_type": "execute_result"
48 | }
49 | ],
50 | "source": [
51 | "evac_isif2 - ((n-1)/n)*ebulk"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": 5,
57 | "id": "443c8f3d",
58 | "metadata": {},
59 | "outputs": [
60 | {
61 | "data": {
62 | "text/plain": [
63 | "1.1285630848147434"
64 | ]
65 | },
66 | "execution_count": 5,
67 | "metadata": {},
68 | "output_type": "execute_result"
69 | }
70 | ],
71 | "source": [
72 | "evac_isif3 - ((n-1)/n)*ebulk"
73 | ]
74 | },
75 | {
76 | "cell_type": "code",
77 | "execution_count": 5,
78 | "id": "df95f07a-00a8-42f9-9a18-ba8532a5a512",
79 | "metadata": {
80 | "tags": []
81 | },
82 | "outputs": [],
83 | "source": [
84 | "from pyscal3.core import System\n",
85 | "import pyscal3.crystal_structures as pcs"
86 | ]
87 | },
88 | {
89 | "cell_type": "code",
90 | "execution_count": 3,
91 | "id": "00abae5c-4123-4456-8144-cf6362ea2bfd",
92 | "metadata": {
93 | "tags": []
94 | },
95 | "outputs": [],
96 | "source": [
97 | "sys = System('datasets/T_0.04Tm_snapshot_1')"
98 | ]
99 | },
100 | {
101 | "cell_type": "markdown",
102 | "id": "2be601e0-9ea5-4c63-9cd6-67e99fa95306",
103 | "metadata": {},
104 | "source": [
105 | "Lacks all information about the underlying structure:\n",
106 | "\n",
107 | "- species (LAMMPS)\n",
108 | "- lattice constant \n",
109 | "- Crystal structure name\n",
110 | "- Bravais lattice\n",
111 | "- Basis positions\n",
112 | "- Basis occupancy\n",
113 | "- Lattice vectors\n",
114 | "- Space group symbol (?)\n",
115 | "- Space group name (?)"
116 | ]
117 | },
118 | {
119 | "cell_type": "markdown",
120 | "id": "5ba58cff-3b6b-44b9-ad5b-92e14e61d517",
121 | "metadata": {},
122 | "source": [
123 | "How we do currently"
124 | ]
125 | },
126 | {
127 | "cell_type": "code",
128 | "execution_count": 6,
129 | "id": "dd472cdf-ef2c-4ad4-9852-39370c715f5a",
130 | "metadata": {
131 | "tags": []
132 | },
133 | "outputs": [
134 | {
135 | "name": "stdout",
136 | "output_type": "stream",
137 | "text": [
138 | "Al\n"
139 | ]
140 | }
141 | ],
142 | "source": [
143 | "atoms, box, sdict = pcs.make_crystal('fcc', lattice_constant=1,\n",
144 | " element='Al', return_structure_dict=True)"
145 | ]
146 | },
147 | {
148 | "cell_type": "code",
149 | "execution_count": 7,
150 | "id": "aed4d624-b25a-48b3-8bbe-6140acc5e711",
151 | "metadata": {
152 | "tags": []
153 | },
154 | "outputs": [
155 | {
156 | "data": {
157 | "text/plain": [
158 | "{'natoms': 4,\n",
159 | " 'species': [1, 1, 1, 1],\n",
160 | " 'scaling_factors': [1.0, 1.0, 1.0],\n",
161 | " 'positions': [[0.0, 0.0, 0.0],\n",
162 | " [0.5, 0.0, 0.5],\n",
163 | " [0.0, 0.5, 0.5],\n",
164 | " [0.5, 0.5, 0.0]]}"
165 | ]
166 | },
167 | "execution_count": 7,
168 | "metadata": {},
169 | "output_type": "execute_result"
170 | }
171 | ],
172 | "source": [
173 | "sdict"
174 | ]
175 | },
176 | {
177 | "cell_type": "code",
178 | "execution_count": null,
179 | "id": "712b32c7-1d6b-4fbb-ae65-64dbebc5a3b2",
180 | "metadata": {},
181 | "outputs": [],
182 | "source": []
183 | }
184 | ],
185 | "metadata": {
186 | "kernelspec": {
187 | "display_name": "Python 3 (ipykernel)",
188 | "language": "python",
189 | "name": "python3"
190 | },
191 | "language_info": {
192 | "codemirror_mode": {
193 | "name": "ipython",
194 | "version": 3
195 | },
196 | "file_extension": ".py",
197 | "mimetype": "text/x-python",
198 | "name": "python",
199 | "nbconvert_exporter": "python",
200 | "pygments_lexer": "ipython3",
201 | "version": "3.11.8"
202 | }
203 | },
204 | "nbformat": 4,
205 | "nbformat_minor": 5
206 | }
207 |
--------------------------------------------------------------------------------
/notebooks/data_set.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 55,
6 | "id": "9a603df5-b7c8-49a7-8745-4055f8b2b31b",
7 | "metadata": {
8 | "tags": []
9 | },
10 | "outputs": [],
11 | "source": [
12 | "from pyscal_rdf import StructureGraph\n",
13 | "from pyscal3.core import System\n",
14 | "from pyscal_rdf.network.network import OntologyNetwork\n",
15 | "from rdflib import Graph, Literal, Namespace\n",
16 | "import copy\n",
17 | "import json\n",
18 | "from deepmerge import always_merger"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": 2,
24 | "id": "b020cf69-527c-4e96-8f24-d1dbb3a91267",
25 | "metadata": {
26 | "tags": []
27 | },
28 | "outputs": [],
29 | "source": [
30 | "o = OntologyNetwork()"
31 | ]
32 | },
33 | {
34 | "cell_type": "code",
35 | "execution_count": 67,
36 | "id": "3b3d2dff-9f05-4722-9497-e7e4dbca5343",
37 | "metadata": {
38 | "tags": []
39 | },
40 | "outputs": [
41 | {
42 | "data": {
43 | "text/plain": [
44 | "{'cmso:hasAltName': cmso:hasAltName, 'cmso:hasName': cmso:hasName, 'cmso:hasAngle_alpha': cmso:hasAngle_alpha, 'cmso:hasAngle_beta': cmso:hasAngle_beta, 'cmso:hasAngle_gamma': cmso:hasAngle_gamma, 'cmso:hasChemicalSymbol': cmso:hasChemicalSymbol, 'cmso:hasSymbol': cmso:hasSymbol, 'cmso:hasComponent_x': cmso:hasComponent_x, 'cmso:hasComponent_y': cmso:hasComponent_y, 'cmso:hasComponent_z': cmso:hasComponent_z, 'cmso:hasCoordinationNumber': cmso:hasCoordinationNumber, 'cmso:hasElementRatio': cmso:hasElementRatio, 'cmso:hasLatticeSystem': cmso:hasLatticeSystem, 'cmso:hasLength_x': cmso:hasLength_x, 'cmso:hasLength_y': cmso:hasLength_y, 'cmso:hasLength_z': cmso:hasLength_z, 'cmso:hasNumberOfAtoms': cmso:hasNumberOfAtoms, 'cmso:hasRepetition_x': cmso:hasRepetition_x, 'cmso:hasRepetition_y': cmso:hasRepetition_y, 'cmso:hasRepetition_z': cmso:hasRepetition_z, 'cmso:hasSpaceGroupNumber': cmso:hasSpaceGroupNumber, 'cmso:hasSpaceGroupSymbol': cmso:hasSpaceGroupSymbol, 'cmso:hasVolume': cmso:hasVolume}"
45 | ]
46 | },
47 | "execution_count": 67,
48 | "metadata": {},
49 | "output_type": "execute_result"
50 | }
51 | ],
52 | "source": [
53 | "o.attributes['data_property']"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": 60,
59 | "id": "477862f5-5b1c-4e8a-b877-ac2fb86ec6a3",
60 | "metadata": {
61 | "tags": []
62 | },
63 | "outputs": [],
64 | "source": [
65 | "def dict_merge(dct, merge_dct):\n",
66 | " for k, v in merge_dct.items():\n",
67 | " if (k in dct and isinstance(dct[k], dict) and isinstance(merge_dct[k], dict)): #noqa\n",
68 | " dict_merge(dct[k], merge_dct[k])\n",
69 | " else:\n",
70 | " dct[k] = merge_dct[k]\n",
71 | " \n",
72 | "def create_dict(datadict, path):\n",
73 | " for p in path:\n",
74 | " datadict[str(p[0])] ={}\n",
75 | " datadict = datadict[str(p[0])]\n",
76 | " #exists\n",
77 | " datadict[str(p[0])] = {str(p[-1]): None}\n",
78 | " \n",
79 | "collected_dicts = []\n",
80 | "\n",
81 | "for key in o.attributes['data_node']:\n",
82 | " try:\n",
83 | " path = o.get_path_from_sample(key)\n",
84 | " datadict = {}\n",
85 | " create_dict(datadict, path)\n",
86 | " collected_dicts.append(copy.deepcopy(datadict))\n",
87 | " except:\n",
88 | " pass"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": 61,
94 | "id": "465233b0-fbe3-4483-bbe4-074a6863bf47",
95 | "metadata": {
96 | "tags": []
97 | },
98 | "outputs": [],
99 | "source": [
100 | "org_dict = collected_dicts[0]\n",
101 | "for dd in collected_dicts[1:]:\n",
102 | " always_merger.merge(org_dict, dd)"
103 | ]
104 | },
105 | {
106 | "cell_type": "code",
107 | "execution_count": 62,
108 | "id": "be9abaae-15f6-4259-8858-00ac559fdb1c",
109 | "metadata": {
110 | "tags": []
111 | },
112 | "outputs": [],
113 | "source": [
114 | "with open('a.json', 'w') as fout:\n",
115 | " json.dump(org_dict, fout)"
116 | ]
117 | },
118 | {
119 | "cell_type": "code",
120 | "execution_count": 63,
121 | "id": "ba801924-bc8d-482a-90aa-1978a11189dd",
122 | "metadata": {
123 | "tags": []
124 | },
125 | "outputs": [],
126 | "source": [
"import yaml\n",
"\n",
"with open('a.yaml', 'w') as fout:\n",
"    yaml.safe_dump(org_dict, fout)"
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": null,
134 | "id": "3ad6b74c-adf5-47c9-a6e5-4c193640abf4",
135 | "metadata": {},
136 | "outputs": [],
137 | "source": []
138 | }
139 | ],
140 | "metadata": {
141 | "kernelspec": {
142 | "display_name": "Python 3 (ipykernel)",
143 | "language": "python",
144 | "name": "python3"
145 | },
146 | "language_info": {
147 | "codemirror_mode": {
148 | "name": "ipython",
149 | "version": 3
150 | },
151 | "file_extension": ".py",
152 | "mimetype": "text/x-python",
153 | "name": "python",
154 | "nbconvert_exporter": "python",
155 | "pygments_lexer": "ipython3",
156 | "version": "3.10.13"
157 | }
158 | },
159 | "nbformat": 4,
160 | "nbformat_minor": 5
161 | }
162 |
--------------------------------------------------------------------------------
/notebooks/funowl.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 15,
6 | "id": "209bed88-6265-46bf-858c-f172024aa4db",
7 | "metadata": {
8 | "scrolled": true
9 | },
10 | "outputs": [],
11 | "source": [
12 | "from rdflib import Graph\n",
13 | "from funowl.converters.functional_converter import to_python\n",
14 | "\n",
15 | "# The functional syntax input can be a string, URL, file loc or open file\n",
16 | "onto = to_python(\"pyscal_rdf/data/cmso.owl\")"
17 | ]
18 | },
19 | {
20 | "cell_type": "code",
21 | "execution_count": 17,
22 | "id": "6f78142c-ce92-41c6-b267-85c169eabdd9",
23 | "metadata": {},
24 | "outputs": [
25 | {
26 | "data": {
27 | "text/plain": [
28 | "OntologyDocument(prefixDeclarations=, ontology=Ontology(iri=None, version=None, directlyImportsDocuments=[], axioms=[], annotations=[]))"
29 | ]
30 | },
31 | "execution_count": 17,
32 | "metadata": {},
33 | "output_type": "execute_result"
34 | }
35 | ],
36 | "source": [
37 | "onto"
38 | ]
39 | },
40 | {
41 | "cell_type": "code",
42 | "execution_count": null,
43 | "id": "f8b21676-2bcf-48a0-a8d3-99d80f3f13d9",
44 | "metadata": {},
45 | "outputs": [],
46 | "source": []
47 | }
48 | ],
49 | "metadata": {
50 | "kernelspec": {
51 | "display_name": "Python 3 (ipykernel)",
52 | "language": "python",
53 | "name": "python3"
54 | },
55 | "language_info": {
56 | "codemirror_mode": {
57 | "name": "ipython",
58 | "version": 3
59 | },
60 | "file_extension": ".py",
61 | "mimetype": "text/x-python",
62 | "name": "python",
63 | "nbconvert_exporter": "python",
64 | "pygments_lexer": "ipython3",
65 | "version": "3.10.12"
66 | }
67 | },
68 | "nbformat": 4,
69 | "nbformat_minor": 5
70 | }
71 |
--------------------------------------------------------------------------------
/notebooks/generate_sample_data/generate_data.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Generate dataset\n",
8 | "\n",
9 | "The aim of this notebook is to create a varied dataset with atomRDF that can be used testing quering methods, and also to show to variety of tasks that can be done. This should be runnable top to bottom."
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": 4,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "%config IPCompleter.evaluation='unsafe'"
19 | ]
20 | },
21 | {
22 | "cell_type": "markdown",
23 | "metadata": {},
24 | "source": [
25 | "Create a project"
26 | ]
27 | },
28 | {
29 | "cell_type": "code",
30 | "execution_count": 5,
31 | "metadata": {},
32 | "outputs": [],
33 | "source": [
34 | "from pyiron_atomistics import Project\n",
35 | "from atomrdf import KnowledgeGraph, System\n",
36 | "import numpy as np"
37 | ]
38 | },
39 | {
40 | "cell_type": "code",
41 | "execution_count": 6,
42 | "metadata": {},
43 | "outputs": [],
44 | "source": [
45 | "project = 'data_gen_1'\n",
46 | "pr = Project(project)"
47 | ]
48 | },
49 | {
50 | "cell_type": "code",
51 | "execution_count": 7,
52 | "metadata": {},
53 | "outputs": [],
54 | "source": [
55 | "kg = KnowledgeGraph(store='db', store_file=f'{project}.db')\n",
56 | "kg.enable_workflow(pr, workflow_environment='pyiron')"
57 | ]
58 | },
59 | {
60 | "cell_type": "markdown",
61 | "metadata": {},
62 | "source": [
63 | "Regular structures"
64 | ]
65 | },
66 | {
67 | "cell_type": "code",
68 | "execution_count": 8,
69 | "metadata": {},
70 | "outputs": [],
71 | "source": [
72 | "struct_Fe = System.create.element.Fe(graph=kg)\n",
73 | "struct_Si = System.create.element.Si(graph=kg)\n",
74 | "struct_l12 = System.create.lattice.l12(element=['Al', 'Ni'], \n",
75 | " lattice_constant=3.57, graph=kg)\n"
76 | ]
77 | },
78 | {
79 | "cell_type": "code",
80 | "execution_count": 9,
81 | "metadata": {},
82 | "outputs": [],
83 | "source": [
84 | "struct_gb_1 = System.create.defect.grain_boundary(axis=[0,0,1], \n",
85 | " sigma=5, \n",
86 | " gb_plane=[3, -1, 0],\n",
87 | " element='Fe',\n",
88 | " graph=kg)\n",
89 | "struct_gb_2 = System.create.defect.grain_boundary(axis=[1,1,1], \n",
90 | " sigma=19, \n",
91 | " gb_plane=[-3, 2, 0],\n",
92 | " element='Fe',\n",
93 | " graph=kg)\n",
94 | "\n",
95 | "struct_gb_3 = System.create.defect.grain_boundary(axis=[1,1,1], \n",
96 | " sigma=19, \n",
97 | " gb_plane=[-1, -1, -1],\n",
98 | " element='Fe',\n",
99 | " graph=kg)\n"
100 | ]
101 | },
102 | {
103 | "cell_type": "code",
104 | "execution_count": 10,
105 | "metadata": {},
106 | "outputs": [
107 | {
108 | "data": {
109 | "text/plain": [
110 | ""
111 | ]
112 | },
113 | "execution_count": 10,
114 | "metadata": {},
115 | "output_type": "execute_result"
116 | }
117 | ],
118 | "source": [
119 | "struct_Fe = System.create.element.Fe(graph=kg)\n",
120 | "struct_with_vacancy = struct_Fe.delete(indices=[0], copy_structure=True)\n",
121 | "struct_Al = System.create.element.Al(graph=kg)\n",
122 | "struct_Al.substitute_atoms('Mg', indices=[0], copy_structure=True)"
123 | ]
124 | },
125 | {
126 | "cell_type": "code",
127 | "execution_count": 11,
128 | "metadata": {},
129 | "outputs": [],
130 | "source": [
131 | "slip_direction = np.array([1, 0, -1])\n",
132 | "slip_plane = np.array([1, 1, 1])\n",
133 | "slip_system = [slip_direction, slip_plane]\n",
134 | "burgers_vector = 0.5\n",
135 | "dislocation_line = np.array([1, 0, -1])\n",
136 | "elastic_constant_dict = {'C11': 169, 'C12': 122, 'C44': 75.4}\n",
137 | "sys = System.create.defect.dislocation(slip_system,\n",
138 | " dislocation_line,\n",
139 | " elastic_constant_dict,\n",
140 | " burgers_vector=burgers_vector,\n",
141 | " element='Cu',\n",
142 | " dislocation_type='monopole',\n",
143 | " graph=kg,\n",
144 | " )"
145 | ]
146 | },
147 | {
148 | "cell_type": "code",
149 | "execution_count": 12,
150 | "metadata": {},
151 | "outputs": [
152 | {
153 | "name": "stdout",
154 | "output_type": "stream",
155 | "text": [
156 | "The job j1 was saved and received the ID: 1106\n"
157 | ]
158 | }
159 | ],
160 | "source": [
161 | "structure = pr.create.structure.annotated_structure.bulk('Cu', cubic=True)\n",
162 | "job = pr.create.job.Lammps('j1', delete_existing_job=True, delete_aborted_job=True)\n",
163 | "job.structure = structure\n",
164 | "job.potential = '2001--Mishin-Y--Cu-1--LAMMPS--ipr1'\n",
165 | "job.calc_md(pressure=0, temperature=500)\n",
166 | "job.run()\n",
167 | "kg.add_workflow(job, workflow_environment='pyiron')"
168 | ]
169 | },
170 | {
171 | "cell_type": "code",
172 | "execution_count": 13,
173 | "metadata": {},
174 | "outputs": [
175 | {
176 | "name": "stdout",
177 | "output_type": "stream",
178 | "text": [
179 | "The job murn_job was saved and received the ID: 1106\n",
180 | "The job murn_job_0_9 was saved and received the ID: 1107\n",
181 | "The job murn_job_0_95 was saved and received the ID: 1108\n",
182 | "The job murn_job_1_0 was saved and received the ID: 1109\n",
183 | "The job murn_job_1_05 was saved and received the ID: 1110\n",
184 | "The job murn_job_1_1 was saved and received the ID: 1111\n"
185 | ]
186 | }
187 | ],
188 | "source": [
189 | "structure = pr.create.structure.annotated_structure.bulk('Cu', cubic=True)\n",
190 | "ref_job = pr.create.job.Lammps('j1', delete_existing_job=True, delete_aborted_job=True)\n",
191 | "ref_job.structure = structure\n",
192 | "ref_job.potential = '2001--Mishin-Y--Cu-1--LAMMPS--ipr1'\n",
193 | "ref_job.calc_minimize()\n",
194 | "murn_job = ref_job.create_job(pr.job_type.Murnaghan, 'murn_job')\n",
195 | "murn_job.input[\"num_points\"] = 5\n",
196 | "murn_job.run()\n",
197 | "kg.add_workflow(murn_job, workflow_environment='pyiron', add_intermediate_jobs=True)"
198 | ]
199 | },
200 | {
201 | "cell_type": "code",
202 | "execution_count": 14,
203 | "metadata": {},
204 | "outputs": [
205 | {
206 | "name": "stdout",
207 | "output_type": "stream",
208 | "text": [
209 | "The job quasi was saved and received the ID: 1112\n",
210 | "The job quasi_0_9 was saved and received the ID: 1113\n",
211 | "The job reflmp_0 was saved and received the ID: 1114\n",
212 | "The job quasi_0_92 was saved and received the ID: 1115\n",
213 | "The job reflmp_0 was saved and received the ID: 1116\n",
214 | "The job quasi_0_94 was saved and received the ID: 1117\n",
215 | "The job reflmp_0 was saved and received the ID: 1118\n",
216 | "The job quasi_0_96 was saved and received the ID: 1119\n",
217 | "The job reflmp_0 was saved and received the ID: 1120\n",
218 | "The job quasi_0_98 was saved and received the ID: 1121\n",
219 | "The job reflmp_0 was saved and received the ID: 1122\n",
220 | "The job quasi_1_0 was saved and received the ID: 1123\n",
221 | "The job reflmp_0 was saved and received the ID: 1124\n",
222 | "The job quasi_1_02 was saved and received the ID: 1125\n",
223 | "The job reflmp_0 was saved and received the ID: 1126\n",
224 | "The job quasi_1_04 was saved and received the ID: 1127\n",
225 | "The job reflmp_0 was saved and received the ID: 1128\n",
226 | "The job quasi_1_06 was saved and received the ID: 1129\n",
227 | "The job reflmp_0 was saved and received the ID: 1130\n",
228 | "The job quasi_1_08 was saved and received the ID: 1131\n",
229 | "The job reflmp_0 was saved and received the ID: 1132\n",
230 | "The job quasi_1_1 was saved and received the ID: 1133\n",
231 | "The job reflmp_0 was saved and received the ID: 1134\n"
232 | ]
233 | },
234 | {
235 | "name": "stderr",
236 | "output_type": "stream",
237 | "text": [
238 | "/home/srmnitc/miniconda3/envs/workflow-rdf/lib/python3.11/site-packages/atomrdf/graph.py:376: UserWarning: asmo:hasValue has a range with unspecified datatype!\n",
239 | " warnings.warn(f\"{triple[1].name} has a range with unspecified datatype!\")\n"
240 | ]
241 | }
242 | ],
243 | "source": [
244 | "struct = pr.create.structure.annotated_structure.bulk('Cu')\n",
245 | "refjob = pr.create.job.Lammps('reflmp')\n",
246 | "refjob.structure = struct\n",
247 | "refjob.potential = '2009--Mendelev-M-I--Cu-Zr--LAMMPS--ipr1'\n",
248 | "phono = pr.create.job.PhonopyJob('phono')\n",
249 | "phono.ref_job = refjob\n",
250 | "quasi = pr.create.job.QuasiHarmonicJob('quasi')\n",
251 | "quasi.ref_job = phono\n",
252 | "quasi.input[\"temperature_end\"] = 500\n",
253 | "quasi.input[\"temperature_steps\"] = 2\n",
254 | "quasi.input[\"axes\"]=[\"x\",\"y\",\"z\"]\n",
255 | "quasi.input[\"strains\"] = None\n",
256 | "quasi.run()\n",
257 | "kg.add_workflow(quasi, workflow_environment='pyiron')"
258 | ]
259 | },
260 | {
261 | "cell_type": "code",
262 | "execution_count": 15,
263 | "metadata": {},
264 | "outputs": [],
265 | "source": [
266 | "kg.archive('dataset')"
267 | ]
268 | }
269 | ],
270 | "metadata": {
271 | "kernelspec": {
272 | "display_name": "workflow-rdf",
273 | "language": "python",
274 | "name": "python3"
275 | },
276 | "language_info": {
277 | "codemirror_mode": {
278 | "name": "ipython",
279 | "version": 3
280 | },
281 | "file_extension": ".py",
282 | "mimetype": "text/x-python",
283 | "name": "python",
284 | "nbconvert_exporter": "python",
285 | "pygments_lexer": "ipython3",
286 | "version": "3.11.8"
287 | }
288 | },
289 | "nbformat": 4,
290 | "nbformat_minor": 2
291 | }
292 |
--------------------------------------------------------------------------------
/notebooks/io_poscar.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "id": "3d725a3f-590a-4e35-8e30-d4d8965ca5df",
7 | "metadata": {
8 | "tags": []
9 | },
10 | "outputs": [],
11 | "source": [
12 | "from pyscal_rdf import StructureGraph"
13 | ]
14 | },
15 | {
16 | "cell_type": "markdown",
17 | "id": "a0db892a-cbc4-48f9-aaff-97efb3e4614b",
18 | "metadata": {},
19 | "source": [
20 | "Get the main sample triple"
21 | ]
22 | },
23 | {
24 | "cell_type": "code",
25 | "execution_count": 52,
26 | "id": "66d10b1c-2d57-4d62-84ca-fd70611d4e41",
27 | "metadata": {
28 | "tags": []
29 | },
30 | "outputs": [
31 | {
32 | "name": "stdout",
33 | "output_type": "stream",
34 | "text": [
35 | "Fe\n",
36 | "Si\n"
37 | ]
38 | }
39 | ],
40 | "source": [
41 | "g = SampleGraph()\n",
42 | "struct_Fe = g.create_element(\"Fe\")\n",
43 | "struct_Fe = g.create_element(\"Si\")\n",
44 | "sample = g.samples[0]"
45 | ]
46 | },
47 | {
48 | "cell_type": "code",
49 | "execution_count": 53,
50 | "id": "4827f24b-b4c7-49b2-8867-2da18ec51992",
51 | "metadata": {
52 | "tags": []
53 | },
54 | "outputs": [],
55 | "source": [
56 | "g.iterate_graph(sample, create_new_graph=True)"
57 | ]
58 | },
59 | {
60 | "cell_type": "code",
61 | "execution_count": 54,
62 | "id": "08af91fd-44e1-48dd-914e-7198020dba4c",
63 | "metadata": {
64 | "tags": []
65 | },
66 | "outputs": [],
67 | "source": [
68 | "g.sgraph.write(\"a.ttl\")"
69 | ]
70 | },
71 | {
72 | "cell_type": "code",
73 | "execution_count": 51,
74 | "id": "69ec5d1c-8fb9-4def-8305-53208105a3d5",
75 | "metadata": {
76 | "tags": []
77 | },
78 | "outputs": [],
79 | "source": [
80 | "g.write(\"b.ttl\")"
81 | ]
82 | }
83 | ],
84 | "metadata": {
85 | "kernelspec": {
86 | "display_name": "Python 3 (ipykernel)",
87 | "language": "python",
88 | "name": "python3"
89 | },
90 | "language_info": {
91 | "codemirror_mode": {
92 | "name": "ipython",
93 | "version": 3
94 | },
95 | "file_extension": ".py",
96 | "mimetype": "text/x-python",
97 | "name": "python",
98 | "nbconvert_exporter": "python",
99 | "pygments_lexer": "ipython3",
100 | "version": "3.9.16"
101 | }
102 | },
103 | "nbformat": 4,
104 | "nbformat_minor": 5
105 | }
106 |
--------------------------------------------------------------------------------
/notebooks/mem_usage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyscal/atomRDF/b177c889afc5b1b31d043ec36681766e0dc37790/notebooks/mem_usage.png
--------------------------------------------------------------------------------
/notebooks/memscript.py:
--------------------------------------------------------------------------------
def create_graph(n=5, store_file="testfile_multi_2.db", copies=3):
    """Build an L12 Al-Ni structure and add it to a SQLAlchemy-backed
    StructureGraph repeatedly (memory-profiling helper script).

    Parameters
    ----------
    n : int, optional
        Number of unit-cell repetitions along each axis (default 5,
        matching the original hard-coded value).
    store_file : str, optional
        SQLite database file backing the SQLAlchemy graph store.
    copies : int, optional
        How many times the same structure object is added to the graph
        (default 3, matching the original three explicit calls).
    """
    # Local import kept from the original so the module can be loaded by
    # memory profilers without importing pyscal_rdf at import time.
    from pyscal_rdf import StructureGraph

    g = StructureGraph(store='SQLAlchemy', store_file=store_file)
    # NOTE(review): the original local name was 'struct_Fe', but this is
    # an Al-Ni L12 structure, not iron — renamed for clarity.
    struct_l12 = g.create_structure(
        "l12",
        element=['Al', 'Ni'],
        lattice_constant=3.57,
        repetitions=[n, n, n],
    )
    # Adding the identical structure several times reproduces the original
    # script's behavior, used to measure memory growth per add operation.
    for _ in range(copies):
        g.add_structure_to_graph(struct_l12)
10 |
--------------------------------------------------------------------------------
/notebooks/metadata_schema.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 8,
6 | "id": "9058253b-d5f3-4e70-8952-61f7d2992b08",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "from pyscal_rdf.network.ontology import read_ontology\n",
11 | "from rdflib import Graph, Literal, Namespace\n",
12 | "import copy"
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": 9,
18 | "id": "4c5d73b6-da02-420c-a223-4eef93c872bc",
19 | "metadata": {},
20 | "outputs": [],
21 | "source": [
22 | "onto = read_ontology()"
23 | ]
24 | },
25 | {
26 | "cell_type": "code",
27 | "execution_count": 23,
28 | "id": "09f55784-36b9-471d-a766-4a3a017f7696",
29 | "metadata": {
30 | "collapsed": true,
31 | "jupyter": {
32 | "outputs_hidden": true
33 | }
34 | },
35 | "outputs": [
36 | {
37 | "data": {
38 | "text/plain": [
39 | "{'cmso:hasAltName': cmso:hasAltName,\n",
40 | " 'cmso:hasName': cmso:hasName,\n",
41 | " 'cmso:hasAngle_alpha': cmso:hasAngle_alpha,\n",
42 | " 'cmso:hasAngle_beta': cmso:hasAngle_beta,\n",
43 | " 'cmso:hasAngle_gamma': cmso:hasAngle_gamma,\n",
44 | " 'cmso:hasChemicalSymbol': cmso:hasChemicalSymbol,\n",
45 | " 'cmso:hasSymbol': cmso:hasSymbol,\n",
46 | " 'cmso:hasComponent_x': cmso:hasComponent_x,\n",
47 | " 'cmso:hasComponent_y': cmso:hasComponent_y,\n",
48 | " 'cmso:hasComponent_z': cmso:hasComponent_z,\n",
49 | " 'cmso:hasCoordinationNumber': cmso:hasCoordinationNumber,\n",
50 | " 'cmso:hasElementRatio': cmso:hasElementRatio,\n",
51 | " 'cmso:hasLength_x': cmso:hasLength_x,\n",
52 | " 'cmso:hasLength_y': cmso:hasLength_y,\n",
53 | " 'cmso:hasLength_z': cmso:hasLength_z,\n",
54 | " 'cmso:hasNumberOfAtoms': cmso:hasNumberOfAtoms,\n",
55 | " 'cmso:hasReference': cmso:hasReference,\n",
56 | " 'cmso:hasRepetition_x': cmso:hasRepetition_x,\n",
57 | " 'cmso:hasRepetition_y': cmso:hasRepetition_y,\n",
58 | " 'cmso:hasRepetition_z': cmso:hasRepetition_z,\n",
59 | " 'cmso:hasSpaceGroupNumber': cmso:hasSpaceGroupNumber,\n",
60 | " 'cmso:hasSpaceGroupSymbol': cmso:hasSpaceGroupSymbol,\n",
61 | " 'cmso:hasValue': cmso:hasValue,\n",
62 | " 'cmso:hasVolume': cmso:hasVolume,\n",
63 | " 'pldo:geometricalDegreesOfFreedom': pldo:geometricalDegreesOfFreedom,\n",
64 | " 'pldo:hasGBplane': pldo:hasGBplane,\n",
65 | " 'pldo:macroscopicDegreesOfFreedom': pldo:macroscopicDegreesOfFreedom,\n",
66 | " 'pldo:hasMisorientationAngle': pldo:hasMisorientationAngle,\n",
67 | " 'pldo:hasRotationAxis': pldo:hasRotationAxis,\n",
68 | " 'pldo:hasSigmaValue': pldo:hasSigmaValue,\n",
69 | " 'pldo:microscopicDegreesOfFreedom': pldo:microscopicDegreesOfFreedom,\n",
70 | " 'podo:hasNumberOfVacancies': podo:hasNumberOfVacancies,\n",
71 | " 'podo:hasVacancyConcentration': podo:hasVacancyConcentration,\n",
72 | " 'rdfs:label': rdfs:label}"
73 | ]
74 | },
75 | "execution_count": 23,
76 | "metadata": {},
77 | "output_type": "execute_result"
78 | }
79 | ],
80 | "source": [
81 | "onto.attributes['data_property']"
82 | ]
83 | },
84 | {
85 | "cell_type": "code",
86 | "execution_count": 25,
87 | "id": "ce296bb0-8e6a-4ab2-8940-52d6a0121e6f",
88 | "metadata": {
89 | "scrolled": true
90 | },
91 | "outputs": [
92 | {
93 | "data": {
94 | "text/plain": [
95 | "['cmso:SimulationCell']"
96 | ]
97 | },
98 | "execution_count": 25,
99 | "metadata": {},
100 | "output_type": "execute_result"
101 | }
102 | ],
103 | "source": [
104 | "onto.attributes['data_property']['cmso:hasVolume'].domain"
105 | ]
106 | },
107 | {
108 | "cell_type": "code",
109 | "execution_count": 3,
110 | "id": "8dc0e696-b128-4ebe-9ebe-3312e8b04124",
111 | "metadata": {},
112 | "outputs": [],
113 | "source": [
114 | "def create_dict(datadict, path):\n",
115 | " for p in path:\n",
116 | " datadict[str(p[0])] ={}\n",
117 | " datadict = datadict[str(p[0])]\n",
118 | " #exists\n",
119 | " datadict[str(p[0])] = {str(p[-1]): None}"
120 | ]
121 | },
122 | {
123 | "cell_type": "code",
124 | "execution_count": 4,
125 | "id": "7d267c94-b4f2-4fe8-be2e-c44f4300c128",
126 | "metadata": {},
127 | "outputs": [],
128 | "source": [
129 | "collected_dicts = []"
130 | ]
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": 10,
135 | "id": "c87873b0-62d1-40d1-a6f3-9418e0f8c8d2",
136 | "metadata": {},
137 | "outputs": [],
138 | "source": [
139 | "for key in onto.attributes['data_property']:\n",
140 | " try:\n",
141 | " path = onto.get_path_from_sample(key)\n",
142 | " datadict = {}\n",
143 | " create_dict(datadict, path)\n",
144 | " collected_dicts.append(copy.deepcopy(datadict))\n",
145 | " except:\n",
146 | " pass"
147 | ]
148 | },
149 | {
150 | "cell_type": "code",
151 | "execution_count": null,
152 | "id": "30d5eae4-d4a4-432d-99cc-7ec8e5944225",
153 | "metadata": {},
154 | "outputs": [],
155 | "source": [
156 | "def dict_merge(dct, merge_dct):\n",
157 | " for k, v in merge_dct.items():\n",
158 | " if (k in dct and isinstance(dct[k], dict) and isinstance(merge_dct[k], dict)): #noqa\n",
159 | " dict_merge(dct[k], merge_dct[k])\n",
160 | " else:\n",
161 | " dct[k] = merge_dct[k]\n",
162 | " \n",
163 | " \n",
164 | "collected_dicts = []\n",
165 | "\n",
166 | "for key in o.attributes['data_node']:\n",
167 | " try:\n",
168 | " path = o.get_path_from_sample(key)\n",
169 | " datadict = {}\n",
170 | " create_dict(datadict, path)\n",
171 | " collected_dicts.append(copy.deepcopy(datadict))\n",
172 | " except:\n",
173 | " pass"
174 | ]
175 | },
176 | {
177 | "cell_type": "code",
178 | "execution_count": 19,
179 | "id": "f407cffc-c1ba-4d51-ae78-0c580a77dea1",
180 | "metadata": {},
181 | "outputs": [
182 | {
183 | "data": {
184 | "text/plain": [
185 | "{'cmso:ComputationalSample': {'cmso:SimulationCell': {'cmso:Angle': {'cmso:Angle': {'cmso:hasAngle_gamma': None}}}}}"
186 | ]
187 | },
188 | "execution_count": 19,
189 | "metadata": {},
190 | "output_type": "execute_result"
191 | }
192 | ],
193 | "source": [
194 | "collected_dicts[3]"
195 | ]
196 | },
197 | {
198 | "cell_type": "code",
199 | "execution_count": null,
200 | "id": "836f088d-dee5-4ebf-8397-9339d0a1362a",
201 | "metadata": {},
202 | "outputs": [],
203 | "source": []
204 | }
205 | ],
206 | "metadata": {
207 | "kernelspec": {
208 | "display_name": "Python 3 (ipykernel)",
209 | "language": "python",
210 | "name": "python3"
211 | },
212 | "language_info": {
213 | "codemirror_mode": {
214 | "name": "ipython",
215 | "version": 3
216 | },
217 | "file_extension": ".py",
218 | "mimetype": "text/x-python",
219 | "name": "python",
220 | "nbconvert_exporter": "python",
221 | "pygments_lexer": "ipython3",
222 | "version": "3.10.13"
223 | }
224 | },
225 | "nbformat": 4,
226 | "nbformat_minor": 5
227 | }
228 |
--------------------------------------------------------------------------------
/notebooks/ontology_parsing.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 52,
6 | "id": "01a625df-df2b-4e77-bc5d-10d56309001d",
7 | "metadata": {},
8 | "outputs": [],
9 | "source": [
10 | "from rdflib import Graph, URIRef, BNode\n",
11 | "import networkx as nx\n",
12 | "from pyscal_rdf.network import Network\n",
13 | "import numpy as np"
14 | ]
15 | },
16 | {
17 | "cell_type": "code",
18 | "execution_count": 5,
19 | "id": "43951a55-2676-4c2f-99a6-9f206b4be357",
20 | "metadata": {},
21 | "outputs": [
22 | {
23 | "data": {
24 | "text/plain": [
25 | ")>"
26 | ]
27 | },
28 | "execution_count": 5,
29 | "metadata": {},
30 | "output_type": "execute_result"
31 | }
32 | ],
33 | "source": [
34 | "g = Graph()\n",
35 | "#g.parse(\"http://purls.helmholtz-metadaten.de/cmso/\", format=\"xml\")\n",
36 | "g.parse(\"../pyscal_rdf/data/cmso.owl\", format='xml')"
37 | ]
38 | },
39 | {
40 | "cell_type": "markdown",
41 | "id": "ac8731b4-dfb5-4180-a30d-0fcf5b7a5d45",
42 | "metadata": {},
43 | "source": [
44 | "Interesting quantities we need:\n",
45 | "\n",
46 | "- http://www.w3.org/2000/01/rdf-schema#domain\n",
47 | "- http://www.w3.org/2000/01/rdf-schema#range\n",
48 | "- http://www.w3.org/2002/07/owl#ObjectProperty\n",
49 | "- http://www.w3.org/2002/07/owl#DatatypeProperty\n",
50 | "- http://www.w3.org/2002/07/owl#Class\n",
51 | "- http://www.w3.org/2002/07/owl#AnnotationProperty"
52 | ]
53 | },
54 | {
55 | "cell_type": "code",
56 | "execution_count": 53,
57 | "id": "c46db803-a0a5-4eca-8375-34c700100931",
58 | "metadata": {},
59 | "outputs": [],
60 | "source": [
61 | "class OntologyNetwork:\n",
62 | " def __init__(self, owlfile=None):\n",
63 | " #super().__init__()\n",
64 | " self.object_property_list = None\n",
65 | " self.data_property_list = None\n",
66 | " self.class_list = None\n",
67 | " if owlfile is not None:\n",
68 | " self.parse(owlfile)\n",
69 | " \n",
70 | " def parse(self, owlfile):\n",
71 | " self.g = Graph()\n",
72 | " self.ng = nx.DiGraph()\n",
73 | " self.g.parse(owlfile, format='xml')\n",
74 | " self._get_object_property()\n",
75 | " self._get_datatype_property()\n",
76 | " self._get_classes()\n",
77 | " \n",
78 | " def _get_object_property(self):\n",
79 | " obj_props = []\n",
80 | " for s in self.g.triples((None, None, URIRef(\"http://www.w3.org/2002/07/owl#ObjectProperty\"))):\n",
81 | " obj_props.append(s[0])\n",
82 | " self.object_property_list = obj_props\n",
83 | "\n",
84 | " def _get_datatype_property(self):\n",
85 | " data_props = []\n",
86 | " for s in self.g.triples((None, None, URIRef(\"http://www.w3.org/2002/07/owl#DatatypeProperty\"))):\n",
87 | " data_props.append(s[0])\n",
88 | " self.data_property_list = data_props\n",
89 | "\n",
90 | " def _get_classes(self):\n",
91 | " classes = []\n",
92 | " for s in self.g.triples((None, None, URIRef(\"http://www.w3.org/2002/07/owl#Class\"))):\n",
93 | " classes.append(s[0])\n",
94 | " self.ng.add_node(s[0], node_type='class')\n",
95 | " self.class_list = classes\n",
96 | "\n",
97 | " def _get_domain_and_range(self):\n",
98 | " for prop in self.object_property_list:\n",
99 | " domain = list([s[2] for s in self.g.triples((prop, URIRef('http://www.w3.org/2000/01/rdf-schema#domain'), None))])\n",
100 | " range = list([s[2] for s in self.g.triples((prop, URIRef('http://www.w3.org/2000/01/rdf-schema#range'), None))])\n",
101 | " print(\"-------------------------\")\n",
102 | " print(prop)\n",
103 | " print(domain)\n",
104 | " print(range)\n",
105 | "\n",
106 | " def _parse_bnode(self, bnode, class_to_add=[], class_to_delete=[]):\n",
107 | " \"\"\"\n",
108 | " Parse a bnode\n",
109 | "\n",
110 | " Notes\n",
111 | " -----\n",
112 | " There are three possibilities: (1) unionOf: which gives another BNode\n",
113 | " then (2) first: gives one class of the union\n",
114 | " then (3) rest: gives a Bnode\n",
115 | " rest BNode -> first -> Class\n",
116 | " rest -> Nil\n",
117 | " \"\"\"\n",
118 | " #first search for union, then its a base node\n",
119 | " res = list([s for s in self.g.triples((bnode, URIRef('http://www.w3.org/2002/07/owl#unionOf'), None))])\n",
120 | " if len(res)>0:\n",
121 | " target_bnode = res[0][2]\n",
123 | "        #now check if instead we get the first term\n",
123 | " #call this func again\n",
124 | " self._parse_bnode(target_bnode, class_to_add, class_to_delete)\n",
125 | " res = list([s for s in self.g.triples((bnode, URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#first'), None))])\n",
126 | " if len(res)>0:\n",
127 | " class_to_add.append(res[0][2])\n",
128 | " class_to_delete.append(res[0][0])\n",
129 | " #we would also get a rest term\n",
130 | " r_res = list([s for s in self.g.triples((bnode, URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#rest'), None))])\n",
131 | " if len(r_res)>0:\n",
132 | " #we can get either nil\n",
133 | " if r_res[0][2] == URIRef('http://www.w3.org/1999/02/22-rdf-syntax-ns#nil'):\n",
134 | " #we are done\n",
135 | " return class_to_add, class_to_delete\n",
136 | " else:\n",
137 | " target_bnode = r_res[0][2]\n",
138 | " self._parse_bnode(target_bnode, class_to_add, class_to_delete)\n",
139 | " def parse_bnodes(self):\n",
140 | " class_to_add = []\n",
141 | " class_to_delete = []\n",
142 | " for cls in self.class_list:\n",
143 | " if isinstance(cls,BNode):\n",
144 | " self._parse_bnode(cls, class_to_add, class_to_delete)\n",
145 | " class_to_add = np.unique(class_to_add)\n",
146 | " class_to_delete = np.unique(class_to_delete)\n",
147 | " "
148 | ]
149 | },
150 | {
151 | "cell_type": "code",
152 | "execution_count": 54,
153 | "id": "9eda2d60-9473-4575-8a41-b65140f8f2be",
154 | "metadata": {},
155 | "outputs": [],
156 | "source": [
157 | "o = OntologyNetwork(\"../pyscal_rdf/data/cmso.owl\")"
158 | ]
159 | },
160 | {
161 | "cell_type": "code",
162 | "execution_count": 55,
163 | "id": "dc3aebfa-fe59-4b73-9635-ce09b109fa59",
164 | "metadata": {
165 | "scrolled": true
166 | },
167 | "outputs": [
168 | {
169 | "data": {
170 | "text/plain": [
171 | "(array(['http://purls.helmholtz-metadaten.de/cmso/Atom',\n",
172 | " 'http://purls.helmholtz-metadaten.de/cmso/Basis',\n",
173 | " 'http://purls.helmholtz-metadaten.de/cmso/LatticeParameter',\n",
174 | " 'http://purls.helmholtz-metadaten.de/cmso/Length',\n",
175 | " 'http://purls.helmholtz-metadaten.de/cmso/SimulationCell',\n",
176 | " 'http://purls.helmholtz-metadaten.de/cmso/UnitCell'], dtype=' 1\u001b[0m on \u001b[38;5;241m=\u001b[39m \u001b[43mOntologyNetwork\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
31 | "File \u001b[0;32m~/miniconda3/envs/rdf-wf-4/lib/python3.10/site-packages/pyscal_rdf/network.py:22\u001b[0m, in \u001b[0;36mOntologyNetwork.__init__\u001b[0;34m(self, infile)\u001b[0m\n\u001b[1;32m 19\u001b[0m infile \u001b[38;5;241m=\u001b[39m owlfile\n\u001b[1;32m 21\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mg \u001b[38;5;241m=\u001b[39m nx\u001b[38;5;241m.\u001b[39mDiGraph()\n\u001b[0;32m---> 22\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39monto \u001b[38;5;241m=\u001b[39m \u001b[43mOntoParser\u001b[49m\u001b[43m(\u001b[49m\u001b[43minfile\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 23\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mdata_prefix \u001b[38;5;241m=\u001b[39m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124mvalue\u001b[39m\u001b[38;5;124m'\u001b[39m\n\u001b[1;32m 25\u001b[0m \u001b[38;5;66;03m#call methods\u001b[39;00m\n",
32 | "File \u001b[0;32m~/miniconda3/envs/rdf-wf-4/lib/python3.10/site-packages/pyscal_rdf/parser.py:14\u001b[0m, in \u001b[0;36mOntoParser.__init__\u001b[0;34m(self, infile, delimiter)\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mtree \u001b[38;5;241m=\u001b[39m get_ontology(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mfile://\u001b[39m\u001b[38;5;132;01m{\u001b[39;00minfile\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\u001b[38;5;241m.\u001b[39mload()\n\u001b[1;32m 13\u001b[0m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[0;32m---> 14\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mFileNotFoundError\u001b[39;00m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mfile \u001b[39m\u001b[38;5;132;01m{\u001b[39;00minfile\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m not found!\u001b[39m\u001b[38;5;124m'\u001b[39m)\n\u001b[1;32m 15\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mattributes \u001b[38;5;241m=\u001b[39m {}\n\u001b[1;32m 16\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mattributes[\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mclass\u001b[39m\u001b[38;5;124m'\u001b[39m] \u001b[38;5;241m=\u001b[39m {}\n",
33 | "\u001b[0;31mFileNotFoundError\u001b[0m: file /home/menon/miniconda3/envs/rdf-wf-4/lib/python3.10/site-packages/pyscal_rdf/data/cmso.owl not found!"
34 | ]
35 | }
36 | ],
37 | "source": [
38 | "on = OntologyNetwork()"
39 | ]
40 | },
41 | {
42 | "cell_type": "code",
43 | "execution_count": null,
44 | "id": "21d603ea-6249-41fd-9e68-800593b19449",
45 | "metadata": {},
46 | "outputs": [],
47 | "source": []
48 | }
49 | ],
50 | "metadata": {
51 | "kernelspec": {
52 | "display_name": "Python 3 (ipykernel)",
53 | "language": "python",
54 | "name": "python3"
55 | },
56 | "language_info": {
57 | "codemirror_mode": {
58 | "name": "ipython",
59 | "version": 3
60 | },
61 | "file_extension": ".py",
62 | "mimetype": "text/x-python",
63 | "name": "python",
64 | "nbconvert_exporter": "python",
65 | "pygments_lexer": "ipython3",
66 | "version": "3.10.12"
67 | }
68 | },
69 | "nbformat": 4,
70 | "nbformat_minor": 5
71 | }
72 |
--------------------------------------------------------------------------------
/notebooks/wrap_creation.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "id": "a3392ffa-aa17-4b08-b7ff-94d21bd61f6d",
7 | "metadata": {
8 | "tags": []
9 | },
10 | "outputs": [],
11 | "source": [
12 | "from pyscal_rdf.network import OntologyNetwork"
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": 2,
18 | "id": "9a21e979-8497-4d0e-88f3-4452bab8b85d",
19 | "metadata": {
20 | "tags": []
21 | },
22 | "outputs": [],
23 | "source": [
24 | "n = OntologyNetwork()"
25 | ]
26 | },
27 | {
28 | "cell_type": "code",
29 | "execution_count": 10,
30 | "id": "1ce29502-3322-41fd-960b-e7d3d792cd74",
31 | "metadata": {
32 | "tags": []
33 | },
34 | "outputs": [
35 | {
36 | "name": "stdout",
37 | "output_type": "stream",
38 | "text": [
39 | "I3m\n",
40 | "[['Sample', 'cmso:hasMaterial', 'Material'], ['Material', 'cmso:hasStructure', 'CrystalStructure'], ['CrystalStructure', 'cmso:hasSpaceGroup', 'SpaceGroup'], ['SpaceGroup', 'cmso:hasSpaceGroupSymbol', 'SpaceGroupSymbol']]\n",
41 | "['I3m'] single_string\n"
42 | ]
43 | },
44 | {
45 | "data": {
46 | "text/plain": [
47 | "'PREFIX cmso: PREFIX pldo: PREFIX podo: PREFIX rdf: SELECT DISTINCT ?sample WHERE { ?sample cmso:hasMaterial ?material . ?material cmso:hasStructure ?crystalstructure . ?crystalstructure cmso:hasSpaceGroup ?spacegroup . ?spacegroup cmso:hasSpaceGroupSymbol ?spacegroupsymbol . FILTER (?spacegroupsymbol=\"I3m\"^^xsd:string) }'"
48 | ]
49 | },
50 | "execution_count": 10,
51 | "metadata": {},
52 | "output_type": "execute_result"
53 | }
54 | ],
55 | "source": [
56 | "triplets = n.formulate_query(\"SpaceGroupSymbol\", \"I3m\")\n",
57 | "triplets"
58 | ]
59 | },
60 | {
61 | "cell_type": "code",
62 | "execution_count": 9,
63 | "id": "4f8d7425-24ed-4d62-a562-22cb7be78130",
64 | "metadata": {},
65 | "outputs": [
66 | {
67 | "data": {
68 | "text/plain": [
69 | "['PREFIX cmso: ',\n",
70 | " 'PREFIX pldo: ',\n",
71 | " 'PREFIX podo: ',\n",
72 | " 'PREFIX rdf: ',\n",
73 | " 'SELECT DISTINCT ?sample',\n",
74 | " 'WHERE {',\n",
75 | " ' ?sample cmso:hasMaterial ?material .',\n",
76 | " ' ?material cmso:hasStructure ?crystalstructure .',\n",
77 | " ' ?crystalstructure cmso:hasSpaceGroup ?spacegroup .',\n",
78 | " ' ?spacegroup cmso:hasSpaceGroupSymbol ?spacegroupsymbol .']"
79 | ]
80 | },
81 | "execution_count": 9,
82 | "metadata": {},
83 | "output_type": "execute_result"
84 | }
85 | ],
86 | "source": [
87 | "query = n._formulate_query_path(triplets)\n",
88 | "query"
89 | ]
90 | },
91 | {
92 | "cell_type": "code",
93 | "execution_count": null,
94 | "id": "a8915d92-1e95-4612-ac71-fab71e662c86",
95 | "metadata": {
96 | "tags": []
97 | },
98 | "outputs": [],
99 | "source": [
100 | "xx"
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": null,
106 | "id": "150ff24b-9a17-4d14-aeda-ddc314a2a27b",
107 | "metadata": {},
108 | "outputs": [],
109 | "source": []
110 | }
111 | ],
112 | "metadata": {
113 | "kernelspec": {
114 | "display_name": "Python 3 (ipykernel)",
115 | "language": "python",
116 | "name": "python3"
117 | },
118 | "language_info": {
119 | "codemirror_mode": {
120 | "name": "ipython",
121 | "version": 3
122 | },
123 | "file_extension": ".py",
124 | "mimetype": "text/x-python",
125 | "name": "python",
126 | "nbconvert_exporter": "python",
127 | "pygments_lexer": "ipython3",
128 | "version": "3.10.12"
129 | }
130 | },
131 | "nbformat": 4,
132 | "nbformat_minor": 5
133 | }
134 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | ase
3 | rdflib
4 | pyyaml
5 | graphviz
6 | networkx
7 | pyscal3
8 | spglib
9 | pandas
10 | tools4rdf
11 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup, find_packages


# Read the long description from the README; an explicit encoding avoids
# locale-dependent UnicodeDecodeError on platforms with non-UTF-8 defaults.
with open('README.md', encoding='utf-8') as readme_file:
    readme = readme_file.read()

setup(
    name='atomrdf',
    version='0.10.2',
    author='Abril Azocar Guzman, Sarath Menon',
    author_email='sarath.menon@pyscal.org',
    # Typo fixed: 'quering' -> 'querying'.
    description='Ontology based structural manipulation and querying',
    long_description=readme,
    long_description_content_type='text/markdown',
    packages=find_packages(include=['atomrdf', 'atomrdf.*']),
    zip_safe=False,
    download_url='https://github.com/pyscal/atomrdf',
    url='https://pyscal.org',
    install_requires=['numpy', 'ase', 'rdflib',
                      'pyyaml', 'graphviz', 'networkx',
                      'pyscal3', 'spglib', 'pandas',
                      'atomman', 'mp-api', 'aimsgb', 'pymatgen', 'mendeleev'],
    classifiers=[
        'Programming Language :: Python :: 3'
    ],
    include_package_data=True,
)
28 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyscal/atomRDF/b177c889afc5b1b31d043ec36681766e0dc37790/tests/__init__.py
--------------------------------------------------------------------------------
/tests/al_data/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pyscal/atomRDF/b177c889afc5b1b31d043ec36681766e0dc37790/tests/al_data/.gitkeep
--------------------------------------------------------------------------------
/tests/al_data/Al.cif:
--------------------------------------------------------------------------------
1 | # generated using pymatgen
2 | data_Al
3 | _symmetry_space_group_name_H-M 'P 1'
4 | _cell_length_a 4.03892969
5 | _cell_length_b 4.03892969
6 | _cell_length_c 4.03892969
7 | _cell_angle_alpha 90.00000000
8 | _cell_angle_beta 90.00000000
9 | _cell_angle_gamma 90.00000000
10 | _symmetry_Int_Tables_number 1
11 | _chemical_formula_structural Al
12 | _chemical_formula_sum Al4
13 | _cell_volume 65.88687052
14 | _cell_formula_units_Z 4
15 | loop_
16 | _symmetry_equiv_pos_site_id
17 | _symmetry_equiv_pos_as_xyz
18 | 1 'x, y, z'
19 | loop_
20 | _atom_site_type_symbol
21 | _atom_site_label
22 | _atom_site_symmetry_multiplicity
23 | _atom_site_fract_x
24 | _atom_site_fract_y
25 | _atom_site_fract_z
26 | _atom_site_occupancy
27 | Al Al0 1 0.00000000 0.00000000 0.00000000 1
28 | Al Al1 1 0.00000000 0.50000000 0.50000000 1
29 | Al Al2 1 0.50000000 0.00000000 0.50000000 1
30 | Al Al3 1 0.50000000 0.50000000 0.00000000 1
31 |
--------------------------------------------------------------------------------
/tests/al_data/Al.dump:
--------------------------------------------------------------------------------
1 | ITEM: TIMESTEP
2 | 0
3 | ITEM: NUMBER OF ATOMS
4 | 4
5 | ITEM: BOX BOUNDS
6 | 0.000000 4.038930
7 | 0.000000 4.038930
8 | 0.000000 4.038930
9 | ITEM: ATOMS id type x y z
10 | 1 1 0.000000 0.000000 0.000000
11 | 2 1 0.000000 2.019465 2.019465
12 | 3 1 2.019465 0.000000 2.019465
13 | 4 1 2.019465 2.019465 0.000000
14 |
--------------------------------------------------------------------------------
/tests/al_data/Al.json:
--------------------------------------------------------------------------------
1 | {"@module": "pymatgen.core.structure", "@class": "Structure", "charge": 0, "lattice": {"matrix": [[4.0389296931825, 0.0, 2.473131160368575e-16], [6.495094320058286e-16, 4.0389296931825, 2.473131160368575e-16], [0.0, 0.0, 4.0389296931825]], "pbc": [true, true, true], "a": 4.0389296931825, "b": 4.0389296931825, "c": 4.0389296931825, "alpha": 90.0, "beta": 90.0, "gamma": 90.0, "volume": 65.8868705236634}, "sites": [{"species": [{"element": "Al", "occu": 1}], "abc": [0.0, 0.0, 0.0], "xyz": [0.0, 0.0, 0.0], "label": "Al", "properties": {}}, {"species": [{"element": "Al", "occu": 1}], "abc": [0.0, 0.5, 0.5], "xyz": [3.247547160029143e-16, 2.01946484659125, 2.01946484659125], "label": "Al", "properties": {}}, {"species": [{"element": "Al", "occu": 1}], "abc": [0.5, 0.0, 0.5], "xyz": [2.01946484659125, 0.0, 2.01946484659125], "label": "Al", "properties": {}}, {"species": [{"element": "Al", "occu": 1}], "abc": [0.5, 0.5, 0.0], "xyz": [2.0194648465912506, 2.01946484659125, 2.473131160368575e-16], "label": "Al", "properties": {}}]}
--------------------------------------------------------------------------------
/tests/al_data/Al.poscar:
--------------------------------------------------------------------------------
1 | Al4
2 | 1.0
3 | 4.0389296931825003 0.0000000000000000 0.0000000000000002
4 | 0.0000000000000006 4.0389296931825003 0.0000000000000002
5 | 0.0000000000000000 0.0000000000000000 4.0389296931825003
6 | Al
7 | 4
8 | direct
9 | 0.0000000000000000 0.0000000000000000 0.0000000000000000 Al
10 | 0.0000000000000000 0.5000000000000000 0.5000000000000000 Al
11 | 0.5000000000000000 0.0000000000000000 0.5000000000000000 Al
12 | 0.5000000000000000 0.5000000000000000 0.0000000000000000 Al
13 |
--------------------------------------------------------------------------------
/tests/al_data/Al.prismatic:
--------------------------------------------------------------------------------
1 | Generated by pymatgen
2 | 4.0389296931825 4.0389296931825 4.0389296931825
3 | 13 0.0 0.0 0.0 1 0
4 | 13 3.247547160029143e-16 2.01946484659125 2.01946484659125 1 0
5 | 13 2.01946484659125 0.0 2.01946484659125 1 0
6 | 13 2.0194648465912506 2.01946484659125 2.473131160368575e-16 1 0
7 | -1
--------------------------------------------------------------------------------
/tests/al_data/Al_sym.cif:
--------------------------------------------------------------------------------
1 | # generated using pymatgen
2 | data_Al
3 | _symmetry_space_group_name_H-M Fm-3m
4 | _cell_length_a 4.03892969
5 | _cell_length_b 4.03892969
6 | _cell_length_c 4.03892969
7 | _cell_angle_alpha 90.00000000
8 | _cell_angle_beta 90.00000000
9 | _cell_angle_gamma 90.00000000
10 | _symmetry_Int_Tables_number 225
11 | _chemical_formula_structural Al
12 | _chemical_formula_sum Al4
13 | _cell_volume 65.88687052
14 | _cell_formula_units_Z 4
15 | loop_
16 | _symmetry_equiv_pos_site_id
17 | _symmetry_equiv_pos_as_xyz
18 | 1 'x, y, z'
19 | 2 '-x, -y, -z'
20 | 3 '-y, x, z'
21 | 4 'y, -x, -z'
22 | 5 '-x, -y, z'
23 | 6 'x, y, -z'
24 | 7 'y, -x, z'
25 | 8 '-y, x, -z'
26 | 9 'x, -y, -z'
27 | 10 '-x, y, z'
28 | 11 '-y, -x, -z'
29 | 12 'y, x, z'
30 | 13 '-x, y, -z'
31 | 14 'x, -y, z'
32 | 15 'y, x, -z'
33 | 16 '-y, -x, z'
34 | 17 'z, x, y'
35 | 18 '-z, -x, -y'
36 | 19 'z, -y, x'
37 | 20 '-z, y, -x'
38 | 21 'z, -x, -y'
39 | 22 '-z, x, y'
40 | 23 'z, y, -x'
41 | 24 '-z, -y, x'
42 | 25 '-z, x, -y'
43 | 26 'z, -x, y'
44 | 27 '-z, -y, -x'
45 | 28 'z, y, x'
46 | 29 '-z, -x, y'
47 | 30 'z, x, -y'
48 | 31 '-z, y, x'
49 | 32 'z, -y, -x'
50 | 33 'y, z, x'
51 | 34 '-y, -z, -x'
52 | 35 'x, z, -y'
53 | 36 '-x, -z, y'
54 | 37 '-y, z, -x'
55 | 38 'y, -z, x'
56 | 39 '-x, z, y'
57 | 40 'x, -z, -y'
58 | 41 '-y, -z, x'
59 | 42 'y, z, -x'
60 | 43 '-x, -z, -y'
61 | 44 'x, z, y'
62 | 45 'y, -z, -x'
63 | 46 '-y, z, x'
64 | 47 'x, -z, y'
65 | 48 '-x, z, -y'
66 | 49 'x+1/2, y+1/2, z'
67 | 50 '-x+1/2, -y+1/2, -z'
68 | 51 '-y+1/2, x+1/2, z'
69 | 52 'y+1/2, -x+1/2, -z'
70 | 53 '-x+1/2, -y+1/2, z'
71 | 54 'x+1/2, y+1/2, -z'
72 | 55 'y+1/2, -x+1/2, z'
73 | 56 '-y+1/2, x+1/2, -z'
74 | 57 'x+1/2, -y+1/2, -z'
75 | 58 '-x+1/2, y+1/2, z'
76 | 59 '-y+1/2, -x+1/2, -z'
77 | 60 'y+1/2, x+1/2, z'
78 | 61 '-x+1/2, y+1/2, -z'
79 | 62 'x+1/2, -y+1/2, z'
80 | 63 'y+1/2, x+1/2, -z'
81 | 64 '-y+1/2, -x+1/2, z'
82 | 65 'z+1/2, x+1/2, y'
83 | 66 '-z+1/2, -x+1/2, -y'
84 | 67 'z+1/2, -y+1/2, x'
85 | 68 '-z+1/2, y+1/2, -x'
86 | 69 'z+1/2, -x+1/2, -y'
87 | 70 '-z+1/2, x+1/2, y'
88 | 71 'z+1/2, y+1/2, -x'
89 | 72 '-z+1/2, -y+1/2, x'
90 | 73 '-z+1/2, x+1/2, -y'
91 | 74 'z+1/2, -x+1/2, y'
92 | 75 '-z+1/2, -y+1/2, -x'
93 | 76 'z+1/2, y+1/2, x'
94 | 77 '-z+1/2, -x+1/2, y'
95 | 78 'z+1/2, x+1/2, -y'
96 | 79 '-z+1/2, y+1/2, x'
97 | 80 'z+1/2, -y+1/2, -x'
98 | 81 'y+1/2, z+1/2, x'
99 | 82 '-y+1/2, -z+1/2, -x'
100 | 83 'x+1/2, z+1/2, -y'
101 | 84 '-x+1/2, -z+1/2, y'
102 | 85 '-y+1/2, z+1/2, -x'
103 | 86 'y+1/2, -z+1/2, x'
104 | 87 '-x+1/2, z+1/2, y'
105 | 88 'x+1/2, -z+1/2, -y'
106 | 89 '-y+1/2, -z+1/2, x'
107 | 90 'y+1/2, z+1/2, -x'
108 | 91 '-x+1/2, -z+1/2, -y'
109 | 92 'x+1/2, z+1/2, y'
110 | 93 'y+1/2, -z+1/2, -x'
111 | 94 '-y+1/2, z+1/2, x'
112 | 95 'x+1/2, -z+1/2, y'
113 | 96 '-x+1/2, z+1/2, -y'
114 | 97 'x+1/2, y, z+1/2'
115 | 98 '-x+1/2, -y, -z+1/2'
116 | 99 '-y+1/2, x, z+1/2'
117 | 100 'y+1/2, -x, -z+1/2'
118 | 101 '-x+1/2, -y, z+1/2'
119 | 102 'x+1/2, y, -z+1/2'
120 | 103 'y+1/2, -x, z+1/2'
121 | 104 '-y+1/2, x, -z+1/2'
122 | 105 'x+1/2, -y, -z+1/2'
123 | 106 '-x+1/2, y, z+1/2'
124 | 107 '-y+1/2, -x, -z+1/2'
125 | 108 'y+1/2, x, z+1/2'
126 | 109 '-x+1/2, y, -z+1/2'
127 | 110 'x+1/2, -y, z+1/2'
128 | 111 'y+1/2, x, -z+1/2'
129 | 112 '-y+1/2, -x, z+1/2'
130 | 113 'z+1/2, x, y+1/2'
131 | 114 '-z+1/2, -x, -y+1/2'
132 | 115 'z+1/2, -y, x+1/2'
133 | 116 '-z+1/2, y, -x+1/2'
134 | 117 'z+1/2, -x, -y+1/2'
135 | 118 '-z+1/2, x, y+1/2'
136 | 119 'z+1/2, y, -x+1/2'
137 | 120 '-z+1/2, -y, x+1/2'
138 | 121 '-z+1/2, x, -y+1/2'
139 | 122 'z+1/2, -x, y+1/2'
140 | 123 '-z+1/2, -y, -x+1/2'
141 | 124 'z+1/2, y, x+1/2'
142 | 125 '-z+1/2, -x, y+1/2'
143 | 126 'z+1/2, x, -y+1/2'
144 | 127 '-z+1/2, y, x+1/2'
145 | 128 'z+1/2, -y, -x+1/2'
146 | 129 'y+1/2, z, x+1/2'
147 | 130 '-y+1/2, -z, -x+1/2'
148 | 131 'x+1/2, z, -y+1/2'
149 | 132 '-x+1/2, -z, y+1/2'
150 | 133 '-y+1/2, z, -x+1/2'
151 | 134 'y+1/2, -z, x+1/2'
152 | 135 '-x+1/2, z, y+1/2'
153 | 136 'x+1/2, -z, -y+1/2'
154 | 137 '-y+1/2, -z, x+1/2'
155 | 138 'y+1/2, z, -x+1/2'
156 | 139 '-x+1/2, -z, -y+1/2'
157 | 140 'x+1/2, z, y+1/2'
158 | 141 'y+1/2, -z, -x+1/2'
159 | 142 '-y+1/2, z, x+1/2'
160 | 143 'x+1/2, -z, y+1/2'
161 | 144 '-x+1/2, z, -y+1/2'
162 | 145 'x, y+1/2, z+1/2'
163 | 146 '-x, -y+1/2, -z+1/2'
164 | 147 '-y, x+1/2, z+1/2'
165 | 148 'y, -x+1/2, -z+1/2'
166 | 149 '-x, -y+1/2, z+1/2'
167 | 150 'x, y+1/2, -z+1/2'
168 | 151 'y, -x+1/2, z+1/2'
169 | 152 '-y, x+1/2, -z+1/2'
170 | 153 'x, -y+1/2, -z+1/2'
171 | 154 '-x, y+1/2, z+1/2'
172 | 155 '-y, -x+1/2, -z+1/2'
173 | 156 'y, x+1/2, z+1/2'
174 | 157 '-x, y+1/2, -z+1/2'
175 | 158 'x, -y+1/2, z+1/2'
176 | 159 'y, x+1/2, -z+1/2'
177 | 160 '-y, -x+1/2, z+1/2'
178 | 161 'z, x+1/2, y+1/2'
179 | 162 '-z, -x+1/2, -y+1/2'
180 | 163 'z, -y+1/2, x+1/2'
181 | 164 '-z, y+1/2, -x+1/2'
182 | 165 'z, -x+1/2, -y+1/2'
183 | 166 '-z, x+1/2, y+1/2'
184 | 167 'z, y+1/2, -x+1/2'
185 | 168 '-z, -y+1/2, x+1/2'
186 | 169 '-z, x+1/2, -y+1/2'
187 | 170 'z, -x+1/2, y+1/2'
188 | 171 '-z, -y+1/2, -x+1/2'
189 | 172 'z, y+1/2, x+1/2'
190 | 173 '-z, -x+1/2, y+1/2'
191 | 174 'z, x+1/2, -y+1/2'
192 | 175 '-z, y+1/2, x+1/2'
193 | 176 'z, -y+1/2, -x+1/2'
194 | 177 'y, z+1/2, x+1/2'
195 | 178 '-y, -z+1/2, -x+1/2'
196 | 179 'x, z+1/2, -y+1/2'
197 | 180 '-x, -z+1/2, y+1/2'
198 | 181 '-y, z+1/2, -x+1/2'
199 | 182 'y, -z+1/2, x+1/2'
200 | 183 '-x, z+1/2, y+1/2'
201 | 184 'x, -z+1/2, -y+1/2'
202 | 185 '-y, -z+1/2, x+1/2'
203 | 186 'y, z+1/2, -x+1/2'
204 | 187 '-x, -z+1/2, -y+1/2'
205 | 188 'x, z+1/2, y+1/2'
206 | 189 'y, -z+1/2, -x+1/2'
207 | 190 '-y, z+1/2, x+1/2'
208 | 191 'x, -z+1/2, y+1/2'
209 | 192 '-x, z+1/2, -y+1/2'
210 | loop_
211 | _atom_site_type_symbol
212 | _atom_site_label
213 | _atom_site_symmetry_multiplicity
214 | _atom_site_fract_x
215 | _atom_site_fract_y
216 | _atom_site_fract_z
217 | _atom_site_occupancy
218 | Al Al0 4 0.00000000 0.00000000 0.00000000 1
219 |
--------------------------------------------------------------------------------
/tests/conf.dump:
--------------------------------------------------------------------------------
1 | ITEM: TIMESTEP
2 | 0
3 | ITEM: NUMBER OF ATOMS
4 | 2
5 | ITEM: BOX BOUNDS pp pp pp
6 | 0.0 2.87
7 | 0.0 2.87
8 | 0.0 2.87
9 | ITEM: ATOMS x y z type id
10 | 0.0 0.0 0.0 1 1
11 | 1.435 1.435 1.435 1 2
12 |
--------------------------------------------------------------------------------
/tests/qe_data/pw.si.scf.in:
--------------------------------------------------------------------------------
1 | ! sample:36c07edb-6e91-4031-9bed-b48786bb3054
2 |
3 | &CONTROL
4 | calculation = 'vc-relax',
5 | prefix = 'silicon',
6 | outdir = './',
7 | pseudo_dir = '/mnt/c/Users/menon/Documents/repos/projects-iuc17/atomRDF/examples/workflow_examples/qe_run',
8 | /
9 |
10 | &SYSTEM
11 | ecutwfc = 12,
12 | ibrav = 0,
13 | nat = 8,
14 | ntyp = 1,
15 | /
16 |
17 | &ELECTRONS
18 | /
19 |
20 | &IONS
21 | /
22 |
23 | &CELL
24 | /
25 |
26 | K_POINTS automatic
27 |
28 | 4 4 4 1 1 1
29 |
30 | CELL_PARAMETERS angstrom
31 |
32 | 5.43 0.0 0.0
33 | 0.0 5.43 0.0
34 | 0.0 0.0 5.43
35 |
36 | ATOMIC_SPECIES
37 |
38 | Si 28.085 Si.pbe-n-rrkjus_psl.1.0.0.UPF
39 |
40 | ATOMIC_POSITIONS crystal
41 |
42 | Si 0.0 0.0 0.0
43 | Si 0.25 0.25 0.25
44 | Si 0.5 0.5 0.0
45 | Si 0.75 0.75 0.25
46 | Si 0.5 0.0 0.5
47 | Si 0.0 0.5 0.5
48 | Si 0.75 0.25 0.75
49 | Si 0.25 0.75 0.75
50 |
51 |
--------------------------------------------------------------------------------
/tests/test_encoder_and_write.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from atomrdf.encoder import NumpyArrayEncoder
3 | from atomrdf.json_io import write_file
4 | import json
5 | import numpy as np
6 | import os
7 |
def test_encoder():
    """NumpyArrayEncoder must serialize numpy arrays/scalars to plain JSON.

    The old assertion parsed the raw JSON string with ``split(',')``, which
    breaks if key order or separator formatting changes; a decode round-trip
    checks the same thing robustly.
    """
    arr = np.linspace(0, 10, 100)
    data = {"array": arr}
    dumpdata = json.dumps(data, cls=NumpyArrayEncoder)
    decoded = json.loads(dumpdata)
    assert np.allclose(decoded["array"], arr)

    # default() must map numpy scalar/array types onto native Python types
    enc = NumpyArrayEncoder()
    assert type(enc.default(np.float64(23))) == float
    assert type(enc.default(np.int64(23))) == int
    assert type(enc.default(np.array([23]))) == list
18 |
def test_writer(tmp_path):
    """write_file should create ``<name>.json``.

    The ``tmp_path`` fixture was accepted but unused, so the test wrote
    ``test_json.json`` into the working directory and never cleaned it up;
    writing inside ``tmp_path`` keeps the test hermetic.
    """
    arr = np.linspace(0, 10, 100)
    data = {"array": arr}
    outfile = os.path.join(str(tmp_path), "test_json")
    write_file(outfile, data)
    # write_file appends the .json suffix (see the original assertion)
    assert os.path.exists(outfile + ".json")
24 |
25 |
--------------------------------------------------------------------------------
/tests/test_graph.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import os
3 | from atomrdf import KnowledgeGraph, System
4 | from atomrdf.namespace import CMSO, PLDO, ASMO, CDCO
5 | import shutil
6 |
def test_structuregraph():
    # Build a graph with one structure and make sure it can be visualised
    # twice, serialized to turtle, reloaded, and extended with a defect.
    kg = KnowledgeGraph()
    structure = System.create.element.Fe(graph=kg)

    assert kg.visualise() is not None
    assert kg.visualise() is not None

    kg.write("temp.ttl", format="turtle")
    kg = KnowledgeGraph(graph_file="temp.ttl")

    structure = System.create.element.Fe(graph=kg)
    structure.add_vacancy(0.5, number=1)

    # a fresh graph with a single structure holds exactly one sample
    kg = KnowledgeGraph()
    structure = System.create.element.Fe(graph=kg)
    assert kg.n_samples == 1
28 |
29 |
def test_logger():
    # Start from a clean slate: drop any log file left by a previous run.
    logfile = 'tests/atomrdf.log'
    if os.path.exists(logfile):
        os.remove(logfile)
    kg = KnowledgeGraph(enable_log=True)
    kg.log('testing-logger')
    # with logging enabled, `log` is bound as an instance method
    assert type(kg.log).__name__ == "method"
36 |
def test_add_structure():
    # A structure built without a graph can be attached to one afterwards.
    kg = KnowledgeGraph()
    structure = System.create.element.Fe()
    kg.add_structure(structure)
    assert structure.sample in kg.sample_ids
42 |
def test_add_cross_triple():
    # A cross-ontology triple (CDCO predicate, PLDO object) must pass the
    # URIRef domain check.
    kg = KnowledgeGraph(enable_log=True)
    structure = System.create.element.Fe(graph=kg)
    triple = (structure.material, CDCO.hasCrystallographicDefect, PLDO.AntiphaseBoundary)
    status, _ = kg._check_domain_if_uriref(triple)
    assert status == True
49 |
50 |
def test_add_quantity():
    # Attach a calculated property to a sample and read it back.
    kg = KnowledgeGraph(enable_log=True)
    structure = System.create.element.Fe(graph=kg)
    kg.add_calculated_quantity(structure.sample,
                               'Energy',
                               str(23),
                               unit='eV')
    prop = kg.value(structure.sample, ASMO.hasCalculatedProperty)
    assert kg.value(prop, ASMO.hasValue).toPython() == '23'

    # the human-readable report should mention the space group and the value
    report = kg.inspect_sample(structure.sample)
    assert 'Im-3m' in report
    assert '23' in report
65 |
def test_archive():
    # Round-trip two samples through archive() / unarchive().
    kg = KnowledgeGraph(enable_log=True)
    System.create.element.Fe(graph=kg)
    System.create.element.Cu(graph=kg)

    # remove leftovers from earlier runs before archiving
    if os.path.exists('test_archive.tar.gz'):
        os.remove('test_archive.tar.gz')
    if os.path.exists('test_archive'):
        shutil.rmtree('test_archive')

    kg.archive('test_archive')
    assert os.path.exists('test_archive.tar.gz')

    restored = KnowledgeGraph.unarchive('test_archive.tar.gz')
    assert restored.n_samples == 2

    # clean up the artifacts this test created
    os.remove('test_archive.tar.gz')
    shutil.rmtree('test_archive')
81 |
def test_sparql_query():
    """Raw SPARQL and the query_sample shorthand must agree with the data.

    NOTE(review): the ``PREFIX cmso:`` declaration had lost its IRI (the
    angle-bracketed URI was evidently stripped by markup processing), which
    makes the query unparseable; restored the CMSO namespace and added an
    explicit xsd prefix used by the FILTER.
    """
    kg = KnowledgeGraph()
    struct_Fe = System.create.element.Fe(graph=kg)
    struct_Si = System.create.element.Si(graph=kg)
    struct_l12 = System.create.lattice.l12(element=['Al', 'Ni'],
                    lattice_constant=3.57, graph=kg)
    query = """
    PREFIX cmso: <http://purls.helmholtz-metadaten.de/cmso/>
    PREFIX xsd: <http://www.w3.org/2001/XMLSchema#>
    SELECT DISTINCT ?symbol
    WHERE {
        ?sample cmso:hasNumberOfAtoms ?number .
        ?sample cmso:hasMaterial ?material .
        ?material cmso:hasStructure ?structure .
        ?structure cmso:hasSpaceGroupSymbol ?symbol .
    FILTER (?number="4"^^xsd:integer)
    }"""
    res = kg.query(query)
    assert res.symbol.values[0].toPython() == 'Pm-3m'

    res = kg.query_sample(kg.ontology.terms.cmso.hasAltName@kg.terms.cmso.Structure=='bcc',
        enforce_types=True)
    assert res.Structure_hasAltNamevalue.values[0].toPython() == 'bcc'
104 |
def test_extract_sample():
    # Extract a single sample as its own graph, as a System, and as files.
    kg = KnowledgeGraph()
    struct_Fe = System.create.element.Fe(graph=kg)

    sample_graph, n_atoms = kg.get_sample(struct_Fe.sample, no_atoms=True)
    assert n_atoms == 2
    assert sample_graph.sample_ids[0] == struct_Fe.sample

    system = kg.get_system_from_sample(struct_Fe.sample)
    assert len(system.atoms.positions) == 2
    assert system.graph is not None

    # default (poscar) and explicit cif export both produce the file
    kg.to_file(struct_Fe.sample, filename='POSCAR')
    assert os.path.exists('POSCAR')
    os.remove('POSCAR')

    kg.to_file(struct_Fe.sample, filename='POSCAR', format='cif')
    assert os.path.exists('POSCAR')
    os.remove('POSCAR')
123 |
124 | #def test_add_domain_ontoterm():
125 | # from atomrdf.namespace import CMSO, PLDO
126 | # s = KnowledgeGraph()
127 | # sys = System.create.element.Fe(graph=s)
128 | # status, _ = s._check_domain_if_ontoterm((CMSO.Material, CMSO.hasDefect, PLDO.AntiphaseBoundary))
129 | # assert status == True
130 |
def test_purge():
    """purge(force=True) must empty both in-memory and db-backed stores.

    Fixes: a no-op f-string (``f'testr.db'`` had no placeholders) and the
    on-disk store file being left behind after the test.
    """
    kg = KnowledgeGraph()
    System.create.element.Fe(graph=kg)
    kg.purge(force=True)
    assert kg.n_samples == 0

    kg = KnowledgeGraph(store='db', store_file='testr.db')
    System.create.element.Fe(graph=kg)
    kg.purge(force=True)
    assert kg.n_samples == 0
    # remove the on-disk store so repeated runs start fresh
    if os.path.exists('testr.db'):
        os.remove('testr.db')
141 |
--------------------------------------------------------------------------------
/tests/test_network.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import os
3 | from atomrdf import KnowledgeGraph, System
4 | from atomrdf.namespace import CMSO, PLDO
5 | import shutil
6 |
7 |
def test_network_draw():
    # The ontology attached to a populated graph must be drawable.
    kg = KnowledgeGraph()
    System.create.element.Fe(graph=kg)
    assert kg.ontology.draw() is not None
--------------------------------------------------------------------------------
/tests/test_qe.py:
--------------------------------------------------------------------------------
1 | from atomrdf import KnowledgeGraph, System
2 | from ase.io import read
3 | import os
4 |
def test_qe_write():
    """Write a QE input from a structure and read it back with ASE.

    Fixes: the generated ``tests/pw.si.scf.in`` was never removed, leaving a
    test artifact in the repository tree.
    """
    kg = KnowledgeGraph()
    s = System.create.element.Fe(graph=kg)
    outfile = 'tests/pw.si.scf.in'
    s.write.file(outfile,
        format='quantum-espresso',
        copy_from='tests/qe_data/pw.si.scf.in',
        pseudo_files=['tests/qe_data/Si.pbe-n-rrkjus_psl.1.0.0.UPF'])
    assert os.path.exists(outfile)
    struct = read(outfile, format='espresso-in')
    # the Fe structure has two atoms in its cell
    assert len(struct) == 2
    # clean up the generated input file
    os.remove(outfile)
15 |
def test_qe_read():
    # Ingest a QE (input, output) pair as a workflow; both the initial and
    # final structures become samples.
    graph = KnowledgeGraph()
    input_file = 'tests/qe_data/pw.si.scf.in'
    output_file = 'tests/qe_data/pw.si.scf.out'
    graph.add_workflow((input_file, output_file), workflow_environment='qe')
    assert graph.n_samples == 2
--------------------------------------------------------------------------------
/tests/test_rotate.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import os
3 | from atomrdf import KnowledgeGraph, System
4 | import numpy as np
5 |
def test_rotate():
    # Rotate an Al cell into the [110]/[-110]/[001] frame and check the
    # resulting box diagonal and atom count.
    graph = KnowledgeGraph()
    system = System.create.element.Al(repetitions=(1, 1, 1), graph=graph)
    rotation_vectors = [[1, 1, 0],
                        [-1, 1, 0],
                        [0, 0, 1]]
    rotated = system.rotate(rotation_vectors)
    expected_diagonal = (5.728, 5.728, 4.050)
    for axis, expected in enumerate(expected_diagonal):
        assert abs(rotated.box[axis][axis] - expected) < 1e-3
    assert rotated.natoms == 8
17 |
18 |
--------------------------------------------------------------------------------
/tests/test_store.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import os
3 | from atomrdf import KnowledgeGraph, System
4 | from atomrdf.namespace import CMSO, PLDO
5 | import shutil
6 |
7 |
def test_sqlalchemy():
    """A SQLAlchemy-backed store must still register created samples.

    Fixes: the test had no assertion at all (it only exercised construction)
    and leaked the on-disk store file ``aa``.
    """
    kg = KnowledgeGraph(store='SQLAlchemy', store_file='aa')
    structure = System.create.element.Fe(graph=kg)
    assert structure.sample is not None
    # remove the store file created by this test
    if os.path.exists('aa'):
        os.remove('aa')
--------------------------------------------------------------------------------
/tests/test_structure.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import os
3 | import numpy as np
4 | from atomrdf import KnowledgeGraph, System
5 | from atomrdf.namespace import CMSO, PLDO
6 | import shutil
7 |
def test_custom():
    # Build a bcc Fe cell from explicit positions/species/box and verify the
    # atom count recorded in the graph.
    kg = KnowledgeGraph()
    positions = [[0.0, 0.0, 0.0],
                 [1.435, 1.435, 1.435]]
    species = [1, 1]
    box = [[2.87, 0.0, 0.0],
           [0.0, 2.87, 0.0],
           [0.0, 0.0, 2.87]]
    struct = System.create.lattice.custom(positions,
                                          species,
                                          box,
                                          lattice_constant=2.87,
                                          element='Fe',
                                          graph=kg)
    assert kg.value(struct.sample, CMSO.hasNumberOfAtoms).toPython() == 2
20 |
def test_dislocation():
    # Screw dislocation in Cu, constructed both as a monopole and as a
    # periodic array; both builds should give the same atom count.
    slip_direction = np.array([1, 0, -1])
    slip_plane = np.array([1, 1, 1])
    slip_system = [slip_direction, slip_plane]
    dislocation_line = np.array([1, 0, -1])
    elastic_constants = {'C11': 169, 'C12': 122, 'C44': 75.4}
    kg = KnowledgeGraph()

    for disl_type in ('monopole', 'periodicarray'):
        sys = System.create.defect.dislocation(slip_system,
                                               dislocation_line,
                                               elastic_constants,
                                               burgers_vector=0.5,
                                               element='Cu',
                                               dislocation_type=disl_type,
                                               graph=kg,
                                               )
        assert sys.natoms == 96

    # the defect should be queryable as a screw dislocation
    res = kg.query_sample(kg.ontology.terms.ldo.ScrewDislocation)
    assert res is not None
51 |
52 |
def test_read_in():
    # Reading a LAMMPS dump with lattice metadata registers one sample.
    kg = KnowledgeGraph()
    System.read.file('tests/conf.dump', graph=kg, lattice='bcc', lattice_constant=2.861)
    assert kg.n_samples == 1
57 |
def test_delete():
    # Deleting an atom via the explicit API and via __delitem__ must both
    # shrink the system and the sample in the graph.
    for use_delitem in (False, True):
        kg = KnowledgeGraph()
        sys = System.create.element.Fe(graph=kg)
        if use_delitem:
            del sys[0]
        else:
            sys.delete(indices=[0])
        assert sys.natoms == 1
        _, remaining = kg.get_sample(sys.sample, no_atoms=True)
        assert remaining == 1
72 |
def test_substitute():
    # Substituting one Fe atom with Li yields a two-element species record.
    kg = KnowledgeGraph()
    sys = System.create.element.Fe(graph=kg)
    sys.substitute_atoms('Li', indices=[0])
    species = kg.value(sys.sample, CMSO.hasSpecies)
    elements = [triple[2] for triple in kg.triples((species, CMSO.hasElement, None))]
    assert len(elements) == 2
80 |
def test_interstitials():
    # Adding Li and Au interstitials (tetrahedral and octahedral voids)
    # should leave three elements in the species record either way.
    kg = KnowledgeGraph()
    for void_type in ('tetrahedral', 'octahedral'):
        sys = System.create.element.Fe(graph=kg)
        sys = sys.add_interstitial_impurities(['Li', 'Au'], void_type=void_type)
        species = kg.value(sys.sample, CMSO.hasSpecies)
        elements = [triple[2] for triple in kg.triples((species, CMSO.hasElement, None))]
        assert len(elements) == 3
94 |
def test_gb():
    # Create a sigma-5 grain boundary, repeat it (a new sample), and check
    # the GrainBoundary query at each stage.
    kg = KnowledgeGraph()
    gb = System.create.defect.grain_boundary(axis=[0, 0, 1],
                                             sigma=5,
                                             gb_plane=[3, -1, 0],
                                             element='Fe',
                                             graph=kg)
    res = kg.query_sample(kg.ontology.terms.pldo.GrainBoundary)
    assert len(res.AtomicScaleSample.values) == 1

    # repeating creates a second sample that inherits the defect
    repeated = gb.repeat((2, 2, 2))
    res = kg.query_sample(kg.ontology.terms.pldo.GrainBoundary)
    assert len(res.AtomicScaleSample.values) == 2

    # a graph extracted for one sample sees exactly one grain boundary
    subgraph = kg.get_sample(repeated.sample)
    res = subgraph.query_sample(subgraph.ontology.terms.pldo.GrainBoundary)
    assert len(res.AtomicScaleSample.values) == 1
112 |
def test_sf():
    kg = KnowledgeGraph()

    # hexagonal (Mg) stacking fault specified in Miller-Bravais indices
    basal_plane = np.array([0, 0, 0, 1])
    slip_direction = np.array([1, 0, -1, 0])
    sf = System.create.defect.stacking_fault(basal_plane,
                                             1.0,
                                             slip_direction_a=slip_direction,
                                             element="Mg",
                                             repetitions=(2, 2, 2),
                                             vacuum=10.0,
                                             graph=kg)
    assert sf.natoms == 96

    # fcc (Cu) fault; also request the underlying atomman objects
    fcc_plane = np.array([1, 1, 1])
    sf, sfa, ssa, fsa = System.create.defect.stacking_fault(fcc_plane,
                                                            0.5,
                                                            element="Cu",
                                                            repetitions=(1, 1, 1),
                                                            vacuum=0.0,
                                                            graph=kg,
                                                            return_atomman_dislocation=True)
    assert sfa.system.natoms == 48
    # half the atoms sit above the fault plane at relative position 0.5
    assert sfa.abovefault.sum() == 24
    assert abs(sfa.faultpos_rel - 0.5) < 1e-6
    assert abs(sfa.faultpos_cart - 12.505406830647294) < 1e-6
--------------------------------------------------------------------------------
/tests/test_structuregraph.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | from atomrdf import KnowledgeGraph, System
3 |
def test_structuregraph():
    # Every creation/read path should yield a structure with a sample id.
    kg = KnowledgeGraph()

    sys = System.create.element.Fe(graph=kg)
    assert sys.sample is not None

    sys = System.create.lattice.bcc(element="Fe", graph=kg)
    assert sys.sample is not None

    sys = System.read.file("tests/al_data/Al.poscar", format="poscar", graph=kg)
    assert sys.sample is not None

    # grain boundaries through both supported backends
    for backend in ('inbuilt', 'aimsgb'):
        sys = System.create.defect.grain_boundary(axis=[0, 0, 1],
                                                  sigma=5,
                                                  gb_plane=[3, -1, 0],
                                                  element='Fe',
                                                  graph=kg,
                                                  backend=backend)
        assert sys.sample is not None
32 |
33 |
34 |
35 |
36 |
--------------------------------------------------------------------------------
/tests/test_visualise.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import os
3 | import numpy as np
4 | from atomrdf import KnowledgeGraph, System
5 | from atomrdf.namespace import CMSO, PLDO
6 | import shutil
7 | import atomrdf.visualize as viz
8 |
9 |
def test_switch_box():
    # "box" is mapped onto the "rectangle" node shape
    shape = viz._switch_box("box")
    assert shape == "rectangle"
12 |
def test_fix_id():
    # only Literal nodes get their identifier rewritten
    unchanged = viz._fix_id('hello', 'random')
    mangled = viz._fix_id('hello', 'Literal')
    assert unchanged == 'hello'
    assert mangled != 'hello'
16 |
def test_visualise():
    # Visualisation must work with a custom style dictionary...
    kg = KnowledgeGraph()
    System.create.element.Cr(graph=kg)
    style = {"edgecolor": "#D9D9D9",
             "BNode": {"color": "#263238"}}
    assert kg.visualise(styledict=style) is not None

    # ...and with workflow view / hidden types on a fresh graph.
    kg = KnowledgeGraph()
    System.create.element.Cr(graph=kg)
    assert kg.visualise(workflow_view=True, hide_types=True) is not None
30 |
--------------------------------------------------------------------------------
/tests/test_workflow.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import os
3 | import numpy as np
4 | from atomrdf import KnowledgeGraph, System, Workflow
5 | from atomrdf.namespace import CMSO, PLDO
6 | import shutil
7 |
def test_wf_creation():
    # Placeholder: only checks a graph can be constructed. Enabling a pyiron
    # workflow needs a live project object and is exercised elsewhere.
    kg = KnowledgeGraph()
    assert kg is not None
11 |
12 |
--------------------------------------------------------------------------------