├── .github └── workflows │ ├── ci.yml │ └── joss.yml ├── .gitignore ├── .readthedocs.yaml ├── .zenodo.json ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── assets ├── benchmarks.png ├── favicon.ico ├── logo.ipynb └── logo.png ├── docs ├── .nojekyll └── source │ ├── _static │ ├── custom.css │ └── index │ │ ├── api_reference.svg │ │ ├── contribution_guidelines.svg │ │ ├── getting_started.svg │ │ └── user_guide.svg │ ├── _templates │ └── autosummary │ │ ├── base.rst │ │ ├── class.rst │ │ └── module.rst │ ├── api.rst │ ├── conf.py │ ├── getting_started.rst │ ├── index.rst │ ├── notebooks │ ├── getting_started │ │ ├── installation.ipynb │ │ └── quick_start.ipynb │ └── user_guide │ │ └── gcme_polyelectrolyte_edlc.ipynb │ └── user_guide.rst ├── environment.yml ├── lib ├── lammps-fix-imagecharges │ ├── README.md │ ├── fix_imagecharges.cpp │ └── fix_imagecharges.h └── openmm-ic-plugin │ ├── CMakeLists.txt │ ├── README.md │ ├── openmmapi │ ├── include │ │ ├── ICDrudeLangevinIntegrator.h │ │ ├── ICKernels.h │ │ ├── ICLangevinIntegrator.h │ │ ├── OpenMMIC.h │ │ └── internal │ │ │ └── windowsExportIC.h │ └── src │ │ ├── ICDrudeLangevinIntegrator.cpp │ │ └── ICLangevinIntegrator.cpp │ ├── platforms │ └── cuda │ │ ├── CMakeLists.txt │ │ ├── EncodeCUDAFiles.cmake │ │ ├── include │ │ ├── CudaICKernelFactory.h │ │ └── CudaICKernels.h │ │ └── src │ │ ├── CudaICKernelFactory.cpp │ │ ├── CudaICKernelSources.cpp.in │ │ ├── CudaICKernelSources.h.in │ │ ├── CudaICKernels.cpp │ │ └── kernels │ │ ├── ICDrudeLangevin.cu │ │ ├── ICLangevin.cu │ │ ├── drudePairForce.cu │ │ ├── drudeParticleForce.cu │ │ └── vectorOps.cu │ ├── python │ ├── CMakeLists.txt │ ├── icplugin.i │ └── setup.py │ └── serialization │ ├── include │ ├── ICDrudeLangevinIntegratorProxy.h │ └── ICLangevinIntegratorProxy.h │ └── src │ ├── ICDrudeLangevinIntegratorProxy.cpp │ ├── ICLangevinIntegratorProxy.cpp │ └── ICSerializationProxyRegistration.cpp ├── paper ├── paper.bib └── paper.md ├── pyproject.toml ├── pytest.ini ├── recipe └── meta.yaml ├── requirements.txt ├── requirements_minimal.txt ├── src └── mdcraft │ ├── __init__.py │ ├── algorithm │ ├── __init__.py │ ├── accelerated.py │ ├── correlation.py │ ├── molecule.py │ ├── topology.py │ ├── unit.py │ └── utility.py │ ├── analysis │ ├── __init__.py │ ├── base.py │ ├── electrostatics.py │ ├── polymer.py │ ├── profile.py │ ├── reader.py │ ├── structure.py │ ├── thermodynamics.py │ └── transport.py │ ├── fit │ ├── __init__.py │ ├── distribution.py │ ├── exponential.py │ ├── fourier.py │ ├── gaussian.py │ ├── polynomial.py │ └── power.py │ ├── lammps │ ├── __init__.py │ └── topology.py │ ├── openmm │ ├── __init__.py │ ├── bond.py │ ├── file.py │ ├── pair.py │ ├── reporter.py │ ├── system.py │ ├── topology.py │ ├── unit.py │ └── utility.py │ └── plot │ ├── __init__.py │ ├── axis.py │ ├── color.py │ └── rcparam.py └── tests ├── test_algorithm_correlation.py ├── test_algorithm_molecule.py ├── test_algorithm_topology.py ├── test_algorithm_unit.py ├── test_algorithm_utility.py ├── test_analysis_electrostatics.py ├── test_analysis_polymer.py ├── test_analysis_profile.py ├── test_analysis_structure.py ├── test_analysis_transport.py ├── test_openmm_file_reporter.py ├── test_openmm_topology.py └── test_openmm_unit.py /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: continuous integration 2 | on: 3 | push: 4 | branches: 5 | - main 6 | jobs: 7 | build: 8 | name: continuous-integration-python-${{ matrix.python-version }} 
9 | runs-on: ubuntu-latest 10 | defaults: 11 | run: 12 | shell: bash -el {0} 13 | strategy: 14 | matrix: 15 | python-version: ["3.9", "3.10", "3.11", "3.12"] 16 | timeout-minutes: 60 17 | steps: 18 | - name: checkout 19 | uses: actions/checkout@v4 20 | - name: setup-miniconda 21 | uses: conda-incubator/setup-miniconda@v3 22 | with: 23 | auto-update-conda: true 24 | channels: conda-forge 25 | python-version: ${{ matrix.python-version }} 26 | - name: install-dependencies 27 | run: | 28 | conda activate test 29 | conda env update --name test --file environment.yml --verbose 30 | - name: lint 31 | run: ruff check --target-version=py39 . 32 | continue-on-error: true 33 | - name: test 34 | run: pytest -------------------------------------------------------------------------------- /.github/workflows/joss.yml: -------------------------------------------------------------------------------- 1 | name: Journal of Open Source Software PDF generator 2 | on: 3 | push: 4 | paths: 5 | - .github/workflows/joss.yml 6 | - paper/** 7 | jobs: 8 | paper: 9 | name: joss-pdf-generator 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: checkout 13 | uses: actions/checkout@v4 14 | - name: build-draft-pdf 15 | uses: openjournals/openjournals-draft-action@master 16 | with: 17 | journal: joss 18 | paper-path: paper/paper.md 19 | - name: upload 20 | uses: actions/upload-artifact@v4 21 | with: 22 | name: paper 23 | path: paper/paper.pdf -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/build/ 2 | **/__pycache__/ 3 | **/.pytest_cache/ 4 | .ruff_cache/ 5 | .vscode/ 6 | .coverage 7 | dist/ 8 | docs/build/ 9 | docs/jupyter_execute/ 10 | docs/source/api/ 11 | lib/**/CMakeFiles 12 | lib/**/CMakeCache.txt 13 | src/mdcraft.egg-info 14 | tests/data/ -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | # Set the OS, Python version and other tools you might need 9 | build: 10 | os: ubuntu-22.04 11 | tools: 12 | python: "mambaforge-latest" 13 | 14 | # Build documentation in the "docs/" directory with Sphinx 15 | sphinx: 16 | configuration: docs/source/conf.py 17 | 18 | # Optional but recommended, declare the Python requirements required 19 | # to build your documentation 20 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 21 | conda: 22 | environment: environment.yml 23 | python: 24 | install: 25 | - method: pip 26 | path: . 
-------------------------------------------------------------------------------- /.zenodo.json: -------------------------------------------------------------------------------- 1 | { 2 | "upload_type": "software", 3 | "title": "MDCraft: A Python assistant for performing and analyzing molecular dynamics simulations of soft matter systems", 4 | "creators": [{ 5 | "name": "Ye, Benjamin B.", 6 | "affiliation": "Division of Chemistry and Chemical Engineering, California Institute of Technology, Pasadena, California 91125, United States", 7 | "orcid": "0000-0003-0253-6311" 8 | }, { 9 | "name": "Walker, Pierre J.", 10 | "affiliation": "Division of Chemistry and Chemical Engineering, California Institute of Technology, Pasadena, California 91125, United States", 11 | "orcid": "0000-0001-8628-6561" 12 | }, { 13 | "name": "Wang, Zhen-Gang", 14 | "affiliation": "Division of Chemistry and Chemical Engineering, California Institute of Technology, Pasadena, California 91125, United States", 15 | "orcid": "0000-0002-3361-6114" 16 | }], 17 | "access_right": "open", 18 | "description": "MDCraft is a Python assistant for performing and analyzing molecular dynamics simulations of soft matter systems. It is designed to provide comprehensive support throughout the entire simulation process, from initialization to post-processing.", 19 | "license": "GPL-3.0-or-later" 20 | } -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to @bbye98 on Discord or /u/bbye98 on Reddit. 63 | All complaints will be reviewed and investigated promptly and fairly. 64 | 65 | All community leaders are obligated to respect the privacy and security of the 66 | reporter of any incident. 67 | 68 | ## Enforcement Guidelines 69 | 70 | Community leaders will follow these Community Impact Guidelines in determining 71 | the consequences for any action they deem in violation of this Code of Conduct: 72 | 73 | ### 1. Correction 74 | 75 | **Community Impact**: Use of inappropriate language or other behavior deemed 76 | unprofessional or unwelcome in the community. 77 | 78 | **Consequence**: A private, written warning from community leaders, providing 79 | clarity around the nature of the violation and an explanation of why the 80 | behavior was inappropriate. A public apology may be requested. 81 | 82 | ### 2. Warning 83 | 84 | **Community Impact**: A violation through a single incident or series 85 | of actions. 86 | 87 | **Consequence**: A warning with consequences for continued behavior. No 88 | interaction with the people involved, including unsolicited interaction with 89 | those enforcing the Code of Conduct, for a specified period of time. This 90 | includes avoiding interactions in community spaces as well as external channels 91 | like social media. 
Violating these terms may lead to a temporary or 92 | permanent ban. 93 | 94 | ### 3. Temporary Ban 95 | 96 | **Community Impact**: A serious violation of community standards, including 97 | sustained inappropriate behavior. 98 | 99 | **Consequence**: A temporary ban from any sort of interaction or public 100 | communication with the community for a specified period of time. No public or 101 | private interaction with the people involved, including unsolicited interaction 102 | with those enforcing the Code of Conduct, is allowed during this period. 103 | Violating these terms may lead to a permanent ban. 104 | 105 | ### 4. Permanent Ban 106 | 107 | **Community Impact**: Demonstrating a pattern of violation of community 108 | standards, including sustained inappropriate behavior, harassment of an 109 | individual, or aggression toward or disparagement of classes of individuals. 110 | 111 | **Consequence**: A permanent ban from any sort of public interaction within 112 | the community. 113 | 114 | ## Attribution 115 | 116 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 117 | version 2.0, available at 118 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 119 | 120 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 121 | enforcement ladder](https://github.com/mozilla/diversity). 122 | 123 | [homepage]: https://www.contributor-covenant.org 124 | 125 | For answers to common questions about this code of conduct, see the FAQ at 126 | https://www.contributor-covenant.org/faq. Translations are available at 127 | https://www.contributor-covenant.org/translations. 128 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # MDCraft Contribution Guidelines 2 | 3 | Thank you for your interest in contributing to MDCraft, a comprehensive Python package for streamlining molecular dynamics (MD) simulation workflows. We welcome contributions from the community to improve and expand the functionality of MDCraft. 4 | 5 | ## Code of conduct 6 | 7 | By participating in this project, you agree to abide by the [Contributor Covenant](CODE_OF_CONDUCT.md). Please be respectful and considerate in your interactions with others. 8 | 9 | ## How to contribute 10 | 11 | To get an overview of the project, read the [README](README.md) file. 12 | 13 | There are several ways you can contribute to MDCraft, including but not limited to 14 | 15 | * asking and answering questions in [discussions](https://github.com/bbye98/mdcraft/discussions), 16 | * reporting bugs and requesting features by submitting new issues, 17 | * adding new features and fixing bugs by creating pull requests (PRs), 18 | * improving and maintaining consistency in the documentation by updating numpydoc-style docstrings, and 19 | * providing reproducible examples and tutorials in Jupyter notebooks. 20 | 21 | ## Getting started 22 | 23 | ### Issues 24 | 25 | #### Open a new issue 26 | 27 | Before reporting a bug or requesting a feature, search to see if a related issue already exists. If the search comes up empty, you can [submit a new issue](https://github.com/bbye98/mdcraft/issues/new). Make sure you include a clear and descriptive title and provide as much detail as possible to help us understand and reproduce the issue. 28 | 29 | #### Solve an issue 30 | 31 | Scan through our existing issues to find one that interests you.
You can narrow down the search using the labels as filters. If you find an issue to work on, you are welcome to open a PR with a fix. 32 | 33 | ### Make changes 34 | 35 | To contribute to MDCraft, you must follow the "fork and pull request" workflow below. 36 | 37 | 1. [Fork the repository.](https://github.com/bbye98/mdcraft/fork) 38 | 2. Clone the fork to your machine using Git and change to the directory: 39 | 40 | git clone https://github.com/<your-username>/mdcraft.git 41 | cd mdcraft 42 | 43 | 3. Create a new branch and check it out: 44 | 45 | git checkout -b <branch-name> 46 | 47 | 4. Start working on your changes! You may want to create and activate an environment, and then install all dependencies: 48 | 49 | python3 -m pip install -r requirements.txt 50 | 51 | Remember to 52 | 53 | * write clean and readable code by following [PEP 8](https://peps.python.org/pep-0008/) style guidelines, 54 | * ensure docstrings adhere to the [numpydoc](https://numpydoc.readthedocs.io/en/latest/format.html) style guidelines, and 55 | * add Pytest-based unit tests for new features and bug fixes. 56 | 57 | ### Commit your update 58 | 59 | When you are ready to submit your changes to GitHub, follow the steps below. 60 | 61 | 1. Ensure that your local copy of MDCraft passes all the unit tests, including any that you may have written, using pytest. 62 | 2. Stage and commit your local files. 63 | 64 | git add . 65 | git commit -m "<commit message>" 66 | 67 | 3. Push changes to the `<branch-name>` branch of your GitHub fork of MDCraft. 68 | 69 | git push 70 | 71 | ### Pull request 72 | 73 | If you wish to contribute your changes to the main MDCraft project, [make a PR](https://github.com/bbye98/mdcraft/compare). The project maintainers will review your PR and, if it provides a significant or useful change to MDCraft, merge it! -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MDCraft 2 | 3 | 5 | 6 | [![continuous-integration]( 7 | https://github.com/bbye98/mdcraft/actions/workflows/ci.yml/badge.svg)]( 8 | https://github.com/bbye98/mdcraft/actions/workflows/ci.yml) 9 | [![Documentation Status](https://readthedocs.org/projects/mdcraft/badge/?version=latest)]( 10 | https://mdcraft.readthedocs.io/en/latest/?badge=latest) 11 | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.13308642.svg)](https://doi.org/10.5281/zenodo.13308642) 12 | 13 | MDCraft is a toolkit of analysis modules and helper functions for 14 | molecular dynamics (MD) simulations. 15 | 16 | * **Documentation**: https://mdcraft.readthedocs.io/ 17 | * **Conda**: https://anaconda.org/bbye98/mdcraft 18 | * **Python Package Index (PyPI)**: https://pypi.org/project/mdcraft/ 19 | 20 | ## Features 21 | 22 | * [`algorithm`](https://github.com/bbye98/mdcraft/tree/main/src/mdcraft/algorithm): 23 | Efficient Numba, NumPy, and SciPy algorithms for data wrangling and 24 | evaluating structural and dynamical properties. 25 | * [`analysis`](https://github.com/bbye98/mdcraft/tree/main/src/mdcraft/analysis): 26 | Serial and parallel data analysis tools built on top of the MDAnalysis 27 | framework. 28 | * [`fit`](https://github.com/bbye98/mdcraft/tree/main/src/mdcraft/fit): 29 | Two-dimensional curve fitting models for use with SciPy. 30 | * [`lammps`](https://github.com/bbye98/mdcraft/tree/main/src/mdcraft/lammps): 31 | Helper functions for setting up LAMMPS simulations.
32 | * [`openmm`](https://github.com/bbye98/mdcraft/tree/main/src/mdcraft/openmm): 33 | Extensions to the high-performance OpenMM toolkit, such as custom 34 | bond/pair potentials, support for NetCDF trajectories, and much more. 35 | * [`plot`](https://github.com/bbye98/mdcraft/tree/main/src/mdcraft/plot): 36 | Settings and additional functionality for Matplotlib figures. 37 | 38 | ### Benchmarks 39 | 40 | The novel force field provided by MDCraft, the Gaussian Core Model with smeared electrostatics (GCMe), offers multiple benefits, which are discussed in more detail in our recent [publication](https://doi.org/10.1021/acs.jctc.4c00603). Of note is the computational speed-up obtained from GCMe, especially when used in OpenMM: 41 | 42 | ![benchmarks](/assets/benchmarks.png) 43 | 44 | The code used to generate these benchmarks is provided in the associated [repository](https://github.com/bbye98/gcme). 45 | ## Getting started 46 | 47 | ### Prerequisites 48 | 49 | If you use pip to manage your Python packages and plan on using the 50 | OpenMM simulation toolkit, you must compile and install OpenMM manually 51 | since OpenMM is not available on PyPI. See the 52 | ["Compiling OpenMM from Source Code"]( 53 | http://docs.openmm.org/latest/userguide/library/02_compiling.html) 54 | section of the OpenMM User Guide for more information. 55 | 56 | If you use Conda, it is recommended that you use the conda-forge 57 | channel to install dependencies. To make conda-forge the default 58 | channel, use 59 | 60 | conda config --add channels conda-forge 61 | 62 | ### Installation 63 | 64 | MDCraft requires Python 3.9 or later. 65 | 66 | For the most up-to-date version of MDCraft, clone the repository and 67 | install the package using pip: 68 | 69 | git clone https://github.com/bbye98/mdcraft.git 70 | cd mdcraft 71 | python3 -m pip install -e . 72 | 73 | Alternatively, MDCraft is available on Conda: 74 | 75 | conda install bbye98::mdcraft 76 | 77 | and PyPI: 78 | 79 | python3 -m pip install mdcraft 80 | 81 | ### Postrequisites 82 | 83 | To use the method of image charges 84 | (`mdcraft.openmm.system.add_image_charges()`) in your OpenMM 85 | simulations, you must compile and install [`constvplugin`]( 86 | https://github.com/scychon/openmm_constV) or [`openmm-ic-plugin`]( 87 | https://github.com/bbye98/mdcraft/tree/main/lib/openmm-ic-plugin).
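To quickly check that both the plugin and the MDCraft helper are available, you can try importing them. This is a minimal sanity check, assuming you built `openmm-ic-plugin`, whose Python module is named `openmm_ic`; `constvplugin` installs under a different module name:

    python3 -c "import openmm_ic; from mdcraft.openmm.system import add_image_charges"

If the command finishes without an `ImportError`, both components are installed correctly.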
88 | 89 | ### Tests 90 | 91 | After installing, to run the MDCraft tests locally, use `pytest`: 92 | 93 | pip install pytest 94 | cd mdcraft 95 | pytest 96 | -------------------------------------------------------------------------------- /assets/benchmarks.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bbye98/mdcraft/ce324d5c85567220304441fcd9e7ddfb0ea0ef9d/assets/benchmarks.png -------------------------------------------------------------------------------- /assets/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bbye98/mdcraft/ce324d5c85567220304441fcd9e7ddfb0ea0ef9d/assets/favicon.ico -------------------------------------------------------------------------------- /assets/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bbye98/mdcraft/ce324d5c85567220304441fcd9e7ddfb0ea0ef9d/assets/logo.png -------------------------------------------------------------------------------- /docs/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/bbye98/mdcraft/ce324d5c85567220304441fcd9e7ddfb0ea0ef9d/docs/.nojekyll -------------------------------------------------------------------------------- /docs/source/_static/custom.css: -------------------------------------------------------------------------------- 1 | .sd-card .sd-card-img-top { 2 | height: 64px; 3 | width: 64px; 4 | margin-left: auto; 5 | margin-right: auto; 6 | margin-top: 16px; 7 | margin-bottom: 16px; 8 | } -------------------------------------------------------------------------------- /docs/source/_static/index/api_reference.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /docs/source/_static/index/contribution_guidelines.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /docs/source/_static/index/getting_started.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /docs/source/_static/index/user_guide.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /docs/source/_templates/autosummary/base.rst: -------------------------------------------------------------------------------- 1 | {{ objname | escape | underline }} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. auto{{ objtype }}:: {{ objname }} -------------------------------------------------------------------------------- /docs/source/_templates/autosummary/class.rst: -------------------------------------------------------------------------------- 1 | {% set no_inheritance_classes = [ 2 | 'mdcraft.analysis.base.Hash', 3 | 'mdcraft.analysis.reader.LAMMPSDumpTrajectoryReader' 4 | ] %} 5 | 6 | {{ objname | escape | underline }} 7 | 8 | .. currentmodule:: {{ module }} 9 | 10 | .. 
autoclass:: {{ objname }} 11 | :members: 12 | :show-inheritance: 13 | {% if fullname not in no_inheritance_classes -%} 14 | :inherited-members: 15 | {% endif -%} 16 | 17 | {% block methods %} 18 | {% if methods %} 19 | .. rubric:: {{ _('Methods') }} 20 | 21 | .. autosummary:: 22 | :nosignatures: 23 | {% for item in methods %} 24 | {%- if not item.startswith('_') and ( 25 | fullname not in no_inheritance_classes or item not in inherited_members 26 | ) %} 27 | ~{{ name }}.{{ item }} 28 | {%- endif -%} 29 | {%- endfor %} 30 | {% endif %} 31 | {% endblock %} -------------------------------------------------------------------------------- /docs/source/_templates/autosummary/module.rst: -------------------------------------------------------------------------------- 1 | {{ objname | escape | underline }} 2 | 3 | .. automodule:: {{ fullname }} 4 | 5 | {% block attributes %} 6 | {% if attributes %} 7 | .. rubric:: Module attributes 8 | 9 | .. autosummary:: 10 | :toctree: 11 | {% for item in attributes %} 12 | {{ item }} 13 | {%- endfor %} 14 | {% endif %} 15 | {% endblock %} 16 | 17 | {% block functions %} 18 | {% if functions %} 19 | .. rubric:: {{ _('Functions') }} 20 | 21 | .. autosummary:: 22 | :toctree: 23 | :nosignatures: 24 | {% for item in functions %} 25 | {{ item }} 26 | {%- endfor %} 27 | {% endif %} 28 | {% endblock %} 29 | 30 | {% block classes %} 31 | {% if classes %} 32 | .. rubric:: {{ _('Classes') }} 33 | 34 | .. autosummary:: 35 | :toctree: 36 | :template: autosummary/class.rst 37 | :nosignatures: 38 | {% for item in classes %} 39 | {{ item }} 40 | {%- endfor %} 41 | {% endif %} 42 | {% endblock %} 43 | 44 | {% block exceptions %} 45 | {% if exceptions %} 46 | .. rubric:: {{ _('Exceptions') }} 47 | 48 | .. autosummary:: 49 | :toctree: 50 | {% for item in exceptions %} 51 | {{ item }} 52 | {%- endfor %} 53 | {% endif %} 54 | {% endblock %} 55 | 56 | {% block modules %} 57 | {% if modules %} 58 | .. autosummary:: 59 | :toctree: 60 | :template: autosummary/module.rst 61 | :recursive: 62 | {% for item in modules %} 63 | {{ item }} 64 | {%- endfor %} 65 | {% endif %} 66 | {% endblock %} -------------------------------------------------------------------------------- /docs/source/api.rst: -------------------------------------------------------------------------------- 1 | .. autosummary:: 2 | :recursive: 3 | :template: autosummary/module.rst 4 | :toctree: api 5 | 6 | mdcraft -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 
2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | from datetime import datetime 7 | import pathlib 8 | import sys 9 | 10 | sys.path.insert(0, f"{pathlib.Path(__file__).resolve().parents[2]}/src") 11 | from mdcraft import VERSION # noqa: E402 12 | 13 | now = datetime.now() 14 | 15 | # -- Project information ----------------------------------------------------- 16 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 17 | 18 | project = "MDCraft" 19 | copyright = f"2023–{now.year} Benjamin Ye, Pierre Walker" 20 | author = "Benjamin Ye, Pierre Walker" 21 | version = release = VERSION 22 | 23 | # -- General configuration --------------------------------------------------- 24 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 25 | 26 | extensions = [ 27 | "myst_nb", 28 | "numpydoc", 29 | "sphinx_copybutton", 30 | "sphinx_design", 31 | "sphinx.ext.autodoc", 32 | "sphinx.ext.autosummary", 33 | "sphinx.ext.duration", 34 | "sphinx.ext.githubpages", 35 | "sphinx.ext.intersphinx", 36 | "sphinx.ext.mathjax", 37 | "sphinx.ext.napoleon", 38 | "sphinx.ext.viewcode" 39 | ] 40 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 41 | templates_path = ['_templates'] 42 | 43 | autosummary_generate = True 44 | intersphinx_mapping = { 45 | "dask": ("https://docs.dask.org/en/stable/", None), 46 | "joblib": ("https://joblib.readthedocs.io/en/latest/", None), 47 | "matplotlib": ("https://matplotlib.org/stable/", None), 48 | "mdanalysis": ("https://docs.mdanalysis.org/stable/", None), 49 | "numba": ("https://numba.pydata.org/numba-doc/latest/", None), 50 | "numpy": ("https://numpy.org/doc/stable/", None), 51 | "openmm": ("http://docs.openmm.org/latest/api-python/", None), 52 | "pint": ("https://pint.readthedocs.io/en/stable/", None), 53 | "python": ("https://docs.python.org/3/", None), 54 | "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None) 55 | } 56 | myst_enable_extensions = ["amsmath", "dollarmath"] 57 | myst_heading_anchors = 6 58 | napoleon_numpy_docstring = True 59 | nb_execution_timeout = -1 60 | nb_merge_streams = True 61 | numpydoc_show_class_members = False 62 | toc_object_entries_show_parents = "hide" 63 | 64 | # -- Options for HTML output ------------------------------------------------- 65 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 66 | 67 | html_css_files = ["custom.css"] 68 | html_favicon = "../../assets/favicon.ico" 69 | html_logo = "../../assets/logo.png" 70 | html_show_sourcelink = False 71 | html_static_path = ["_static"] 72 | html_theme = "furo" 73 | html_theme_options = {"sidebar_hide_name": True} -------------------------------------------------------------------------------- /docs/source/getting_started.rst: -------------------------------------------------------------------------------- 1 | Getting Started 2 | =============== 3 | 4 | .. toctree:: 5 | :glob: 6 | :maxdepth: 1 7 | 8 | notebooks/getting_started/* -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | MDCraft Documentation 2 | ===================== 3 | 4 | .. toctree:: 5 | :hidden: 6 | 7 | Home 8 | Getting Started 9 | User Guide 10 | API Reference 11 | 12 | .. 
highlights:: 13 | 14 | MDCraft is a Python assistant for performing and analyzing molecular dynamics simulations of soft matter systems. It is designed to provide comprehensive support throughout the entire simulation process, from initialization to post-processing. 15 | 16 | .. grid:: 1 1 2 2 17 | :gutter: 1 2 3 4 18 | 19 | .. grid-item-card:: 20 | :img-top: _static/index/getting_started.svg 21 | :text-align: center 22 | 23 | **Setup** 24 | ^^^ 25 | 26 | New to MDCraft? Start here to learn how to install and use the package. 27 | 28 | +++ 29 | 30 | .. button-ref:: getting_started 31 | :expand: 32 | :color: secondary 33 | :click-parent: 34 | 35 | Getting started 36 | 37 | .. grid-item-card:: 38 | :img-top: _static/index/user_guide.svg 39 | :text-align: center 40 | 41 | **Tutorials** 42 | ^^^ 43 | 44 | Want to see how MDCraft can be used? Explore a selection of curated examples. *Work in progress.* 45 | 46 | +++ 47 | 48 | .. button-ref:: user_guide 49 | :expand: 50 | :color: secondary 51 | :click-parent: 52 | 53 | User guide 54 | 55 | .. grid-item-card:: 56 | :img-top: _static/index/api_reference.svg 57 | :text-align: center 58 | 59 | **Features** 60 | ^^^ 61 | 62 | Ready to start using MDCraft in your own projects? See a detailed description of all available classes and functions. 63 | 64 | +++ 65 | 66 | .. button-ref:: api/mdcraft 67 | :expand: 68 | :color: secondary 69 | :click-parent: 70 | 71 | API reference 72 | 73 | .. grid-item-card:: 74 | :img-top: _static/index/contribution_guidelines.svg 75 | :text-align: center 76 | 77 | **Contributing** 78 | ^^^ 79 | 80 | Have an idea for a new feature or found a bug? Learn how to contribute to the development of MDCraft. 81 | 82 | +++ 83 | 84 | .. button-link:: https://github.com/bbye98/mdcraft/blob/main/CONTRIBUTING.md 85 | :expand: 86 | :color: secondary 87 | :click-parent: 88 | 89 | Contribution guidelines -------------------------------------------------------------------------------- /docs/source/notebooks/getting_started/installation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Installation\n", 8 | "\n", 9 | "MDCraft is a Python package and can be installed using pip, the package installer for Python, or a Python package manager like Conda or Mamba. MDCraft requires Python 3.9 or later.\n", 10 | "\n", 11 | "## Prerequisites\n", 12 | "\n", 13 | "### Virtual environement\n", 14 | "\n", 15 | "It is highly recommended that you use a virtual environment for MDCraft to prevent dependency conflicts. If you do not already have a virtual environment for MDCraft and related simulation packages and tools, you can create one for the package installer/manager you use using the instructions below.\n", 16 | "\n", 17 | "#### Conda\n", 18 | "\n", 19 | "1. *Optional*: Make conda-forge the default channel for dependencies:\n", 20 | "\n", 21 | " conda config --add channels conda-forge\n", 22 | "\n", 23 | "2. Create an environment named `mdcraft` with Python:\n", 24 | "\n", 25 | " conda create -n mdcraft python\n", 26 | "\n", 27 | "3. Activate the environment:\n", 28 | "\n", 29 | " conda activate mdcraft\n", 30 | "\n", 31 | "#### venv\n", 32 | "\n", 33 | "1. Create an environment named `mdcraft`:\n", 34 | "\n", 35 | " python3 -m venv mdcraft\n", 36 | "\n", 37 | "2. 
Activate the environment using one of the following commands:\n", 38 | "\n", 39 | " ```\n", 40 | " source mdcraft/bin/activate # POSIX: bash or zsh\n", 41 | " ```\n", 42 | "\n", 43 | " ```\n", 44 | " mdcraft\\Scripts\\activate.bat # Windows: cmd.exe\n", 45 | " ```\n", 46 | "\n", 47 | " ```\n", 48 | " mdcraft\\Scripts\\Activate.ps1 # Windows: PowerShell\n", 49 | " ```\n", 50 | "\n", 51 | "#### virtualenv\n", 52 | "\n", 53 | "1. Create an environment named `mdcraft`:\n", 54 | "\n", 55 | " virtualenv mdcraft\n", 56 | "\n", 57 | "2. Activate the environment using one of the following commands:\n", 58 | "\n", 59 | " ```\n", 60 | " source mdcraft/bin/activate # Linux or macOS\n", 61 | " ```\n", 62 | "\n", 63 | " ```\n", 64 | " .\\mdcraft\\Scripts\\activate # Windows\n", 65 | " ```\n", 66 | "\n", 67 | "### OpenMM\n", 68 | "\n", 69 | "If you plan on using the high-performance OpenMM simulation toolkit, you must\n", 70 | "- compile and build it yourself using the instructions [here](http://docs.openmm.org/latest/userguide/library/02_compiling.html#compiling-openmm-from-source-code) if you are using pip, or \n", 71 | "- install it (and optionally, a specific CUDA toolkit) using the instructions [here](http://docs.openmm.org/latest/userguide/application/01_getting_started.html#installing-openmm) if you are using Conda/Mamba.\n", 72 | "\n", 73 | "## Install from PyPI\n", 74 | "\n", 75 | "Install MDCraft and required dependencies using pip:\n", 76 | "\n", 77 | " python3 -m pip install mdcraft\n", 78 | "\n", 79 | "## Install from Anaconda\n", 80 | "\n", 81 | "Install MDCraft and required dependencies using Conda:\n", 82 | "\n", 83 | " conda install bbye98::mdcraft\n", 84 | "\n", 85 | "## Install from source\n", 86 | "\n", 87 | "1. Grab a copy of the MDCraft repository:\n", 88 | "\n", 89 | " git clone https://github.com/bbye98/mdcraft.git\n", 90 | "\n", 91 | "2. Enter the repository directory:\n", 92 | "\n", 93 | " cd mdcraft\n", 94 | "\n", 95 | "3. *Optional*: The required dependencies will be installed automatically alongside MDCraft in the next step. To install all dependencies, including those used for development and unit tests, use one of the following commands:\n", 96 | "\n", 97 | " ```\n", 98 | " python3 -m pip install -r requirements.txt # pip package installer\n", 99 | " ```\n", 100 | " \n", 101 | " ```\n", 102 | " conda env update --file environment.yml # Conda/Mamba package manager\n", 103 | " ```\n", 104 | "\n", 105 | "4. Install MDCraft (and required dependencies, if you have not already done so) using pip (even if you are using a Conda/Mamba environment):\n", 106 | "\n", 107 | " python3 -m pip install -e .\n", 108 | "\n", 109 | "## Postrequisites\n", 110 | "\n", 111 | "Try importing MDCraft in Python:\n", 112 | "\n", 113 | " python3 -c \"import mdcraft\"\n", 114 | "\n", 115 | "If no errors like `ModuleNotFoundError: No module named 'mdcraft'` are raised, you have successfully installed MDCraft!\n", 116 | "\n", 117 | "### Method of image charges\n", 118 | "\n", 119 | "If you plan on using the method of image charges (`mdcraft.openmm.system.add_image_charges()`) in your OpenMM simulations, you must compile and install [`constvplugin`](https://github.com/scychon/openmm_constV) or [`openmm-ic-plugin`](https://github.com/bbye98/mdcraft/tree/main/lib/openmm-ic-plugin)." 
120 | ] 121 | } 122 | ], 123 | "metadata": { 124 | "kernelspec": { 125 | "display_name": "base", 126 | "language": "python", 127 | "name": "python3" 128 | }, 129 | "language_info": { 130 | "codemirror_mode": { 131 | "name": "ipython", 132 | "version": 3 133 | }, 134 | "file_extension": ".py", 135 | "mimetype": "text/x-python", 136 | "name": "python", 137 | "nbconvert_exporter": "python", 138 | "pygments_lexer": "ipython3", 139 | "version": "3.11.8" 140 | } 141 | }, 142 | "nbformat": 4, 143 | "nbformat_minor": 2 144 | } 145 | -------------------------------------------------------------------------------- /docs/source/user_guide.rst: -------------------------------------------------------------------------------- 1 | User Guide 2 | ========== 3 | 4 | .. toctree:: 5 | :glob: 6 | :maxdepth: 1 7 | 8 | notebooks/user_guide/* -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: mdcraft 2 | channels: 3 | - conda-forge 4 | dependencies: 5 | # CORE DEPENDENCIES 6 | - matplotlib 7 | - mdanalysis >=2.2 8 | - netcdf4 9 | - numba 10 | - numpy 11 | - pandas 12 | - pint 13 | - psutil 14 | - scipy 15 | - sympy 16 | # OPTIONAL DEPENDENCIES 17 | - dask 18 | - joblib 19 | - openmm 20 | # DOCUMENTATION DEPENDENCIES 21 | - furo 22 | - myst-nb 23 | - numpydoc 24 | - sphinx 25 | - sphinx-copybutton 26 | - sphinx-design 27 | # TEST DEPENDENCIES 28 | - ase 29 | - coverage 30 | - dynasor >=2 31 | - mdanalysistests 32 | - pytest 33 | - ruff 34 | - tidynamics -------------------------------------------------------------------------------- /lib/lammps-fix-imagecharges/README.md: -------------------------------------------------------------------------------- 1 | `fix imagecharges` -- LAMMPS Image Charge Fix 2 | ============================================= 3 | 4 | This LAMMPS fix is an updated version of `fix_imagecharges.*` from 5 | [`lammps_fixes`](https://github.com/kdwelle/lammps-fixes) and is 6 | compatible with the current version of LAMMPS (21 Nov 2023). It 7 | implements the method of image charges for a system of charged particles 8 | confined between two parallel perfectly conducting electrodes. 9 | 10 | Currently, only a CPU implementation with support for the OpenMP package 11 | is available. 12 | 13 | Installation 14 | ============ 15 | 16 | Before compiling LAMMPS, copy the `fix_imagecharges.cpp` and 17 | `fix_imagecharges.h` files to the `src` directory of the LAMMPS 18 | distribution. Then, recompile LAMMPS as usual. 19 | 20 | fix imagecharges command 21 | ======================== 22 | 23 | ### Syntax 24 | 25 | fix ID group-ID imagecharges px py pz nx ny nz itype keyword value ... 26 | 27 | * ID, group-ID are documented in [fix](https://docs.lammps.org/fix.html) command 28 | * imagecharges = style name of this fix command 29 | * px, py, pz = coordinates of a point on the image plane 30 | * nx, ny, nz = vector normal to the image plane 31 | * itype = atom type to be used as the image charges 32 | * one or more keyword/value pairs may be appended 33 | 34 | keyword = region or scale 35 | region = ID of region that encompasses the real atoms 36 | scale value = f 37 | f = charge scale factor (default = 1.0) 38 | 39 | ### Example 40 | 41 | fix 1 all imagecharges 0.0 0.0 0.0 0.0 0.0 1.0 4 42 | 43 | Citing this work 44 | ================ 45 | Any work that uses this fix should cite the following publication: 46 | 47 | K. A. Dwelle and A. P. 
Willard, Constant Potential, Electrochemically Active 48 | Boundary Conditions for Electrochemical Simulation, J. Phys. Chem. C 123, 24095 49 | (2019). https://doi.org/10.1021/acs.jpcc.9b06635 -------------------------------------------------------------------------------- /lib/lammps-fix-imagecharges/fix_imagecharges.h: -------------------------------------------------------------------------------- 1 | #ifdef FIX_CLASS 2 | // clang-format off 3 | FixStyle(imagecharges, FixImageCharges) 4 | // clang-format on 5 | #else 6 | 7 | #ifndef LMP_FIX_IMAGE_CHARGES_H 8 | #define LMP_FIX_IMAGE_CHARGES_H 9 | 10 | #include "fix.h" 11 | 12 | namespace LAMMPS_NS { 13 | 14 | class FixImageCharges: public Fix { 15 | public: 16 | FixImageCharges(class LAMMPS *, int, char **); 17 | virtual ~FixImageCharges(); 18 | int setmask(); 19 | virtual void init(); 20 | void min_setup_pre_force(int); 21 | void setup_pre_force(int); 22 | void min_pre_force(int); 23 | void pre_force(int); 24 | void min_post_force(int); 25 | void post_force(int); 26 | void post_run(); 27 | 28 | double memory_usage(); 29 | void grow_arrays(int); 30 | void copy_arrays(int, int,int); 31 | void set_arrays(int); 32 | 33 | protected: 34 | class Region *region; 35 | char *pxstr, *pystr, *pzstr, *nxstr, *nystr, *nzstr, *idregion, *scalestr; 36 | int *imagei; 37 | double *imageid; 38 | 39 | double pxvalue, pyvalue, pzvalue, nxvalue, nyvalue, nzvalue, scale; 40 | int pxvar, pyvar, pzvar, nxvar, nyvar, nzvar, scalevar; 41 | int pxstyle, pystyle, pzstyle, nxstyle, nystyle, nzstyle, scalestyle; 42 | int varflag, exclusionAtom, itype; 43 | }; 44 | } 45 | 46 | #endif 47 | #endif -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | #------------------------------------------------------------------------------- 2 | # OpenMM Image Charge Plugin 3 | # 4 | # Creates the OpenMM image charge plugin library. 5 | # 6 | # Windows: 7 | # OpenMMIC.dll 8 | # OpenMMIC.lib 9 | # Unix: 10 | # libOpenMMIC.so 11 | #------------------------------------------------------------------------------- 12 | 13 | CMAKE_MINIMUM_REQUIRED(VERSION 3.6) 14 | PROJECT(OpenMMICPlugin) 15 | CMAKE_POLICY(SET CMP0146 OLD) 16 | 17 | # We need to know where OpenMM is installed so we can access the headers and libraries. 18 | 19 | SET(OPENMM_DIR "/usr/local/openmm" CACHE PATH "Where OpenMM is installed") 20 | INCLUDE_DIRECTORIES("${OPENMM_DIR}/include") 21 | LINK_DIRECTORIES("${OPENMM_DIR}/lib" "${OPENMM_DIR}/lib/plugins") 22 | 23 | # Specify the C++ version we are building for. 24 | 25 | SET(CMAKE_CXX_STANDARD 11) 26 | 27 | # Set flags for linking on macOS. 28 | 29 | IF(APPLE) 30 | SET (CMAKE_INSTALL_NAME_DIR "@rpath") 31 | SET(EXTRA_COMPILE_FLAGS "-msse2 -stdlib=libc++") 32 | ENDIF(APPLE) 33 | 34 | # Select where to install. 35 | 36 | IF(${CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT}) 37 | IF(WIN32) 38 | SET(CMAKE_INSTALL_PREFIX "$ENV{ProgramFiles}/OpenMM" CACHE PATH "Where to install the plugin" FORCE) 39 | ELSE(WIN32) 40 | SET(CMAKE_INSTALL_PREFIX "/usr/local/openmm" CACHE PATH "Where to install the plugin" FORCE) 41 | ENDIF(WIN32) 42 | ENDIF(${CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT}) 43 | 44 | # The source is organized into subdirectories, but we handle them all from 45 | # this CMakeLists file rather than letting CMake visit them as SUBDIRS. 
46 | 47 | SET(IC_PLUGIN_SOURCE_SUBDIRS openmmapi serialization) 48 | 49 | # Set the library name. 50 | 51 | SET(IC_LIBRARY_NAME OpenMMIC) 52 | SET(SHARED_IC_TARGET ${IC_LIBRARY_NAME}) 53 | 54 | # These are all the places to search for header files which are to be part of the API. 55 | 56 | SET(API_INCLUDE_DIRS "openmmapi/include" "openmmapi/include/openmm") 57 | 58 | # Locate header files. 59 | 60 | SET(API_INCLUDE_FILES) 61 | FOREACH(dir ${API_INCLUDE_DIRS}) 62 | FILE(GLOB fullpaths ${dir}/*.h) 63 | SET(API_INCLUDE_FILES ${API_INCLUDE_FILES} ${fullpaths}) 64 | ENDFOREACH(dir) 65 | 66 | # Collect source files. 67 | 68 | SET(SOURCE_FILES) # empty 69 | SET(SOURCE_INCLUDE_FILES) 70 | FOREACH(subdir ${IC_PLUGIN_SOURCE_SUBDIRS}) 71 | FILE(GLOB src_files ${CMAKE_CURRENT_SOURCE_DIR}/${subdir}/src/*.cpp) 72 | FILE(GLOB incl_files ${CMAKE_CURRENT_SOURCE_DIR}/${subdir}/src/*.h) 73 | SET(SOURCE_FILES ${SOURCE_FILES} ${src_files}) # append 74 | SET(SOURCE_INCLUDE_FILES ${SOURCE_INCLUDE_FILES} ${incl_files}) 75 | 76 | ## Make sure we find these locally before looking in OpenMM/include if 77 | ## OpenMM was previously installed there. 78 | 79 | INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/${subdir}/include) 80 | ENDFOREACH(subdir) 81 | 82 | # Create the library. 83 | 84 | ADD_LIBRARY(${SHARED_IC_TARGET} SHARED ${SOURCE_FILES} ${SOURCE_INCLUDE_FILES} ${API_INCLUDE_FILES}) 85 | SET_TARGET_PROPERTIES(${SHARED_IC_TARGET} 86 | PROPERTIES COMPILE_FLAGS "-DIC_BUILDING_SHARED_LIBRARY ${EXTRA_COMPILE_FLAGS}" 87 | LINK_FLAGS "${EXTRA_COMPILE_FLAGS}") 88 | TARGET_LINK_LIBRARIES(${SHARED_IC_TARGET} OpenMM OpenMMDrude) 89 | INSTALL_TARGETS(/lib RUNTIME_DIRECTORY /lib ${SHARED_IC_TARGET}) 90 | 91 | # Install headers. 92 | 93 | FILE(GLOB API_ONLY_INCLUDE_FILES "openmmapi/include/*.h") 94 | INSTALL(FILES ${API_ONLY_INCLUDE_FILES} DESTINATION include) 95 | FILE(GLOB API_ONLY_INCLUDE_FILES_INTERNAL "openmmapi/include/internal/*.h") 96 | INSTALL(FILES ${API_ONLY_INCLUDE_FILES_INTERNAL} DESTINATION include/internal) 97 | 98 | # Enable testing. 99 | 100 | # ENABLE_TESTING() 101 | 102 | # Build the implementations for different platforms. 103 | 104 | FIND_PACKAGE(CUDA QUIET) 105 | IF(CUDA_FOUND) 106 | SET(IC_BUILD_CUDA_LIB ON CACHE BOOL "Build implementation for CUDA") 107 | ELSE(CUDA_FOUND) 108 | SET(IC_BUILD_CUDA_LIB OFF CACHE BOOL "Build implementation for CUDA") 109 | ENDIF(CUDA_FOUND) 110 | IF(IC_BUILD_CUDA_LIB) 111 | ADD_SUBDIRECTORY(platforms/cuda) 112 | ENDIF(IC_BUILD_CUDA_LIB) 113 | 114 | # Build the Python API. 
115 | 116 | FIND_PROGRAM(PYTHON_EXECUTABLE python) 117 | FIND_PROGRAM(SWIG_EXECUTABLE swig) 118 | IF(PYTHON_EXECUTABLE AND SWIG_EXECUTABLE) 119 | SET(IC_BUILD_PYTHON_WRAPPERS ON CACHE BOOL "Build wrappers for Python") 120 | ELSE(PYTHON_EXECUTABLE AND SWIG_EXECUTABLE) 121 | SET(IC_BUILD_PYTHON_WRAPPERS OFF CACHE BOOL "Build wrappers for Python") 122 | ENDIF(PYTHON_EXECUTABLE AND SWIG_EXECUTABLE) 123 | IF(IC_BUILD_PYTHON_WRAPPERS) 124 | ADD_SUBDIRECTORY(python) 125 | ENDIF(IC_BUILD_PYTHON_WRAPPERS) -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/README.md: -------------------------------------------------------------------------------- 1 | `openmm-ic` — OpenMM Image Charge Plugin 2 | ======================================== 3 | 4 | This OpenMM plugin is an updated version of 5 | [`openmm_constV`](https://github.com/scychon/openmm_constV/) 6 | and implements two Langevin integrators that can 7 | simulate slab systems of normal or Drude particles, respectively, 8 | confined between two parallel perfectly conducting electrodes. 9 | 10 | Currently, only the CUDA platform is supported. 11 | 12 | Building the plugin 13 | =================== 14 | 15 | This project uses [CMake](http://www.cmake.org) for its build system. 16 | To build it, follow these steps: 17 | 18 | 1. Create a directory in which to build the plugin. 19 | 2. Set environmental variables such as `CXXFLAGS='-std=c++11'`, 20 | `OPENMM_CUDA_COMPILER=$(which nvcc)`, etc. 21 | 3. Run the CMake GUI or `ccmake` and specify your new directory as the 22 | build directory and the top level directory of this project as the 23 | source directory. 24 | 4. Press "Configure". 25 | 5. Set `OPENMM_DIR` to point to the directory where OpenMM is installed 26 | (usually `/usr/local/openmm` if self-compiled or `$CONDA_PREFIX` if 27 | installed via conda-forge). This is needed to locate the OpenMM 28 | header files and libraries. 29 | 6. Set `CMAKE_INSTALL_PREFIX` to the directory where the plugin should 30 | be installed. Usually, this will be the same as `OPENMM_DIR`, so the 31 | plugin will be added to your OpenMM installation. 32 | 7. Make sure that `CUDA_TOOLKIT_ROOT_DIR` is set correctly and that 33 | `IC_BUILD_CUDA_LIB` is enabled. 34 | 8. Press "Configure" again if necessary, then press "Generate". 35 | 9. Use the build system you selected to build and install the plugin 36 | using 37 | 38 | make -j 39 | make install 40 | make PythonInstall 41 | 42 | The plugin will be installed as the `openmm-ic` Python package, which 43 | can be imported using `import openmm_ic`. 44 | 45 | Python API 46 | ========== 47 | 48 | The two integrators available are `openmm_ic.ICLangevinIntegrator` and 49 | `openmm_ic.ICDrudeLangevinIntegrator`, and they have the same methods as 50 | their counterparts `openmm.LangevinIntegrator` and 51 | `openmm.DrudeLangevinIntegrator`, respectively. 52 | 53 | A simple example is provided below: 54 | 55 | from openmm import app, unit 56 | from openmm_ic import ICLangevinIntegrator 57 | 58 | # Set up or load the system and topology for the real particles 59 | # system = ... 60 | # topology = ... 61 | 62 | # Set up or retrieve the non-bonded force 63 | # nbforce = ... 64 | 65 | # Determine which particles belong to the wall 66 | # wall_indices = ... 
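    # (Hypothetical illustration, not part of the original example: if the
    # electrode atoms sit in residues named "WALL", their indices could be
    # collected with
    #     wall_indices = {atom.index for residue in topology.residues()
    #                     if residue.name == "WALL" for atom in residue.atoms()}
    # Adapt the residue name to however your topology labels the electrodes.)

    # Added for completeness (assumed, not in the original snippet): the
    # mirroring step below also needs NumPy, the real-particle positions, and
    # the number of real particles before any image charges are appended.
    import numpy as np
    # positions = ...
    N_real = system.getNumParticles()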
67 | 68 | # Double simulation box size and mirror particle positions 69 | pbv = system.getDefaultPeriodicBoxVectors() 70 | pbv[2] *= 2 71 | system.setDefaultPeriodicBoxVectors(*pbv) 72 | dimensions = topology.getUnitCellDimensions() 73 | dimensions[2] *= 2 74 | topology.setUnitCellDimensions(dimensions) 75 | positions = np.concatenate( 76 | (positions, positions * np.array((1, 1, -1), dtype=int)) 77 | ) 78 | 79 | # Register image charges to the system, topology, and force field 80 | chains_ic = [topology.addChain() for _ in range(topology.getNumChains())] 81 | residues_ic = [topology.addResidue(f"IC_{r.name}", 82 | chains_ic[r.chain.index]) 83 | for r in list(topology.residues())] 84 | for i, atom in enumerate(list(topology.atoms())): 85 | system.addParticle(0 * unit.amu) 86 | topology.addAtom(f"IC_{atom.name}", atom.element, 87 | residues_ic[atom.residue.index]) 88 | q = nbforce.getParticleParameters(i)[0] 89 | nbforce.addParticle( 90 | 0 if i in wall_indices else 91 | -q, 92 | 0, 0 93 | ) 94 | 95 | # Add existing particle exclusions to mirrored image charges 96 | for i in range(nbforce.getNumExceptions()): 97 | i1, i2, qq = nbforce.getExceptionParameters(i)[:3] 98 | nbforce.addException(N_real + i1, N_real + i2, qq, 0, 0) 99 | 100 | # Prevent wall particles from interacting with their mirrored image charges 101 | for i in wall_indices: 102 | nbforce.addException(i, N_real + i, 0, 0, 0) 103 | 104 | # Create the Langevin integrator 105 | temp = 300 * unit.kelvin 106 | fric = 1 / unit.picosecond 107 | dt = 1 * unit.femtosecond 108 | integrator = ICLangevinIntegrator(temp, fric, dt) 109 | 110 | Alternatively, if you have [MDCraft](https://github.com/bbye98/mdcraft) 111 | installed, you can use the `mdcraft.openmm.system.add_image_charges()` 112 | function to achieve the same result as the code snippet above. 113 | 114 | Citing this work 115 | ================ 116 | Any work that uses this plugin should cite the following publication: 117 | 118 | C. Y. Son and Z.-G. Wang, Image-Charge Effects on Ion Adsorption near 119 | Aqueous Interfaces, Proc. Natl. Acad. Sci. U.S.A. 118, e2020615118 120 | (2021). https://doi.org/10.1073/pnas.2020615118 -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/openmmapi/include/ICKernels.h: -------------------------------------------------------------------------------- 1 | #ifndef IC_KERNELS_H_ 2 | #define IC_KERNELS_H_ 3 | 4 | #include <string> 5 | #include <vector> 6 | 7 | #include "ICDrudeLangevinIntegrator.h" 8 | #include "ICLangevinIntegrator.h" 9 | #include "openmm/DrudeForce.h" 10 | #include "openmm/Platform.h" 11 | #include "openmm/System.h" 12 | #include "openmm/Vec3.h" 13 | 14 | namespace ICPlugin { 15 | 16 | /** 17 | * This kernel is invoked by ICLangevinIntegrator to take one time step. 18 | */ 19 | class IntegrateICLangevinStepKernel : public OpenMM::KernelImpl { 20 | public: 21 | static std::string Name() { return "IntegrateICLangevinStep"; } 22 | IntegrateICLangevinStepKernel(std::string name, 23 | const OpenMM::Platform& platform) 24 | : KernelImpl(name, platform) {} 25 | 26 | /** 27 | * Initialize the kernel. 28 | * 29 | * @param system the System this kernel will be applied to 30 | * @param integrator the ICLangevinIntegrator this kernel will be used for 31 | */ 32 | virtual void initialize(const OpenMM::System& system, 33 | const ICLangevinIntegrator& integrator) = 0; 34 | 35 | /** 36 | * Execute the kernel.
37 | * 38 | * @param context the context in which to execute this kernel 39 | * @param integrator the ICLangevinIntegrator this kernel is being used for 40 | */ 41 | virtual void execute(OpenMM::ContextImpl& context, 42 | const ICLangevinIntegrator& integrator) = 0; 43 | 44 | /** 45 | * Compute the kinetic energy. 46 | * 47 | * @param context the context in which to execute this kernel 48 | * @param integrator the ICLangevinIntegrator this kernel is being used for 49 | */ 50 | virtual double computeKineticEnergy( 51 | OpenMM::ContextImpl& context, 52 | const ICLangevinIntegrator& integrator) = 0; 53 | }; 54 | 55 | /** 56 | * This kernel is invoked by ICDrudeLangevinIntegrator to take one time step. 57 | */ 58 | class IntegrateICDrudeLangevinStepKernel : public OpenMM::KernelImpl { 59 | public: 60 | static std::string Name() { return "IntegrateICDrudeLangevinStep"; } 61 | IntegrateICDrudeLangevinStepKernel(std::string name, 62 | const OpenMM::Platform& platform) 63 | : KernelImpl(name, platform) {} 64 | 65 | /** 66 | * Initialize the kernel. 67 | * 68 | * @param system the System this kernel will be applied to 69 | * @param integrator the ICDrudeLangevinIntegrator this kernel will be used 70 | * for 71 | * @param force the DrudeForce to get particle parameters from 72 | */ 73 | virtual void initialize(const OpenMM::System& system, 74 | const ICDrudeLangevinIntegrator& integrator, 75 | const OpenMM::DrudeForce& force) = 0; 76 | 77 | /** 78 | * Execute the kernel. 79 | * 80 | * @param context the context in which to execute this kernel 81 | * @param integrator the ICDrudeLangevinIntegrator this kernel is being 82 | * used for 83 | */ 84 | virtual void execute(OpenMM::ContextImpl& context, 85 | const ICDrudeLangevinIntegrator& integrator) = 0; 86 | 87 | /** 88 | * Compute the kinetic energy. 89 | */ 90 | virtual double computeKineticEnergy( 91 | OpenMM::ContextImpl& context, 92 | const ICDrudeLangevinIntegrator& integrator) = 0; 93 | }; 94 | 95 | } // namespace ICPlugin 96 | 97 | #endif /*IC_KERNELS_H_*/ -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/openmmapi/include/ICLangevinIntegrator.h: -------------------------------------------------------------------------------- 1 | #ifndef OPENMM_ICLANGEVININTEGRATOR_H_ 2 | #define OPENMM_ICLANGEVININTEGRATOR_H_ 3 | 4 | #include "internal/windowsExportIC.h" 5 | #include "openmm/Integrator.h" 6 | #include "openmm/Kernel.h" 7 | 8 | namespace ICPlugin { 9 | 10 | class OPENMM_EXPORT_IC ICLangevinIntegrator : public OpenMM::Integrator { 11 | public: 12 | /** 13 | * Create a ICLangevinIntegrator. 14 | * 15 | * @param temperature the temperature of the heat bath (in Kelvin) 16 | * @param frictionCoeff the friction coefficient which couples the system 17 | * to the heat bath (in inverse picoseconds) 18 | * @param stepSize the step size with which to integrate the system 19 | * (in picoseconds) 20 | * @param numCells the number of real and image cells 21 | * @param cellZSize the z-dimension of the unit cell (in nanometers) 22 | */ 23 | ICLangevinIntegrator(double temperature, double frictionCoeff, 24 | double stepSize, int numCells = 2, double cellZSize = -1); 25 | 26 | /** 27 | * Get the temperature of the main heat bath. 28 | * 29 | * @return the temperature of the heat bath (in Kelvin) 30 | */ 31 | double getTemperature() const { return temperature; } 32 | 33 | /** 34 | * Set the temperature of the main heat bath. 
35 | 36 | * @param temp the temperature of the heat bath (in Kelvin) 37 | */ 38 | void setTemperature(double temp) { 39 | if (temp < 0) 40 | throw OpenMM::OpenMMException( 41 | "Temperature cannot be negative"); 42 | temperature = temp; 43 | } 44 | 45 | /** 46 | * Get the friction coefficient which determines how strongly the system is 47 | * coupled to the main heat bath. 48 | * 49 | * @return the friction coefficient (in inverse picoseconds) 50 | */ 51 | double getFriction() const { return friction; } 52 | 53 | /** 54 | * Set the friction coefficient which determines how strongly the system is 55 | * coupled to the main heat bath. 56 | * 57 | * @param coeff the friction coefficient (in inverse picoseconds) 58 | */ 59 | void setFriction(double coeff) { 60 | if (coeff < 0) 61 | throw OpenMM::OpenMMException( 62 | "Friction coefficient cannot be negative"); 63 | friction = coeff; 64 | } 65 | 66 | /** 67 | * Get the random number seed. See setRandomNumberSeed() for details. 68 | * 69 | * @return the random number seed 70 | */ 71 | int getRandomNumberSeed() const { return randomNumberSeed; } 72 | 73 | /** 74 | * Set the random number seed. The precise meaning of this parameter is 75 | * undefined, and is left up to each Platform to interpret in an appropriate 76 | * way. It is guaranteed that if two simulations are run with different 77 | * random number seeds, the sequence of random forces will be different. On 78 | * the other hand, no guarantees are made about the behavior of simulations 79 | * that use the same seed. In particular, Platforms are permitted to use 80 | * non-deterministic algorithms which produce different results on 81 | * successive runs, even if those runs were initialized identically. 82 | * 83 | * If seed is set to 0 (which is the default value assigned), a unique seed 84 | * is chosen when a Context is created from this Force. This is done to 85 | * ensure that each Context receives unique random seeds without you needing 86 | * to set them explicitly. 87 | * 88 | * @param seed the random number seed 89 | */ 90 | void setRandomNumberSeed(int seed) { randomNumberSeed = seed; } 91 | 92 | /** 93 | * Advance a simulation through time by taking a series of time steps. 94 | * 95 | * @param steps the number of time steps to take 96 | */ 97 | void step(int steps); 98 | 99 | /** 100 | * Get the number of real and image cells. 101 | * 102 | * @return the number of real and image cells 103 | */ 104 | int getNumCells() const { return numCells; } 105 | 106 | /** 107 | * Set the number of real and image cells. 108 | * 109 | * @param numCells the number of real and image cells 110 | */ 111 | void setNumCells(int cells) { numCells = cells; } 112 | 113 | /** 114 | * Get z-dimension of the unit cell. 115 | * 116 | * @return the z-dimension of the unit cell (in nanometers) 117 | */ 118 | double getCellZSize() const { return cellZSize; } 119 | 120 | /** 121 | * Set z-dimension of the unit cell. 122 | * 123 | * @param cellZSize the z-dimension of the unit cell (in nanometers) 124 | */ 125 | void setCellZSize(double size) { cellZSize = size; } 126 | 127 | protected: 128 | /** 129 | * This will be called by the Context when it is created. It informs the 130 | * Integrator of what context it will be integrating, and gives it a chance 131 | * to do any necessary initialization. It will also get called again if the 132 | * application calls reinitialize() on the Context. 
133 | */ 134 | void initialize(OpenMM::ContextImpl& context); 135 | 136 | /** 137 | * This will be called by the Context when it is destroyed to let the 138 | * Integrator do any necessary cleanup. It will also get called again if 139 | * the application calls reinitialize() on the Context. 140 | */ 141 | void cleanup() { kernel = OpenMM::Kernel(); } 142 | 143 | /** 144 | * Get the names of all Kernels used by this Integrator. 145 | */ 146 | std::vector getKernelNames(); 147 | 148 | /** 149 | * Compute the kinetic energy of the system at the current time. 150 | */ 151 | double computeKineticEnergy(); 152 | 153 | private: 154 | double temperature, friction, cellZSize; 155 | int randomNumberSeed, numCells; 156 | OpenMM::Kernel kernel; 157 | }; 158 | 159 | } // namespace ICPlugin 160 | 161 | #endif /*OPENMM_ICLANGEVININTEGRATOR_H_*/ -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/openmmapi/include/OpenMMIC.h: -------------------------------------------------------------------------------- 1 | #ifndef OPENMM_IC_H_ 2 | #define OPENMM_IC_H_ 3 | 4 | #include "ICLangevinIntegrator.h" 5 | #include "ICDrudeLangevinIntegrator.h" 6 | #include "openmm/Integrator.h" 7 | 8 | #endif /*OPENMM_IC_H_*/ -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/openmmapi/include/internal/windowsExportIC.h: -------------------------------------------------------------------------------- 1 | #ifndef OPENMM_WINDOWSEXPORTIC_H_ 2 | #define OPENMM_WINDOWSEXPORTIC_H_ 3 | 4 | /* 5 | * Shared libraries are messy in Visual Studio. We have to distinguish three 6 | * cases: 7 | * (1) this header is being used to build the OpenMM shared library 8 | * (dllexport) 9 | * (2) this header is being used by a *client* of the OpenMM shared 10 | * library (dllimport) 11 | * (3) we are building the OpenMM static library, or the client is 12 | * being compiled with the expectation of linking with the 13 | * OpenMM static library (nothing special needed) 14 | * In the CMake script for building this library, we define one of the symbols 15 | * IC_BUILDING_{SHARED|STATIC}_LIBRARY 16 | * Client code normally has no special symbol defined, in which case we'll 17 | * assume it wants to use the shared library. However, if the client defines 18 | * the symbol OPENMM_USE_STATIC_LIBRARIES we'll suppress the dllimport so 19 | * that the client code can be linked with static libraries. Note that 20 | * the client symbol is not library dependent, while the library symbols 21 | * affect only the OpenMM library, meaning that other libraries can 22 | * be clients of this one. However, we are assuming all-static or all-shared. 23 | */ 24 | 25 | #ifdef _MSC_VER 26 | // We don't want to hear about how sprintf is "unsafe". 27 | #pragma warning(disable : 4996) 28 | // Keep MS VC++ quiet about lack of dll export of private members. 
29 | #pragma warning(disable : 4251) 30 | #if defined(IC_BUILDING_SHARED_LIBRARY) 31 | #define OPENMM_EXPORT_IC __declspec(dllexport) 32 | #elif defined(IC_BUILDING_STATIC_LIBRARY) || defined(IC_USE_STATIC_LIBRARIES) 33 | #define OPENMM_EXPORT_IC 34 | #else 35 | #define OPENMM_EXPORT_IC \ 36 | __declspec(dllimport) // i.e., a client of a shared library 37 | #endif 38 | #else 39 | #define OPENMM_EXPORT_IC // Linux, Mac 40 | #endif 41 | 42 | #endif // OPENMM_WINDOWSEXPORTIC_H_ -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/openmmapi/src/ICDrudeLangevinIntegrator.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "ICDrudeLangevinIntegrator.h" 4 | #include "ICKernels.h" 5 | #include "openmm/Context.h" 6 | #include "openmm/OpenMMException.h" 7 | #include "openmm/internal/ContextImpl.h" 8 | 9 | using namespace ICPlugin; 10 | using namespace OpenMM; 11 | using std::string; 12 | using std::vector; 13 | 14 | ICDrudeLangevinIntegrator::ICDrudeLangevinIntegrator( 15 | double temperature, double frictionCoeff, double drudeTemperature, 16 | double drudeFrictionCoeff, double stepSize, int numCells, double cellZSize) { 17 | setTemperature(temperature); 18 | setFriction(frictionCoeff); 19 | setDrudeTemperature(drudeTemperature); 20 | setDrudeFriction(drudeFrictionCoeff); 21 | setMaxDrudeDistance(0); 22 | setStepSize(stepSize); 23 | setNumCells(numCells); 24 | setCellZSize(cellZSize); 25 | setConstraintTolerance(1e-5); 26 | setRandomNumberSeed(0); 27 | } 28 | 29 | void ICDrudeLangevinIntegrator::initialize(ContextImpl& contextRef) { 30 | if (owner != NULL && &contextRef.getOwner() != owner) 31 | throw OpenMMException("This Integrator is already bound to a context"); 32 | const DrudeForce* force = NULL; 33 | const System& system = contextRef.getSystem(); 34 | for (int i = 0; i < system.getNumForces(); i++) 35 | if (dynamic_cast(&system.getForce(i)) != NULL) { 36 | if (force == NULL) 37 | force = dynamic_cast(&system.getForce(i)); 38 | else 39 | throw OpenMMException( 40 | "The System contains multiple DrudeForces"); 41 | } 42 | if (force == NULL) 43 | throw OpenMMException("The System does not contain a DrudeForce"); 44 | context = &contextRef; 45 | owner = &contextRef.getOwner(); 46 | kernel = context->getPlatform().createKernel( 47 | IntegrateICDrudeLangevinStepKernel::Name(), contextRef); 48 | kernel.getAs().initialize( 49 | contextRef.getSystem(), *this, *force); 50 | } 51 | 52 | vector ICDrudeLangevinIntegrator::getKernelNames() { 53 | std::vector names; 54 | names.push_back(IntegrateICDrudeLangevinStepKernel::Name()); 55 | return names; 56 | } 57 | 58 | double ICDrudeLangevinIntegrator::computeKineticEnergy() { 59 | return kernel.getAs() 60 | .computeKineticEnergy(*context, *this); 61 | } 62 | 63 | void ICDrudeLangevinIntegrator::step(int steps) { 64 | if (context == NULL) 65 | throw OpenMMException("This Integrator is not bound to a context!"); 66 | for (int i = 0; i < steps; ++i) { 67 | context->updateContextState(); 68 | context->calcForcesAndEnergy(true, false); 69 | kernel.getAs().execute(*context, 70 | *this); 71 | } 72 | } -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/openmmapi/src/ICLangevinIntegrator.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "ICKernels.h" 4 | #include "ICLangevinIntegrator.h" 5 | #include "openmm/Context.h" 6 | #include 
"openmm/OpenMMException.h" 7 | #include "openmm/internal/ContextImpl.h" 8 | 9 | using namespace ICPlugin; 10 | using namespace OpenMM; 11 | using std::string; 12 | using std::vector; 13 | 14 | ICLangevinIntegrator::ICLangevinIntegrator(double temperature, 15 | double frictionCoeff, 16 | double stepSize, int numCells, 17 | double cellZSize) { 18 | setTemperature(temperature); 19 | setFriction(frictionCoeff); 20 | setStepSize(stepSize); 21 | setNumCells(numCells); 22 | setCellZSize(cellZSize); 23 | setConstraintTolerance(1e-5); 24 | setRandomNumberSeed(0); 25 | } 26 | 27 | void ICLangevinIntegrator::initialize(ContextImpl& contextRef) { 28 | if (owner != NULL && &contextRef.getOwner() != owner) 29 | throw OpenMMException("This Integrator is already bound to a context"); 30 | context = &contextRef; 31 | owner = &contextRef.getOwner(); 32 | kernel = context->getPlatform().createKernel( 33 | IntegrateICLangevinStepKernel::Name(), contextRef); 34 | kernel.getAs().initialize( 35 | contextRef.getSystem(), *this); 36 | } 37 | 38 | vector ICLangevinIntegrator::getKernelNames() { 39 | std::vector names; 40 | names.push_back(IntegrateICLangevinStepKernel::Name()); 41 | return names; 42 | } 43 | 44 | double ICLangevinIntegrator::computeKineticEnergy() { 45 | return kernel.getAs().computeKineticEnergy( 46 | *context, *this); 47 | } 48 | 49 | void ICLangevinIntegrator::step(int steps) { 50 | if (context == NULL) 51 | throw OpenMMException("This Integrator is not bound to a context!"); 52 | for (int i = 0; i < steps; ++i) { 53 | context->updateContextState(); 54 | context->calcForcesAndEnergy(true, false); 55 | kernel.getAs().execute(*context, *this); 56 | } 57 | } -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/platforms/cuda/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | #----------------------------------------- 2 | # OpenMM Image Charge Plugin CUDA Platform 3 | #----------------------------------------- 4 | 5 | # Collect up information about the version of the OpenMM library we're building 6 | # and make it available to the code so it can be built into the binaries. 7 | 8 | SET(IC_CUDA_LIBRARY_NAME OpenMMICCUDA) 9 | SET(SHARED_TARGET ${IC_CUDA_LIBRARY_NAME}) 10 | INCLUDE_DIRECTORIES(BEFORE "${OPENMM_DIR}/include" "${OPENMM_DIR}/include/openmm" "${OPENMM_DIR}/include/openmm/reference" "${OPENMM_DIR}/include/openmm/cuda") 11 | 12 | # These are all the places to search for header files which are 13 | # to be part of the API. 14 | 15 | SET(API_INCLUDE_DIRS "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_SOURCE_DIR}/include/internal") 16 | 17 | # Locate header files. 18 | 19 | SET(API_INCLUDE_FILES) 20 | FOREACH(dir ${API_INCLUDE_DIRS}) 21 | FILE(GLOB fullpaths ${dir}/*.h) 22 | SET(API_INCLUDE_FILES ${API_INCLUDE_FILES} ${fullpaths}) 23 | ENDFOREACH(dir) 24 | 25 | # Collect source files. 26 | 27 | SET(SOURCE_FILES) # empty 28 | SET(SOURCE_INCLUDE_FILES) 29 | 30 | SET(OPENMM_SOURCE_SUBDIRS . 
../common) 31 | FOREACH(subdir ${OPENMM_SOURCE_SUBDIRS}) 32 | FILE(GLOB_RECURSE src_files ${CMAKE_CURRENT_SOURCE_DIR}/${subdir}/src/*.cpp ${CMAKE_CURRENT_SOURCE_DIR}/${subdir}/${subdir}/src/*.c) 33 | FILE(GLOB incl_files ${CMAKE_CURRENT_SOURCE_DIR}/${subdir}/src/*.h) 34 | SET(SOURCE_FILES ${SOURCE_FILES} ${src_files}) # append 35 | SET(SOURCE_INCLUDE_FILES ${SOURCE_INCLUDE_FILES} ${incl_files}) 36 | ENDFOREACH(subdir) 37 | 38 | INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/src) 39 | INCLUDE_DIRECTORIES(BEFORE ${CMAKE_SOURCE_DIR}/platforms/cuda/include) 40 | INCLUDE_DIRECTORIES(BEFORE ${CMAKE_SOURCE_DIR}/platforms/cuda/src) 41 | INCLUDE_DIRECTORIES(BEFORE ${CMAKE_BINARY_DIR}/platforms/cuda/src) 42 | 43 | # Set variables needed for encoding kernel sources into a C++ class. 44 | 45 | SET(CUDA_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/src) 46 | SET(CUDA_SOURCE_CLASS CudaICKernelSources) 47 | SET(CUDA_KERNELS_CPP ${CMAKE_CURRENT_BINARY_DIR}/src/${CUDA_SOURCE_CLASS}.cpp) 48 | SET(CUDA_KERNELS_H ${CMAKE_CURRENT_BINARY_DIR}/src/${CUDA_SOURCE_CLASS}.h) 49 | SET(SOURCE_FILES ${SOURCE_FILES} ${CUDA_KERNELS_CPP} ${CUDA_KERNELS_H}) 50 | INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_BINARY_DIR}/src) 51 | 52 | # Create the library. 53 | 54 | INCLUDE_DIRECTORIES(${CUDA_TOOLKIT_INCLUDE}) 55 | 56 | FILE(GLOB CUDA_KERNELS ${CUDA_SOURCE_DIR}/kernels/*.cu) 57 | ADD_CUSTOM_COMMAND(OUTPUT ${CUDA_KERNELS_CPP} ${CUDA_KERNELS_H} 58 | COMMAND ${CMAKE_COMMAND} 59 | ARGS -D CUDA_SOURCE_DIR=${CUDA_SOURCE_DIR} -D CUDA_KERNELS_CPP=${CUDA_KERNELS_CPP} -D CUDA_KERNELS_H=${CUDA_KERNELS_H} -D CUDA_SOURCE_CLASS=${CUDA_SOURCE_CLASS} -P ${CMAKE_SOURCE_DIR}/platforms/cuda/EncodeCUDAFiles.cmake 60 | DEPENDS ${CUDA_KERNELS} 61 | ) 62 | SET_SOURCE_FILES_PROPERTIES(${CUDA_KERNELS_CPP} ${CUDA_KERNELS_H} PROPERTIES GENERATED TRUE) 63 | ADD_LIBRARY(${SHARED_TARGET} SHARED ${SOURCE_FILES} ${SOURCE_INCLUDE_FILES} ${API_INCLUDE_FILES}) 64 | 65 | TARGET_LINK_LIBRARIES(${SHARED_TARGET} ${CUDA_LIBRARIES}) 66 | TARGET_LINK_LIBRARIES(${SHARED_TARGET} OpenMM) 67 | TARGET_LINK_LIBRARIES(${SHARED_TARGET} OpenMMCUDA) 68 | TARGET_LINK_LIBRARIES(${SHARED_TARGET} OpenMMDrude) 69 | TARGET_LINK_LIBRARIES(${SHARED_TARGET} ${IC_LIBRARY_NAME}) 70 | SET_TARGET_PROPERTIES(${SHARED_TARGET} PROPERTIES 71 | COMPILE_FLAGS "-DOPENMM_BUILDING_SHARED_LIBRARY ${EXTRA_COMPILE_FLAGS}" 72 | LINK_FLAGS "${EXTRA_COMPILE_FLAGS}") 73 | IF (APPLE) 74 | SET_TARGET_PROPERTIES(${SHARED_TARGET} PROPERTIES LINK_FLAGS "-F/Library/Frameworks -framework CUDA ${EXTRA_COMPILE_FLAGS}") 75 | ENDIF (APPLE) 76 | 77 | INSTALL(TARGETS ${SHARED_TARGET} DESTINATION ${CMAKE_INSTALL_PREFIX}/lib/plugins) 78 | 79 | # Ensure that links to the main CUDA library will be resolved. 80 | IF (APPLE) 81 | SET(CUDA_LIBRARY libOpenMMCUDA.dylib) 82 | INSTALL(CODE "EXECUTE_PROCESS(COMMAND install_name_tool -change ${CUDA_LIBRARY} @loader_path/${CUDA_LIBRARY} ${CMAKE_INSTALL_PREFIX}/lib/plugins/lib${SHARED_TARGET}.dylib)") 83 | ENDIF (APPLE) -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/platforms/cuda/EncodeCUDAFiles.cmake: -------------------------------------------------------------------------------- 1 | FILE(GLOB CUDA_KERNELS ${CUDA_SOURCE_DIR}/kernels/*.cu) 2 | SET(CUDA_FILE_DECLARATIONS) 3 | SET(CUDA_FILE_DEFINITIONS) 4 | CONFIGURE_FILE(${CUDA_SOURCE_DIR}/${CUDA_SOURCE_CLASS}.cpp.in ${CUDA_KERNELS_CPP}) 5 | FOREACH(file ${CUDA_KERNELS}) 6 | 7 | # Load the file contents and process it. 
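# (The processed text is appended to ${CUDA_KERNELS_CPP} as a static
# std::string member of ${CUDA_SOURCE_CLASS}, so the .cu kernels end up
# embedded in the plugin library rather than shipped as separate files.)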
8 | FILE(STRINGS ${file} file_content NEWLINE_CONSUME) 9 | 10 | # Replace all backslashes by double backslashes as they are being put in a C string. 11 | # Be careful not to replace the backslash before a semicolon as that is the CMAKE 12 | # internal escaping of a semicolon to prevent it from acting as a list seperator. 13 | STRING(REGEX REPLACE "\\\\([^;])" "\\\\\\\\\\1" file_content "${file_content}") 14 | 15 | # Escape double quotes as being put in a C string. 16 | STRING(REPLACE "\"" "\\\"" file_content "${file_content}") 17 | 18 | # Split in separate C strings for each line. 19 | STRING(REPLACE "\n" "\\n\"\n\"" file_content "${file_content}") 20 | 21 | # Determine a name for the variable that will contain this file's contents 22 | FILE(RELATIVE_PATH filename ${CUDA_SOURCE_DIR}/kernels ${file}) 23 | STRING(LENGTH ${filename} filename_length) 24 | MATH(EXPR filename_length ${filename_length}-3) 25 | STRING(SUBSTRING ${filename} 0 ${filename_length} variable_name) 26 | 27 | # Record the variable declaration and definition. 28 | SET(CUDA_FILE_DECLARATIONS ${CUDA_FILE_DECLARATIONS}static\ const\ std::string\ ${variable_name};\n) 29 | FILE(APPEND ${CUDA_KERNELS_CPP} const\ string\ ${CUDA_SOURCE_CLASS}::${variable_name}\ =\ \"${file_content}\"\;\n) 30 | 31 | ENDFOREACH(file) 32 | CONFIGURE_FILE(${CUDA_SOURCE_DIR}/${CUDA_SOURCE_CLASS}.h.in ${CUDA_KERNELS_H}) -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/platforms/cuda/include/CudaICKernelFactory.h: -------------------------------------------------------------------------------- 1 | #ifndef OPENMM_CUDAICKERNELFACTORY_H_ 2 | #define OPENMM_CUDAICKERNELFACTORY_H_ 3 | 4 | #include "openmm/KernelFactory.h" 5 | 6 | namespace OpenMM { 7 | 8 | class CudaICKernelFactory : public KernelFactory { 9 | public: 10 | KernelImpl* createKernelImpl(std::string name, const Platform& platform, 11 | ContextImpl& context) const; 12 | }; 13 | 14 | } // namespace OpenMM 15 | 16 | #endif /*OPENMM_CUDAICKERNELFACTORY_H_*/ -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/platforms/cuda/include/CudaICKernels.h: -------------------------------------------------------------------------------- 1 | #ifndef CUDA_IC_KERNELS_H_ 2 | #define CUDA_IC_KERNELS_H_ 3 | 4 | #include "CudaArray.h" 5 | #include "CudaContext.h" 6 | #include "ICKernels.h" 7 | 8 | using namespace ICPlugin; 9 | 10 | namespace OpenMM { 11 | 12 | class CudaIntegrateICLangevinStepKernel : public IntegrateICLangevinStepKernel { 13 | public: 14 | CudaIntegrateICLangevinStepKernel(std::string name, 15 | const Platform& platform, CudaContext& cu) 16 | : IntegrateICLangevinStepKernel(name, platform), 17 | cu(cu), 18 | params(NULL), 19 | invAtomIndex(NULL) {} 20 | ~CudaIntegrateICLangevinStepKernel(); 21 | 22 | /** 23 | * Initialize the kernel, setting up the particle masses. 24 | * 25 | * @param system the System this kernel will be applied to 26 | * @param integrator the ICLangevinIntegrator this kernel will be used for 27 | */ 28 | void initialize(const System& system, 29 | const ICLangevinIntegrator& integrator); 30 | 31 | /** 32 | * Execute the kernel. 33 | * 34 | * @param context the context in which to execute this kernel 35 | * @param integrator the ICLangevinIntegrator this kernel is being used for 36 | */ 37 | void execute(ContextImpl& context, const ICLangevinIntegrator& integrator); 38 | 39 | /** 40 | * Compute the kinetic energy. 
41 | * 42 | * @param context the context in which to execute this kernel 43 | * @param integrator the ICLangevinIntegrator this kernel is being used for 44 | */ 45 | double computeKineticEnergy(ContextImpl& context, 46 | const ICLangevinIntegrator& integrator); 47 | 48 | private: 49 | CudaContext& cu; 50 | double prevTemp, prevFriction, prevStepSize, cellZSize; 51 | CudaArray *params, *invAtomIndex; 52 | CUfunction kernel1, kernel2, kernelImage, kernelReorder; 53 | }; 54 | 55 | class CudaIntegrateICDrudeLangevinStepKernel 56 | : public IntegrateICDrudeLangevinStepKernel { 57 | public: 58 | CudaIntegrateICDrudeLangevinStepKernel(std::string name, 59 | const Platform& platform, 60 | CudaContext& cu) 61 | : IntegrateICDrudeLangevinStepKernel(name, platform), cu(cu) {} 62 | 63 | /** 64 | * Initialize the kernel. 65 | * 66 | * @param system the System this kernel will be applied to 67 | * @param integrator the ICDrudeLangevinIntegrator this kernel will be used 68 | * for 69 | * @param force the DrudeForce to get particle parameters from 70 | */ 71 | void initialize(const System& system, 72 | const ICDrudeLangevinIntegrator& integrator, 73 | const DrudeForce& force); 74 | 75 | /** 76 | * Execute the kernel. 77 | * 78 | * @param context the context in which to execute this kernel 79 | * @param integrator the ICDrudeLangevinIntegrator this kernel is being 80 | * used for 81 | */ 82 | void execute(ContextImpl& context, 83 | const ICDrudeLangevinIntegrator& integrator); 84 | 85 | /** 86 | * Compute the kinetic energy. 87 | * 88 | * @param context the context in which to execute this kernel 89 | * @param integrator the ICDrudeLangevinIntegrator this kernel is being 90 | * used for 91 | */ 92 | double computeKineticEnergy(ContextImpl& context, 93 | const ICDrudeLangevinIntegrator& integrator); 94 | 95 | private: 96 | CudaContext& cu; 97 | double prevStepSize, cellZSize; 98 | CudaArray normalParticles, pairParticles, invAtomIndex; 99 | CUfunction kernel1, kernel2, hardwallKernel, kernelImage, kernelReorder; 100 | }; 101 | 102 | } // namespace OpenMM 103 | 104 | #endif /*CUDA_IC_KERNELS_H_*/ -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/platforms/cuda/src/CudaICKernelFactory.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "CudaICKernelFactory.h" 4 | #include "CudaICKernels.h" 5 | #include "internal/windowsExportIC.h" 6 | #include "openmm/internal/ContextImpl.h" 7 | #include "openmm/OpenMMException.h" 8 | 9 | using namespace OpenMM; 10 | 11 | extern "C" OPENMM_EXPORT_IC void registerPlatforms() { 12 | } 13 | 14 | extern "C" OPENMM_EXPORT_IC void registerKernelFactories() { 15 | try { 16 | Platform& platform = Platform::getPlatformByName("CUDA"); 17 | CudaICKernelFactory* factory = new CudaICKernelFactory(); 18 | platform.registerKernelFactory(IntegrateICLangevinStepKernel::Name(), factory); 19 | platform.registerKernelFactory(IntegrateICDrudeLangevinStepKernel::Name(), factory); 20 | } 21 | catch (std::exception ex) { 22 | // Ignore 23 | } 24 | } 25 | 26 | extern "C" OPENMM_EXPORT_IC void registerCudaICKernelFactories() { 27 | try { 28 | Platform::getPlatformByName("CUDA"); 29 | } 30 | catch (...) 
{ 31 | Platform::registerPlatform(new CudaPlatform()); 32 | } 33 | registerKernelFactories(); 34 | } 35 | 36 | KernelImpl* CudaICKernelFactory::createKernelImpl(std::string name, const Platform& platform, ContextImpl& context) const { 37 | CudaContext& cu = *static_cast(context.getPlatformData())->contexts[0]; 38 | if (name == IntegrateICLangevinStepKernel::Name()) 39 | return new CudaIntegrateICLangevinStepKernel(name, platform, cu); 40 | if (name == IntegrateICDrudeLangevinStepKernel::Name()) 41 | return new CudaIntegrateICDrudeLangevinStepKernel(name, platform, cu); 42 | throw OpenMMException((std::string("Tried to create kernel with illegal kernel name '")+name+"'").c_str()); 43 | } -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/platforms/cuda/src/CudaICKernelSources.cpp.in: -------------------------------------------------------------------------------- 1 | #include "CudaICKernelSources.h" 2 | 3 | using namespace OpenMM; 4 | using namespace std; -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/platforms/cuda/src/CudaICKernelSources.h.in: -------------------------------------------------------------------------------- 1 | #ifndef OPENMM_CUDAICKERNELSOURCES_H_ 2 | #define OPENMM_CUDAICKERNELSOURCES_H_ 3 | 4 | #include 5 | 6 | namespace OpenMM { 7 | 8 | class CudaICKernelSources { 9 | public: 10 | @CUDA_FILE_DECLARATIONS@ 11 | }; 12 | 13 | } // namespace OpenMM 14 | 15 | #endif /*OPENMM_CUDAICKERNELSOURCES_H_*/ -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/platforms/cuda/src/kernels/drudePairForce.cu: -------------------------------------------------------------------------------- 1 | float2 drudeParams = PARAMS[index]; 2 | real3 force1 = make_real3(0); 3 | real3 force2 = make_real3(0); 4 | real3 force3 = make_real3(0); 5 | real3 force4 = make_real3(0); 6 | 7 | // First pair. 8 | 9 | real3 delta = make_real3(pos1.x - pos3.x, pos1.y - pos3.y, pos1.z - pos3.z); 10 | real rInv = RSQRT(dot(delta, delta)); 11 | real r = RECIP(rInv); 12 | real u = drudeParams.x * r; 13 | real screening = 1 - (1 + 0.5f * u) * EXP(-u); 14 | real pairEnergy = drudeParams.y * screening * rInv; 15 | energy += pairEnergy; 16 | real3 f = 17 | delta * (drudeParams.y * rInv * rInv * 18 | (screening * rInv - 0.5f * (1 + u) * EXP(-u) * drudeParams.x)); 19 | force1 += f; 20 | force3 -= f; 21 | 22 | // Second pair. 23 | 24 | delta = make_real3(pos1.x - pos4.x, pos1.y - pos4.y, pos1.z - pos4.z); 25 | rInv = RSQRT(dot(delta, delta)); 26 | r = RECIP(rInv); 27 | u = drudeParams.x * r; 28 | screening = 1 - (1 + 0.5f * u) * EXP(-u); 29 | pairEnergy = -drudeParams.y * screening * rInv; 30 | energy += pairEnergy; 31 | f = delta * (-drudeParams.y * rInv * rInv * 32 | (screening * rInv - 0.5f * (1 + u) * EXP(-u) * drudeParams.x)); 33 | force1 += f; 34 | force4 -= f; 35 | 36 | // Third pair. 37 | 38 | delta = make_real3(pos2.x - pos3.x, pos2.y - pos3.y, pos2.z - pos3.z); 39 | rInv = RSQRT(dot(delta, delta)); 40 | r = RECIP(rInv); 41 | u = drudeParams.x * r; 42 | screening = 1 - (1 + 0.5f * u) * EXP(-u); 43 | pairEnergy = -drudeParams.y * screening * rInv; 44 | energy += pairEnergy; 45 | f = delta * (-drudeParams.y * rInv * rInv * 46 | (screening * rInv - 0.5f * (1 + u) * EXP(-u) * drudeParams.x)); 47 | force2 += f; 48 | force3 -= f; 49 | 50 | // Fourth pair. 
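// Like the pairs above, this is a screened Coulomb interaction: with
// u = drudeParams.x * r, the screening factor is 1 - (1 + u/2) * exp(-u)
// (a Thole-style damping), and the pair energy is drudeParams.y * screening / r.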
51 | 52 | delta = make_real3(pos2.x - pos4.x, pos2.y - pos4.y, pos2.z - pos4.z); 53 | rInv = RSQRT(dot(delta, delta)); 54 | r = RECIP(rInv); 55 | u = drudeParams.x * r; 56 | screening = 1 - (1 + 0.5f * u) * EXP(-u); 57 | pairEnergy = drudeParams.y * screening * rInv; 58 | energy += pairEnergy; 59 | f = delta * (drudeParams.y * rInv * rInv * 60 | (screening * rInv - 0.5f * (1 + u) * EXP(-u) * drudeParams.x)); 61 | force2 += f; 62 | force4 -= f; -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/platforms/cuda/src/kernels/drudeParticleForce.cu: -------------------------------------------------------------------------------- 1 | real3 delta = make_real3(pos1.x - pos2.x, pos1.y - pos2.y, pos1.z - pos2.z); 2 | real r2 = delta.x * delta.x + delta.y * delta.y + delta.z * delta.z; 3 | float4 drudeParams = PARAMS[index]; 4 | float k1 = drudeParams.x; 5 | float k2 = drudeParams.y; 6 | float k3 = drudeParams.z; 7 | 8 | // Compute the isotropic force. 9 | 10 | energy += 0.5f * k3 * r2; 11 | real3 force1 = -delta * k3; 12 | real3 force2 = delta * k3; 13 | real3 force3 = make_real3(0); 14 | real3 force4 = make_real3(0); 15 | real3 force5 = make_real3(0); 16 | 17 | // Compute the first anisotropic force. 18 | 19 | if (k1 != 0) { 20 | real3 dir = make_real3(pos2.x - pos3.x, pos2.y - pos3.y, pos2.z - pos3.z); 21 | real invDist = RSQRT(dot(dir, dir)); 22 | dir *= invDist; 23 | real rprime = dot(dir, delta); 24 | energy += 0.5f * k1 * rprime * rprime; 25 | real3 f1 = dir * (k1 * rprime); 26 | real3 f2 = (delta - dir * rprime) * (k1 * rprime * invDist); 27 | force1 -= f1; 28 | force2 += f1 - f2; 29 | force3 += f2; 30 | } 31 | 32 | // Compute the second anisotropic force. 33 | 34 | if (k2 != 0) { 35 | real3 dir = make_real3(pos4.x - pos5.x, pos4.y - pos5.y, pos4.z - pos5.z); 36 | real invDist = RSQRT(dot(dir, dir)); 37 | dir *= invDist; 38 | real rprime = dot(dir, delta); 39 | energy += 0.5f * k2 * rprime * rprime; 40 | real3 f1 = dir * (k2 * rprime); 41 | real3 f2 = (delta - dir * rprime) * (k2 * rprime * invDist); 42 | force1 -= f1; 43 | force2 += f1; 44 | force4 -= f2; 45 | force5 += f2; 46 | } -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/python/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | set(WRAP_FILE ICPluginWrapper.cpp) 2 | set(MODULE_NAME openmm_ic) 3 | 4 | # Execute SWIG to generate source code for the Python module. 5 | 6 | add_custom_command( 7 | OUTPUT "${WRAP_FILE}" 8 | COMMAND "${SWIG_EXECUTABLE}" 9 | -python -c++ 10 | -o "${WRAP_FILE}" 11 | "-I${OPENMM_DIR}/include" 12 | "${CMAKE_CURRENT_SOURCE_DIR}/icplugin.i" 13 | DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/icplugin.i" 14 | WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}" 15 | ) 16 | 17 | # Compile the Python module. 
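# configure_file() substitutes OPENMM_DIR, ICPLUGIN_HEADER_DIR, and
# ICPLUGIN_LIBRARY_DIR into setup.py, which then builds the SWIG-generated
# wrapper against the OpenMM and plugin libraries and installs the
# openmm_ic module.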
18 | 19 | add_custom_target(PythonInstall DEPENDS "${WRAP_FILE}") 20 | set(ICPLUGIN_HEADER_DIR "${CMAKE_SOURCE_DIR}/openmmapi/include") 21 | set(ICPLUGIN_LIBRARY_DIR "${CMAKE_BINARY_DIR}") 22 | configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py ${CMAKE_CURRENT_BINARY_DIR}/setup.py) 23 | add_custom_command(TARGET PythonInstall 24 | COMMAND "${PYTHON_EXECUTABLE}" setup.py build 25 | COMMAND "${PYTHON_EXECUTABLE}" setup.py install 26 | WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}" 27 | ) -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/python/icplugin.i: -------------------------------------------------------------------------------- 1 | %module openmm_ic 2 | 3 | %include "factory.i" 4 | %import(module="openmm") "swig/OpenMMSwigHeaders.i" 5 | %include "swig/typemaps.i" 6 | 7 | /* 8 | * The following lines are needed to handle std::vector. 9 | * Similar lines may be needed for vectors of vectors or 10 | * for other STL types like maps. 11 | */ 12 | 13 | %include "std_vector.i" 14 | namespace std { 15 | %template(vectord) vector; 16 | %template(vectori) vector; 17 | }; 18 | 19 | %{ 20 | #include "OpenMM.h" 21 | #include "OpenMMAmoeba.h" 22 | #include "OpenMMDrude.h" 23 | #include "OpenMMIC.h" 24 | #include "openmm/RPMDIntegrator.h" 25 | #include "openmm/RPMDMonteCarloBarostat.h" 26 | %} 27 | 28 | %pythoncode %{ 29 | import openmm.unit as unit 30 | %} 31 | 32 | /* 33 | * Add units to function outputs. 34 | */ 35 | %pythonappend ICPlugin::ICLangevinIntegrator::getTemperature() const %{ 36 | val = unit.Quantity(val, unit.kelvin) 37 | %} 38 | 39 | %pythonappend ICPlugin::ICLangevinIntegrator::getFriction() const %{ 40 | val = unit.Quantity(val, 1 / unit.picosecond) 41 | %} 42 | 43 | %pythonappend ICPlugin::ICDrudeLangevinIntegrator::getTemperature() const %{ 44 | val = unit.Quantity(val, unit.kelvin) 45 | %} 46 | 47 | %pythonappend ICPlugin::ICDrudeLangevinIntegrator::getFriction() const %{ 48 | val = unit.Quantity(val, 1 / unit.picosecond) 49 | %} 50 | 51 | %pythonappend ICPlugin::ICDrudeLangevinIntegrator::getDrudeTemperature() const %{ 52 | val = unit.Quantity(val, unit.kelvin) 53 | %} 54 | 55 | %pythonappend ICPlugin::ICDrudeLangevinIntegrator::getDrudeFriction() const %{ 56 | val = unit.Quantity(val, 1 / unit.picosecond) 57 | %} 58 | 59 | %pythonappend ICPlugin::ICDrudeLangevinIntegrator::getMaxDrudeDistance() const %{ 60 | val = unit.Quantity(val, unit.nanometer) 61 | %} 62 | 63 | /* 64 | * Convert C++ exceptions to Python exceptions. 
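 * Any std::exception raised inside the wrapped C++ calls is caught and
 * re-raised as a Python Exception carrying the original error message.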
65 | */ 66 | %exception { 67 | try { 68 | $action 69 | } catch (std::exception &e) { 70 | PyErr_SetString(PyExc_Exception, const_cast(e.what())); 71 | return NULL; 72 | } 73 | } 74 | 75 | namespace ICPlugin { 76 | 77 | class ICLangevinIntegrator : public OpenMM::Integrator { 78 | public: 79 | ICLangevinIntegrator(double temperature, double frictionCoeff, 80 | double stepSize, int numCells = 2, 81 | double cellZSize = -1); 82 | 83 | double getTemperature() const; 84 | void setTemperature(double temp); 85 | double getFriction() const; 86 | void setFriction(double coeff); 87 | int getRandomNumberSeed() const; 88 | void setRandomNumberSeed(int seed); 89 | virtual void step(int steps); 90 | int getNumCells() const; 91 | void setNumCells(int cells); 92 | double getCellZSize() const; 93 | void setCellZSize(double cellZSize); 94 | }; 95 | 96 | class ICDrudeLangevinIntegrator : public OpenMM::Integrator { 97 | public: 98 | ICDrudeLangevinIntegrator(double temperature, double friction, 99 | double drudeTemperature, double drudeFriction, 100 | double stepSize, int numCells = 2, 101 | double cellZSize = -1); 102 | 103 | double getTemperature() const; 104 | void setTemperature(double temp); 105 | double getFriction() const; 106 | void setFriction(double coeff); 107 | double getDrudeTemperature() const; 108 | void setDrudeTemperature(double temp); 109 | double getDrudeFriction() const; 110 | void setDrudeFriction(double coeff); 111 | double getMaxDrudeDistance() const; 112 | void setMaxDrudeDistance(double distance); 113 | virtual void step(int steps); 114 | int getNumCells() const; 115 | void setNumCells(int cells); 116 | double getCellZSize() const; 117 | void setCellZSize(double cellZSize); 118 | }; 119 | 120 | } // namespace ICPlugin -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/python/setup.py: -------------------------------------------------------------------------------- 1 | from distutils.core import Extension, setup 2 | import os 3 | import platform 4 | 5 | openmm_dir = '@OPENMM_DIR@' 6 | ic_plugin_header_dir = '@ICPLUGIN_HEADER_DIR@' 7 | ic_plugin_library_dir = '@ICPLUGIN_LIBRARY_DIR@' 8 | 9 | # setup extra compile and link arguments on Mac 10 | extra_compile_args = [] 11 | extra_link_args = [] 12 | 13 | if platform.system() == 'Darwin': 14 | extra_compile_args += ['-stdlib=libc++', '-mmacosx-version-min=10.7'] 15 | extra_link_args += ['-stdlib=libc++', '-mmacosx-version-min=10.7', '-Wl', 16 | '-rpath', os.path.join(openmm_dir, 'lib')] 17 | 18 | extension = Extension(name='_openmm_ic', 19 | sources=['ICPluginWrapper.cpp'], 20 | libraries=['OpenMM', 'OpenMMDrude', 'OpenMMIC'], 21 | include_dirs=[os.path.join(openmm_dir, 'include'), 22 | ic_plugin_header_dir], 23 | library_dirs=[os.path.join(openmm_dir, 'lib'), 24 | ic_plugin_library_dir], 25 | extra_compile_args=extra_compile_args, 26 | extra_link_args=extra_link_args) 27 | 28 | setup(name='openmm-ic', version='1.0', py_modules=['openmm_ic'], 29 | ext_modules=[extension]) -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/serialization/include/ICDrudeLangevinIntegratorProxy.h: -------------------------------------------------------------------------------- 1 | #ifndef OPENMM_IC_DRUDE_LANGEVIN_INTEGRATOR_PROXY_H_ 2 | #define OPENMM_IC_DRUDE_LANGEVIN_INTEGRATOR_PROXY_H_ 3 | 4 | #include "internal/windowsExportIC.h" 5 | #include "openmm/serialization/SerializationProxy.h" 6 | 7 | namespace OpenMM { 8 | 9 | class OPENMM_EXPORT_IC 
ICDrudeLangevinIntegratorProxy 10 | : public SerializationProxy { 11 | public: 12 | ICDrudeLangevinIntegratorProxy(); 13 | void serialize(const void* object, SerializationNode& node) const; 14 | void* deserialize(const SerializationNode& node) const; 15 | }; 16 | 17 | } // namespace OpenMM 18 | 19 | #endif /*OPENMM_IC_DRUDE_LANGEVIN_INTEGRATOR_PROXY_H_*/ -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/serialization/include/ICLangevinIntegratorProxy.h: -------------------------------------------------------------------------------- 1 | #ifndef OPENMM_IC_LANGEVIN_INTEGRATOR_PROXY_H_ 2 | #define OPENMM_IC_LANGEVIN_INTEGRATOR_PROXY_H_ 3 | 4 | #include "internal/windowsExportIC.h" 5 | #include "openmm/serialization/SerializationProxy.h" 6 | 7 | namespace OpenMM { 8 | 9 | class OPENMM_EXPORT_IC ICLangevinIntegratorProxy : public SerializationProxy { 10 | public: 11 | ICLangevinIntegratorProxy(); 12 | void serialize(const void* object, SerializationNode& node) const; 13 | void* deserialize(const SerializationNode& node) const; 14 | }; 15 | 16 | } // namespace OpenMM 17 | 18 | #endif /*OPENMM_IC_LANGEVIN_INTEGRATOR_PROXY_H_*/ -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/serialization/src/ICDrudeLangevinIntegratorProxy.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | #include "ICDrudeLangevinIntegrator.h" 4 | #include "ICDrudeLangevinIntegratorProxy.h" 5 | #include "openmm/serialization/SerializationNode.h" 6 | 7 | using namespace std; 8 | using namespace ICPlugin; 9 | using namespace OpenMM; 10 | 11 | ICDrudeLangevinIntegratorProxy::ICDrudeLangevinIntegratorProxy() 12 | : SerializationProxy("ICDrudeLangevinIntegrator") {} 13 | 14 | void ICDrudeLangevinIntegratorProxy::serialize(const void* object, 15 | SerializationNode& node) const { 16 | node.setIntProperty("version", 1); 17 | const ICDrudeLangevinIntegrator& integrator = 18 | *reinterpret_cast(object); 19 | node.setDoubleProperty("stepSize", integrator.getStepSize()); 20 | node.setDoubleProperty("constraintTolerance", 21 | integrator.getConstraintTolerance()); 22 | node.setDoubleProperty("temperature", integrator.getTemperature()); 23 | node.setDoubleProperty("friction", integrator.getFriction()); 24 | node.setDoubleProperty("drudeTemperature", 25 | integrator.getDrudeTemperature()); 26 | node.setDoubleProperty("drudeFriction", integrator.getDrudeFriction()); 27 | node.setIntProperty("randomSeed", integrator.getRandomNumberSeed()); 28 | } 29 | 30 | void* ICDrudeLangevinIntegratorProxy::deserialize( 31 | const SerializationNode& node) const { 32 | if (node.getIntProperty("version") != 1) 33 | throw OpenMMException("Unsupported version number"); 34 | ICDrudeLangevinIntegrator* integrator = new ICDrudeLangevinIntegrator( 35 | node.getDoubleProperty("temperature"), 36 | node.getDoubleProperty("friction"), 37 | node.getDoubleProperty("drudeTemperature"), 38 | node.getDoubleProperty("drudeFriction"), 39 | node.getDoubleProperty("stepSize")); 40 | integrator->setConstraintTolerance( 41 | node.getDoubleProperty("constraintTolerance")); 42 | integrator->setRandomNumberSeed(node.getIntProperty("randomSeed")); 43 | return integrator; 44 | } -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/serialization/src/ICLangevinIntegratorProxy.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | 3 
| #include "ICLangevinIntegrator.h" 4 | #include "ICLangevinIntegratorProxy.h" 5 | #include "openmm/serialization/SerializationNode.h" 6 | 7 | using namespace std; 8 | using namespace ICPlugin; 9 | using namespace OpenMM; 10 | 11 | ICLangevinIntegratorProxy::ICLangevinIntegratorProxy() 12 | : SerializationProxy("ICLangevinIntegrator") {} 13 | 14 | void ICLangevinIntegratorProxy::serialize(const void* object, 15 | SerializationNode& node) const { 16 | node.setIntProperty("version", 1); 17 | const ICLangevinIntegrator& integrator = 18 | *reinterpret_cast(object); 19 | node.setDoubleProperty("stepSize", integrator.getStepSize()); 20 | node.setDoubleProperty("constraintTolerance", 21 | integrator.getConstraintTolerance()); 22 | node.setDoubleProperty("temperature", integrator.getTemperature()); 23 | node.setDoubleProperty("friction", integrator.getFriction()); 24 | node.setIntProperty("randomSeed", integrator.getRandomNumberSeed()); 25 | } 26 | 27 | void* ICLangevinIntegratorProxy::deserialize( 28 | const SerializationNode& node) const { 29 | if (node.getIntProperty("version") != 1) 30 | throw OpenMMException("Unsupported version number"); 31 | ICLangevinIntegrator* integrator = new ICLangevinIntegrator( 32 | node.getDoubleProperty("temperature"), 33 | node.getDoubleProperty("friction"), node.getDoubleProperty("stepSize")); 34 | integrator->setConstraintTolerance( 35 | node.getDoubleProperty("constraintTolerance")); 36 | integrator->setRandomNumberSeed(node.getIntProperty("randomSeed")); 37 | return integrator; 38 | } -------------------------------------------------------------------------------- /lib/openmm-ic-plugin/serialization/src/ICSerializationProxyRegistration.cpp: -------------------------------------------------------------------------------- 1 | #ifdef WIN32 2 | #include 3 | 4 | #include 5 | #else 6 | #include 7 | #include 8 | 9 | #include 10 | #endif 11 | 12 | #include "ICDrudeLangevinIntegrator.h" 13 | #include "ICDrudeLangevinIntegratorProxy.h" 14 | #include "ICLangevinIntegrator.h" 15 | #include "ICLangevinIntegratorProxy.h" 16 | #include "openmm/OpenMMException.h" 17 | #include "openmm/serialization/SerializationProxy.h" 18 | 19 | #if defined(WIN32) 20 | #include 21 | extern "C" void registerICSerializationProxies(); 22 | BOOL WINAPI DllMain(HANDLE hModule, DWORD ul_reason_for_call, 23 | LPVOID lpReserved) { 24 | if (ul_reason_for_call == DLL_PROCESS_ATTACH) 25 | registerICSerializationProxies(); 26 | return TRUE; 27 | } 28 | #else 29 | extern "C" void __attribute__((constructor)) registerICSerializationProxies(); 30 | #endif 31 | 32 | using namespace OpenMM; 33 | 34 | extern "C" void registerICSerializationProxies() { 35 | SerializationProxy::registerProxy(typeid(ICPlugin::ICLangevinIntegrator), 36 | new ICLangevinIntegratorProxy()); 37 | SerializationProxy::registerProxy( 38 | typeid(ICPlugin::ICDrudeLangevinIntegrator), 39 | new ICDrudeLangevinIntegratorProxy()); 40 | } -------------------------------------------------------------------------------- /paper/paper.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: 'MDCraft: A Python assistant for performing and analyzing molecular dynamics simulations of soft matter systems' 3 | tags: 4 | - Python 5 | - molecular dynamics 6 | - trajectory analysis 7 | - soft matter 8 | authors: 9 | - name: Benjamin B. Ye 10 | orcid: 0000-0003-0253-6311 11 | corresponding: true 12 | affiliation: 1 13 | - name: Pierre J. 
Walker 14 | orcid: 0000-0001-8628-6561 15 | affiliation: "1, 2" 16 | - name: Zhen-Gang Wang 17 | orcid: 0000-0002-3361-6114 18 | affiliation: 1 19 | affiliations: 20 | - name: Division of Chemistry and Chemical Engineering, California Institute of Technology, Pasadena, California 91125, United States 21 | index: 1 22 | - name: Department of Chemical Engineering, Imperial College, London SW7 2AZ, United Kingdom 23 | index: 2 24 | date: June __, 2024 25 | bibliography: paper.bib 26 | --- 27 | 28 | # Summary 29 | 30 | MDCraft is a comprehensive Python package designed to enhance research workflows involving molecular dynamics (MD) simulations. It streamlines the entire process—from setting up and executing simulations to analyzing trajectories using sophisticated algorithms and visualizing results—making computational chemistry more accessible to a broader audience. At its core, MDCraft comprises three principal components. 31 | 32 | First, the `openmm` module provides user-friendly tools to initialize, optimize, and run simulations, enabling the exploration of various large soft matter systems across different timescales. This module extends the functionality of the OpenMM [@eastman_openmm_2017] simulation package by introducing custom force fields, such as the efficient and intuitive Gaussian core model with smeared electrostatics (GCMe) [@ye_gcme_2024]; incorporating advanced techniques like the slab correction [@yeh_ewald_1999;@ballenegger_simulations_2009] and the method of image charges [@hautman_molecular_1989] for charged systems with slab geometries; facilitating coarse-grained MD simulations by scaling physical values by the fundamental quantities (mass $m$, length $d$, energy $\epsilon$, and Boltzmann constant $k_\mathrm{B}T$); and offering feature-rich readers and writers for topologies and trajectories stored in memory-efficient formats (such as NetCDF). 33 | 34 | Second, the `algorithm` and `analysis` modules offer optimized serial and multithreaded algorithms and analysis classes for evaluating structural, thermodynamic, and dynamic properties using thermodynamic state and trajectory data. The analysis classes provide properties including, but not limited to, static and dynamic structure factors [@faberTheoryElectricalProperties1965;@ashcroftStructureBinaryLiquid1967;@rogMoldynProgramPackage2003], density and potential profiles, end-to-end vector autocorrelation functions for polymers, and Onsager transport coefficients [@rubinsteinPolymerPhysics2003;@fong_onsager_2020]. The algorithms provide the underlying tools used to perform analysis and are intended to be easily extensible by more-advanced users. These modules are not limited to OpenMM and can also be used with simulation run in other packages, such as LAMMPS [@thompson_lammps_2022] and GROMACS [@abrahamGROMACSHighPerformance2015]. 35 | 36 | Finally, the `fit` and `plot` modules simplify the post-processing and visualization of data, aiding in the creation of figures suitable for scientific publications. These modules consist of models for curve fitting and helper functions that interface seamlessly with the commonly used SciPy [@virtanen_scipy_2020] and Matplotlib [@hunter_matplotlib_2007] libraries. 37 | 38 | Together, these modules provide both novice and experienced MD simulation users with a comprehensive set of tools necessary to conduct computer experiments ranging from simple to complex, all within a single, succinct package. 
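As a minimal, self-contained sketch of this workflow, the snippet below uses two helpers from the `algorithm` module to lay out a near-cubic lattice of particles for a new system; the resulting coordinates would typically be handed to the `openmm` module to build and run the simulation and to the `analysis` classes afterwards. The example is illustrative and relies only on the two documented utility functions it imports.

```python
import numpy as np

from mdcraft.algorithm.utility import get_closest_factors, replicate

# Arrange 1,000 unit cells as evenly as possible along x, y, and z.
n_cells = get_closest_factors(1_000, 3)   # array([10, 10, 10])

# Replicate a one-particle unit cell of side 0.5 (in simulation units) to
# obtain the initial coordinates of all 1,000 particles.
cell_dims = np.array((0.5, 0.5, 0.5))
cell_pos = np.zeros((1, 3))
positions = replicate(cell_dims, cell_pos, n_cells)   # shape (1000, 3)
```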
39 | 40 | # Statement of need 41 | 42 | Although established MD analysis packages such as MDAnalysis [@michaudagrawal_mdanalysis_2011], MDTraj [@mcgibbon_mdtraj_2015], freud [@freud2020] and pytraj [@roePTRAJCPPTRAJSoftware2013] have been around for a considerable time, they primarily focus on the post-simulation analysis. In contrast, MDCraft not only improves upon some of the analysis tools provided, but is designed to also provide comprehensive support throughout the entire simulation process, from initialization to post-processing. 43 | 44 | MDCraft is tightly integrated with OpenMM, something that is unique amongst MD analysis packages. OpenMM is a relatively new simulation toolkit that has seen a surge in popularity in recent years due to its class-leading performance and flexibility through support for custom intermolecular forces and integrators for equations of motion [@eastman_openmm_2017]. Due to its age and design philosophy, OpenMM offers comparatively fewer choices of pair potentials and external forces, and no built-in analysis support. MDCraft fills this gap in two ways. First, the `openmm` module leverages the modularity of OpenMM to provide a suite of custom force fields, problem-solving tools, trajectory readers and writers, and utility functions for unit reduction, topology transformations, and performance optimizations that are not typically available in other simulation packages. Of special significance is the support for GCMe which, as demonstrated in a recent article [@ye_gcme_2024], provides significant acceleration compared to other force fields while also remaining physically meaningful. Then, the classes in the `analysis` module enable computing common structural, thermodynamic, and dynamic properties using the topology, trajectory, and state data generated by OpenMM (or other simulation packages). 45 | 46 | The `analysis` module also stands out due to the flexibility it affords its end users, in contrast to contemporary MD analysis packages. General users have substantial control over what aspects of the properties to calculate and which method to employ through a plethora of well-documented built-in options in each analysis class, without having to be concerned about the underlying implementations. More advanced users, on the other hand, have the option to work directly with the algorithms in the `algorithms` module for further customization. These analysis functions and classes have proven indispensable in several recent publications [@glisman_multivalent_2024;@mantha_adsorption_2024;@lee_molecular_2024]. 47 | 48 | The application of MDCraft extends across various domains within computational chemistry and materials science. Researchers can utilize it to study the low-level mechanisms involved in supercapacitors, polymer gels, drug delivery systems, and nanomaterial synthesis, thus highlighting its versatility and broad applicability in cutting-edge scientific research. 49 | 50 | # Acknowledgements 51 | 52 | We acknowledge contributions from Alec Glisman and Dorian Bruch in the development of this package and financial support from Hong Kong Quantum AI Lab, AIR\@InnoHK of the Hong Kong Government. 
53 | 54 | # References 55 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | build-backend = "setuptools.build_meta" 3 | requires = ["setuptools"] 4 | 5 | [project] 6 | name = "mdcraft" 7 | version = "1.3.2" 8 | authors = [ 9 | { name = "Benjamin B. Ye", email = "bye@caltech.edu" }, 10 | { name = "Pierre J. Walker", email = "pjwalker@caltech.edu" } 11 | ] 12 | classifiers = [ 13 | "Intended Audience :: Science/Research", 14 | "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)", 15 | "Operating System :: OS Independent", 16 | "Programming Language :: Python", 17 | "Programming Language :: Python :: 3" 18 | ] 19 | dependencies = [ 20 | "matplotlib", 21 | "mdanalysis", 22 | "netCDF4", 23 | "numba", 24 | "numpy", 25 | "pandas", 26 | "pint", 27 | "psutil", 28 | "scipy", 29 | "sympy" 30 | ] 31 | description = "" 32 | license = { file = "LICENSE" } 33 | readme = "README.md" 34 | requires-python = ">=3.9" 35 | 36 | [project.urls] 37 | "Homepage" = "https://github.com/bbye98/mdcraft" 38 | "Bug Tracker" = "https://github.com/bbye98/mdcraft/issues" 39 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | filterwarnings = 3 | ignore:::MDAnalysis -------------------------------------------------------------------------------- /recipe/meta.yaml: -------------------------------------------------------------------------------- 1 | {% set name = "mdcraft" %} 2 | {% set version = "1.2.0" %} 3 | 4 | package: 5 | name: {{ name | lower }} 6 | version: {{ version }} 7 | 8 | source: 9 | path: .. 10 | 11 | build: 12 | noarch: python 13 | number: 0 14 | script: {{ PYTHON }} -m pip install . 
-vv --no-deps --no-build-isolation 15 | 16 | requirements: 17 | host: 18 | - python >=3.9 19 | - pip 20 | run: 21 | - python >=3.9 22 | - matplotlib 23 | - mdanalysis >=2.2 24 | - netCDF4 25 | - numba 26 | - numpy 27 | - pandas 28 | - pint 29 | - scipy 30 | - sympy 31 | 32 | test: 33 | imports: 34 | - mdcraft 35 | commands: 36 | - pip check 37 | requires: 38 | - pip 39 | 40 | about: 41 | home: https://github.com/bbye98/mdcraft 42 | license: GPL-3.0 43 | license_file: LICENSE 44 | summary: A Python assistant for performing and analyzing molecular dynamics simulations of soft matter systems 45 | doc_url: https://mdcraft.readthedocs.io/ 46 | dev_url: https://github.com/bbye98/mdcraft 47 | 48 | extra: 49 | recipe-maintainers: 50 | - bbye98 -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # PRECOMPILED CORE DEPENDENCIES 2 | matplotlib 3 | mdanalysis >= 2.2.0 4 | netcdf4 5 | numba 6 | numpy 7 | pandas 8 | pint 9 | psutil 10 | scipy 11 | sympy 12 | 13 | # SELF-COMPILED DEPENDENCIES 14 | # constvplugin or openmm-ic-plugin 15 | 16 | # OPTIONAL DEPENDENCIES 17 | dask 18 | joblib 19 | openmm 20 | 21 | # DOCUMENTATION DEPENDENCIES 22 | furo 23 | myst-nb 24 | numpydoc 25 | sphinx 26 | sphinx-copybutton 27 | sphinx-design 28 | 29 | # TEST DEPENDENCIES 30 | ase 31 | coverage 32 | dynasor >= 2 33 | mdanalysistests 34 | pytest 35 | ruff 36 | tidynamics -------------------------------------------------------------------------------- /requirements_minimal.txt: -------------------------------------------------------------------------------- 1 | matplotlib 2 | mdanalysis >= 2.2.0 3 | netcdf4 4 | numba 5 | numpy 6 | pandas 7 | pint 8 | scipy 9 | sympy -------------------------------------------------------------------------------- /src/mdcraft/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | MDCraft 3 | ======= 4 | """ 5 | 6 | from importlib.util import find_spec 7 | 8 | from pint import Quantity, UnitRegistry 9 | 10 | VERSION = "1.2.0" 11 | FOUND_OPENMM = find_spec("openmm") is not None 12 | 13 | Q_ = Quantity 14 | ureg = UnitRegistry(auto_reduce_dimensions=True) 15 | 16 | from . import algorithm, analysis, fit, lammps, plot # noqa: E402 17 | 18 | __all__ = ["FOUND_OPENMM", "VERSION", "algorithm", "analysis", "fit", "lammps", "plot"] 19 | if FOUND_OPENMM: 20 | __all__.append("openmm") 21 | -------------------------------------------------------------------------------- /src/mdcraft/algorithm/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Algorithms 3 | ========== 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | This module is a collection of algorithms used in other MDCraft 7 | (sub)modules. 8 | """ 9 | 10 | from . import accelerated, correlation, molecule, topology, unit, utility 11 | 12 | __all__ = ["accelerated", "correlation", "molecule", "topology", "unit", "utility"] 13 | -------------------------------------------------------------------------------- /src/mdcraft/algorithm/accelerated.py: -------------------------------------------------------------------------------- 1 | """ 2 | Accelerated algorithms 3 | ====================== 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | This module contains miscellaneous Numba-accelerated common algorithms. 
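
For example, a fixed-width histogram can be computed entirely within
Numba-compiled code using::

    bin_edges = numba_histogram_bin_edges(data, n_bins)
    histogram = numba_histogram(data, n_bins, bin_edges)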
7 | """ 8 | 9 | import numba 10 | import numpy as np 11 | 12 | 13 | @numba.njit(fastmath=True) 14 | def numba_histogram_bin_edges( 15 | array: np.ndarray[float], n_bins: int 16 | ) -> np.ndarray[float]: 17 | r""" 18 | Serial Numba-accelerated function to compute the uniform histogram 19 | bin edges for a one-dimensional NumPy array :math:`\mathbf{a}` 20 | and a specified number of bins :math:`N_\mathrm{bins}`. 21 | 22 | Parameters 23 | ---------- 24 | array : `np.ndarray` 25 | One-dimensional array :math:`\mathbf{a}`. 26 | 27 | n_bins : `int` 28 | Number of bins :math:`N_\mathrm{bins}`. 29 | 30 | Returns 31 | ------- 32 | bin_edges : `np.ndarray` 33 | Uniform histogram bin edges for the array :math:`\mathbf{a}`. 34 | """ 35 | 36 | min_, max_ = array.min(), array.max() 37 | n_edges = n_bins + 1 38 | bin_edges = np.empty(n_edges) 39 | delta = (max_ - min_) / n_bins 40 | for i in range(n_edges): 41 | bin_edges[i] = min_ + i * delta 42 | bin_edges[-1] = max_ 43 | return bin_edges 44 | 45 | 46 | @numba.njit(fastmath=True) 47 | def numba_histogram( 48 | array: np.ndarray[float], n_bins: int, bin_edges: np.ndarray[float] 49 | ) -> np.ndarray[int]: 50 | r""" 51 | Serial Numba-accelerated function to compute the histogram of a 52 | one-dimensional NumPy array :math:`\mathbf{a}` using predetermined 53 | bin edges for :math:`N_\mathrm{bins}` bins. 54 | 55 | Parameters 56 | ---------- 57 | array : `np.ndarray` 58 | One-dimensional array :math:`\mathbf{a}`. 59 | 60 | n_bins : `int` 61 | Number of bins :math:`N_\mathrm{bins}`. 62 | 63 | bin_edges : `np.ndarray` 64 | Bin edges. 65 | 66 | Returns 67 | ------- 68 | histogram_ : `np.ndarray` 69 | Histogram of the array :math:`\mathbf{a}`. 70 | """ 71 | 72 | min_, max_ = bin_edges[0], bin_edges[-1] 73 | histogram_ = np.zeros(n_bins, dtype=np.intp) 74 | for x in array: 75 | if x == max_: 76 | bin_ = n_bins - 1 77 | else: 78 | bin_ = int(n_bins * (x - min_) / (max_ - min_)) 79 | if 0 <= bin_ < n_bins: 80 | histogram_[bin_] += 1 81 | return histogram_ 82 | 83 | 84 | @numba.njit(fastmath=True) 85 | def numba_dot(a: np.ndarray[float], b: np.ndarray[float]) -> float: 86 | r""" 87 | Serial Numba-accelerated dot product between two one-dimensional 88 | NumPy arrays :math:`\mathbf{a}` and :math:`\mathbf{b}`, each with 89 | shape :math:`(3,)`. 90 | 91 | .. math:: 92 | 93 | \mathbf{a}\cdot\mathbf{b}=a_1b_1+a_2b_2+a_3b_3 94 | 95 | Parameters 96 | ---------- 97 | a : `np.ndarray` 98 | First vector :math:`\mathbf{a}`. 99 | 100 | **Shape**: :math:`(3,)`. 101 | 102 | b : `np.ndarray` 103 | Second vector :math:`\mathbf{b}`. 104 | 105 | **Shape**: :math:`(3,)`. 106 | 107 | Returns 108 | ------- 109 | ab : `float` 110 | Dot product :math:`\mathbf{a}\cdot\mathbf{b}`. 111 | """ 112 | 113 | return a[0] * b[0] + a[1] * b[1] + a[2] * b[2] 114 | 115 | 116 | @numba.njit(fastmath=True) 117 | def numba_inner(qs: np.ndarray[float], rs: np.ndarray[float]) -> np.ndarray[float]: 118 | r""" 119 | Serial Numba-accelerated inner product between all possible 120 | combinations of multiple one-dimensional NumPy arrays 121 | :math:`\mathbf{q}` and :math:`\mathbf{r}`, each with shape 122 | :math:`(3,)`. 123 | 124 | .. math:: 125 | 126 | \mathbf{q}_i\cdot\mathbf{r}_j 127 | =q_{i1}r_{j1}+q_{i2}r_{j2}+q_{i3}r_{j3} 128 | 129 | Parameters 130 | ---------- 131 | qs : `np.ndarray` 132 | Multiple vectors :math:`\mathbf{q}`. 133 | 134 | **Shape**: :math:`(N_q,\,3)`. 135 | 136 | rs : `np.ndarray` 137 | Multiple vectors :math:`\mathbf{r}`. 138 | 139 | **Shape**: :math:`(N_r,\,3)`. 
140 | 141 | Returns 142 | ------- 143 | s : `np.ndarray` 144 | Inner products of the vectors, 145 | :math:`\mathbf{q}_i\cdot\mathbf{r}_j`. 146 | 147 | **Shape**: :math:`(N_q,\,N_r)`. 148 | """ 149 | 150 | s = np.empty((qs.shape[0], rs.shape[0])) 151 | for i in range(qs.shape[0]): 152 | for j in range(rs.shape[0]): 153 | s[i, j] = numba_dot(qs[i], rs[j]) 154 | return s 155 | 156 | 157 | @numba.njit(fastmath=True, parallel=True) 158 | def numba_inner_parallel( 159 | qs: np.ndarray[float], rs: np.ndarray[float] 160 | ) -> np.ndarray[float]: 161 | r""" 162 | Parallel Numba-accelerated inner product between all possible 163 | combinations of multiple one-dimensional NumPy arrays 164 | :math:`\mathbf{q}` and :math:`\mathbf{r}`, each with shape 165 | :math:`(3,)`. 166 | 167 | .. math:: 168 | 169 | \mathbf{q}_i\cdot\mathbf{r}_j 170 | =q_{i1}r_{j1}+q_{i2}r_{j2}+q_{i3}r_{j3} 171 | 172 | Parameters 173 | ---------- 174 | qs : `np.ndarray` 175 | Multiple vectors :math:`\mathbf{q}`. 176 | 177 | **Shape**: :math:`(N_q,\,3)`. 178 | 179 | rs : `np.ndarray` 180 | Multiple vectors :math:`\mathbf{r}`. 181 | 182 | **Shape**: :math:`(N_r,\,3)`. 183 | 184 | Returns 185 | ------- 186 | s : `np.ndarray` 187 | Inner products of the vectors, 188 | :math:`\mathbf{q}_i\cdot\mathbf{r}_j`. 189 | 190 | **Shape**: :math:`(N_q,\,N_r)`. 191 | """ 192 | 193 | s = np.empty((qs.shape[0], rs.shape[0])) 194 | for i in numba.prange(qs.shape[0]): 195 | for j in range(rs.shape[0]): 196 | s[i, j] = numba_dot(qs[i], rs[j]) 197 | return s 198 | -------------------------------------------------------------------------------- /src/mdcraft/algorithm/utility.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utility algorithms 3 | ================== 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | This module contains logical and mathematical utility functions used by 7 | other MDCraft modules. 8 | """ 9 | 10 | from typing import Any 11 | 12 | import numpy as np 13 | import sympy 14 | 15 | 16 | def get_closest_factors( 17 | value: int, n_factors: int, reverse: bool = False 18 | ) -> np.ndarray[int]: 19 | """ 20 | Finds the :math:`n` closest factors for a given number :math:`N`, 21 | sorted in ascending order. 22 | 23 | Parameters 24 | ---------- 25 | value : `int` 26 | Number :math:`N` to be factorized. 27 | 28 | n_factors : `int` 29 | Number of factors :math:`n` to return. 30 | 31 | reverse : `bool`, optional, default: :code:`False` 32 | Specifies whether to sort in descending order. 33 | 34 | Returns 35 | ------- 36 | factors : `np.ndarray` 37 | :math:`n` closest factors for `N`. 38 | 39 | **Shape**: :math:`(n,)`. 
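A brief illustrative sketch of `get_closest_factors`; the inputs are arbitrary:

    from mdcraft.algorithm.utility import get_closest_factors

    print(get_closest_factors(12, 2))                 # expected: [3 4]
    print(get_closest_factors(8, 3))                  # perfect cube, expected: [2 2 2]
    print(get_closest_factors(12, 2, reverse=True))   # expected: [4 3]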
40 | """ 41 | 42 | # Take the n-th root of N 43 | rt = value ** (1 / n_factors) 44 | rt_int = int(np.round(rt)) 45 | if np.isclose(rt, rt_int): 46 | return rt_int * np.ones(n_factors, dtype=int) 47 | 48 | # Get all factors of N 49 | _factors = np.fromiter( 50 | ( 51 | factor 52 | for factor, power in sympy.ntheory.factorint(value).items() 53 | for _ in range(power) 54 | ), 55 | dtype=int, 56 | ) 57 | 58 | # Find n closest factors 59 | i = 0 60 | factors = np.ones(n_factors, dtype=int) 61 | for j, f in enumerate(_factors[::-1]): 62 | while True: 63 | if i < n_factors: 64 | m = factors[i] * f 65 | if m <= rt_int or j < n_factors and factors[i] == 1: 66 | factors[i] = m 67 | break 68 | i += 1 69 | else: 70 | factors[np.argmin(factors)] *= f 71 | break 72 | 73 | if reverse: 74 | return np.sort(factors)[::-1] 75 | return np.sort(factors) 76 | 77 | 78 | def replicate( 79 | cell_dims: np.ndarray[float], cell_pos: np.ndarray[float], n_cells: np.ndarray[int] 80 | ) -> np.ndarray[float]: 81 | r""" 82 | Replicates points in an unit cell along the :math:`x`-, :math:`y`-, 83 | and :math:`z`-directions. 84 | 85 | Parameters 86 | ---------- 87 | cell_dims : `numpy.ndarray` 88 | Dimensions of the unit cell. 89 | 90 | **Shape**: :math:`(3,)`. 91 | 92 | cell_pos : `numpy.ndarray` 93 | Positions of the :math:`N` points inside the unit cell. 94 | 95 | **Shape**: :math:`(N,\,3)`. 96 | 97 | n_cells : `numpy.ndarray` 98 | Number of times to replicate the unit cell in each direction. 99 | 100 | **Shape**: :math:`(3,)`. 101 | 102 | Returns 103 | ------- 104 | pos : `numpy.ndarray` 105 | Positions of the original and replicated points. 106 | """ 107 | 108 | # Add cell x-dimensions to cell x-positions and replicate them 109 | # n_y * n_z times 110 | x = np.tile( 111 | np.concatenate( 112 | cell_pos[:, 0] + (cell_dims[0] * np.arange(n_cells[0]))[:, None] 113 | ), 114 | reps=n_cells[1] * n_cells[2], 115 | ) 116 | 117 | # Replicate cell y-positions n_x times, add cell y-dimensions to 118 | # them, and then replicate them n_z times 119 | y = np.tile( 120 | np.concatenate( 121 | np.tile(cell_pos[:, 1], reps=n_cells[0]) 122 | + (np.arange(n_cells[1]) * cell_dims[1])[:, None] 123 | ), 124 | reps=n_cells[2], 125 | ) 126 | 127 | # Replicate cell z-positions n_x * n_y times and add cell 128 | # z-dimensions to them 129 | z = np.concatenate( 130 | np.tile(cell_pos[:, 2], reps=n_cells[0] * n_cells[1]) 131 | + cell_dims[2] * np.arange(n_cells[2])[:, None] 132 | ) 133 | 134 | return np.vstack((x, y, z)).T 135 | 136 | 137 | def rebin(x: np.ndarray[float], factor: int = None) -> np.ndarray[float]: 138 | r""" 139 | Rebins discrete data. 140 | 141 | Parameters 142 | ---------- 143 | x : `numpy.ndarray` 144 | Discrete data to be rebinned in the last dimension. 145 | 146 | factor : `int`, optional 147 | Size reduction factor. If not specified, the biggest factor 148 | on the order of :math:`\mathcal{O}(1)`, if available, is used. 149 | 150 | Returns 151 | ------- 152 | xr : `numpy.ndarray` 153 | Rebinned discrete data. 
154 | """ 155 | 156 | if factor is None: 157 | factors = np.array(sympy.divisors(x.shape[-1])[1:]) 158 | factor_indices = np.where(factors < 10)[0] 159 | if len(factor_indices): 160 | factor = factors[factor_indices[-1]] 161 | else: 162 | raise ValueError("No factor provided for rebinning.") 163 | 164 | return x.reshape((*x.shape[:-1], -1, factor)).mean(axis=-1) 165 | 166 | 167 | def depth_first_search( 168 | graph: dict[Any, list[Any]], start: Any, visited: bool, group: list[Any] 169 | ) -> None: 170 | """ 171 | Implements the depth-first search algorithm to find connected 172 | components in a graph. 173 | 174 | Parameters 175 | ---------- 176 | graph : `dict` 177 | Graph to search. 178 | 179 | start : `Any` 180 | Starting node. 181 | 182 | visited : `dict` 183 | Visited nodes. This `dict` is updated in-place. 184 | 185 | group : `list` 186 | Connected nodes. This `list` is updated in-place. 187 | """ 188 | 189 | visited[start] = True 190 | group.append(start) 191 | for neighbor in graph[start]: 192 | if not visited[neighbor]: 193 | depth_first_search(graph, neighbor, visited, group) 194 | 195 | 196 | def find_connected_nodes(graph: dict[Any, list[Any]]) -> list[list[Any]]: 197 | """ 198 | Finds connected components in a graph. 199 | 200 | Parameters 201 | ---------- 202 | graph : `dict` 203 | Graph to search. 204 | 205 | Returns 206 | ------- 207 | results : `list` 208 | Connected components. 209 | """ 210 | 211 | visited = {node: False for node in graph} 212 | results = [] 213 | for start in graph: 214 | if not visited[start]: 215 | group = [] 216 | depth_first_search(graph, start, visited, group) 217 | results.append(group) 218 | return results 219 | 220 | 221 | def is_lower_triangular(matrix: np.ndarray[float]) -> bool: 222 | """ 223 | Checks if a matrix is lower triangular. 224 | 225 | Parameters 226 | ---------- 227 | matrix : `numpy.ndarray` 228 | Matrix to check. 229 | 230 | Returns 231 | ------- 232 | is_lower_triangular : `bool` 233 | Whether the matrix is lower triangular. 234 | """ 235 | 236 | return np.allclose(matrix, np.tril(matrix)) 237 | -------------------------------------------------------------------------------- /src/mdcraft/analysis/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Simulation state data and trajectory analysis 3 | ============================================= 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | This module provides a variety of classes for analyzing simulation 7 | trajectories. 8 | """ 9 | 10 | from . import ( 11 | base, 12 | electrostatics, 13 | polymer, 14 | profile, 15 | reader, 16 | structure, 17 | thermodynamics, 18 | transport, 19 | ) 20 | 21 | __all__ = [ 22 | "base", 23 | "electrostatics", 24 | "polymer", 25 | "profile", 26 | "reader", 27 | "structure", 28 | "thermodynamics", 29 | "transport", 30 | ] 31 | -------------------------------------------------------------------------------- /src/mdcraft/fit/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Curve fitting models 3 | ==================== 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | This module provides a library of curve fitting models, such as 7 | exponential, Fourier series, polynomial, and power law models, for use 8 | with :func:`scipy.optimize.curve_fit`. 9 | """ 10 | 11 | from . 
import distribution, exponential, fourier, gaussian, polynomial, power 12 | 13 | __all__ = ["distribution", "exponential", "fourier", "gaussian", "polynomial", "power"] 14 | -------------------------------------------------------------------------------- /src/mdcraft/fit/distribution.py: -------------------------------------------------------------------------------- 1 | r""" 2 | Weibull distribution models 3 | =========================== 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | The Weibull distribution is widely used in reliability and life (failure 7 | rate) data analysis. This module provides the one-, two-, and 8 | three-parameter Weibull distributions. 9 | 10 | The three-parameter Weibull distribution is 11 | 12 | .. math:: 13 | 14 | y=ab(x-c)^{b-1}\exp{[-a(x-c)^b]} 15 | 16 | where :math:`a` is the scale parameter, :math:`b` is the shape 17 | parameter, and :math:`c` is the location parameter. 18 | 19 | The two-parameter Weibull distribution 20 | 21 | .. math:: 22 | 23 | y=abx^{b-1}\exp{(-ax^b)} 24 | 25 | has :math:`x-c` replaced with :math:`x`. 26 | 27 | The one-parameter Weibull distribution has the shape parameter fixed, 28 | so only the scale parameter is fitted. 29 | """ 30 | 31 | import numpy as np 32 | 33 | 34 | def weibull(x: np.ndarray, a: float, b: float, c: float = 0) -> np.ndarray: 35 | r""" 36 | General three-parameter Weibull distribution. 37 | 38 | .. math:: 39 | 40 | y=ab(x-c)^{b-1}\exp{[-a(x-c)^b]} 41 | 42 | Parameters 43 | ---------- 44 | x : `numpy.ndarray` 45 | One-dimensional array containing :math:`x`-values. 46 | 47 | a : `float` 48 | Scale parameter :math:`a`. 49 | 50 | b : `float` 51 | Shape parameter :math:`b`. If specified to be a constant, the 52 | one-parameter Weibull distribution is used. 53 | 54 | c : `float`, keyword-only, default: :code:`0` 55 | Location parameter :math:`c`. If not specified as a parameter, 56 | the two-parameter Weibull distribution is used. 57 | 58 | Returns 59 | ------- 60 | fit : `numpy.ndarray` 61 | Fitted :math:`y`-values. 62 | 63 | Examples 64 | -------- 65 | Create a three-parameter Weibull distribution model for fitting. 66 | 67 | >>> model = lambda x, a, b, c: weibull(x, a, b, c) 68 | 69 | Create a two-parameter Weibull distribution model for fitting. 70 | 71 | >>> model = lambda x, a, b: weibull(x, a, b) 72 | 73 | Create a one-parameter Weibull distribution model for fitting, with 74 | :math:`b = 1`. 75 | 76 | >>> model = lambda x, a: weibull(x, a, 1) 77 | """ 78 | 79 | return a * b * (x - c) ** (b - 1) * np.exp(-a * (x - c) ** b) 80 | -------------------------------------------------------------------------------- /src/mdcraft/fit/exponential.py: -------------------------------------------------------------------------------- 1 | r""" 2 | Exponential models 3 | ================== 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | Exponentials are often used when the rate of change of a quantity is 7 | proportional to the initial amount of the quantity. The general 8 | exponential model is given by 9 | 10 | .. math:: 11 | 12 | y=\sum_{k=1}^na_k\exp{(b_kx)} 13 | 14 | If the coefficient :math:`b_k` for an :math:`\exp` term is negative, 15 | that term represents exponential decay. If the coefficient is positive, 16 | that term represents exponential growth. 
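To show how the Weibull model from fit/distribution.py above is meant to be combined with :func:`scipy.optimize.curve_fit`, here is a hedged sketch; the synthetic data, noise level, and initial guess are made up:

    import numpy as np
    from scipy import optimize
    from mdcraft.fit.distribution import weibull

    rng = np.random.default_rng(0)
    x = np.linspace(0.05, 3.0, 100)
    y = weibull(x, 1.0, 1.5) + 0.01 * (2 * rng.random(x.size) - 1)

    # Two-parameter fit (location parameter c fixed at 0)
    popt, _ = optimize.curve_fit(lambda x, a, b: weibull(x, a, b), x, y, p0=(1.0, 1.0))
    print(popt)   # expected to be close to (1.0, 1.5)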
17 | 18 | This module provides the general exponential model above for any number 19 | of terms :math:`k`, as well as convenience functions for the one- 20 | (:math:`k=1`) and two-term (:math:`k=2`) exponential models 21 | analogous to MATLAB's :code:`exp1` and :code:`exp2`, respectively. 22 | 23 | Additionally, this module has the stretched exponential function, also 24 | known as the complementary cumulative Weibull distribution, given by 25 | 26 | .. math:: 27 | 28 | y=\exp{\left[-\left(\frac{x}{\alpha}\right)^\beta\right]} 29 | 30 | where :math:`\beta` is the stretching exponent. This expression is 31 | obtained by inserting a fractional power law into the exponential 32 | function. 33 | 34 | This model is generally meaningful only for :math:`x>0`. The graph of 35 | :math:`\log{(y)}` vs. :math:`x` is characteristically stretched when 36 | :math:`0\leq\beta\leq 1` and compressed when :math:`\beta>1` (the latter 37 | case has less practical importance). When :math:`\beta=1`, the one-term 38 | exponential model is recovered. When :math:`\beta=2`, the probability 39 | density function for the normal distribution is obtained. 40 | """ 41 | 42 | import numpy as np 43 | 44 | 45 | def exp(x: np.ndarray, *args: float) -> np.ndarray: 46 | r""" 47 | General exponential model. 48 | 49 | .. math:: 50 | 51 | y=\sum_{k=1}^na_k\exp{(b_kx)} 52 | 53 | Parameters 54 | ---------- 55 | x : `numpy.ndarray` 56 | One-dimensional array containing :math:`x`-values. 57 | 58 | *args : `float` 59 | Fitting parameters for the exponential term(s), ordered as 60 | :math:`a_1,\,b_1,\,a_2,\,b_2,\ldots,\,a_n,\,b_n`, where 61 | :math:`n` is the number of terms in the model. As such, the 62 | number of variable positional arguments must be even. 63 | 64 | Returns 65 | ------- 66 | fit : `numpy.ndarray` 67 | Fitted :math:`y`-values. 68 | 69 | Examples 70 | -------- 71 | Generate :math:`x`- and :math:`y`-values (with error), and then use 72 | :func:`scipy.optimize.curve_fit` to fit coefficients for a two-term 73 | exponential model. 74 | 75 | >>> from scipy import optimize 76 | >>> rng = np.random.default_rng() 77 | >>> x = np.linspace(-0.1, 0.1, 10) 78 | >>> err = (2 * rng.random(x.shape) - 1) / 10 79 | >>> y = np.exp(-8 * x) + np.exp(12 * x) + err 80 | >>> pk, _ = optimize.curve_fit( 81 | lambda x, a1, b1, a2, b2: exp(x, a1, b1, a2, b2), x, y 82 | ) 83 | >>> pk 84 | array([ 1.13072662, -6.90042351, 0.88706719, 12.87854508]) 85 | 86 | Evaluate the fitted :math:`y`-values using the coefficients. 87 | 88 | >>> exp(x, *pk) 89 | array([2.49915084, 2.25973234, 2.09274413, 2.00061073, 1.98962716, 90 | 2.07080543, 2.26106343, 2.58486089, 3.07642312, 3.78274065]) 91 | """ 92 | 93 | n = len(args) 94 | if n < 2 or n % 2 != 0: 95 | emsg = "Number of fitting parameters must be greater than 2 and even." 96 | raise ValueError(emsg) 97 | return np.exp(args[1::2] * x[:, None]) @ args[::2] 98 | 99 | 100 | def exp1(x: np.ndarray, a: float, b: float) -> np.ndarray: 101 | r""" 102 | Convenience function for the :code:`exp1` model from MATLAB. 103 | 104 | .. math:: 105 | 106 | y=a\exp{(bx)} 107 | 108 | Parameters 109 | ---------- 110 | x : `numpy.ndarray` 111 | One-dimensional array containing :math:`x`-values. 112 | 113 | a : `float` 114 | Coefficient :math:`a` for the :math:`\exp` term. 115 | 116 | b : `float` 117 | Coefficient :math:`b` for the :math:`x` term in the :math:`\exp` 118 | term. 119 | 120 | Returns 121 | ------- 122 | fit : `numpy.ndarray` 123 | Fitted :math:`y`-values. 
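The stretched exponential described in the module docstring is typically used to fit slowly decaying correlation functions; a short illustrative sketch using the `stretched_exp` function defined later in this file, with synthetic data and arbitrary parameters:

    import numpy as np
    from scipy import optimize
    from mdcraft.fit.exponential import stretched_exp

    rng = np.random.default_rng(0)
    t = np.linspace(0.01, 10.0, 200)
    acf = stretched_exp(t, 2.0, 0.6) + 0.01 * (2 * rng.random(t.size) - 1)

    (alpha, beta), _ = optimize.curve_fit(stretched_exp, t, acf, p0=(1.0, 1.0))
    print(alpha, beta)   # expected to be close to 2.0 and 0.6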
124 | """ 125 | 126 | return exp(x, a, b) 127 | 128 | 129 | def exp2(x: np.ndarray, a: float, b: float, c: float, d: float) -> np.ndarray: 130 | r""" 131 | Convenience function for the :code:`exp2` model from MATLAB. 132 | 133 | .. math:: 134 | 135 | y=a\exp{(bx)}+c\exp{(dx)} 136 | 137 | Parameters 138 | ---------- 139 | x : `numpy.ndarray` 140 | One-dimensional array containing :math:`x`-values. 141 | 142 | a : `float` 143 | Coefficient :math:`a` for the first :math:`\exp` term. 144 | 145 | b : `float` 146 | Coefficient :math:`b` for the :math:`x` term in the first 147 | :math:`\exp` term. 148 | 149 | c : `float` 150 | Coefficient :math:`a` for the second :math:`\exp` term. 151 | 152 | d : `float` 153 | Coefficient :math:`b` for the :math:`x` term in the second 154 | :math:`\exp` term. 155 | 156 | Returns 157 | ------- 158 | fit : `numpy.ndarray` 159 | Fitted :math:`y`-values. 160 | """ 161 | 162 | return exp(x, a, b, c, d) 163 | 164 | 165 | def biexp( 166 | x: np.ndarray, y0: float, a: float, b: float, c: float, d: float 167 | ) -> np.ndarray: 168 | r""" 169 | Bi-exponential function. 170 | 171 | .. math:: 172 | 173 | y=y_0+a\exp{\left(-\frac{x}{b}\right)}+c\exp{\left(-\frac{x}{d}\right)} 174 | 175 | Parameters 176 | ---------- 177 | x : `numpy.ndarray` 178 | One-dimensional array containing :math:`x`-values. 179 | 180 | y0 : `float` 181 | Offset :math:`y_0`. 182 | 183 | a : `float` 184 | Coefficient :math:`a` for the first :math:`\exp` term. 185 | 186 | b : `float` 187 | Coefficient :math:`b` for the :math:`x` term in the first 188 | :math:`\exp` term. 189 | 190 | c : `float` 191 | Coefficient :math:`a` for the second :math:`\exp` term. 192 | 193 | d : `float` 194 | Coefficient :math:`b` for the :math:`x` term in the second 195 | :math:`\exp` term. 196 | 197 | Returns 198 | ------- 199 | fit : `numpy.ndarray` 200 | Fitted :math:`y`-values. 201 | """ 202 | 203 | return y0 + a * np.exp(-x / b) + c * np.exp(-x / d) 204 | 205 | 206 | def stretched_exp(x: np.ndarray, alpha: float, beta: float) -> np.ndarray: 207 | r""" 208 | Stretched exponential function. 209 | 210 | .. math:: 211 | 212 | y=\exp{\left[-\left(\frac{x}{\alpha}\right)^\beta\right]} 213 | 214 | Parameters 215 | ---------- 216 | x : `numpy.ndarray` 217 | One-dimensional array containing :math:`x`-values. 218 | 219 | alpha : `float` 220 | Scaling parameter :math:`\alpha` for :math:`x`. 221 | 222 | beta : `float` 223 | Stretching exponent :math:`\beta`. 224 | 225 | Returns 226 | ------- 227 | fit : `numpy.ndarray` 228 | Fitted :math:`y`-values. 229 | """ 230 | 231 | return np.exp(-((x / alpha) ** beta)) 232 | -------------------------------------------------------------------------------- /src/mdcraft/fit/power.py: -------------------------------------------------------------------------------- 1 | r""" 2 | Power models 3 | ============ 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | The power model is given by 7 | 8 | .. math:: 9 | 10 | y=ax^b+c 11 | 12 | This module provides the power model above and convenience functions for 13 | the one- (:math:`c = 0`) and two-term power models in MATLAB, 14 | :code:`power1` and :code:`power2`, respectively. 15 | """ 16 | 17 | import numpy as np 18 | 19 | 20 | def power(x: np.ndarray, a: float, b: float, c: float = 0) -> np.ndarray: 21 | r""" 22 | General power model. 23 | 24 | .. math:: 25 | 26 | y=ax^b+c 27 | 28 | Parameters 29 | ---------- 30 | x : `numpy.ndarray` 31 | :math:`x`-values. 32 | 33 | a : `float` 34 | Coefficient for the :math:`x^b` term. 
35 | 36 | b : `float` 37 | Power constant :math:`b` for the :math:`x^b` term. 38 | 39 | c : `float`, keyword-only, default: :code:`0` 40 | Constant for the :math:`y`-intercept. 41 | 42 | Returns 43 | ------- 44 | fit : `numpy.ndarray` 45 | Fitted :math:`y`-values. 46 | """ 47 | 48 | return a * x**b + c 49 | 50 | 51 | def power1(x: np.ndarray, a: float, b: float) -> np.ndarray: 52 | r""" 53 | Convenience function for the :code:`power1` model from MATLAB. 54 | 55 | .. math:: 56 | 57 | y=ax^b 58 | 59 | Parameters 60 | ---------- 61 | x : `numpy.ndarray` 62 | :math:`x`-values. 63 | 64 | a : `float` 65 | Coefficient for the :math:`x^b` term. 66 | 67 | b : `float` 68 | Power constant :math:`b` for the :math:`x^b` term. 69 | 70 | Returns 71 | ------- 72 | fit : `numpy.ndarray` 73 | Fitted :math:`y`-values. 74 | """ 75 | 76 | return power(x, a, b) 77 | 78 | 79 | def power2(x: np.ndarray, a: float, b: float, c: float) -> np.ndarray: 80 | r""" 81 | Convenience function for the :code:`power2` model from MATLAB. 82 | 83 | .. math:: 84 | 85 | y=ax^b+c 86 | 87 | Parameters 88 | ---------- 89 | x : `numpy.ndarray` 90 | :math:`x`-values. 91 | 92 | a : `float` 93 | Coefficient for the :math:`x^b` term. 94 | 95 | b : `float` 96 | Power constant :math:`b` for the :math:`x^b` term. 97 | 98 | c : `float` 99 | Constant for the :math:`y`-intercept. 100 | 101 | Returns 102 | ------- 103 | fit : `numpy.ndarray` 104 | Fitted :math:`y`-values. 105 | """ 106 | 107 | return power(x, a, b, c) 108 | -------------------------------------------------------------------------------- /src/mdcraft/lammps/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | LAMMPS tools 3 | ============ 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | This module provides a number of extensions to the LAMMPS simulation 7 | toolkit. 8 | """ 9 | 10 | from . import topology 11 | 12 | __all__ = ["topology"] 13 | -------------------------------------------------------------------------------- /src/mdcraft/lammps/topology.py: -------------------------------------------------------------------------------- 1 | """ 2 | Topology transformations 3 | ======================== 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | This module contains implementations of common LAMMPS topology 7 | transformations, like the generation of initial particle positions. 8 | """ 9 | 10 | from io import TextIOWrapper 11 | from numbers import Real 12 | from typing import Any, Union 13 | 14 | import numpy as np 15 | 16 | from .. import FOUND_OPENMM, Q_, ureg 17 | from ..algorithm import topology as topo 18 | 19 | if FOUND_OPENMM: 20 | from openmm import app, unit 21 | 22 | 23 | def create_atoms( 24 | dimensions: Union[np.ndarray[float], "unit.Quantity", Q_, "app.Topology"], 25 | N: int = None, 26 | N_p: int = 1, 27 | *, 28 | lattice: str = None, 29 | length: Union[float, "unit.Quantity"] = 0.34, 30 | flexible: bool = False, 31 | bonds: bool = False, 32 | angles: bool = False, 33 | dihedrals: bool = False, 34 | randomize: bool = False, 35 | length_unit: Union["unit.Unit", ureg.Unit] = None, 36 | wrap: bool = False, 37 | ) -> Any: 38 | """ 39 | Generates initial particle positions for coarse-grained simulations. 40 | 41 | .. seealso:: 42 | 43 | This function is an alias for 44 | :func:`mdcraft.algorithm.topology.create_atoms`. 
45 | """ 46 | 47 | return topo.create_atoms( 48 | dimensions, 49 | N, 50 | N_p, 51 | lattice=lattice, 52 | length=length, 53 | flexible=flexible, 54 | bonds=bonds, 55 | angles=angles, 56 | dihedrals=dihedrals, 57 | randomize=randomize, 58 | length_unit=length_unit, 59 | wrap=wrap, 60 | ) 61 | 62 | 63 | def write_data( 64 | file: Union[str, TextIOWrapper], 65 | positions: tuple[np.ndarray[float]], 66 | *, 67 | bonds: tuple[np.ndarray[int]] = None, 68 | angles: tuple[np.ndarray[int]] = None, 69 | dihedrals: tuple[np.ndarray[int]] = None, 70 | impropers: tuple[np.ndarray[int]] = None, 71 | dimensions: np.ndarray[float] = None, 72 | tilt: np.ndarray[float] = None, 73 | charges: np.ndarray[float] = None, 74 | masses: np.ndarray[float] = None, 75 | ) -> None: 76 | r""" 77 | Writes topological data to a LAMMPS data file in :code:`atom_style full`. 78 | 79 | Parameters 80 | ---------- 81 | file : `str` or `_io.TextIOWrapper` 82 | LAMMPS data file. 83 | 84 | positions : `tuple` 85 | Atomic positions. Each element of the tuple should contain 86 | atoms of the same atom type. 87 | 88 | **Shape**: Tuple of arrays with shape :math:`(*,\,3)`. 89 | 90 | **Reference units**: :math:`\mathrm{Å}`. 91 | 92 | bonds : `tuple`, keyword-only, optional 93 | Pairs of indices of bonded atoms. Each element of the tuple 94 | should contain bonds of the same bond type. 95 | 96 | **Shape**: Tuple of arrays with shape :math:`(*,\,2)`. 97 | 98 | angles : `tuple`, keyword-only, optional 99 | Triples of indices of atoms that form an angle. Each element of 100 | the tuple should contain angles of the same angle type. 101 | 102 | **Shape**: Tuple of arrays with shape :math:`(*,\,3)`. 103 | 104 | dihedrals : `tuple`, keyword-only, optional 105 | Quadruples of indices of atoms that form a dihedral. Each 106 | element of the tuple should contain dihedrals of the same 107 | dihedral type. 108 | 109 | **Shape**: Tuple of arrays with shape :math:`(*,\,4)`. 110 | 111 | impropers : `tuple`, keyword-only, optional 112 | Quadruples of indices of atoms that form an improper. Each 113 | element of the tuple should contain impropers of the same 114 | improper type. 115 | 116 | **Shape**: Tuple of arrays with shape :math:`(*,\,4)`. 117 | 118 | dimensions : array-like, keyword-only, optional 119 | Box dimensions. If three values are provided, the box 120 | dimensions are assumed to be from :math:`0` to the specified 121 | values. If six values are provided, the box dimensions go from 122 | the three values in the first column to the three values in the 123 | second column. 124 | 125 | **Shape**: :math:`(3,)` or :math:`(3,\,2)`. 126 | 127 | **Reference units**: :math:`\mathrm{Å}`. 128 | 129 | tilt : array-like, keyword-only, optional 130 | Box :math:`xy`, :math:`xz`, and :math:`yz` tilt factors. 131 | 132 | **Shape**: :math:`(3,)`. 133 | 134 | charges : array-like, keyword-only, optional 135 | Atomic charges. 136 | 137 | **Shape**: :math:`(N,)`. 138 | 139 | masses : array-like, keyword-only, optional 140 | Atomic masses. 141 | 142 | **Shape**: :math:`(N,)`. 
143 | """ 144 | 145 | if isinstance(file, str): 146 | file = open(file, "w") 147 | 148 | # Write header 149 | file.write("LAMMPS Description\n\n") 150 | n_atoms_type = [len(p) for p in positions] 151 | n_atoms = sum(n_atoms_type) 152 | file.write(f"{n_atoms} atoms\n") 153 | file.write(f"{len(positions)} atom types\n") 154 | if bonds is not None: 155 | n_bonds_type = [len(b) for b in bonds] 156 | file.write(f"{sum(n_bonds_type)} bonds\n") 157 | file.write(f"{len(bonds)} bond types\n") 158 | if angles is not None: 159 | n_angles_type = [len(a) for a in angles] 160 | file.write(f"{sum(n_angles_type)} angles\n") 161 | file.write(f"{len(angles)} angle types\n") 162 | if dihedrals is not None: 163 | n_dihedrals_type = [len(d) for d in dihedrals] 164 | file.write(f"{sum(n_dihedrals_type)} dihedrals\n") 165 | file.write(f"{len(dihedrals)} dihedral types\n") 166 | if impropers is not None: 167 | n_impropers_type = [len(i) for i in impropers] 168 | file.write(f"{sum(n_impropers_type)} impropers\n") 169 | file.write(f"{len(impropers)} improper types\n") 170 | if dimensions is not None: 171 | if dimensions.ndim == 1: 172 | dimensions = np.vstack((np.zeros(3), dimensions)).T 173 | for i, d in enumerate(dimensions): 174 | a = chr(120 + i) 175 | file.write(f"{d[0]:.6g} {d[1]:.6g} {a}lo {a}hi\n") 176 | if tilt is not None: 177 | file.write(f"{tilt[0]:.6g} {tilt[1]:.6g} {tilt[2]:.6g} xy xz yz\n") 178 | 179 | # Write masses 180 | if masses is not None: 181 | if len(masses) != len(positions): 182 | emsg = "Number of masses must match number of atom types." 183 | raise ValueError(emsg) 184 | file.write("\nMasses\n\n") 185 | for i, m in enumerate(masses): 186 | file.write(f"{i + 1} {m:.6g}\n") 187 | 188 | # Write atom positions 189 | if charges is None: 190 | charges = np.zeros(n_atoms) 191 | if len(charges) == len(positions): 192 | charges = list(charges) 193 | for i, (qs, n) in enumerate(zip(charges, n_atoms_type)): 194 | if isinstance(qs, Real): 195 | charges[i] *= np.ones(n) 196 | elif len(charges) == n_atoms: 197 | charges = np.array_split(charges, np.cumsum(n_atoms)[:-1]) 198 | else: 199 | raise ValueError("'charges' has an invalid shape.") 200 | file.write("\nAtoms # full\n\n") 201 | for t, (pos, qs) in enumerate(zip(positions, charges)): 202 | start = sum(n_atoms_type[:t]) 203 | for i, (p, q) in enumerate(zip(pos, qs)): 204 | file.write( 205 | f"{start + i + 1} {start + i + 1} {t + 1} {q:.6g} " 206 | f"{p[0]:.6g} {p[1]:.6g} {p[2]:.6g}\n" 207 | ) 208 | 209 | # Write bonds 210 | if bonds is not None: 211 | file.write("\nBonds\n\n") 212 | for t, b in enumerate(bonds): 213 | start = sum(n_bonds_type[:t]) 214 | for i, (a, b) in enumerate(b): 215 | file.write(f"{start + i + 1} {t + 1} {a} {b}\n") 216 | 217 | # Write angles 218 | if angles is not None: 219 | file.write("\nAngles\n\n") 220 | for t, a in enumerate(angles): 221 | start = sum(n_angles_type[:t]) 222 | for i, (a, b, c) in enumerate(a): 223 | file.write(f"{start + i + 1} {t + 1} {a} {b} {c}\n") 224 | 225 | # Write dihedrals 226 | if dihedrals is not None: 227 | file.write("\nDihedrals\n\n") 228 | for t, d in enumerate(dihedrals): 229 | start = sum(n_dihedrals_type[:t]) 230 | for i, (a, b, c, d) in enumerate(d): 231 | file.write(f"{start + i + 1} {t + 1} {a} {b} {c} {d}\n") 232 | 233 | # Write impropers 234 | if impropers is not None: 235 | file.write("\nImpropers\n\n") 236 | for t, i in enumerate(impropers): 237 | start = sum(n_impropers_type[:t]) 238 | for j, (a, b, c, d) in enumerate(i): 239 | file.write(f"{start + j + 1} {t + 1} {a} {b} {c} 
{d}\n") 240 | 241 | file.close() 242 | -------------------------------------------------------------------------------- /src/mdcraft/openmm/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | OpenMM tools 3 | ============ 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | This module provides a number of extensions to the OpenMM simulation 7 | toolkit. 8 | """ 9 | 10 | from . import bond, file, pair, reporter, system, topology, unit, utility 11 | 12 | __all__ = ["bond", "file", "pair", "reporter", "system", "topology", "unit", "utility"] 13 | -------------------------------------------------------------------------------- /src/mdcraft/openmm/bond.py: -------------------------------------------------------------------------------- 1 | """ 2 | Custom OpenMM bond potentials 3 | ============================= 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | This module contains implementations of commonly used bond potentials 7 | that are not available in OpenMM, such as the finite extension nonlinear 8 | elastic (FENE) potential. Generally, the bond potentials are named after 9 | their LAMMPS :code:`bond_style` counterparts, if available. 10 | """ 11 | 12 | from typing import Union 13 | 14 | import openmm 15 | from openmm import unit 16 | 17 | from .pair import wca as pwca 18 | 19 | 20 | def _setup_bond( 21 | cbforce: openmm.CustomBondForce, 22 | global_params: dict[str, Union[float, unit.Quantity]], 23 | per_params: list[str], 24 | ) -> None: 25 | """ 26 | Sets up a :class:`openmm.CustomBondForce` object. 27 | 28 | Parameters 29 | ---------- 30 | cbforce : `openmm.CustomBondForce` 31 | Custom bond force object. 32 | 33 | global_params : `dict` 34 | Global parameters. 35 | 36 | per_params : `list` 37 | Per-particle parameters. 38 | """ 39 | 40 | for param in global_params.items(): 41 | cbforce.addGlobalParameter(*param) 42 | for param in per_params: 43 | cbforce.addPerBondParameter(param) 44 | 45 | 46 | def fene( 47 | global_args: dict[str, Union[float, unit.Quantity]] = None, 48 | wca: bool = True, 49 | **kwargs, 50 | ) -> tuple[openmm.CustomBondForce, openmm.CustomNonbondedForce]: 51 | r""" 52 | Implements the finite extensible nonlinear elastic (FENE) potential 53 | used for bead-spring polymer models. 54 | 55 | The potential energy between two bonded particles is given by 56 | 57 | .. math:: 58 | 59 | u_\mathrm{FENE}=-\frac{1}{2}k_{12}r_{0,12}^2 60 | \ln{\left[1-\left(\frac{r_{12}}{r_{0,12}}\right)^2\right]} 61 | +4\epsilon_{12}\left[\left(\frac{\sigma_{12}}{r_{12}}\right)^{12} 62 | -\left(\frac{\sigma_{12}}{r_{12}}\right)^6\right]+\epsilon_{12} 63 | 64 | where :math:`k_{12}` is the bond coefficient in 65 | :math:`\mathrm{kJ}/(\mathrm{nm}^2\cdot\mathrm{mol})`, 66 | :math:`r_{0,12}` is the equilibrium bond length in 67 | :math:`\mathrm{nm}`, :math:`\sigma_{12}` is the average particle 68 | size in :math:`\mathrm{nm}`, and :math:`\epsilon_{12}` is the 69 | dispersion energy in :math:`\mathrm{kJ/mol}`. :math:`k_{12}`, 70 | :math:`r_{0,12}`, :math:`\sigma_{12}` and :math:`\epsilon_{12}` are 71 | determined from per-bond and per-particle parameters `k`, `r0`, 72 | `sigma` and `epsilon`, respectively, which are set using 73 | :meth:`openmm.openmm.CustomBondForce.addBond` and 74 | :meth:`openmm.openmm.NonbondedForce.addParticle`. 75 | 76 | Parameters 77 | ---------- 78 | global_args : `dict`, optional 79 | Constant values :math:`k_{12}` and :math:`r_{0,12}` to use 80 | instead of per-bond parameters. 
The corresponding per-bond 81 | parameters will not be registered, but the remaining 82 | per-bond parameters will still have to be provided in their 83 | default order. 84 | 85 | wca : `bool`, default: :code:`True` 86 | Determines whether the Weeks–Chandler–Andersen (WCA) potential 87 | is included. 88 | 89 | **kwargs 90 | Keyword arguments to be passed to 91 | :meth:`mdcraft.openmm.pair.wca` if :code:`wca=True`. 92 | 93 | Returns 94 | ------- 95 | bond_fene : `openmm.CustomBondForce` 96 | FENE bond potential. 97 | 98 | pair_wca : `openmm.CustomNonbondedForce` 99 | WCA pair potential, if :code:`wca=True`. 100 | """ 101 | 102 | bond_fene = openmm.CustomBondForce("-0.5*k*r0^2*log(1-(r/r0)^2)") 103 | per_args = ["k", "r0"] 104 | for param in global_args.keys(): 105 | if param in per_args: 106 | per_args.remove(param) 107 | _setup_bond(bond_fene, global_args, per_args) 108 | 109 | if wca: 110 | pair_wca = pwca(**kwargs) 111 | return bond_fene, pair_wca 112 | 113 | return bond_fene 114 | -------------------------------------------------------------------------------- /src/mdcraft/openmm/reporter.py: -------------------------------------------------------------------------------- 1 | """ 2 | Custom OpenMM reporters 3 | ======================= 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | This module provides custom optimized OpenMM reporters. 7 | """ 8 | 9 | from typing import Union 10 | 11 | import numpy as np 12 | import openmm 13 | from openmm import app, unit 14 | 15 | from .file import NetCDFFile 16 | 17 | 18 | class NetCDFReporter: 19 | """ 20 | A NetCDF trajectory reporter for OpenMM that can report velocities 21 | and forces in addition to time and coordinates for all particles in 22 | the simulation or just a subset. 23 | 24 | Parameters 25 | ---------- 26 | file : `str` 27 | Filename of NetCDF file to which the data is saved. If `file` 28 | does not have the :code:`.nc` extension, it will automatically 29 | be appended. 30 | 31 | interval : `int` 32 | Interval (in timesteps) at which to write frames. 33 | 34 | append : `bool`, keyword-only, default: :code:`False` 35 | If :code:`True`, the existing NetCDF file is opened for data to 36 | be appended to. If :code:`False`, a new NetCDF file is opened 37 | (and will clobber an existing file with the same name). 38 | 39 | periodic : `bool`, keyword-only, optional 40 | Specifies whether particle positions should be translated so the 41 | center of every molecule lies in the same periodic box. If 42 | :code:`None` (the default), it will automatically decide whether 43 | to translate molecules based on whether the system being 44 | simulated uses periodic boundary conditions. 45 | 46 | velocities : `bool`, keyword-only, default: :code:`False` 47 | Specifies whether particle velocities should be written to file. 48 | 49 | forces : `bool`, keyword-only, default: :code:`False` 50 | Specifies whether forces exerted on particles should be written 51 | to file. 52 | 53 | subset : `slice`, `numpy.ndarray`, or `openmm.app.Topology`, \ 54 | keyword-only, optional 55 | Slice or array containing the indices of particles to report 56 | data for. If an OpenMM topology is provided instead, the indices 57 | are determined from the atoms found in the topology. 
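A minimal sketch of wiring the FENE bond potential above into an OpenMM system, assuming Kremer-Grest-style constants for every bond and skipping the WCA part; the parameter values and particle masses are illustrative only:

    import openmm
    from mdcraft.openmm.bond import fene

    # Constant k and r0 (kJ/(nm^2 mol) and nm), so no per-bond parameters are needed
    bond_fene = fene(global_args={"k": 30.0, "r0": 1.5}, wca=False)
    bond_fene.addBond(0, 1, [])   # bond between particles 0 and 1

    system = openmm.System()
    system.addParticle(1.0)
    system.addParticle(1.0)
    system.addForce(bond_fene)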
58 | """ 59 | 60 | def __init__( 61 | self, 62 | file: str, 63 | interval: int, 64 | append: bool = False, 65 | periodic: bool = None, 66 | *, 67 | velocities: bool = False, 68 | forces: bool = False, 69 | subset: Union[slice, np.ndarray[int], app.Topology] = None, 70 | ) -> None: 71 | 72 | self._out = NetCDFFile(file, "a" if append else "w") 73 | self._interval = interval 74 | self._periodic = periodic 75 | self._subset = ( 76 | np.fromiter((a.index for a in subset.atoms()), dtype=int) 77 | if isinstance(subset, app.Topology) 78 | else subset 79 | ) 80 | self._velocities = velocities 81 | self._forces = forces 82 | 83 | def __del__(self) -> None: 84 | self._out._nc.close() 85 | 86 | def describeNextReport( 87 | self, simulation: app.Simulation 88 | ) -> tuple[int, bool, bool, bool, bool, bool]: 89 | """ 90 | Get information about the next report this NetCDF reporter will 91 | generate. 92 | 93 | Parameters 94 | ---------- 95 | simulation : `openmm.app.Simulation` 96 | OpenMM simulation to generate a report for. 97 | 98 | Returns 99 | ------- 100 | report : `tuple` 101 | .. container:: 102 | 103 | A six-element tuple containing 104 | 105 | 1. the number of steps until the next report, 106 | 2. whether the report will require coordinates, 107 | 3. whether the report will require velocities, 108 | 4. whether the report will require forces, 109 | 5. whether the report will require energies, and 110 | 6. whether coordinates should be wrapped to lie in a 111 | single periodic box. 112 | """ 113 | 114 | return ( 115 | self._interval - simulation.currentStep % self._interval, 116 | True, 117 | self._velocities, 118 | self._forces, 119 | False, 120 | self._periodic, 121 | ) 122 | 123 | def report(self, simulation: app.Simulation, state: openmm.State) -> None: 124 | """ 125 | Generate a report. 126 | 127 | Parameters 128 | ---------- 129 | simulation : `openmm.app.Simulation` 130 | OpenMM simulation to generate a report for. 131 | 132 | state : `openmm.State` 133 | Current OpenMM simulation state. 
134 | """ 135 | 136 | # Get all requested state data from OpenMM State 137 | data = {} 138 | if self._subset is None: 139 | data["coordinates"] = state.getPositions(asNumpy=True).value_in_unit( 140 | unit.angstrom 141 | ) 142 | if self._velocities: 143 | data["velocities"] = state.getVelocities(asNumpy=True).value_in_unit( 144 | unit.angstrom / unit.picosecond 145 | ) 146 | if self._forces: 147 | data["forces"] = state.getForces(asNumpy=True).value_in_unit( 148 | unit.kilocalorie_per_mole / unit.angstrom 149 | ) 150 | else: 151 | data["coordinates"] = state.getPositions(asNumpy=True)[ 152 | self._subset 153 | ].value_in_unit(unit.angstrom) 154 | if self._velocities: 155 | data["velocities"] = state.getVelocities(asNumpy=True)[ 156 | self._subset 157 | ].value_in_unit(unit.angstrom / unit.picosecond) 158 | if self._forces: 159 | data["forces"] = state.getForces(asNumpy=True)[ 160 | self._subset 161 | ].value_in_unit(unit.kilocalorie_per_mole / unit.angstrom) 162 | 163 | # Initialize NetCDF file headers, if not done already 164 | if not hasattr(self._out._nc, "Conventions"): 165 | self._out.write_header( 166 | ( 167 | simulation.topology.getNumAtoms() 168 | if self._subset is None 169 | else len(self._subset) 170 | ), 171 | simulation.topology.getPeriodicBoxVectors() is not None, 172 | self._velocities, 173 | self._forces, 174 | ) 175 | 176 | # Get the lengths and angles that define the size and shape of the 177 | # simulation box 178 | pbv = state.getPeriodicBoxVectors() 179 | if pbv is not None: 180 | (a, b, c, alpha, beta, gamma) = ( 181 | app.internal.unitcell.computeLengthsAndAngles(pbv) 182 | ) 183 | data["cell_lengths"] = 10 * np.array((a, b, c)) 184 | data["cell_angles"] = 180 * np.array((alpha, beta, gamma)) / np.pi 185 | 186 | # Write current frame 187 | self._out.write_model(state.getTime().value_in_unit(unit.picosecond), **data) 188 | -------------------------------------------------------------------------------- /src/mdcraft/openmm/unit.py: -------------------------------------------------------------------------------- 1 | """ 2 | OpenMM physical constants and unit conversions 3 | ============================================== 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | This module contains physical constants and functions for unit 7 | reduction. 8 | """ 9 | 10 | from openmm import unit 11 | 12 | from ..algorithm import unit as u 13 | 14 | VACUUM_PERMITTIVITY = 8.854187812813e-12 * unit.farad / unit.meter 15 | 16 | 17 | def get_scale_factors( 18 | bases: dict[str, unit.Quantity], other: dict[str, list] = {} 19 | ) -> dict[str, unit.Quantity]: 20 | """ 21 | Evaluates scaling factors for reduced units. 22 | 23 | .. seealso:: 24 | 25 | This function is an alias for 26 | :func:`mdcraft.algorithm.unit.get_scale_factors`. 27 | """ 28 | 29 | return u.get_scale_factors(bases, other) 30 | 31 | 32 | def get_lj_scale_factors( 33 | bases: dict[str, unit.Quantity], other: dict[str, list] = {} 34 | ) -> dict[str, unit.Quantity]: 35 | """ 36 | Evaluates scaling factors for reduced Lennard-Jones units. 37 | 38 | .. seealso:: 39 | 40 | This function is an alias for 41 | :func:`mdcraft.algorithm.unit.get_lj_scale_factors`. 42 | """ 43 | 44 | return u.get_lj_scale_factors(bases, other) 45 | -------------------------------------------------------------------------------- /src/mdcraft/plot/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plotting settings and tools 3 | =========================== 4 | .. 
moduleauthor:: Benjamin Ye 5 | 6 | This module contains Matplotlib rcParams and tools for generating 7 | aesthetically-pleasing scientific figures. 8 | """ 9 | 10 | from . import axis, color, rcparam 11 | 12 | __all__ = ["axis", "color", "rcparam"] 13 | -------------------------------------------------------------------------------- /src/mdcraft/plot/axis.py: -------------------------------------------------------------------------------- 1 | """ 2 | Axis components 3 | =============== 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | This module provides additional functionality for Matplotlib axes. 7 | """ 8 | 9 | from typing import Any 10 | 11 | import matplotlib as mpl 12 | import numpy as np 13 | 14 | 15 | def set_up_tabular_legend( 16 | rows: list[str], 17 | cols: list[str], 18 | *, 19 | hlabel: str = None, 20 | vlabel: str = None, 21 | hla: str = "left", 22 | vla: str = "top", 23 | condense: bool = False, 24 | **kwargs, 25 | ) -> tuple[dict[str, Any], int, int]: 26 | r""" 27 | Sets up a tabular legend for a :class:`matplotlib.axes.Axes` object. 28 | 29 | Parameters 30 | ---------- 31 | rows : `tuple` or `list` 32 | Raw string representations of the row values. 33 | 34 | cols : `tuple` or `list` 35 | Raw string representations of the column values. 36 | 37 | hlabel : `str`, keyword-only, optional 38 | Horizontal label for column values. 39 | 40 | vlabel : `str`, keyword-only, optional 41 | Vertical label for row values. 42 | 43 | hla : `str`, keyword-only, default: :code:`"left"` 44 | Alignment for `hlabel`. 45 | 46 | .. container:: 47 | 48 | **Valid values**: 49 | 50 | * :code:`"left"`: Left-aligned text. 51 | * :code:`"center"`: Horizontally centered text. 52 | 53 | vla : `str`, keyword-only, default: :code:`"top"` 54 | Alignment for `vlabel`. 55 | 56 | .. container:: 57 | 58 | **Valid values**: 59 | 60 | * :code:`"top"`: Top-aligned text. 61 | * :code:`"center"`: Vertically centered text. 62 | 63 | condense : `bool`, keyword-only, default: :code:`False` 64 | Condenses the legend by placing `vlabel` in the empty top-left 65 | corner. Cannot be used when no `vlabel` is specified or in 66 | conjuction with :code:`vla="center"` (which will take priority). 67 | 68 | **kwargs : 69 | Keyword arguments passed to :meth:`matplotlib.axes.Axes.legend`. 70 | 71 | Returns 72 | ------- 73 | properties : `dict` 74 | Properties of the tabular legend to be unpacked and used in the 75 | :meth:`matplotlib.axes.Axes.legend` call. 76 | 77 | .. container:: 78 | 79 | * handles (`list`): :obj:`matplotlib.artist` objects to be 80 | added to the legend. 81 | * labels (`list`): Labels to be shown next to the 82 | :obj:`matplotlib.artist` objects in the legend. 83 | * ncol (`int`): Number of columns in the legend. 84 | * kwargs (`dict`): Keyword arguments passed to 85 | :meth:`matplotlib.axes.Axes.legend`. 86 | 87 | nrow : `int` 88 | Number of rows in the legend. 89 | 90 | idx_start : `int` 91 | Index at which to start storing handles for 92 | :obj:`matplotlib.artist` objects. 93 | 94 | Notes 95 | ----- 96 | Condensing the legend can cause alignment issues in the first column 97 | containing the row values if the row values are not of comparable 98 | length to the rows label. An easy but imprecise fix is to center the 99 | shorter values using the width of the largest 100 | :class:`matplotlib.transforms.Bbox` in the first column. Sample code 101 | for use in your main script utilizing the outputs of this function is 102 | provided below: 103 | 104 | .. 
code-block:: 105 | 106 | (props, nrow, start) = tabular_legend(..., handletextpad=-5/4) 107 | fig, ax = plt.subplots(...) 108 | for i, x in enumerate(...): 109 | for j, y in enumerate(...): 110 | props["handles"][start + i * nrow + j], = ax.plot(...) 111 | ax.set_xlabel(...) 112 | ax.set_ylabel(...) 113 | lgd = ax.legend(**props) 114 | fig.canvas.draw() 115 | texts = lgd.get_texts()[:nrow] 116 | bounds = [t.get_window_extent().bounds[2] / 2 for t in texts] 117 | center = max(bounds) 118 | for k, t in enumerate(texts): 119 | t.set_position((center - bounds[k], 0)) 120 | plt.show() 121 | """ 122 | 123 | hpad = bool(vlabel) - condense + 1 124 | vpad = bool(hlabel) + 1 125 | nrow = len(rows) + vpad 126 | ncol = len(cols) + hpad 127 | 128 | labels = ["" for _ in range(nrow * ncol)] 129 | if vlabel: 130 | labels[vpad + (len(rows) // 2 if vla == "center" else -condense)] = vlabel 131 | iv = vpad + nrow * (bool(vlabel) - condense) 132 | labels[iv : iv + len(rows)] = rows 133 | if hlabel: 134 | labels[(2 + (hla == "center") * (int(np.ceil(len(cols) / 2)) - 1)) * nrow] = ( 135 | hlabel 136 | ) 137 | labels[hpad * nrow + bool(hlabel) :: nrow] = cols 138 | 139 | return ( 140 | { 141 | "handles": [ 142 | mpl.patches.Rectangle((0, 0), 0.1, 0.1, ec="none", fill=False) 143 | for _ in range(len(labels)) 144 | ], 145 | "labels": labels, 146 | "ncol": ncol, 147 | **kwargs, 148 | }, 149 | nrow, 150 | iv + nrow, 151 | ) 152 | -------------------------------------------------------------------------------- /src/mdcraft/plot/color.py: -------------------------------------------------------------------------------- 1 | """ 2 | Plot colors 3 | =========== 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | This module provides functions to select colors for plots. 7 | """ 8 | 9 | import colorsys 10 | from typing import Union 11 | 12 | import matplotlib.colors as mc 13 | 14 | 15 | def adjust_lightness( 16 | colors: Union[str, tuple[float], list[Union[str, tuple[float]]]], amount: float 17 | ) -> Union[tuple[float], list[tuple[float]]]: 18 | """ 19 | Adjusts the lightness of colors. 20 | 21 | Parameters 22 | ---------- 23 | color : `str`, `tuple`, or `list`. 24 | The colors to adjust. A single color can be specified as a tuple 25 | of normalized RGB values or a string containing its name or a 26 | hexadecimal value. Multiple colors can be provided in a `list`. 27 | 28 | **Examples**: :code:`"aquamarine"`, :code:`"#080085"`, 29 | :code:`(0.269, 0.269, 0.269)`. 30 | 31 | amount : `float` 32 | The amount to adjust the luminosity by. A value betwen :math:`0` 33 | and :math:`1` darkens the color, while a value greater than 34 | :math:`1` lightens the color. 35 | 36 | Returns 37 | ------- 38 | colors : `tuple` or `list` 39 | The adjusted colors. 40 | """ 41 | 42 | if isinstance(colors, list): 43 | for i, color in enumerate(colors): 44 | colors[i] = adjust_lightness(color, amount) 45 | return colors 46 | 47 | colors = colorsys.rgb_to_hls( 48 | *mc.to_rgb(mc.cnames[colors] if colors in mc.cnames else colors) 49 | ) 50 | return colorsys.hls_to_rgb(colors[0], max(0, min(1, amount * colors[1])), colors[2]) 51 | -------------------------------------------------------------------------------- /src/mdcraft/plot/rcparam.py: -------------------------------------------------------------------------------- 1 | """ 2 | Matplotlib rcParams 3 | =================== 4 | .. moduleauthor:: Benjamin Ye 5 | 6 | This module provides optimized Matplotlib rcParams for various 7 | scientific publications. 
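A quick illustrative use of `adjust_lightness` from plot/color.py above; the color specifications and scaling factors are arbitrary:

    from mdcraft.plot.color import adjust_lightness

    lighter = adjust_lightness("tab:blue", 1.3)                      # lighten one named color
    darker = adjust_lightness(["#080085", (0.3, 0.6, 0.2)], 0.7)     # darken several colors at once
    print(lighter, darker)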
8 | """ 9 | 10 | import shutil 11 | 12 | import matplotlib as mpl 13 | 14 | # Define figure size guidelines for various publications in inches 15 | FIGURE_SIZE_LIMITS = { 16 | "acs": {"max_single_width": 3.25, "max_double_width": 7, "max_length": 9.5}, 17 | "aip": { 18 | "max_single_width": 3.37, 19 | "max_double_width": 6.69, 20 | "max_length": 8.25, 21 | "min_font_size": 8, 22 | }, 23 | "rsc": { 24 | "max_single_width": 3.26771654, 25 | "max_double_width": 6.73228346, 26 | "max_length": 9.17322835, 27 | }, 28 | } 29 | 30 | 31 | def update( 32 | journal: str = None, font_scaling: float = 1, size_scaling: float = 1, **kwargs 33 | ) -> None: 34 | """ 35 | Updates the Matplotlib rcParams at runtime. By default, this 36 | function overwrites the following settings: 37 | 38 | .. code:: 39 | 40 | { 41 | "axes.labelsize": 9, 42 | "figure.autolayout": True, 43 | "font.size": 9, 44 | "legend.columnspacing": 1, 45 | "legend.edgecolor": "1", 46 | "legend.fontsize": 9, 47 | "legend.handlelength": 1.25, 48 | "legend.labelspacing": 0.25, 49 | "savefig.dpi": 1_200, 50 | "xtick.labelsize": 9, 51 | "ytick.labelsize": 9, 52 | "text.usetex": True # If LaTeX is available 53 | } 54 | 55 | If a supported journal acronym is provided as the first argument, 56 | the default figure size will also be updated. 57 | 58 | Parameters 59 | ---------- 60 | journal : `str`, optional 61 | Journal acronym used to update the default figure size. 62 | 63 | .. container:: 64 | 65 | **Valid values**: 66 | 67 | * :code:`"acs"`: American Chemical Society. 68 | * :code:`"aip"`: American Institute of Physics. 69 | * :code:`"rsc"`: Royal Society of Chemistry. 70 | 71 | font_scaling : `float`, optional 72 | Scaling factor to apply to the default font size. This 73 | value is multiplied by the default font size to obtain the 74 | final font size. 75 | 76 | size_scaling : `float`, optional 77 | Scaling factor to apply to the default figure size. This 78 | value is multiplied by the default figure size to obtain the 79 | final figure size. 80 | 81 | **kwargs 82 | Additional rcParams to update passed to 83 | :meth:`matplotlib.rcParams.update`. 
84 | """ 85 | 86 | fig_size = ( 87 | {} 88 | if journal is None 89 | else { 90 | "figure.figsize": ( 91 | size_scaling * FIGURE_SIZE_LIMITS[journal]["max_single_width"], 92 | size_scaling * 3 * FIGURE_SIZE_LIMITS[journal]["max_single_width"] / 4, 93 | ) 94 | } 95 | ) 96 | 97 | mpl.rcParams.update( 98 | { 99 | "axes.labelsize": font_scaling * 9, 100 | "figure.autolayout": True, 101 | "font.size": font_scaling * 9, 102 | "legend.columnspacing": 1, 103 | "legend.edgecolor": "1", 104 | "legend.fontsize": font_scaling * 9, 105 | "legend.handlelength": 1.25, 106 | "legend.labelspacing": 0.25, 107 | "savefig.dpi": 1_200, 108 | "xtick.labelsize": font_scaling * 9, 109 | "ytick.labelsize": font_scaling * 9, 110 | "text.usetex": bool(shutil.which("latex")), 111 | } 112 | | fig_size 113 | | kwargs 114 | ) 115 | -------------------------------------------------------------------------------- /tests/test_algorithm_molecule.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import sys 3 | 4 | import MDAnalysis as mda 5 | from MDAnalysis.tests.datafiles import DCD, PSF 6 | import numpy as np 7 | import pytest 8 | 9 | sys.path.insert(0, f"{pathlib.Path(__file__).parents[1].resolve().as_posix()}/src") 10 | from mdcraft.algorithm import molecule # noqa: E402 11 | 12 | # Load sample topology and trajectory 13 | universe = mda.Universe(PSF, DCD) 14 | protein = universe.select_atoms("protein") 15 | core = universe.select_atoms("protein and (resid 1-29 60-121 160-214)") 16 | nmp = universe.select_atoms("protein and resid 30-59") 17 | lid = universe.select_atoms("protein and resid 122-159") 18 | arg = universe.select_atoms("resname ARG") 19 | 20 | def test_func_center_of_mass(): 21 | 22 | """ 23 | Test cases 5–15 are inspired by the example in the "Working with 24 | AtomGroups" section of the MDAnalysis Tutorial 25 | (https://www.mdanalysis.org/MDAnalysisTutorial/atomgroups.html). 
26 | """ 27 | 28 | # TEST CASE 1: No topology or trajectory 29 | with pytest.raises(ValueError): 30 | molecule.center_of_mass() 31 | 32 | # TEST CASE 2: Invalid grouping 33 | with pytest.raises(ValueError): 34 | molecule.center_of_mass(universe.atoms, "atoms") 35 | 36 | # TEST CASE 3: No system dimension information when number of periodic 37 | # boundary crossings is provided 38 | with pytest.raises(ValueError): 39 | molecule.center_of_mass(universe.atoms, 40 | images=np.zeros((universe.atoms.n_atoms, 3))) 41 | 42 | # TEST CASE 4: Incompatible mass and position arrays 43 | with pytest.raises(ValueError): 44 | molecule.center_of_mass( 45 | masses=universe.atoms.masses, 46 | positions=[r.atoms.positions for r in universe.residues] 47 | ) 48 | 49 | # TEST CASE 5: Center of mass of domains in AdK 50 | assert np.allclose(molecule.center_of_mass(core), core.center_of_mass()) 51 | assert np.allclose(molecule.center_of_mass(nmp), nmp.center_of_mass()) 52 | assert np.allclose(molecule.center_of_mass(lid), lid.center_of_mass()) 53 | 54 | # TEST CASE 6: Center of mass of all particles using AtomGroup 55 | com = universe.atoms.center_of_mass() 56 | assert np.allclose(molecule.center_of_mass(universe.atoms), com) 57 | 58 | # TEST CASE 7: Center of mass of all particles using AtomGroup, but 59 | # with the masses and unwrapped positions returned 60 | c, m, p = molecule.center_of_mass( 61 | universe.atoms, 62 | images=np.zeros((universe.atoms.n_atoms, 3), dtype=int), 63 | dimensions=np.array((0, 0, 0)), 64 | raw=True 65 | ) 66 | assert np.allclose(c, com) 67 | assert np.allclose(m, universe.atoms.masses) 68 | assert np.allclose(p, universe.atoms.positions) 69 | 70 | # TEST CASE 8: Centers of mass of different residues using AtomGroup 71 | res_coms = np.array([r.atoms.center_of_mass() 72 | for r in universe.residues]) 73 | assert np.allclose(molecule.center_of_mass(universe.atoms, "residues"), 74 | res_coms) 75 | 76 | # TEST CASE 9: Centers of mass of different residues using raw masses 77 | # and positions from AtomGroup 78 | assert np.allclose( 79 | molecule.center_of_mass( 80 | universe.atoms, "residues", 81 | masses=[r.atoms.masses for r in universe.residues] 82 | ), 83 | res_coms 84 | ) 85 | 86 | # TEST CASE 10: Centers of mass of different residues using raw masses 87 | # and positions 88 | assert np.allclose( 89 | molecule.center_of_mass( 90 | masses=[r.atoms.masses for r in universe.residues], 91 | positions=[r.atoms.positions for r in universe.residues] 92 | ), 93 | res_coms 94 | ) 95 | 96 | # TEST CASE 11: Centers of mass of arginine residues using AtomGroup 97 | arg_coms = np.array([r.atoms.center_of_mass() for r in arg.residues]) 98 | assert np.allclose(molecule.center_of_mass(arg, "residues"), arg_coms) 99 | 100 | # TEST CASE 12: Centers of mass of arginine residues using AtomGroup 101 | # and specified number of residues 102 | assert np.allclose(molecule.center_of_mass(arg, n_groups=13), arg_coms) 103 | 104 | # TEST CASE 13: Centers of mass of arginine residues using raw masses 105 | # and positions 106 | assert np.allclose(molecule.center_of_mass(masses=arg.masses, 107 | positions=arg.positions, 108 | n_groups=13), 109 | arg_coms) 110 | 111 | # TEST CASE 14: Centers of mass of only segment in AtomGroup 112 | assert np.allclose(molecule.center_of_mass(universe.atoms, "segments"), 113 | com) 114 | 115 | # TEST CASE 15: Centers of mass of only segment in AtomGroup 116 | # containing the arginine residues 117 | assert np.allclose(molecule.center_of_mass(arg, "segments"), 118 | 
arg.center_of_mass()) 119 | 120 | def test_radius_of_gyration(): 121 | 122 | """ 123 | The reference implementation is adapted from the "Writing your own 124 | trajectory analysis" section of the MDAnalysis User Guide 125 | (https://userguide.mdanalysis.org/stable/examples/analysis/custom_trajectory_analysis.html). 126 | """ 127 | 128 | def radius_of_gyration(group): 129 | positions = group.positions 130 | masses = group.masses 131 | center_of_mass = group.center_of_mass() 132 | r_sq = (positions - center_of_mass) ** 2 133 | r_ssq = np.array((r_sq.sum(axis=1), 134 | (r_sq[:, [1, 2]]).sum(axis=1), 135 | (r_sq[:, [0, 2]]).sum(axis=1), 136 | (r_sq[:, [0, 1]]).sum(axis=1))) 137 | return np.sqrt((masses * r_ssq).sum(axis=1) / masses.sum()) 138 | 139 | # TEST CASE 1: Invalid grouping 140 | with pytest.raises(ValueError): 141 | molecule.radius_of_gyration(universe.atoms, "atoms") 142 | 143 | # TEST CASE 2: Overall radius of gyration 144 | ref = radius_of_gyration(universe.atoms) 145 | assert np.isclose(molecule.radius_of_gyration(universe.atoms), ref[0]) 146 | assert np.allclose(molecule.radius_of_gyration(universe.atoms, 147 | components=True), 148 | ref[1:]) 149 | 150 | # TEST CASE 3: Radii of gyration of arginine residues 151 | ref = np.array([radius_of_gyration(g.atoms) for g in arg.residues]) 152 | assert np.allclose(molecule.radius_of_gyration(arg, "residues"), 153 | ref[:, 0]) 154 | assert np.allclose(molecule.radius_of_gyration(arg, "residues", 155 | components=True), 156 | ref[:, 1:]) 157 | 158 | # TEST CASE 4: Radii of gyration of different residues 159 | ref = np.array([radius_of_gyration(g.atoms) for g in universe.residues]) 160 | assert np.allclose(molecule.radius_of_gyration(universe.atoms, "residues"), 161 | ref[:, 0]) 162 | assert np.allclose(molecule.radius_of_gyration(universe.atoms, "residues", 163 | components=True), 164 | ref[:, 1:]) -------------------------------------------------------------------------------- /tests/test_algorithm_topology.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import sys 3 | 4 | import numpy as np 5 | from openmm import app, unit 6 | import pytest 7 | 8 | sys.path.insert(0, f"{pathlib.Path(__file__).parents[1].resolve().as_posix()}/src") 9 | from mdcraft.algorithm import topology # noqa: E402 10 | 11 | rng = np.random.default_rng() 12 | dims = np.array((10.0, 10.0, 10.0)) 13 | 14 | def test_func_create_atoms(): 15 | 16 | # TEST CASE 1: N not specified 17 | with pytest.raises(ValueError): 18 | topology.create_atoms(dims) 19 | 20 | # TEST CASE 2: N not an integer 21 | with pytest.raises(ValueError): 22 | topology.create_atoms(dims, np.pi) 23 | 24 | # TEST CASE 3: Invalid N_p 25 | with pytest.raises(ValueError): 26 | topology.create_atoms(dims, 9000, 9001) 27 | 28 | # TEST CASE 4: N not divisible by N_p 29 | with pytest.raises(ValueError): 30 | topology.create_atoms(dims, 10, 3) 31 | 32 | # TEST CASE 5: Random melt in reduced units 33 | N = rng.integers(1, 1000) 34 | pos = topology.create_atoms(dims, N) 35 | assert pos.shape == (N, 3) 36 | 37 | # TEST CASE 6: Random melt with default length unit 38 | pos = topology.create_atoms(dims * unit.nanometer, N) 39 | assert pos.shape == (N, 3) and pos.unit == unit.nanometer 40 | 41 | # TEST CASE 7: Random melt with specific length unit 42 | pos = topology.create_atoms(dims * unit.nanometer, N, length_unit=unit.angstrom) 43 | assert pos.shape == (N, 3) and pos.unit == unit.angstrom 44 | 45 | # TEST CASE 8: Topology provided instead of dimensions 46 
| topo = app.Topology() 47 | topo.setUnitCellDimensions(dims) 48 | pos = topology.create_atoms(topo, N) 49 | assert pos.shape == (N, 3) 50 | 51 | # TEST CASE 9: Random polymer melt 52 | M = rng.integers(1, 100) 53 | N_p = rng.integers(4, 100) 54 | N = M * N_p 55 | pos = topology.create_atoms(dims, N, N_p) 56 | assert pos.shape == (N, 3) 57 | 58 | # TEST CASE 10: Random polymer melt with bond, angle, and dihedral 59 | # information and wrapped positions 60 | pos, bonds, angles, dihedrals = topology.create_atoms( 61 | dims, N, N_p, bonds=True, angles=True, dihedrals=True, randomize=True, 62 | wrap=True 63 | ) 64 | assert pos.shape == (N, 3) 65 | assert bonds.shape[0] == N - M 66 | assert angles.shape[0] == N - 2 * M 67 | assert dihedrals.shape[0] == N - 3 * M 68 | assert np.all((pos[:, 0] > 0) & (pos[:, 0] < dims[0])) 69 | assert np.all((pos[:, 1] > 0) & (pos[:, 1] < dims[1])) 70 | assert np.all((pos[:, 2] > 0) & (pos[:, 2] < dims[2])) 71 | 72 | # TEST CASE 11: FCC lattice with flexible dimensions 73 | pos, new_dims = topology.create_atoms(dims, lattice="fcc", length=0.8, 74 | flexible=True) 75 | assert np.allclose(pos[4], 0.8 * np.array((0, np.sqrt(3) / 3, 2 * np.sqrt(6) / 3))) 76 | assert np.allclose(dims, new_dims, atol=1) 77 | 78 | # TEST CASE 12: HCP lattice with flexible dimensions 79 | pos, new_dims = topology.create_atoms(dims, lattice="hcp", length=0.8, 80 | flexible=True) 81 | assert np.allclose(pos[1], 0.8 * np.array((0.5, np.sqrt(3) / 2, 0))) 82 | assert np.allclose(dims, new_dims, atol=1) 83 | 84 | # TEST CASE 13: HCP lattice to fill specified dimensions 85 | pos, new_dims = topology.create_atoms(dims, lattice="hcp", length=0.8) 86 | assert np.allclose(pos[1], 0.8 * np.array((0.5, np.sqrt(3) / 2, 0))) 87 | assert np.allclose(dims, new_dims, atol=1) 88 | 89 | # TEST CASE 14: Graphene wall 90 | pos, new_dims = topology.create_atoms(dims, lattice="honeycomb", 91 | length=0.142 * unit.nanometer, 92 | flexible=True) 93 | assert pos[1, 1] == 0.142 * unit.nanometer 94 | assert np.allclose(dims[:2], new_dims[:2], atol=1) 95 | assert new_dims[2] == 0 * unit.nanometer 96 | 97 | # TEST CASE 15: Cubic crystal lattice 98 | pos, new_dims = topology.create_atoms(dims, lattice="cubic", length=1) 99 | assert np.allclose(pos[-1], dims - 1) 100 | 101 | # TEST CASE 16: HCP wall 102 | pos, new_dims = topology.create_atoms([10, 10, 0], lattice="hcp", length=1, flexible=True) 103 | assert np.allclose(pos[:, 2], 0) and new_dims[2] == 0 104 | 105 | def test_func_unwrap(): 106 | 107 | pos_old = np.array(((2.0, 2.0, 2.0),)) 108 | images = np.zeros_like(pos_old, dtype=int) 109 | thresholds = dims / 2 110 | pos = np.array(((8.0, 8.0, 8.0),)) 111 | 112 | # TEST CASE 1: Unwrap not in-place 113 | pos_unwrapped, pos_old_updated, images = topology.unwrap( 114 | pos, pos_old, dims, thresholds=thresholds, images=images, 115 | in_place=False 116 | ) 117 | assert (np.allclose(pos_unwrapped[0], -2) 118 | and np.allclose(pos, pos_old_updated)) 119 | 120 | # TEST CASE 2: Unwrap in-place 121 | topology.unwrap(pos, pos_old, dims) 122 | assert np.allclose(pos[0], -2) 123 | 124 | def test_func_wrap(): 125 | 126 | pos = np.array(((9.0, 10.0, 11.0),)) 127 | 128 | # TEST CASE 1: Wrap not in-place 129 | pos_wrapped = topology.wrap(pos, dims, in_place=False) 130 | assert np.allclose(pos_wrapped[0], (9, 10, 1)) 131 | 132 | # TEST CASE 2: Wrap in-place 133 | topology.wrap(pos, dims) 134 | assert np.allclose(pos[0], (9, 10, 1)) 135 | 136 | def test_func_convert_cell_representation(): 137 | 138 | # TEST CASE 1: Cubic box 139 |
parameters = np.array((1, 1, 1, 90, 90, 90)) 140 | vectors = np.eye(3) 141 | assert np.allclose( 142 | topology.convert_cell_representation(vectors=vectors), 143 | parameters 144 | ) 145 | assert np.allclose( 146 | topology.convert_cell_representation(parameters=parameters), 147 | vectors 148 | ) 149 | 150 | # TEST CASE 2: Rhombic dodecahedron (xy-square) 151 | parameters = np.array((1, 1, 1, 60, 60, 90)) 152 | vectors = np.array(( 153 | (1, 0, 0), 154 | (0, 1, 0), 155 | (1 / 2, 1 / 2, 1 / np.sqrt(2)) 156 | )) 157 | assert np.allclose( 158 | topology.convert_cell_representation(vectors=vectors), 159 | parameters 160 | ) 161 | assert np.allclose( 162 | topology.convert_cell_representation(parameters=parameters), 163 | vectors 164 | ) 165 | 166 | # TEST CASE 3: Rhombic dodecahedron (xy-hexagon) 167 | parameters = np.array((1, 1, 1, 60, 60, 60)) 168 | vectors = np.array(( 169 | (1, 0, 0), 170 | (1 / 2, np.sqrt(3) / 2, 0), 171 | (1 / 2, np.sqrt(3) / 6, np.sqrt(6) / 3) 172 | )) 173 | assert np.allclose( 174 | topology.convert_cell_representation(vectors=vectors), 175 | parameters 176 | ) 177 | assert np.allclose( 178 | topology.convert_cell_representation(parameters=parameters), 179 | vectors 180 | ) 181 | 182 | # TEST CASE 4: Truncated octahedron 183 | parameters = np.array((1, 1, 1, 184 | np.degrees(np.arccos(1 / 3)), 185 | np.degrees(np.arccos(-1 / 3)), 186 | np.degrees(np.arccos(1 / 3)))) 187 | vectors = np.array(( 188 | (1, 0, 0), 189 | (1 / 3, 2 * np.sqrt(2) / 3, 0), 190 | (-1 / 3, np.sqrt(2) / 3, np.sqrt(6) / 3) 191 | )) 192 | assert np.allclose( 193 | topology.convert_cell_representation(vectors=vectors), 194 | parameters 195 | ) 196 | assert np.allclose( 197 | topology.convert_cell_representation(parameters=parameters), 198 | vectors 199 | ) -------------------------------------------------------------------------------- /tests/test_algorithm_unit.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import sys 3 | 4 | import numpy as np 5 | from openmm import unit 6 | import pytest 7 | 8 | sys.path.insert(0, f"{pathlib.Path(__file__).parents[1].resolve().as_posix()}/src") 9 | from mdcraft import ureg 10 | from mdcraft.algorithm.unit import get_lj_scale_factors, strip_unit # noqa: E402 11 | 12 | def test_func_get_lj_scaling_factors(): 13 | 14 | # TEST CASE 1: Lennard-Jones scaling factors 15 | pint_factors = get_lj_scale_factors({ 16 | "mass": 39.948 * ureg.gram / ureg.mole, 17 | "energy": 3.9520829798737548e-25 * ureg.kilocalorie, 18 | "length": 3.4 * ureg.angstrom 19 | }) 20 | openmm_factors = get_lj_scale_factors({ 21 | "mass": 39.948 * unit.gram / unit.mole, 22 | "energy": 0.238 * unit.kilocalorie_per_mole / unit.AVOGADRO_CONSTANT_NA, 23 | "length": 3.4 * unit.angstrom 24 | }) 25 | for key in openmm_factors.keys(): 26 | value, unit_ = strip_unit(openmm_factors[key]) 27 | assert np.isclose(strip_unit(pint_factors[key], unit_)[0], 28 | value) 29 | 30 | def test_func_strip_unit(): 31 | 32 | # TEST CASE 1: Strip unit from non-Quantity 33 | assert strip_unit(90.0, "deg") == (90.0, "deg") 34 | assert strip_unit(90.0, ureg.degree) == (90.0, ureg.degree) 35 | assert strip_unit(90.0, unit.degree) == (90.0, unit.degree) 36 | 37 | # TEST CASE 2: Strip unit from Quantity 38 | k_ = 1.380649e-23 39 | assert strip_unit(k_) == (k_, None) 40 | assert strip_unit(k_ * ureg.joule * ureg.kelvin ** -1) \ 41 | == (k_, ureg.joule * ureg.kelvin ** -1) 42 | assert strip_unit(k_ * unit.joule * unit.kelvin ** -1) \ 43 | == (k_, unit.joule * unit.kelvin **
-1) 44 | 45 | # TEST CASE 3: Strip unit from Quantity with compatible unit specified 46 | g_ = 32.17404855643044 47 | g = 9.80665 * ureg.meter / ureg.second ** 2 48 | assert strip_unit(g_, "foot/second**2") \ 49 | == (g_, ureg.foot / ureg.second ** 2) 50 | assert strip_unit(g, ureg.foot / ureg.second ** 2) \ 51 | == (g_, ureg.foot / ureg.second ** 2) 52 | g = 9.80665 * unit.meter / unit.second ** 2 53 | assert strip_unit(g, "foot/second**2") \ 54 | == (g_, unit.foot / unit.second ** 2) 55 | assert strip_unit(g, unit.foot / unit.second ** 2) \ 56 | == (g_, unit.foot / unit.second ** 2) 57 | 58 | # TEST CASE 4: Strip unit from Quantity with incompatible unit specified 59 | R_ = 8.31446261815324 60 | R__ = 8.205736608095969e-05 61 | assert strip_unit( 62 | R__ * ureg.meter ** 3 * ureg.atmosphere / (ureg.kelvin * ureg.mole), 63 | unit.joule / (unit.kelvin * unit.mole) 64 | ) == (R_, unit.joule / (unit.kelvin * unit.mole)) 65 | assert strip_unit( 66 | R__ * unit.meter ** 3 * unit.atmosphere / (unit.kelvin * unit.mole), 67 | ureg.joule / (ureg.kelvin * ureg.mole) 68 | ) == (R_, ureg.joule / (ureg.kelvin * ureg.mole)) 69 | 70 | # TEST CASE 5: Strip unit from Quantity with non-standard 71 | # incompatible unit specified 72 | with pytest.raises(ValueError): 73 | strip_unit( 74 | R_ * unit.joule / (unit.kelvin * unit.mole), 75 | ureg.meter ** 3 * ureg.atmosphere / (ureg.kelvin * ureg.mole) 76 | ) -------------------------------------------------------------------------------- /tests/test_algorithm_utility.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import sys 3 | 4 | import numpy as np 5 | import pytest 6 | 7 | sys.path.insert(0, f"{pathlib.Path(__file__).parents[1].resolve().as_posix()}/src") 8 | from mdcraft.algorithm import utility # noqa: E402 9 | 10 | rng = np.random.default_rng() 11 | 12 | def test_func_closest_factors(): 13 | 14 | # TEST CASE 1: Cube root of perfect cube 15 | factors = utility.get_closest_factors(1000, 3) 16 | assert np.allclose(factors, 10 * np.ones(3, dtype=int)) 17 | 18 | # TEST CASE 2: Three closest factors in ascending order 19 | factors = utility.get_closest_factors(35904, 3) 20 | assert factors.tolist() == [32, 33, 34] 21 | 22 | # TEST CASE 3: Four closest factors in descending order 23 | factors = utility.get_closest_factors(73440, 4, reverse=True) 24 | assert factors.tolist() == [18, 17, 16, 15] 25 | 26 | def test_func_replicate(): 27 | 28 | # TEST CASE 1: Replicate two vectors 29 | dims = rng.integers(1, 5, size=3) 30 | n_cells = rng.integers(2, 10, size=3) 31 | pos = utility.replicate(dims, np.array(((0, 0, 0), dims // 2)), n_cells) 32 | assert pos.shape[0] == 2 * n_cells.prod() 33 | assert np.allclose(pos[2], (dims[0], 0, 0)) 34 | 35 | def test_func_rebin(): 36 | 37 | # TEST CASE 1: Rebin 1D array 38 | arr = np.arange(50) 39 | ref = np.arange(2, 52, 5) 40 | assert np.allclose(utility.rebin(arr), ref) 41 | 42 | # TEST CASE 2: Rebin 2D array 43 | assert np.allclose(utility.rebin(np.tile(arr[None, :], (5, 1))), 44 | np.tile(ref[None, :], (5, 1))) 45 | 46 | # TEST CASE 3: No factor specified and cannot be determined 47 | with pytest.raises(ValueError): 48 | utility.rebin(np.empty((17,))) -------------------------------------------------------------------------------- /tests/test_analysis_electrostatics.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import sys 3 | 4 | import MDAnalysis as mda 5 | from MDAnalysis.tests.datafiles import 
PSF_TRICLINIC, DCD_TRICLINIC 6 | from MDAnalysis.analysis.dielectric import DielectricConstant 7 | import numpy as np 8 | 9 | sys.path.insert(0, f"{pathlib.Path(__file__).parents[1].resolve().as_posix()}/src") 10 | from mdcraft.analysis.electrostatics import DipoleMoment # noqa: E402 11 | 12 | def test_class_dipole_moment(): 13 | 14 | """ 15 | The test cases are adapted from the "Dielectric — 16 | :code:`MDAnalysis.analysis.dielectric`" page from the MDAnalysis 17 | User Guide (https://docs.mdanalysis.org/stable/documentation_pages/analysis/dielectric.html). 18 | """ 19 | 20 | universe = mda.Universe(PSF_TRICLINIC, DCD_TRICLINIC) 21 | 22 | diel = DielectricConstant(universe.atoms) 23 | diel.run() 24 | 25 | dm = DipoleMoment(universe.atoms).run() 26 | dm.calculate_relative_permittivity(300) 27 | 28 | pdm = DipoleMoment(universe.atoms, parallel=True).run(module="joblib", 29 | n_jobs=1) 30 | pdm.calculate_relative_permittivity(300) 31 | 32 | # TEST CASE 1: Relative permittivity of water system 33 | assert np.isclose(diel.results.eps_mean, dm.results.dielectrics) 34 | assert np.isclose(diel.results.eps_mean, pdm.results.dielectrics) -------------------------------------------------------------------------------- /tests/test_analysis_polymer.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import sys 3 | 4 | import MDAnalysis as mda 5 | from MDAnalysis.tests.datafiles import DCD, PSF 6 | from MDAnalysis.analysis.base import AnalysisFromFunction 7 | import numpy as np 8 | 9 | sys.path.insert(0, f"{pathlib.Path(__file__).parents[1].resolve().as_posix()}/src") 10 | from mdcraft.analysis.polymer import Gyradius # noqa: E402 11 | 12 | rng = np.random.default_rng() 13 | universe = mda.Universe(PSF, DCD) 14 | protein = universe.select_atoms("protein") 15 | 16 | def test_class_gyradius(): 17 | 18 | """ 19 | The reference implementation is adapted from the "Writing your own 20 | trajectory analysis" section of the MDAnalysis User Guide 21 | (https://userguide.mdanalysis.org/stable/examples/analysis/custom_trajectory_analysis.html). 
22 | """ 23 | 24 | def radius_of_gyration(group): 25 | positions = group.positions 26 | masses = group.masses 27 | center_of_mass = group.center_of_mass() 28 | r_sq = (positions - center_of_mass) ** 2 29 | r_ssq = np.array((r_sq.sum(axis=1), 30 | (r_sq[:, [1, 2]]).sum(axis=1), 31 | (r_sq[:, [0, 2]]).sum(axis=1), 32 | (r_sq[:, [0, 1]]).sum(axis=1))) 33 | return np.sqrt((masses * r_ssq).sum(axis=1) / masses.sum()) 34 | 35 | rog = AnalysisFromFunction(radius_of_gyration, universe.trajectory, 36 | protein).run() 37 | 38 | # TEST CASE 1: Time series of overall radii of gyration 39 | gyr = Gyradius(protein, grouping="residues").run() 40 | pgyr = Gyradius( 41 | protein, 42 | grouping="residues", 43 | parallel=True 44 | ).run(module="joblib", n_jobs=1) 45 | assert np.allclose(rog.results["timeseries"][:, 0], gyr.results.gyradii[0]) 46 | assert np.allclose(rog.results["timeseries"][:, 0], pgyr.results.gyradii[0]) 47 | 48 | # TEST CASE 2: Time series of radius of gyration components 49 | gyr = Gyradius(protein, grouping="residues", components=True).run() 50 | pgyr = Gyradius( 51 | protein, 52 | grouping="residues", 53 | components=True, 54 | parallel=True 55 | ).run(module="joblib", n_jobs=1) 56 | assert np.allclose(rog.results["timeseries"][:, 1:], 57 | gyr.results.gyradii[0]) 58 | assert np.allclose(rog.results["timeseries"][:, 1:], 59 | pgyr.results.gyradii[0]) -------------------------------------------------------------------------------- /tests/test_analysis_profile.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import sys 3 | 4 | import MDAnalysis as mda 5 | from MDAnalysis.tests.datafiles import waterDCD, waterPSF 6 | from MDAnalysis.analysis.lineardensity import LinearDensity 7 | import numpy as np 8 | import pytest 9 | 10 | sys.path.insert(0, f"{pathlib.Path(__file__).parents[1].resolve().as_posix()}/src") 11 | from mdcraft.analysis.profile import DensityProfile # noqa: E402 12 | 13 | universe = mda.Universe(waterPSF, waterDCD) 14 | 15 | def test_class_density_profile(): 16 | 17 | """ 18 | The first two test cases are adapted from the "Computing mass and 19 | charge density on each axis" page from the MDAnalysis User Guide 20 | (https://userguide.mdanalysis.org/stable/examples/analysis/volumetric/linear_density.html). 
21 | """ 22 | 23 | ld = LinearDensity(universe.atoms, "residues").run() 24 | dp = DensityProfile(universe.atoms, "residues", axes="xy", n_bins=200, 25 | average=False).run() 26 | pdp = DensityProfile(universe.atoms, "residues", axes="xy", n_bins=200, 27 | parallel=True).run(module="joblib", n_jobs=1) 28 | 29 | for ax in "xy": 30 | number_density = (0.602214076 * ld.results[ax].mass_density 31 | / universe.residues.masses[0]) 32 | charge_density = 0.602214076 * ld.results[ax].charge_density 33 | 34 | # TEST CASE 1: Number density profiles 35 | assert np.allclose(number_density, 36 | dp.results.number_densities[ax].mean(axis=1)) 37 | assert np.allclose(number_density, pdp.results.number_densities[ax]) 38 | 39 | # TEST CASE 2: Charge density profiles 40 | assert np.allclose(charge_density, 41 | dp.results.charge_densities[ax].mean(axis=0)) 42 | assert np.allclose(charge_density, pdp.results.charge_densities[ax]) 43 | 44 | # TEST CASE 3: Wrong number of dielectric constants 45 | with pytest.raises(ValueError): 46 | dp.calculate_potential_profiles(dielectrics=(78, 78, 78)) 47 | 48 | # TEST CASE 4: Invalid axes 49 | for axes in ["a", 0, [0, 1]]: 50 | with pytest.raises(ValueError): 51 | dp.calculate_potential_profiles(axes, 78) 52 | 53 | # TEST CASE 5: Invalid or wrong number of surface charge densities 54 | tests = [(0, 0, 0), np.zeros((3, 9))] 55 | for sigmas_q in tests: 56 | with pytest.raises(ValueError): 57 | dp.calculate_potential_profiles(dielectrics=78, sigmas_q=sigmas_q) 58 | 59 | # TEST CASE 6: Invalid or wrong number of potential differences 60 | for dVs in tests: 61 | with pytest.raises(ValueError): 62 | dp.calculate_potential_profiles(dielectrics=78, dVs=dVs) 63 | 64 | # TEST CASE 7: Invalid or wrong number of thresholds 65 | for thresholds in tests: 66 | with pytest.raises(ValueError): 67 | dp.calculate_potential_profiles(dielectrics=78, 68 | thresholds=thresholds) 69 | 70 | # TEST CASE 8: Invalid or wrong number of left boundary potentials 71 | for V0s in tests: 72 | with pytest.raises(ValueError): 73 | dp.calculate_potential_profiles(dielectrics=78, V0s=V0s) 74 | 75 | # TEST CASE 9: Wrong number of methods 76 | with pytest.raises(ValueError): 77 | dp.calculate_potential_profiles( 78 | dielectrics=78, 79 | methods=("integral", "integral", "matrix") 80 | ) 81 | 82 | # TEST CASE 10: Wrong number of booleans for 'pbcs' 83 | with pytest.raises(ValueError): 84 | dp.calculate_potential_profiles(dielectrics=78, 85 | pbcs=(True, True, False)) 86 | 87 | # TEST CASE 11: Invalid axis 88 | with pytest.raises(ValueError): 89 | dp.calculate_potential_profiles("z", 78) 90 | 91 | # TEST CASE 12: Potential profiles from integration 92 | dp.calculate_potential_profiles(dielectrics=78, sigmas_q=0, 93 | methods="integral") 94 | for ax in "xy": 95 | assert np.allclose(dp.results.potentials[ax], 0) 96 | 97 | # TEST CASE 13: Potential profiles from system of equations 98 | dp.calculate_potential_profiles(dielectrics=78, sigmas_q=0, 99 | methods="matrix") 100 | for ax in "xy": 101 | assert np.allclose(dp.results.potentials[ax], 0) -------------------------------------------------------------------------------- /tests/test_analysis_transport.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import pathlib 4 | import sys 5 | import urllib 6 | 7 | import MDAnalysis as mda 8 | from MDAnalysis.analysis.msd import EinsteinMSD 9 | from MDAnalysis.tests.datafiles import RANDOM_WALK, RANDOM_WALK_TOPO 10 | import numpy as np 11 | from scipy.stats 
import linregress 12 | 13 | sys.path.insert(0, f"{pathlib.Path(__file__).parents[1].resolve().as_posix()}/src") 14 | from mdcraft.analysis import transport # noqa: E402 15 | 16 | def test_class_onsager_msd(): 17 | 18 | universe = mda.Universe(RANDOM_WALK_TOPO, RANDOM_WALK) 19 | 20 | start = 20 21 | stop = 60 22 | time = np.arange(universe.trajectory.n_frames) 23 | msd = EinsteinMSD(universe).run().results.timeseries / 6 24 | diff = linregress(time[start:stop], msd[start:stop]).slope 25 | 26 | # TEST CASE 1: MSD and diffusion coefficients of a random walk 27 | # calculated using the Einstein relation 28 | universe.dimensions = np.array((np.inf, np.inf, np.inf, 90, 90, 90)) 29 | onsager_shift = transport.Onsager(universe.atoms, fft=False, reduced=True).run() 30 | onsager_shift.calculate_transport_coefficients(start, stop, scale="linear") 31 | assert np.allclose(onsager_shift.results.msd_self[0, 0], msd) 32 | assert np.isclose(diff, onsager_shift.results.D_i[0, 0]) 33 | 34 | # TEST CASE 2: MSD and diffusion coefficients of a random walk 35 | # calculated using the FFT-based algorithm 36 | universe.dimensions = np.array((np.inf, np.inf, np.inf, 90, 90, 90)) 37 | onsager_fft = transport.Onsager(universe.atoms, reduced=True).run() 38 | onsager_fft.calculate_transport_coefficients(start, stop, scale="linear") 39 | assert np.allclose(onsager_fft.results.msd_self[0, 0], msd) 40 | assert np.isclose(diff, onsager_fft.results.D_i[0, 0]) 41 | 42 | def test_class_onsager_transport_coefficients(): 43 | 44 | """ 45 | The test cases are adapted from the "Mean Squared Displacement — 46 | :code:`MDAnalysis.analysis.msd`" page from the MDAnalysis User Guide 47 | (https://docs.mdanalysis.org/stable/documentation_pages/analysis/msd.html) 48 | and uses data from the paper "Onsager Transport Coefficients and 49 | Transference Numbers in Polyelectrolyte Solutions and Polymerized 50 | Ionic Liquids" by Fong et al. 51 | (https://doi.org/10.1021/acs.macromol.0c02001). 
52 | """ 53 | 54 | def acf_fft(x): 55 | N = len(x) 56 | f = np.fft.fft(x, n=2 * N) 57 | return np.fft.ifft(f * f.conj())[:N].real / (N * np.ones(N) 58 | - np.arange(0, N)) 59 | 60 | def msd_fft(r): 61 | N = len(r) 62 | D = np.append(np.square(r).sum(axis=1), 0) 63 | Q = 2 * D.sum() 64 | S1 = np.zeros(N) 65 | for m in range(N): 66 | Q = Q - D[m - 1] - D[N - m] 67 | S1[m] = Q / (N - m) 68 | return S1 - 2 * sum(acf_fft(r[:, i]) for i in range(r.shape[1])) 69 | 70 | def ccf_fft(x, y): 71 | N = len(x) 72 | return np.fft.ifft( 73 | np.fft.fft(x, n=2 ** (2 * N - 1).bit_length()) 74 | * np.fft.fft(y, n=2 ** (2 * N - 1).bit_length()).conj() 75 | )[:N].real / (N * np.ones(N) - np.arange(0, N)) 76 | 77 | def msd_cross_fft(r, k): 78 | N = len(r) 79 | D = np.append(np.multiply(r, k).sum(axis=1), 0) 80 | Q = 2 * D.sum() 81 | S1 = np.zeros(N) 82 | for m in range(N): 83 | Q = Q - D[m - 1] - D[N - m] 84 | S1[m] = Q / (N - m) 85 | return S1 - sum(ccf_fft(r[:, i], k[:, i]) for i in range(r.shape[1])) \ 86 | - sum(ccf_fft(k[:, i], r[:, i]) for i in range(k.shape[1])) 87 | 88 | def calc_L_ii_self(positions): 89 | L_ii_self = np.zeros(positions.shape[0]) 90 | for i in range(positions.shape[1]): 91 | L_ii_self += msd_fft(positions[:, i, :]) 92 | return L_ii_self 93 | 94 | def calc_L_ii(positions): 95 | return msd_fft(positions.sum(axis=1)) 96 | 97 | def calc_L_ij(cation_positions, anion_positions): 98 | return msd_cross_fft(cation_positions.sum(axis=1), 99 | anion_positions.sum(axis=1)) 100 | 101 | def compute_L_ij(anion_positions, cation_positions, volume): 102 | return np.vstack( 103 | ( 104 | calc_L_ii(anion_positions), 105 | calc_L_ij(cation_positions, anion_positions), 106 | calc_L_ii(cation_positions), 107 | calc_L_ii_self(anion_positions), 108 | calc_L_ii_self(cation_positions) 109 | ) 110 | ) / (6 * volume) 111 | 112 | def fit_data(times, f, start, stop): 113 | return linregress(times[start:stop], f[start:stop])[0] 114 | 115 | path = os.getcwd() 116 | if "tests" in path: 117 | path_split = path.split("/") 118 | path = "/".join(path_split[:path_split.index("tests") + 1]) 119 | else: 120 | path += "/tests" 121 | if not os.path.isdir(f"{path}/data/onsager"): 122 | os.makedirs(f"{path}/data/onsager") 123 | os.chdir(f"{path}/data/onsager") 124 | 125 | url = "https://raw.githubusercontent.com/kdfong/transport-coefficients-MSD/master/example-data" 126 | if not os.path.isfile("system.data"): 127 | with urllib.request.urlopen(f"{url}/system.data") as r: 128 | with open("system.data", "w") as f: 129 | f.write(r.read().decode()) 130 | for i in range(1, 6): 131 | if not os.path.isfile(f"traj_{i}.dcd"): 132 | with urllib.request.urlopen(f"{url}/traj_{i}.dcd") as r: 133 | with open(f"traj_{i}.dcd", "wb") as f: 134 | f.write(r.read()) 135 | 136 | dt = 50 137 | start = 40 138 | fit_start = 2 139 | fit_stop = 20 140 | fit_start_self = 20 141 | fit_stop_self = 50 142 | 143 | universe = mda.Universe("system.data", glob.glob("*.dcd"), format="LAMMPS") 144 | groups = [universe.select_atoms(f"type {i}") for i in range(1, 3)] 145 | positions = [np.zeros((universe.trajectory.n_frames - start, g.n_atoms, 3)) 146 | for g in groups] 147 | for i, _ in enumerate(universe.trajectory[40:]): 148 | com = universe.atoms.center_of_mass(wrap=True) 149 | for g, p in zip(groups, positions): 150 | p[i] = g.positions - com 151 | volume = universe.dimensions[:3].prod() 152 | times = np.arange(0, (universe.trajectory.n_frames - start) * dt, dt, 153 | dtype=int) 154 | msds = compute_L_ij(*positions, volume) 155 | 156 | onsager = 
transport.Onsager(groups, temperature=1, center=True, 157 | center_atom=True, center_wrap=True, 158 | reduced=True, dt=dt).run(start=40) 159 | onsager.calculate_transport_coefficients(fit_start, fit_stop, 160 | start_self=fit_start_self, 161 | stop_self=fit_stop_self, 162 | scale="linear", enforce_linear=False) 163 | 164 | L_ij_array = np.triu(onsager.results.L_ij) 165 | for i, (msd, L_ij) in enumerate(zip(msds, L_ij_array[L_ij_array != 0])): 166 | 167 | # TEST CASE 1: Cross displacements of polyelectrolyte system 168 | assert np.allclose(msd, onsager.results.msd_cross[i, 0] / volume, 169 | atol=1e-3) 170 | 171 | # TEST CASE 2: Onsager transport coefficients of polyelectrolyte 172 | # system 173 | assert np.isclose(fit_data(times, msd, fit_start, fit_stop), L_ij) 174 | 175 | for i, (msd, L_ii_self) in enumerate(zip(msds[3:], onsager.results.L_ii_self[0])): 176 | 177 | # TEST CASE 1: MSDs of polyelectrolyte system 178 | assert np.allclose( 179 | msd, groups[i].n_atoms * onsager.results.msd_self[i, 0] / volume, 180 | atol=1e-6 181 | ) 182 | 183 | # TEST CASE 2: Self Onsager transport coefficients of 184 | # polyelectrolyte system 185 | assert np.isclose( 186 | fit_data(times, msd, fit_start_self, fit_stop_self), 187 | L_ii_self 188 | ) -------------------------------------------------------------------------------- /tests/test_openmm_file_reporter.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pathlib 3 | import sys 4 | 5 | import netCDF4 as nc 6 | import numpy as np 7 | import openmm 8 | from openmm import app, unit 9 | import pytest 10 | 11 | sys.path.insert(0, f"{pathlib.Path(__file__).parents[1].resolve().as_posix()}/src") 12 | from mdcraft.openmm import file, pair, reporter, system as s, unit as u # noqa: E402 13 | 14 | def test_classes_netcdffile_netcdfreporter(): 15 | 16 | path = os.getcwd() 17 | if "tests" in path: 18 | path_split = path.split("/") 19 | path = "/".join(path_split[:path_split.index("tests") + 1]) 20 | else: 21 | path += "/tests" 22 | if not os.path.isdir(f"{path}/data/netcdf"): 23 | os.makedirs(f"{path}/data/netcdf") 24 | os.chdir(f"{path}/data/netcdf") 25 | 26 | # Set up a basic OpenMM simulation for a single LJ particle 27 | temp = 300 * unit.kelvin 28 | size = 3.4 * unit.angstrom 29 | mass = 39.948 * unit.amu 30 | scales = u.get_lj_scale_factors({ 31 | "energy": (unit.BOLTZMANN_CONSTANT_kB * temp).in_units_of(unit.kilojoule), 32 | "length": size, 33 | "mass": mass 34 | }) 35 | 36 | dims = 10 * size * np.ones(3) 37 | dims_nd = [L / unit.nanometer for L in dims] 38 | system = openmm.System() 39 | system.setDefaultPeriodicBoxVectors( 40 | (dims_nd[0], 0, 0) * unit.nanometer, 41 | (0, dims_nd[1], 0) * unit.nanometer, 42 | (0, 0, dims_nd[2]) * unit.nanometer 43 | ) 44 | topology = app.Topology() 45 | topology.setUnitCellDimensions(dims) 46 | pair_lj = pair.lj_coul(dims[0] / 4) 47 | s.register_particles(system, topology, 1, mass, nbforce=pair_lj, sigma=size, 48 | epsilon=21.285 * unit.kilojoule_per_mole) 49 | system.addForce(pair_lj) 50 | 51 | plat = openmm.Platform.getPlatformByName("CPU") 52 | dt = 0.005 * scales["time"] 53 | integrator = openmm.LangevinMiddleIntegrator(temp, 1e-3 / dt, dt) 54 | simulation = app.Simulation(topology, system, integrator, plat) 55 | simulation.context.setPositions(dims[None, :] / 2) 56 | 57 | # TEST CASE 1: Correct headers and data for restart file 58 | # (static method, filename) 59 | state = simulation.context.getState(getPositions=True, getVelocities=True, 60 | 
getForces=True) 61 | 62 | file.NetCDFFile.write_file("restart", state) 63 | ncdf = file.NetCDFFile("restart", "r") 64 | assert ncdf._nc.Conventions in ("AMBERRESTART", b"AMBERRESTART") 65 | assert ncdf.get_num_frames() == 1 66 | assert np.allclose(ncdf.get_positions(), dims / 2) 67 | 68 | # TEST CASE 2: Not a restart file 69 | ncdf = file.NetCDFFile.write_header("restart.nc", 1, True, True, True) 70 | with pytest.raises(ValueError): 71 | ncdf.write_file(state) 72 | 73 | # TEST CASE 3: Correct headers and data for restart file (instance method) 74 | ncdf = file.NetCDFFile("restart.nc", "w", restart=True) 75 | ncdf.write_file(state) 76 | assert ncdf._nc.Conventions in ("AMBERRESTART", b"AMBERRESTART") 77 | assert ncdf.get_num_frames() == 1 78 | assert np.allclose(ncdf.get_positions(), dims / 2) 79 | del ncdf 80 | 81 | # TEST CASE 4: Correct headers and data for restart file 82 | # (static method, NetCDF file) 83 | file.NetCDFFile.write_file( 84 | nc.Dataset("restart.nc", "w", format="NETCDF3_64BIT_OFFSET"), 85 | state 86 | ) 87 | ncdf = file.NetCDFFile("restart.nc", "r") 88 | assert ncdf._nc.Conventions in ("AMBERRESTART", b"AMBERRESTART") 89 | assert ncdf.get_num_frames() == 1 90 | assert np.allclose(ncdf.get_positions(), dims / 2) 91 | del ncdf 92 | 93 | # TEST CASE 5: Correct headers and data for trajectory file 94 | timesteps = 5 95 | simulation.reporters.append( 96 | reporter.NetCDFReporter("traj.nc", 1, periodic=True, velocities=True, 97 | forces=True) 98 | ) 99 | simulation.step(timesteps) 100 | 101 | ncdf = file.NetCDFFile("traj.nc", "r") 102 | cell_lengths, cell_angles = ncdf.get_dimensions(0) 103 | assert ncdf._nc.program in ("MDCraft", b"MDCraft") 104 | assert np.allclose(cell_lengths, dims) 105 | assert np.allclose(cell_angles, 90 * np.ones(3)) 106 | assert np.allclose( 107 | ncdf.get_positions(0) - ncdf.get_times(0) * ncdf.get_velocities(0), 108 | dims / 2, 109 | atol=2e-3 110 | ) 111 | assert ncdf.get_num_frames() == timesteps 112 | assert ncdf.get_velocities().shape == (timesteps, 1, 3) 113 | assert ncdf.get_forces().shape == (timesteps, 1, 3) 114 | 115 | # TEST CASE 6: Correct number of atoms for subset trajectory file 116 | s.register_particles(system, topology, 1, mass, nbforce=pair_lj, 117 | sigma=size, epsilon=21.285 * unit.kilojoule_per_mole) 118 | integrator = openmm.LangevinMiddleIntegrator(temp, 1e-3 / dt, dt) 119 | simulation = app.Simulation(topology, system, integrator, plat) 120 | simulation.context.setPositions(np.vstack((dims / 4, 3 * dims / 4)) 121 | * unit.angstrom) 122 | simulation.reporters.append( 123 | reporter.NetCDFReporter("traj_subset.nc", 1, periodic=True, velocities=True, 124 | forces=True, subset=[0]) 125 | ) 126 | simulation.step(1) 127 | 128 | ncdf = file.NetCDFFile("traj_subset.nc", "r") 129 | assert ncdf.get_num_atoms() == 1 130 | 131 | # TEST CASE 7: Correct number of atoms and lack of velocities and 132 | # forces for full trajectory file 133 | state = simulation.context.getState(getPositions=True) 134 | file.NetCDFFile.write_model( 135 | "traj_two.nc", 136 | state.getTime().value_in_unit(unit.picosecond), 137 | state.getPositions(asNumpy=True).value_in_unit(unit.angstrom) 138 | ) 139 | ncdf = file.NetCDFFile("traj_two.nc", "r") 140 | assert ncdf.get_num_atoms() == 2 141 | with pytest.warns(UserWarning): 142 | assert ncdf.get_velocities() is None 143 | with pytest.warns(UserWarning): 144 | assert ncdf.get_forces() is None -------------------------------------------------------------------------------- /tests/test_openmm_topology.py: 
-------------------------------------------------------------------------------- 1 | import pathlib 2 | import sys 3 | 4 | import numpy as np 5 | import openmm 6 | from openmm import app 7 | import pytest 8 | 9 | sys.path.insert(0, f"{pathlib.Path(__file__).parents[1].resolve().as_posix()}/src") 10 | from mdcraft.openmm import system as s, topology as t # noqa: E402 11 | 12 | def test_func_subset_errors(): 13 | 14 | topology = app.Topology() 15 | topology.addAtom("", "", topology.addResidue("", topology.addChain())) 16 | positions = np.array((0, 0, 0)) 17 | 18 | # TEST CASE 1: Both items to delete and keep are specified 19 | with pytest.raises(ValueError): 20 | t.get_subset(topology, positions, delete=[0], keep=[0]) 21 | 22 | # TEST CASE 2: No item type specified 23 | with pytest.raises(ValueError): 24 | t.get_subset(topology, positions, delete=[0]) 25 | 26 | def test_func_subset_polymer(): 27 | 28 | M = 100 29 | N_p = 25 30 | N = M * N_p 31 | dims = np.array((10, 10, 10)) 32 | positions = t.create_atoms(dims, M * N_p, N_p) 33 | system = openmm.System() 34 | topology = app.Topology() 35 | for _ in range(M): 36 | chain = topology.addChain() 37 | s.register_particles(system, topology, N=N_p, chain=chain) 38 | atoms = list(topology.atoms()) 39 | for m in range(M): 40 | for n in range(N_p - 1): 41 | i = m * N_p + n 42 | topology.addBond(atoms[i], atoms[i + 1]) 43 | 44 | # TEST CASE 1: Nothing to do 45 | topo_sub, pos_sub = t.get_subset(topology, positions) 46 | assert topo_sub is topology 47 | assert pos_sub is positions 48 | 49 | n_atoms = 100 50 | n_chains = n_atoms // N_p 51 | n_bonds = n_atoms - n_chains 52 | 53 | # TEST CASE 2: Delete everything but the first 100 atoms, with no type specified 54 | topo_sub, pos_sub = t.get_subset(topology, positions, delete=atoms[n_atoms:]) 55 | assert topo_sub.getNumAtoms() == n_atoms 56 | assert topo_sub.getNumBonds() == n_bonds 57 | assert topo_sub.getNumResidues() == n_atoms 58 | assert topo_sub.getNumChains() == n_chains 59 | 60 | # TEST CASE 3: Delete everything but the first 100 atoms, with types specified 61 | topo_sub, pos_sub = t.get_subset(topology, positions, delete=np.arange(100, N), 62 | types="atom") 63 | assert topo_sub.getNumAtoms() == n_atoms 64 | assert topo_sub.getNumBonds() == n_bonds 65 | assert topo_sub.getNumResidues() == n_atoms 66 | assert topo_sub.getNumChains() == n_chains 67 | 68 | # TEST CASE 4: Keep first 100 atoms 69 | topo_sub, pos_sub = t.get_subset(topology, positions, keep=np.arange(n_atoms), 70 | types="atom") 71 | assert topo_sub.getNumAtoms() == n_atoms 72 | assert topo_sub.getNumBonds() == n_bonds 73 | assert topo_sub.getNumResidues() == n_atoms 74 | assert topo_sub.getNumChains() == n_chains 75 | 76 | # TEST CASE 5: Keep first 96 bonds 77 | topo_sub, pos_sub = t.get_subset(topology, positions, keep=np.arange(n_bonds), 78 | types=n_bonds * ["bond"]) 79 | assert topo_sub.getNumAtoms() == n_atoms 80 | assert topo_sub.getNumBonds() == n_bonds 81 | assert topo_sub.getNumResidues() == n_atoms 82 | assert topo_sub.getNumChains() == n_chains 83 | 84 | # TEST CASE 6: Keep first 100 residues 85 | topo_sub, pos_sub = t.get_subset(topology, positions, keep=np.arange(n_atoms), 86 | types="residue") 87 | assert topo_sub.getNumAtoms() == n_atoms 88 | assert topo_sub.getNumBonds() == n_bonds 89 | assert topo_sub.getNumResidues() == n_atoms 90 | assert topo_sub.getNumChains() == n_chains 91 | 92 | # TEST CASE 7: Keep first 4 chains 93 | topo_sub, pos_sub = t.get_subset(topology, positions, keep=np.arange(n_chains), 94 | 
types="chain") 95 | assert topo_sub.getNumAtoms() == n_atoms 96 | assert topo_sub.getNumBonds() == n_bonds 97 | assert topo_sub.getNumResidues() == n_atoms 98 | assert topo_sub.getNumChains() == n_chains -------------------------------------------------------------------------------- /tests/test_openmm_unit.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import sys 3 | 4 | from openmm import unit 5 | 6 | sys.path.insert(0, f"{pathlib.Path(__file__).parents[1].resolve().as_posix()}/src") 7 | from mdcraft.openmm import unit as u # noqa: E402 8 | 9 | def test_func_lj_scaling(): 10 | 11 | temp = 300 * unit.kelvin 12 | 13 | # TEST CASE 1: Correct units for complex scaling factors 14 | scales = u.get_lj_scale_factors( 15 | {"mass": 18.0153 * unit.amu, 16 | "length": 0.275 * unit.nanometer, 17 | "energy": (unit.BOLTZMANN_CONSTANT_kB * temp).in_units_of(unit.kilojoule)} 18 | ) 19 | assert scales["molar_energy"].unit == unit.kilojoule_per_mole 20 | assert scales["velocity"].unit == unit.nanometer / unit.picosecond 21 | assert scales["electric_field"].unit \ 22 | == unit.kilojoule_per_mole / (unit.nanometer * unit.elementary_charge) 23 | 24 | # TEST CASE 2: No default scaling factors 25 | scales = u.get_scale_factors( 26 | {"mass": 18.0153 * unit.amu, 27 | "length": 0.275 * unit.nanometer, 28 | "energy": (unit.BOLTZMANN_CONSTANT_kB * temp).in_units_of(unit.kilojoule), 29 | "charge": 1 * unit.elementary_charge}, 30 | {"surface_charge_density": (("charge", 1), ("length", -2))} 31 | ) 32 | assert "time" not in scales 33 | 34 | # TEST CASE 3: Custom scaling factors 35 | assert scales["surface_charge_density"].unit == unit.elementary_charge / unit.nanometer ** 2 --------------------------------------------------------------------------------
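A note on the Lennard-Jones scale factors exercised by the last two test files: the reporter and unit tests both build their scale-factor dictionaries from the same three base quantities (mass, length, and energy), and the usual reduced-unit convention defines the derived time scale as tau = sqrt(m * sigma^2 / epsilon). The sketch below recomputes that value with plain NumPy for the argon-like parameters used in `test_classes_netcdffile_netcdfreporter`; it is an independent illustration of the convention, not a call into MDCraft's `get_lj_scale_factors`, and it assumes (without verifying here) that the package's `scales["time"]` follows the same definition.

```python
import numpy as np

# Base quantities from the reporter test above, converted to SI:
# m = 39.948 amu, sigma = 3.4 angstrom, epsilon = k_B * 300 K.
mass = 39.948 * 1.66053906660e-27   # kg
sigma = 3.4e-10                     # m
epsilon = 1.380649e-23 * 300.0      # J

# Standard Lennard-Jones reduced time unit: tau = sqrt(m * sigma**2 / epsilon).
tau = np.sqrt(mass * sigma ** 2 / epsilon)

print(f"tau ~ {tau * 1e12:.2f} ps")                # ~1.36 ps
print(f"0.005 tau ~ {0.005 * tau * 1e15:.1f} fs")  # ~6.8 fs, the dt = 0.005 * scales["time"]
                                                   # timestep used in the reporter test,
                                                   # assuming this convention
```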