├── .coveragerc ├── .gitattributes ├── .gitignore ├── .isort.cfg ├── .zenodo.json ├── AUTHORS.org ├── CONTRIBUTING.md ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.org ├── dev ├── README.md ├── docs.requirements.txt ├── interactive.requirements.txt ├── qa.requirements.txt ├── testing.requirements.txt └── typechecking.requirements.txt ├── env.sh ├── info ├── README.org ├── api.rst ├── changelog.org ├── dev_guide.org ├── docs.bib ├── examples │ ├── Lennard_Jones_Pair │ │ ├── README.org │ │ ├── env │ │ │ ├── dev.requirements.list │ │ │ ├── env.yaml │ │ │ ├── pyversion.txt │ │ │ ├── requirements.in │ │ │ ├── requirements.txt │ │ │ └── self.requirements.txt │ │ ├── source │ │ │ ├── compute_distance_observable.py │ │ │ ├── sim_maker_run.py │ │ │ ├── state_network.py │ │ │ ├── trivial_run.py │ │ │ └── we.py │ │ └── tasks.py │ ├── Lysozyme │ │ ├── README.org │ │ ├── env │ │ │ ├── dev.requirements.list │ │ │ ├── pyversion.txt │ │ │ ├── requirements.in │ │ │ ├── requirements.txt │ │ │ └── self.requirements.txt │ │ ├── source │ │ │ └── we.py │ │ └── tasks.py │ ├── README.org │ └── RandomWalk │ │ ├── README.org │ │ ├── env │ │ ├── dev.requirements.list │ │ ├── pyversion.txt │ │ ├── requirements.in │ │ ├── requirements.txt │ │ └── self.requirements.txt │ │ ├── source │ │ ├── rw_conventional.py │ │ ├── rw_revo.py │ │ └── rw_wexplore.py │ │ └── tasks.py ├── general_info.org ├── glossary.rst ├── howtos.org ├── installation.org ├── introduction.org ├── logo │ ├── wepy-icon.ico │ ├── wepy-icon.png │ ├── wepy-icon_16x16.png │ ├── wepy-icon_32x32.png │ ├── wepy.svg │ └── wepy_32x32.png ├── news-articles │ ├── 2020-04-29_example-writing-guide.org │ ├── 2020-08-04_wepy-1-0-release.org │ ├── 2020-08-13_wepy-preprint.org │ └── 2020-09-25_news-rss-feed.org ├── news.org ├── quick_start.org ├── quick_start │ ├── introducing_resamplers │ │ └── README.org │ ├── reporters │ │ └── README.org │ └── setting_up_simulation │ │ └── README.org ├── reference.org ├── troubleshooting.org ├── tutorials │ ├── data_analysis │ │ ├── README.org │ │ ├── hdf5_access │ │ │ └── README.org │ │ └── observable_intro │ │ │ └── README.org │ ├── extended_test_drive │ │ ├── README.org │ │ └── tasks.py │ ├── multiple_runs │ │ ├── README.org │ │ ├── continue_simulation │ │ │ └── README.org │ │ ├── merge_data │ │ │ └── README.org │ │ └── parallel_runs │ │ │ └── README.org │ ├── prepare │ │ └── README.org │ ├── rebinding_sim │ │ └── README.org │ ├── seh_system_prep │ │ ├── README.org │ │ └── tasks.py │ ├── slurm │ │ └── README.org │ └── unbinding_sim │ │ └── README.org └── users_guide.org ├── noxfile.py ├── pyproject.toml ├── pytest.ini ├── requirements-distributed.txt ├── requirements-graphics.txt ├── requirements-md.txt ├── requirements-prometheus.txt ├── requirements.in ├── requirements.txt ├── scrapyard └── wexplore_image_reporter.py ├── sphinx ├── Makefile ├── conf.py ├── data_analysis_index.rst ├── index.rst ├── multiple_runs_index.rst ├── quick_start_index.rst ├── static │ └── custom.css └── tutorials_index.rst ├── src ├── pytest_wepy │ ├── __init__.py │ ├── lennard_jones_pair.py │ ├── openmm.py │ ├── test_hdf5.py │ └── test_hdf5_analysis.py ├── wepy │ ├── __about__.py │ ├── __init__.py │ ├── __main__.py │ ├── _print_version.py │ ├── analysis │ │ ├── __init__.py │ │ ├── contig_tree.py │ │ ├── distributed.py │ │ ├── network.py │ │ ├── network_layouts │ │ │ ├── __init__.py │ │ │ ├── layout.py │ │ │ ├── layout_graph.py │ │ │ └── tree.py │ │ ├── parents.py │ │ ├── profiles.py │ │ ├── rates.py │ │ └── transitions.py │ ├── 
boundary_conditions │ │ ├── __init__.py │ │ ├── boundary.py │ │ ├── randomwalk.py │ │ ├── receptor.py │ │ └── unbinding.py │ ├── hdf5.py │ ├── orchestration │ │ ├── __init__.py │ │ ├── cli.py │ │ ├── configuration.py │ │ ├── orchestrator.py │ │ └── snapshot.py │ ├── reporter │ │ ├── __init__.py │ │ ├── dashboard.py │ │ ├── hdf5.py │ │ ├── openmm.py │ │ ├── receptor │ │ │ ├── __init__.py │ │ │ └── dashboard.py │ │ ├── reporter.py │ │ ├── restree.py │ │ ├── revo │ │ │ ├── __init__.py │ │ │ └── dashboard.py │ │ ├── walker.py │ │ ├── walker_pkl.py │ │ └── wexplore │ │ │ ├── __init__.py │ │ │ └── dashboard.py │ ├── resampling │ │ ├── __init__.py │ │ ├── decisions │ │ │ ├── __init__.py │ │ │ ├── clone_merge.py │ │ │ └── decision.py │ │ ├── distances │ │ │ ├── __init__.py │ │ │ ├── distance.py │ │ │ ├── randomwalk.py │ │ │ └── receptor.py │ │ └── resamplers │ │ │ ├── __init__.py │ │ │ ├── clone_merge.py │ │ │ ├── resampler.py │ │ │ ├── revo.py │ │ │ └── wexplore.py │ ├── runners │ │ ├── __init__.py │ │ ├── openmm.py │ │ ├── randomwalk.py │ │ └── runner.py │ ├── sim_manager.py │ ├── util │ │ ├── __init__.py │ │ ├── json_top.py │ │ ├── kv.py │ │ ├── mdtraj.py │ │ └── util.py │ ├── walker.py │ └── work_mapper │ │ ├── __init__.py │ │ ├── mapper.py │ │ ├── task_mapper.py │ │ └── worker.py ├── wepy_test_drive.py └── wepy_tools │ ├── __init__.py │ ├── monitoring │ ├── __init__.py │ └── prometheus.py │ ├── sim_makers │ ├── __init__.py │ ├── openmm │ │ ├── __init__.py │ │ ├── lennard_jones.py │ │ ├── lysozyme.py │ │ └── sim_maker.py │ └── toys │ │ ├── __init__.py │ │ └── randomwalk.py │ └── systems │ ├── __init__.py │ ├── lennard_jones.py │ ├── mock.py │ ├── openmm │ ├── __init__.py │ ├── base.py │ └── nacl_pair.py │ └── receptor.py ├── tasks ├── __init__.py ├── config.py ├── modules │ ├── __init__.py │ ├── clean.py │ ├── containers.py │ ├── core.py │ ├── docs.py │ ├── env.py │ ├── git.py │ ├── lxd.py │ └── py.py ├── plugins │ ├── __init__.py │ ├── custom.py │ └── tests.py ├── sysconfig.py └── toplevel.py ├── templates ├── envs │ └── conda_blank │ │ ├── dev.requirements.list │ │ ├── env.yaml │ │ ├── pyversion.txt │ │ ├── requirements.in │ │ └── self.requirements.txt ├── examples │ └── org │ │ ├── README.org │ │ ├── input │ │ └── .keep │ │ ├── source │ │ └── .keep │ │ └── tasks.py ├── jigs │ └── org │ │ ├── README.org │ │ ├── input │ │ └── .keep │ │ ├── source │ │ └── .keep │ │ └── tasks.py └── tutorials │ ├── jupyter │ ├── README.ipynb │ ├── input │ │ └── .keep │ └── tasks.py │ └── org │ ├── README.org │ ├── input │ └── .keep │ ├── source │ └── .keep │ └── tasks.py └── tests ├── benchmarks └── test_mappers.py ├── docs ├── .keep ├── test_examples │ ├── test_Lennard_Jones_Pair.py │ ├── test_Lysozyme.py │ └── test_RandomWalk.py ├── test_pages │ └── test_pages.py └── test_tutorials │ ├── test_Orchestrator.py │ ├── test_data_analysis.py │ ├── test_extended_test_drive.py │ └── test_tutorials.py ├── integration ├── .keep ├── conftest.py ├── test_cli.py ├── test_lj_combinations.py └── test_lj_fixture.py ├── unit ├── .keep └── test_work_mapper │ └── test_mapper.py └── utils ├── README.org └── myutils.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | parallel = True 4 | omit = 5 | tests/* 6 | 7 | source = src/ 8 | 9 | [report] 10 | exclude_lines = 11 | @overload 12 | pragma: no cover 13 | raise NotImplementedError 14 | if TYPE_CHECKING: 15 | pass 16 | if __name__ == "__main__": 17 | 18 | # SNIPPET: use this to fail CI for missing test 
coverage 19 | # fail_under = 100 20 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | src/wepy/_version.py export-subst 2 | -------------------------------------------------------------------------------- /.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | profile = black 3 | 4 | multi_line_output = 3 5 | order_by_type = true 6 | 7 | # add the comment headers for each section 8 | import_heading_stdlib=Standard Library 9 | 10 | import_heading_thirdparty=Third Party Library 11 | 12 | import_heading_firstparty=First Party Library 13 | 14 | import_heading_localfolder=Local Modules 15 | 16 | sections=FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER 17 | -------------------------------------------------------------------------------- /.zenodo.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Weighted Ensemble simulation framework in Python (Mirror of main private development repo. After the main release the development of the project will become public.)", 3 | "license": "MIT", 4 | "title": "ADicksonLab/wepy: 1.0.0 Major version release", 5 | "version": "v1.0.0", 6 | "upload_type": "software", 7 | "publication_date": "2020-08-04", 8 | "creators": [ 9 | { 10 | "affiliation": "Michigan State University", 11 | "name": "Samuel D. Lotz" 12 | }, 13 | { 14 | "affiliation": "Michigan State University", 15 | "name": "Nazanin Donyapour" 16 | }, 17 | { 18 | "affiliation": "Michigan State University", 19 | "name": "Alex Dickson" 20 | }, 21 | { 22 | "affiliation": "Michigan State University", 23 | "name": "Tom Dixon" 24 | }, 25 | { 26 | "affiliation": "Michigan State University", 27 | "name": "Nicole Roussey" 28 | }, 29 | { 30 | "affiliation": "Michigan State University", 31 | "name": "Rob Hall" 32 | } 33 | ], 34 | "access_right": "open", 35 | "related_identifiers": [ 36 | { 37 | "scheme": "url", 38 | "identifier": "https://github.com/ADicksonLab/wepy/tree/v1.0.0", 39 | "relation": "isSupplementTo" 40 | } 41 | ] 42 | } 43 | -------------------------------------------------------------------------------- /AUTHORS.org: -------------------------------------------------------------------------------- 1 | * Credits 2 | 3 | ** Development Lead 4 | 5 | Samuel D. Lotz 6 | 7 | ** Contributors 8 | 9 | Alex Dickson 10 | Nazanin Donyapour 11 | Tom Dixon 12 | Robert Hall 13 | Nicole Roussey 14 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | This guide covers developing on wepy. 4 | 5 | After you have cloned the repo you should have a copy of Python 3.10 installed 6 | and accessible on your PATH as `python3.10`. 7 | 8 | You will also need to install [nox](https://nox.thea.codes/) on your system. I 9 | recommend installing it with `pipx`. 10 | 11 | Then you should be able to start coding and running tests: 12 | 13 | ```sh 14 | make test-unit 15 | ``` 16 | 17 | To build the package: 18 | 19 | ```sh 20 | make build 21 | ``` 22 | 23 | To see all the commands run `make` or: 24 | 25 | ```sh 26 | make help 27 | ``` 28 | 29 | All the virtual environment creation and installation is taken care of behind the 30 | scenes.
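As a rough illustration of what those `make` targets drive: each one invokes a session in `noxfile.py`. Below is a minimal sketch of such a session (the real definitions live in `noxfile.py`; the exact dependency pinning shown here is illustrative, not the project's actual setup):

```python
# sketch of a nox session like the one `make test-unit` invokes
# via `nox -s tests_unit`; see noxfile.py for the real definition
import nox

@nox.session(python="3.10")
def tests_unit(session):
    # install the testing dependency profile and the package itself
    session.install("-r", "dev/testing.requirements.txt")
    session.install(".")
    # run the unit tests with coverage collection
    session.run("pytest", "tests/unit", "--cov")
```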
31 | 32 | If you want a standalone environment you can activate in your shell run: 33 | 34 | ```sh 35 | make env 36 | ``` 37 | 38 | Which creates the `.venv` folder. For convenience it can be activated with: 39 | 40 | ```sh 41 | . ./env.sh 42 | ``` 43 | 44 | 45 | ## TODOs 46 | 47 | Note that there are stubs for performing checks like linting, formatting etc. 48 | that aren't used in this project, but could be added at a later time. 49 | 50 | In order to get tests passing many bad tests were deleted. 51 | 52 | Building of documentation is also not implemented at the moment. 53 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017, 2020 ADicksonLab 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | graft src 2 | graft info 3 | graft envs 4 | 5 | prune info/examples/*/_env 6 | prune info/examples/*/_output 7 | prune info/examples/*/_tangle_source 8 | 9 | prune info/tutorials/*/_env 10 | prune info/tutorials/*/_output 11 | prune info/tutorials/*/_tangle_source 12 | 13 | include AUTHORS.org 14 | include CHANGELOG.org 15 | include LICENSE 16 | include README.org 17 | 18 | include pyproject.toml 19 | 20 | include requirements.in 21 | include versioneer.py 22 | 23 | global-exclude *.py[co] __pycache__ *.so *~ 24 | 25 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | ## Notes on this file. The `help` target parses the file and can generate 2 | ## headings and docstrings for targets, in the order they are in the file. To 3 | ## create a heading make a comment like `##@ Heading Content`. To document 4 | ## targets make a comment on the same line as the target name with two `## 5 | ## docstring explanation...`. To leave a target undocumented simply provide no 6 | ## docstring. 7 | 8 | PYTHON=python3.11 9 | TMPDIR=.tmp 10 | 11 | ##@ Getting Started 12 | 13 | env: ## Rebuild the main development environment 14 | nox -s dev_external 15 | echo "Run this to activate dev env: . 
env.sh" 16 | .PHONY: env 17 | 18 | ##@ Housekeeping 19 | 20 | clean-all: clean clean-docs clean-env clean-compose clean-hooks ## run all clean commands 21 | .PHONY: clean-all 22 | 23 | clean: ## Clean temporary files, directories etc. 24 | rm -rf $(TMPDIR) 25 | rm -rf dist .pytest_cache .mypy_cache .coverage .venv .nox .hatch htmlcov 26 | find . -type f -name "*.pyc" -print -delete 27 | hatch clean 28 | .PHONY: clean 29 | 30 | clean-env: ## Clean the dev environments 31 | rm -rf .venv 32 | .PHONY: clean-env 33 | 34 | ##@ QA (TODO) 35 | 36 | # format: ## Run source code formatters manually. 37 | # nox -s format 38 | # .PHONY: docstrings 39 | 40 | # validate: ## Run all linters, type checks, static analysis, etc. 41 | # nox -s validate 42 | # .PHONY: validate 43 | 44 | # format-check: ## Run code formatting checks 45 | # nox -s format_check 46 | # .PHONY: format-check 47 | 48 | # # check: ## Run only miscellaneous maintenance checks 49 | # # .PHONY: check 50 | 51 | # lint: ## Run only the linters (non-autoformatters). 52 | # nox -s lint 53 | # .PHONY: lint 54 | 55 | # docstring-check: ## Run docstring coverage only. 56 | # nox -s docstring_lint 57 | # .PHONY: docstring 58 | 59 | # typecheck: ## Run only the type checker (requires mypy) 60 | # nox -s typecheck 61 | # .PHONY: typecheck 62 | 63 | ##@ Dev 64 | 65 | test-unit: ## Run unit tests with coverage report 66 | nox -s tests_unit 67 | .PHONY: test 68 | 69 | coverage: ## Report on missing coverage. (Run 'test-unit' to generate new stats) 70 | nox -s coverage 71 | .PHONY: coverage 72 | 73 | serve-coverage: ## Run a temporary web server to display detailed coverage report 74 | python3 -m http.server --directory htmlcov 4322 75 | .PHONY: serve-coverage 76 | 77 | 78 | ##@ Documentation (TODO) 79 | 80 | # docs: ## TODO: Build the documentation 81 | # nox -s docs 82 | # .PHONY: docs 83 | 84 | # clean-docs: ## TODO: Clean temporary files for documentation 85 | # rm -rf docs/_api docs/_build 86 | # .PHONY: clean-docs 87 | 88 | # serve-docs: ## TODO: Run a temporary web server to display current documentation build 89 | # python3 -m http.server --directory docs/_build 4323 90 | # .PHONY: serve-docs 91 | 92 | 93 | ##@ Release Management 94 | 95 | pin: pyproject.toml ## Pin the project dependency versions 96 | nox -s pin 97 | .PHONY: pin 98 | 99 | bumpversion: ## Bump the minor version for the project 100 | nox -s bumpversion 101 | .PHONY: bumpversion 102 | 103 | build: ## Run the python build/packaging, generate sdist & wheel 104 | nox -s build 105 | .PHONY: build 106 | 107 | publish: ## Publish the package to indices 108 | nox -s publish 109 | .PHONY: publish 110 | 111 | ##@ Help 112 | 113 | # An automatic help command: https://www.padok.fr/en/blog/beautiful-makefile-awk 114 | .DEFAULT_GOAL := help 115 | 116 | help: ## (DEFAULT) This command, show the help message 117 | @echo "See CONTRIBUTING.md for dependencies, then run this:" 118 | @echo "" 119 | @echo "If you want a shell in a virtual environment with everything:" 120 | @echo " > make env" 121 | @echo " > . 
./env.sh" 122 | @echo "" 123 | @echo "Do testing:" 124 | @echo " > make test-unit" 125 | @echo "" 126 | @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) 127 | .PHONY: help 128 | -------------------------------------------------------------------------------- /README.org: -------------------------------------------------------------------------------- 1 | * Weighted Ensemble Python (wepy) 2 | 3 | #+ATTR_HTML: title="Join the chat at https://gitter.im/wepy/general" 4 | [[https://gitter.im/wepy/general?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge][file:https://badges.gitter.im/wepy/general.svg]] 5 | 6 | [[./info/logo/wepy.svg]] 7 | 8 | # trying to make a zenodo badge but github doesn't support this 9 | # directly. Would have to add a separate build step for this. 10 | #+begin_export html 11 | <a href="https://zenodo.org/badge/latestdoi/101077926">DOI</a> 12 | #+end_export 13 | 14 | [[https://adicksonlab.github.io/wepy/index.html][Sphinx Documentation]] 15 | 16 | [[https://github.com/ADicksonLab/wepy/blob/master/info/README.org][Plaintext Org-Mode Docs]] 17 | 18 | Modular implementation and framework for running weighted ensemble (WE) 19 | simulations in pure python, where the aim is to have simple things 20 | simple and complicated things possible, with the latter taking priority. 21 | 22 | The goal of the architecture is that it should be highly modular to 23 | allow extension, but provide a "killer app" for most uses that just 24 | works, no questions asked. 25 | 26 | Comes equipped with support for [[https://github.com/pandegroup/openmm][OpenMM]] molecular dynamics, 27 | parallelization using multiprocessing, the [[http://pubs.acs.org/doi/abs/10.1021/jp411479c][WExplore]] 28 | and [[https://pubmed.ncbi.nlm.nih.gov/31255090/][REVO]] (Resampling Ensembles by Variance Optimization) resampling 29 | algorithms, and an HDF5 file format and library for storing and 30 | querying your WE datasets that can be used from the command line. 31 | 32 | The deeper architecture of ~wepy~ is intended to be loosely coupled, 33 | so that unforeseen use cases can be accommodated, but tightly 34 | integrated for the most common of use cases, i.e. molecular dynamics. 35 | 36 | This allows freedom for fast development of new methods. 37 | 38 | Full [[https://github.com/ADicksonLab/wepy/blob/master/info/introduction.org][introduction]]. 39 | 40 | ** Installation 41 | 42 | Also see: [[info/installation.org][Installation Instructions]] 43 | 44 | We recommend running this version of `wepy` in a conda environment using `python=3.10` or greater: 45 | 46 | #+BEGIN_SRC bash 47 | conda create -n wepy python=3.10 48 | conda activate wepy 49 | #+END_SRC 50 | 51 | Next, install `wepy` with pip: 52 | 53 | #+BEGIN_SRC bash 54 | pip install wepy 55 | #+END_SRC 56 | 57 | which will also install most dependencies. 58 | 59 | Alternatively, the latest version of `wepy` can be installed from the git repo source: 60 | #+BEGIN_SRC bash 61 | git clone https://github.com/ADicksonLab/wepy.git 62 | cd wepy 63 | pip install .
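# or, for a development (editable) install of the cloned source: pip install -e .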
64 | #+END_SRC 65 | 66 | The OpenMM package can then be installed using conda: 67 | 68 | #+BEGIN_SRC bash 69 | conda install -c conda-forge openmm 70 | #+END_SRC 71 | 72 | Check it's installed by running the command line interface: 73 | 74 | #+begin_src bash :tangle check_installation.bash 75 | wepy --help 76 | #+end_src 77 | 78 | ** Citations 79 | 80 | Current [[https://zenodo.org/badge/latestdoi/101077926][Zenodo DOI]]. 81 | 82 | Cite software as: 83 | 84 | #+begin_example 85 | Samuel D. Lotz, Nazanin Donyapour, Alex Dickson, Tom Dixon, Nicole Roussey, & Rob Hall. (2020, August 4). ADicksonLab/wepy: 1.0.0 Major version release (Version v1.0.0). Zenodo. http://doi.org/10.5281/zenodo.3973431 86 | #+end_example 87 | 88 | Accompanying journal article: 89 | 90 | - [[https://pubs.acs.org/doi/abs/10.1021/acsomega.0c03892][ACS Omega]] article 91 | 92 | 93 | 94 | -------------------------------------------------------------------------------- /dev/README.md: -------------------------------------------------------------------------------- 1 | This contains external specifications of the different profiles of dependencies 2 | needed for different tasks. 3 | 4 | They are split up this way to allow for the absolute minimum set of dependencies 5 | in environments like CI, where we want performance to be optimal; reducing the 6 | number of dependencies helps a lot with that. 7 | -------------------------------------------------------------------------------- /dev/docs.requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | sphinxcontrib-napoleon 3 | sphinxcontrib-newsfeed 4 | sphinxcontrib-bibtex 5 | nbsphinx 6 | notebook 7 | -------------------------------------------------------------------------------- /dev/interactive.requirements.txt: -------------------------------------------------------------------------------- 1 | ipython 2 | pdbpp 3 | -------------------------------------------------------------------------------- /dev/qa.requirements.txt: -------------------------------------------------------------------------------- 1 | black 2 | isort 3 | flake8 4 | flake8-bugbear 5 | interrogate 6 | -------------------------------------------------------------------------------- /dev/testing.requirements.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | coverage 3 | pytest-cov 4 | -------------------------------------------------------------------------------- /dev/typechecking.requirements.txt: -------------------------------------------------------------------------------- 1 | mypy 2 | -------------------------------------------------------------------------------- /env.sh: -------------------------------------------------------------------------------- 1 | . ./.venv/bin/activate 2 | -------------------------------------------------------------------------------- /info/README.org: -------------------------------------------------------------------------------- 1 | 2 | * Wepy: A Weighted Ensemble (WE) simulation framework 3 | ** Getting Started 4 | 5 | - [[file:./installation.org][Installation]] :: Detailed instructions on how to install. 6 | - [[file:./quick_start/index.org][Quick Start]] :: Get up and running as quickly as possible. 7 | - [[file:./introduction.org][Introduction]] :: Background and motivation on the problem this 8 | software solves and the approach it takes to solving it.
9 | 10 | ** Software Documentation 11 | 12 | - [[file:./tutorials/index.org][Tutorials]] & [[file:./examples/README.org][Examples]] :: *Learning-oriented* examples for newcomers. 13 | - [[file:./reference.org][Reference]] :: *Information-oriented* reference material. 14 | - [[file:./troubleshooting.org][Troubleshooting]] :: Frequently Asked Questions (FAQ) and common troubleshooting strategies. 15 | 16 | ** Project & Developer Information 17 | 18 | - [[file:./general_info.org][General Info]] :: General information about the project itself. 19 | - [[file:./news.org][News & Updates]] :: Various news items about the project including 20 | release announcements. 21 | - [[file:./changelog.org][Changelog & History]] :: High-level prose description of changes for 22 | each version. 23 | - [[file:./dev_guide.org][Developer's Guide]] :: Instructions and guides for developers. 24 | -------------------------------------------------------------------------------- /info/api.rst: -------------------------------------------------------------------------------- 1 | API Overview 2 | ============ 3 | 4 | This is a listing of the most important and most used components; for an exhaustive 5 | (and potentially exhausting) listing see the module index: 6 | 7 | 8 | Analysis 9 | -------- 10 | 11 | * :any:`Free Energy Profiles of Observables ` 12 | * :any:`Contigs and Contig Tree/Forest ` 13 | * :any:`Parent Tables & Lineage Traces ` 14 | * :any:`Warping Rates ` 15 | * :any:`Macro-State Network ` 16 | * :any:`Network Transition Probabilities ` 17 | 18 | 19 | 20 | Data Storage 21 | ------------ 22 | 23 | * :any:`WepyHDF5 ` 24 | 25 | Orchestration 26 | ------------- 27 | 28 | * :any:`Orchestrator ` 29 | * :any:`Configuration ` 30 | 31 | Wepy Core 32 | --------- 33 | 34 | * :any:`Simulation Manager ` 35 | * :any:`Walker ` 36 | 37 | .. 38 | Orchestration CLI 39 | ----------------- 40 | 41 | TODO 42 | 43 | 44 | Utilities 45 | --------- 46 | 47 | * :any:`JSON Topology ` 48 | * :any:`Miscellaneous ` 49 | * :any:`MDTraj Interface ` 50 | 51 | 52 | Application Components 53 | ---------------------- 54 | 55 | Reporting on Simulations 56 | ^^^^^^^^^^^^^^^^^^^^^^^^ 57 | 58 | General Purpose: 59 | 60 | * :any:`Text Dashboard ` 61 | * :any:`WepyHDF5 ` 62 | * :any:`Resampling Tree ` 63 | * :any:`Last Walkers ` 64 | 65 | WExplore and Image Based Resamplers: 66 | 67 | * :any:`Images ` 68 | * :any:`Dashboard ` 69 | 70 | Resamplers 71 | ^^^^^^^^^^ 72 | 73 | * :any:`WExplore ` 74 | * :any:`REVO ` 75 | 76 | 77 | Distance Metrics 78 | ^^^^^^^^^^^^^^^^ 79 | 80 | * :any:`Ligand Unbinding and Rebinding ` 81 | 82 | 83 | Runners 84 | ^^^^^^^ 85 | 86 | * :any:`OpenMM ` 87 | 88 | Work Mapper 89 | ^^^^^^^^^^^ 90 | 91 | * :any:`Single Process ` 92 | * :any:`Worker Mapper ` 93 | 94 | Parallel mapper via python multiprocessing that implements the 95 | worker-consumer model. There will only be as many forked processes as 96 | workers used. 97 | 98 | * :any:`Task Mapper ` 99 | 100 | Parallel mapper via python multiprocessing that implements task-based 101 | parallelism. A new process is created (forked) for every walker on every 102 | cycle, and workers are scheduled via a lock queue.
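As a quick illustration of selecting between these mappers, here is a sketch based on the Lennard-Jones sim maker example shipped in this repo (the class, spec strings, and argument values are taken from that example; treat this as a sketch rather than the definitive API)::

    from wepy_tools.sim_makers.openmm.lennard_jones import LennardJonesPairOpenMMSimMaker

    sim_maker = LennardJonesPairOpenMMSimMaker()

    apparatus = sim_maker.make_apparatus(
        integrator='LangevinIntegrator',
        resampler='REVOResampler',
        bc='UnbindingBC',
        platform='CPU',
    )

    # 'Mapper' runs everything serially in a single process; 'TaskMapper'
    # gives the per-walker task parallelism described above
    config = sim_maker.make_configuration(
        apparatus,
        work_mapper_spec='TaskMapper',
        platform='CPU',
        work_dir='_output',
    )

    sim_manager = sim_maker.make_sim_manager(10, apparatus, config)
    sim_manager.run_simulation(10, 1000, num_workers=2)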
103 | 104 | Boundary Conditions 105 | ^^^^^^^^^^^^^^^^^^^ 106 | 107 | * :any:`Receptor-Ligand (Un)Binding ` 108 | 109 | -------------------------------------------------------------------------------- /info/examples/Lennard_Jones_Pair/README.org: -------------------------------------------------------------------------------- 1 | * Lennard-Jones particle pair simulation 2 | 3 | Very simple example using a pair of Lennard-Jones particles. 4 | 5 | ** Basic run script 6 | 7 | To see a basic and full example of wepy in action just run the ~we.py~ 8 | script with some arguments for the number of cycles, number of steps, 9 | and number of walkers to use: 10 | 11 | #+begin_src bash :tangle _tangle_source/run0.bash 12 | python source/we.py 10 100 10 13 | #+end_src 14 | 15 | You should see an ~_output/we~ folder appear with the files: 16 | ~results.wepy.h5~ and ~wepy.dash.org~. 17 | 18 | The ~wepy.dash.org~ file is a plain text file that gives lots of high 19 | level information about the simulation. This file is written every 20 | cycle so it is useful for looking at the progress of the simulation as 21 | it runs, hence the 'dash' moniker. It also happens to be in a special 22 | format called "org-mode" which allows for folding of sections, 23 | etc. when using the right editor, which is primarily Emacs. 24 | 25 | ** Analysis 26 | 27 | Before running these you should have produced the 28 | ~_output/we/results.wepy.h5~ file by running the ~we.py~ file. 29 | 30 | *** Computing Observables 31 | 32 | This script shows you how to write an observable function, apply it to all 33 | trajectories, and save the results into the HDF5 file: 34 | 35 | #+begin_src bash :tangle _tangle_source/run1.bash 36 | python source/compute_distance_observable.py 37 | #+end_src 38 | 39 | After this is run we can open up an interactive python session (we 40 | recommend using IPython) and see what has been done: 41 | 42 | #+begin_src python :tangle _tangle_source/inspect_observable.py 43 | import numpy as np 44 | from wepy.hdf5 import WepyHDF5 45 | 46 | wepy_h5 = WepyHDF5('_output/we/results.wepy.h5', mode='r') 47 | 48 | wepy_h5.open() 49 | 50 | # the observables are put in a special compound field for 51 | # trajectories, here we get the h5py.Group for the trajectory and list 52 | # the contents ourselves 53 | print(list(wepy_h5.traj(0,0)['observables'])) 54 | 55 | # we can retrieve the field for the trajectory like this 56 | wepy_h5.get_traj_field(0, 0, 'observables/rmsd') 57 | 58 | # to get all of these values for all of the trajectories we use the 59 | # iterator methods 60 | rmsds = np.array([fields['observables/rmsd'] 61 | for fields in wepy_h5.iter_trajs_fields(['observables/rmsd'])]) 62 | 63 | print(rmsds.shape) 64 | 65 | wepy_h5.close() 66 | 67 | #+end_src 68 | 69 | *** MacroState network 70 | 71 | This script shows an example of assigning labels to frames and then 72 | generating a network representation. 73 | 74 | #+begin_src bash 75 | python source/state_network.py 76 | #+end_src 77 | 78 | This will produce a GEXF format file which you can open up in Gephi to 79 | look at your network.
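If you don't have Gephi available you can also inspect the network programmatically with ~networkx~ (a small sketch, assuming you already ran ~state_network.py~ above so the GEXF file exists in ~_output~):

#+begin_src python
import networkx as nx

# load the conformation state network written by state_network.py
csn = nx.read_gexf('_output/random_macrostates.csn.gexf')

print(csn.number_of_nodes(), 'macrostates')
print(csn.number_of_edges(), 'transitions')

# node attributes include the macrostate weights set by the script
for node_id, data in csn.nodes(data=True):
    print(node_id, data.get('Weight'))
#+end_src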
80 | 81 | 82 | -------------------------------------------------------------------------------- /info/examples/Lennard_Jones_Pair/env/dev.requirements.list: -------------------------------------------------------------------------------- 1 | ./requirements.in 2 | -------------------------------------------------------------------------------- /info/examples/Lennard_Jones_Pair/env/env.yaml: -------------------------------------------------------------------------------- 1 | name: null 2 | channels: 3 | - conda-forge 4 | - defaults 5 | - omnia 6 | dependencies: 7 | - openmm 8 | -------------------------------------------------------------------------------- /info/examples/Lennard_Jones_Pair/env/pyversion.txt: -------------------------------------------------------------------------------- 1 | 3.7.6 2 | -------------------------------------------------------------------------------- /info/examples/Lennard_Jones_Pair/env/requirements.in: -------------------------------------------------------------------------------- 1 | --index-url https://pypi.python.org/simple/ 2 | 3 | ipython 4 | pdbpp 5 | invoke 6 | 7 | wepy[all] 8 | 9 | multiprocessing_logging 10 | numpy 11 | scipy 12 | mdtraj -------------------------------------------------------------------------------- /info/examples/Lennard_Jones_Pair/env/requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile 3 | # To update, run: 4 | # 5 | # pip-compile --output-file=info/examples/Lennard_Jones_Pair/env/requirements.txt ./requirements.in info/examples/Lennard_Jones_Pair/env/requirements.in 6 | # 7 | --index-url https://pypi.python.org/simple/ 8 | 9 | backcall==0.1.0 # via ipython 10 | boltons==20.2.0 # via eliot 11 | click==7.1.1 # via -r ./requirements.in, wepy 12 | cloudpickle==1.3.0 # via dask 13 | cycler==0.10.0 # via matplotlib 14 | dask[bag]==2.14.0 # via -r ./requirements.in, wepy 15 | decorator==4.4.2 # via ipython, networkx, traitlets 16 | dill==0.3.1.1 # via -r ./requirements.in, wepy 17 | eliot==1.12.0 # via -r ./requirements.in, wepy 18 | fancycompleter==0.9.1 # via pdbpp 19 | fsspec==0.7.2 # via dask 20 | git+https://github.com/ADicksonLab/geomm # via -r ./requirements.in, wepy 21 | h5py==2.10.0 # via -r ./requirements.in, wepy 22 | invoke==1.4.1 # via -r info/examples/Lennard_Jones_Pair/env/requirements.in 23 | ipython-genutils==0.2.0 # via traitlets 24 | ipython==7.13.0 # via -r info/examples/Lennard_Jones_Pair/env/requirements.in 25 | jedi==0.17.0 # via ipython 26 | jinja2==2.11.2 # via -r ./requirements.in, wepy 27 | kiwisolver==1.2.0 # via matplotlib 28 | locket==0.2.0 # via partd 29 | markupsafe==1.1.1 # via jinja2 30 | matplotlib==3.2.1 # via -r ./requirements.in, wepy 31 | mdtraj==1.9.3 # via -r ./requirements.in, -r info/examples/Lennard_Jones_Pair/env/requirements.in, wepy 32 | multiprocessing-logging==0.3.1 # via -r ./requirements.in, -r info/examples/Lennard_Jones_Pair/env/requirements.in, wepy 33 | networkx==2.3 # via -r ./requirements.in, wepy 34 | numpy==1.18.2 # via -r ./requirements.in, -r info/examples/Lennard_Jones_Pair/env/requirements.in, h5py, matplotlib, mdtraj, pandas, scipy, wepy 35 | git+https://github.com/ADicksonLab/openmm_systems # via -r ./requirements.in, wepy 36 | pandas==1.0.3 # via -r ./requirements.in, wepy 37 | parso==0.7.0 # via jedi 38 | partd==1.1.0 # via dask 39 | pdbpp==0.10.2 # via -r info/examples/Lennard_Jones_Pair/env/requirements.in 40 | pexpect==4.8.0 # via ipython 41 | pickleshare==0.7.5 # 
via ipython 42 | pint==0.11 # via -r ./requirements.in, wepy 43 | prometheus-client==0.8.0 # via -r ./requirements.in, wepy 44 | prompt-toolkit==3.0.5 # via ipython 45 | ptyprocess==0.6.0 # via pexpect 46 | pygments==2.6.1 # via ipython, pdbpp 47 | pympler==0.8 # via -r ./requirements.in, wepy 48 | pyparsing==2.4.7 # via matplotlib 49 | pyrepl==0.9.0 # via fancycompleter 50 | pyrsistent==0.16.0 # via eliot 51 | python-dateutil==2.8.1 # via matplotlib, pandas 52 | pytz==2019.3 # via pandas 53 | scipy==1.4.1 # via -r ./requirements.in, -r info/examples/Lennard_Jones_Pair/env/requirements.in, mdtraj, wepy 54 | six==1.14.0 # via cycler, eliot, h5py, pyrsistent, python-dateutil, traitlets 55 | tabulate==0.8.7 # via -r ./requirements.in, wepy 56 | toolz==0.10.0 # via dask, partd 57 | traitlets==4.3.3 # via ipython 58 | wcwidth==0.1.9 # via prompt-toolkit 59 | wepy[all]==1.0.0 # via -r info/examples/Lennard_Jones_Pair/env/requirements.in 60 | wmctrl==0.3 # via pdbpp 61 | zope.interface==5.1.0 # via eliot 62 | 63 | # The following packages are considered to be unsafe in a requirements file: 64 | # setuptools 65 | -------------------------------------------------------------------------------- /info/examples/Lennard_Jones_Pair/env/self.requirements.txt: -------------------------------------------------------------------------------- 1 | # uncomment for developing this example 2 | # -e .[md] 3 | 4 | -------------------------------------------------------------------------------- /info/examples/Lennard_Jones_Pair/source/compute_distance_observable.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | # this is a typical function for mapping over the dataset. I have 4 | # added the extra argument scaling_factor just to demonstrate that you 5 | # can have other arguments. 
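# NOTE: compute_observable calls this function once per trajectory,
# passing a dict that maps each requested field name (here just
# 'positions', an array of shape (n_frames, n_atoms, 3)) to that
# trajectory's data, followed by the extra positional arguments given.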
6 | def traj_field_lj_dist(traj_data, scaling_factor): 7 | 8 | positions = traj_data['positions'] 9 | 10 | # slice out positions for each LJ particle 11 | lj1 = positions[:,0,:] 12 | lj2 = positions[:,1,:] 13 | # compute distances with the scaling factor 14 | distances = scaling_factor * np.sqrt( 15 | (lj1[:,0] - lj2[:,0])**2 + (lj1[:,1] - lj2[:,1])**2 + (lj1[:,2] - lj2[:,2])**2) 16 | 17 | return distances 18 | 19 | 20 | 21 | if __name__ == "__main__": 22 | from wepy.hdf5 import WepyHDF5 23 | 24 | # load the HDF5 file in read/write so we can save data to the 25 | # observables 26 | wepy_hdf5_path = "_output/we/results.wepy.h5" 27 | wepy_h5 = WepyHDF5(wepy_hdf5_path, mode='r+') 28 | 29 | with wepy_h5: 30 | wepy_h5.compute_observable(traj_field_lj_dist, ['positions'], 31 | (2.0,), 32 | save_to_hdf5='2*rmsd', 33 | map_func=map) 34 | 35 | wepy_h5.compute_observable(traj_field_lj_dist, ['positions'], 36 | (1.0,), 37 | save_to_hdf5='rmsd', 38 | map_func=map) 39 | -------------------------------------------------------------------------------- /info/examples/Lennard_Jones_Pair/source/sim_maker_run.py: -------------------------------------------------------------------------------- 1 | import logging 2 | logger = logging.getLogger(__name__) 3 | 4 | if __name__ == "__main__": 5 | 6 | import os 7 | import shutil 8 | import sys 9 | import logging 10 | logger = logging.getLogger(__name__) 11 | from multiprocessing_logging import install_mp_handler 12 | 13 | from wepy_tools.sim_makers.openmm.lennard_jones import LennardJonesPairOpenMMSimMaker 14 | 15 | OUTPUT_DIR = "_output/sim_maker_run" 16 | 17 | logger.setLevel(logging.DEBUG) 18 | install_mp_handler() 19 | 20 | if sys.argv[1] == "-h" or sys.argv[1] == "--help": 21 | print("arguments: n_cycles, n_steps, n_walkers, n_workers, platform, resampler") 22 | exit() 23 | else: 24 | n_cycles = int(sys.argv[1]) 25 | n_steps = int(sys.argv[2]) 26 | n_walkers = int(sys.argv[3]) 27 | n_workers = int(sys.argv[4]) 28 | platform = sys.argv[5] 29 | resampler = sys.argv[6] 30 | 31 | print("Number of steps: {}".format(n_steps)) 32 | print("Number of cycles: {}".format(n_cycles)) 33 | 34 | # make the results directory if not already made 35 | try: 36 | shutil.rmtree(OUTPUT_DIR) 37 | except FileNotFoundError: 38 | pass 39 | 40 | os.makedirs(OUTPUT_DIR, exist_ok=True) 41 | 42 | sim_maker = LennardJonesPairOpenMMSimMaker() 43 | 44 | apparatus = sim_maker.make_apparatus( 45 | integrator='LangevinIntegrator', 46 | resampler=resampler, 47 | bc='UnbindingBC', 48 | platform=platform, 49 | ) 50 | config = sim_maker.make_configuration(apparatus, 51 | work_mapper_spec='Mapper', 52 | platform=platform, 53 | work_dir=OUTPUT_DIR) 54 | 55 | sim_manager = sim_maker.make_sim_manager(n_walkers, apparatus, config) 56 | 57 | sim_manager.run_simulation(n_cycles, n_steps, num_workers=n_workers) 58 | -------------------------------------------------------------------------------- /info/examples/Lennard_Jones_Pair/source/state_network.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | 3 | import numpy as np 4 | 5 | from wepy.hdf5 import WepyHDF5 6 | from wepy.resampling.decisions.clone_merge import MultiCloneMergeDecision 7 | from wepy.boundary_conditions.unbinding import UnbindingBC 8 | from wepy.analysis.transitions import run_transition_probability_matrix 9 | from wepy.analysis.network import MacroStateNetwork 10 | from wepy.analysis.contig_tree import ContigTree 11 | 12 | output_dir = Path('_output') 13 | sim_dir = 
output_dir / 'we' 14 | 15 | # Load wepy hdf5 file into python script 16 | wepy_h5 = WepyHDF5(sim_dir / 'results.wepy.h5', mode = 'r+') 17 | run_idx = 0 18 | assg_key = 'rand_assg_idx' 19 | n_classifications = 4 20 | random_seed = 1 21 | 22 | np.random.seed(random_seed) 23 | 24 | # make random assignments 25 | 26 | # observable function 27 | def rand_assg(fields_d, *args, **kwargs): 28 | assignments = np.random.randint(0, n_classifications, 29 | size=fields_d['weights'].shape) 30 | return assignments 31 | 32 | with wepy_h5: 33 | 34 | # compute this random assignment "observable" 35 | wepy_h5.compute_observable( 36 | rand_assg, 37 | ['weights'], 38 | (), 39 | save_to_hdf5=assg_key, 40 | return_results=False, 41 | ) 42 | 43 | # make a contig tree 44 | contig_tree = ContigTree( 45 | wepy_h5, 46 | decision_class=MultiCloneMergeDecision, 47 | boundary_condition_class=UnbindingBC, 48 | ) 49 | 50 | 51 | # make a state network using the key of the assignment and the 52 | # file. Without edges this is just a collection of "macrostates" 53 | # or groupings of microstates from the simulation data. We also 54 | # set a lag time to compute transition probabilities 55 | # with 56 | 57 | random_macrostates = MacroStateNetwork(contig_tree, 58 | assg_field_key="observables/{}".format(assg_key), 59 | transition_lag_time=3) 60 | 61 | node_id = random_macrostates.node_ids[0] 62 | 63 | # we can do things like make a trajectory in mdtraj and output as a 64 | # dcd for a state 65 | traj = random_macrostates.state_to_mdtraj(node_id) 66 | traj.save_dcd(str(output_dir / "state_{}.dcd".format(node_id))) 67 | 68 | # we also can automatically compute the weights of the macrostates. 69 | random_macrostates.set_macrostate_weights() 70 | 71 | # this sets them as macrostate (node) attributes 72 | print("node {} weight:".format(node_id)) 73 | print(random_macrostates.get_node_attribute(node_id, 'Weight')) 74 | 75 | # we can also get a transition probability matrix from this 76 | print(random_macrostates.probmat) 77 | 78 | 79 | # furthermore you can write the network out to a GEXF file that 80 | # can be used for visualization 81 | random_macrostates.write_gexf(str(output_dir / "random_macrostates.csn.gexf")) 82 | -------------------------------------------------------------------------------- /info/examples/Lennard_Jones_Pair/source/trivial_run.py: -------------------------------------------------------------------------------- 1 | """Very basic example of a simulation without a resampler or boundary 2 | conditions""" 3 | 4 | from copy import copy 5 | 6 | import openmm as omm 7 | import simtk.unit as unit 8 | 9 | from openmm_systems.test_systems import LennardJonesPair 10 | 11 | from wepy.resampling.resamplers.resampler import NoResampler 12 | from wepy.runners.openmm import OpenMMRunner, gen_walker_state 13 | from wepy.walker import Walker 14 | from wepy.sim_manager import Manager 15 | 16 | # use a ready made system for OpenMM MD simulation 17 | test_sys = LennardJonesPair() 18 | 19 | integrator = omm.LangevinIntegrator(300.0*unit.kelvin, 20 | 1/unit.picosecond, 21 | 0.002*unit.picoseconds) 22 | 23 | init_state = gen_walker_state(test_sys.positions, test_sys.system, integrator) 24 | 25 | runner = OpenMMRunner(test_sys.system, test_sys.topology, integrator, 26 | platform='Reference') 27 | 28 | # a trivial resampler which does nothing 29 | resampler = NoResampler() 30 | 31 | # Run the simulation 32 | 33 | # number of cycles of WE to perform 34 | n_cycles = 5 35 | 36 | # the number of MD dynamics steps for each cycle
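# (i.e. how long dynamics runs between resampling points; resampling
# happens once at the end of every cycle)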
37 | n_steps = 1000 38 | steps = [n_steps for i in range(n_cycles)] 39 | 40 | # number of parallel simulations 41 | n_walkers = 10 42 | 43 | 44 | 45 | # create the initial walkers with equal weights 46 | init_weight = 1.0 / n_walkers 47 | init_walkers = [Walker(copy(init_state), init_weight) for i in range(n_walkers)] 48 | 49 | sim_manager = Manager(init_walkers, 50 | runner=runner, 51 | resampler=resampler) 52 | 53 | # run the simulation and get the results 54 | final_walkers, _ = sim_manager.run_simulation(n_cycles, steps) 55 | -------------------------------------------------------------------------------- /info/examples/Lennard_Jones_Pair/tasks.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | import os 4 | import os.path as osp 5 | from pathlib import Path 6 | 7 | def tangle_orgfile(cx, file_path): 8 | """Tangle the target file using emacs in batch mode. Implicitly dumps 9 | things relative to the file.""" 10 | 11 | cx.run(f"emacs -Q --batch -l org {file_path} -f org-babel-tangle") 12 | 13 | @task 14 | def init(cx): 15 | cx.run("mkdir -p _tangle_source") 16 | cx.run("mkdir -p _output") 17 | 18 | @task 19 | def clean(cx): 20 | cx.run("rm -rf _tangle_source") 21 | cx.run("rm -rf _output") 22 | 23 | @task(pre=[init]) 24 | def tangle(cx): 25 | tangle_orgfile(cx, "README.org") 26 | cx.run(f"chmod ug+x ./_tangle_source/*.bash", warn=True) 27 | cx.run(f"chmod ug+x ./_tangle_source/*.sh", warn=True) 28 | cx.run(f"chmod ug+x ./_tangle_source/*.py", warn=True) 29 | 30 | @task 31 | def clean_env(cx): 32 | cx.run("rm -rf _env") 33 | 34 | @task(pre=[init]) 35 | def env(cx): 36 | """Create the environment from the specs in 'env'. Must have the 37 | entire repository available as it uses the tooling from it. 
38 | 39 | """ 40 | 41 | example_name = Path(os.getcwd()).stem 42 | 43 | with cx.cd("../../../"): 44 | cx.run(f"inv docs.env-example -n {example_name}") 45 | -------------------------------------------------------------------------------- /info/examples/Lysozyme/README.org: -------------------------------------------------------------------------------- 1 | * Lysozyme Simulations 2 | 3 | #+begin_src bash :tangle _tangle_source/run0.bash 4 | python source/we.py 2 2 10 1 CPU REVOResampler 5 | #+end_src 6 | -------------------------------------------------------------------------------- /info/examples/Lysozyme/env/dev.requirements.list: -------------------------------------------------------------------------------- 1 | ./requirements.in 2 | -------------------------------------------------------------------------------- /info/examples/Lysozyme/env/pyversion.txt: -------------------------------------------------------------------------------- 1 | 3.7.6 2 | -------------------------------------------------------------------------------- /info/examples/Lysozyme/env/requirements.in: -------------------------------------------------------------------------------- 1 | 2 | --index-url https://pypi.python.org/simple/ 3 | 4 | # wepy[md] 5 | multiprocessing_logging -------------------------------------------------------------------------------- /info/examples/Lysozyme/env/requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile 3 | # To update, run: 4 | # 5 | # pip-compile --output-file=info/examples/Lysozyme/env/requirements.txt ./requirements.in info/examples/Lysozyme/env/requirements.in 6 | # 7 | --index-url https://pypi.python.org/simple/ 8 | 9 | boltons==20.2.0 # via eliot 10 | click==7.1.1 # via -r ./requirements.in, wepy 11 | cloudpickle==1.3.0 # via dask 12 | cycler==0.10.0 # via matplotlib 13 | dask[bag]==2.14.0 # via -r ./requirements.in 14 | decorator==4.4.2 # via networkx 15 | dill==0.3.1.1 # via -r ./requirements.in, wepy 16 | eliot==1.12.0 # via -r ./requirements.in, wepy 17 | fsspec==0.7.2 # via dask 18 | git+https://github.com/ADicksonLab/geomm # via -r ./requirements.in, wepy 19 | h5py==2.10.0 # via -r ./requirements.in, wepy 20 | jinja2==2.11.2 # via -r ./requirements.in, wepy 21 | kiwisolver==1.2.0 # via matplotlib 22 | locket==0.2.0 # via partd 23 | markupsafe==1.1.1 # via jinja2 24 | matplotlib==3.2.1 # via -r ./requirements.in, wepy 25 | mdtraj==1.9.3 # via -r ./requirements.in, wepy 26 | multiprocessing-logging==0.3.1 # via -r ./requirements.in, -r info/examples/Lysozyme/env/requirements.in, wepy 27 | networkx==2.3 # via -r ./requirements.in, wepy 28 | numpy==1.18.3 # via -r ./requirements.in, h5py, matplotlib, mdtraj, pandas, scipy, wepy 29 | git+https://github.com/ADicksonLab/openmm_systems # via -r ./requirements.in, wepy 30 | pandas==1.0.3 # via -r ./requirements.in, wepy 31 | partd==1.1.0 # via dask 32 | pint==0.11 # via -r ./requirements.in, wepy 33 | prometheus-client==0.8.0 # via -r ./requirements.in 34 | pympler==0.8 # via -r ./requirements.in 35 | pyparsing==2.4.7 # via matplotlib 36 | pyrsistent==0.16.0 # via eliot 37 | python-dateutil==2.8.1 # via matplotlib, pandas 38 | pytz==2019.3 # via pandas 39 | scipy==1.4.1 # via -r ./requirements.in, mdtraj, wepy 40 | six==1.14.0 # via cycler, eliot, h5py, pyrsistent, python-dateutil 41 | tabulate==0.8.7 # via -r ./requirements.in, wepy 42 | toolz==0.10.0 # via dask, partd 43 | wepy[md]==1.0.0 # via -r 
info/examples/Lysozyme/env/requirements.in 44 | zope.interface==5.1.0 # via eliot 45 | 46 | # The following packages are considered to be unsafe in a requirements file: 47 | # setuptools 48 | -------------------------------------------------------------------------------- /info/examples/Lysozyme/env/self.requirements.txt: -------------------------------------------------------------------------------- 1 | # uncomment for developing this example 2 | # -e .[md] 3 | 4 | -------------------------------------------------------------------------------- /info/examples/Lysozyme/source/we.py: -------------------------------------------------------------------------------- 1 | import logging 2 | logger = logging.getLogger(__name__) 3 | if __name__ == "__main__": 4 | 5 | import os 6 | import shutil 7 | import sys 8 | import logging 9 | logger = logging.getLogger(__name__) 10 | from pathlib import Path 11 | 12 | from multiprocessing_logging import install_mp_handler 13 | 14 | from wepy_tools.sim_makers.openmm.lysozyme import LysozymeImplicitOpenMMSimMaker 15 | 16 | logging.getLogger(__name__).setLevel(logging.DEBUG) 17 | install_mp_handler() 18 | 19 | if sys.argv[1] == "-h" or sys.argv[1] == "--help": 20 | print("arguments: n_cycles, n_steps, n_walkers, n_workers, platform, resampler") 21 | exit() 22 | else: 23 | n_cycles = int(sys.argv[1]) 24 | n_steps = int(sys.argv[2]) 25 | n_walkers = int(sys.argv[3]) 26 | n_workers = int(sys.argv[4]) 27 | platform = sys.argv[5] 28 | resampler = sys.argv[6] 29 | 30 | print("Number of steps: {}".format(n_steps)) 31 | print("Number of cycles: {}".format(n_cycles)) 32 | 33 | 34 | output_dir = Path('_output') 35 | 36 | # make the results directory if not already made 37 | try: 38 | shutil.rmtree(output_dir / 'we') 39 | except FileNotFoundError: 40 | pass 41 | 42 | os.makedirs(output_dir / 'we', exist_ok=True) 43 | 44 | sim_maker = LysozymeImplicitOpenMMSimMaker() 45 | 46 | apparatus = sim_maker.make_apparatus( 47 | integrator='LangevinIntegrator', 48 | resampler=resampler, 49 | bc='UnbindingBC', 50 | platform=platform, 51 | ) 52 | config = sim_maker.make_configuration(apparatus, 53 | work_mapper_spec='TaskMapper', 54 | platform=platform, 55 | work_dir=str(output_dir / 'we')) 56 | 57 | sim_manager = sim_maker.make_sim_manager(n_walkers, apparatus, config) 58 | 59 | sim_manager.run_simulation(n_cycles, n_steps, num_workers=n_workers) 60 | -------------------------------------------------------------------------------- /info/examples/Lysozyme/tasks.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | import os 4 | import os.path as osp 5 | from pathlib import Path 6 | 7 | def tangle_orgfile(cx, file_path): 8 | """Tangle the target file using emacs in batch mode. 
Implicitly dumps 9 | things relative to the file.""" 10 | 11 | cx.run(f"emacs -Q --batch -l org {file_path} -f org-babel-tangle") 12 | 13 | @task 14 | def init(cx): 15 | cx.run("mkdir -p _tangle_source") 16 | cx.run("mkdir -p _output") 17 | 18 | @task 19 | def clean(cx): 20 | cx.run("rm -rf _tangle_source") 21 | cx.run("rm -rf _output") 22 | 23 | @task(pre=[init]) 24 | def tangle(cx): 25 | tangle_orgfile(cx, "README.org") 26 | cx.run(f"chmod ug+x ./_tangle_source/*.bash", warn=True) 27 | cx.run(f"chmod ug+x ./_tangle_source/*.sh", warn=True) 28 | cx.run(f"chmod ug+x ./_tangle_source/*.py", warn=True) 29 | 30 | 31 | @task 32 | def clean_env(cx): 33 | cx.run("rm -rf _env") 34 | 35 | @task(pre=[init]) 36 | def env(cx): 37 | """Create the environment from the specs in 'env'. Must have the 38 | entire repository available as it uses the tooling from it. 39 | 40 | """ 41 | 42 | example_name = Path(os.getcwd()).stem 43 | 44 | with cx.cd("../../../"): 45 | cx.run(f"inv docs.env-example -n {example_name}") 46 | -------------------------------------------------------------------------------- /info/examples/README.org: -------------------------------------------------------------------------------- 1 | 2 | * Examples 3 | 4 | See the [[../users_guide.org][User's Guide]] for more information on how to run these in 5 | general. 6 | -------------------------------------------------------------------------------- /info/examples/RandomWalk/README.org: -------------------------------------------------------------------------------- 1 | * Random walk simulations 2 | Examples of how to run the random walk simulations using different 3 | resamplers. 4 | 5 | The randomwalk system implements the random movement of walkers in an 6 | N-dimensional space. 7 | 8 | ** Basic run script 9 | 10 | To see a REVO example run ~rw_revo.py~ with arguments for the number of runs, number of cycles, 11 | number of walkers, and the dimension of space. 12 | 13 | #+begin_src bash :tangle _tangle_source/run0.bash 14 | python source/rw_revo.py 1 3 20 10 5 15 | #+end_src 16 | 17 | You should see an ~_output~ folder appear with the files: 18 | ~results.wepy.h5~ and ~randomwalk_revo.org~. 19 | 20 | The ~randomwalk_revo.org~ file is a plain text file that gives high 21 | level information about the simulation, including simulation properties 22 | and quality parameters for each run. This file is written at the end 23 | of the simulation. 24 | 25 | In this file the first section contains general information about 26 | random walk simulations: 27 | 28 | #+begin_src org 29 | ,* Random walk simulation: 30 | -Number of runs: 2 31 | -Number of cycles: 5 32 | -Number of walkers:10 33 | -Move-forward probability:0.25 34 | -Dimension:5 35 | #+end_src 36 | 37 | 38 | The other file is ~results.wepy.h5~, a ~WepyHDF5~ formatted file that includes 39 | the data from the simulation. 40 | 41 | There are also examples provided for the ~WExplore~ resampler and no resampler. These 42 | two examples use the same arguments as REVO.
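Whichever resampler you use, the resulting ~WepyHDF5~ file can be opened with the same API shown in the Lennard-Jones example (a short sketch; adjust the path to the file your script actually wrote, e.g. the conventional and WExplore scripts write ~_output/rw_results.wepy.h5~):

#+begin_src python
from wepy.hdf5 import WepyHDF5

wepy_h5 = WepyHDF5('_output/rw_results.wepy.h5', mode='r')

with wepy_h5:
    # positions of the first trajectory of the first run; for the
    # random walk system each frame is shaped (1, dimension)
    positions = wepy_h5.get_traj_field(0, 0, 'positions')
    print(positions.shape)
#+end_src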
43 | -------------------------------------------------------------------------------- /info/examples/RandomWalk/env/dev.requirements.list: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /info/examples/RandomWalk/env/pyversion.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /info/examples/RandomWalk/env/requirements.in: -------------------------------------------------------------------------------- 1 | --index-url https://pypi.python.org/simple/ 2 | 3 | wepy 4 | 5 | numpy 6 | -------------------------------------------------------------------------------- /info/examples/RandomWalk/env/requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile 3 | # To update, run: 4 | # 5 | # pip-compile --output-file=info/examples/RandomWalk/env/requirements.txt info/examples/RandomWalk/env/requirements.in 6 | # 7 | --index-url https://pypi.python.org/simple/ 8 | 9 | boltons==20.2.0 # via eliot 10 | click==7.1.2 # via wepy 11 | cycler==0.10.0 # via matplotlib 12 | decorator==4.4.2 # via networkx 13 | dill==0.3.2 # via wepy 14 | eliot==1.12.0 # via wepy 15 | geomm==0.1.7 # via wepy 16 | h5py==2.10.0 # via wepy 17 | importlib-metadata==1.7.0 # via pint 18 | jinja2==2.11.2 # via wepy 19 | kiwisolver==1.2.0 # via matplotlib 20 | markupsafe==1.1.1 # via jinja2 21 | matplotlib==3.3.0 # via wepy 22 | multiprocessing-logging==0.3.1 # via wepy 23 | networkx==2.3 # via wepy 24 | numpy==1.18.3 # via -r info/examples/RandomWalk/env/requirements.in, geomm, h5py, matplotlib, pandas, scipy, wepy 25 | packaging==20.4 # via pint 26 | pandas==1.1.0 # via wepy 27 | pillow==7.2.0 # via matplotlib 28 | pint==0.14 # via wepy 29 | pyparsing==2.4.7 # via matplotlib, packaging 30 | pyrsistent==0.16.0 # via eliot 31 | python-dateutil==2.8.1 # via matplotlib, pandas 32 | pytz==2020.1 # via pandas 33 | scipy==1.5.2 # via geomm, wepy 34 | six==1.15.0 # via cycler, eliot, h5py, packaging, pyrsistent, python-dateutil 35 | tabulate==0.8.7 # via wepy 36 | wepy==1.0.0 # via -r info/examples/RandomWalk/env/requirements.in 37 | zipp==3.1.0 # via importlib-metadata 38 | zope.interface==5.1.0 # via eliot 39 | 40 | # The following packages are considered to be unsafe in a requirements file: 41 | # setuptools 42 | -------------------------------------------------------------------------------- /info/examples/RandomWalk/env/self.requirements.txt: -------------------------------------------------------------------------------- 1 | # uncomment for developing this example 2 | # -e . 3 | 4 | -------------------------------------------------------------------------------- /info/examples/RandomWalk/source/rw_conventional.py: -------------------------------------------------------------------------------- 1 | """This is an example of a conventional random walk simulation. There is 2 | no resampling after the random walk dynamics.
3 | 4 | """ 5 | import sys 6 | import os 7 | import os.path as osp 8 | from pathlib import Path 9 | 10 | import numpy as np 11 | 12 | from wepy.resampling.resamplers.resampler import NoResampler 13 | from wepy.resampling.distances.randomwalk import RandomWalkDistance 14 | from wepy.runners.randomwalk import RandomWalkRunner, UNIT_NAMES 15 | from wepy.walker import Walker, WalkerState 16 | 17 | from wepy_tools.sim_makers.toys.randomwalk import RandomwalkProfiler 18 | 19 | 20 | 21 | SAVE_FIELDS = ('positions',) 22 | # Name of field's unit in the HDF5 23 | UNITS = UNIT_NAMES 24 | 25 | outputs_dir = Path('_output') 26 | 27 | if not osp.exists(outputs_dir): 28 | os.makedirs(outputs_dir) 29 | 30 | # sets the input paths 31 | hdf5_filename = 'rw_results.wepy.h5' 32 | reporter_filename = 'randomwalk_conventional.org' 33 | 34 | hdf5_path = outputs_dir / hdf5_filename 35 | reporter_path = outputs_dir / reporter_filename 36 | 37 | 38 | 39 | if __name__ == "__main__": 40 | if sys.argv[1] == "--help" or sys.argv[1] == '-h': 41 | print("arguments: n_runs, n_cycles, n_walkers, dimension") 42 | else: 43 | n_runs = int(sys.argv[1]) 44 | n_cycles = int(sys.argv[2]) 45 | n_walkers = int(sys.argv[3]) 46 | dimension = int(sys.argv[4]) 47 | 48 | # set up the distance function 49 | distance = RandomWalkDistance() 50 | 51 | 52 | 53 | # set up the NoResampler 54 | resampler = NoResampler() 55 | 56 | # set up a RandomwalkProfiler 57 | rw_profiler = RandomwalkProfiler(resampler, 58 | dimension, 59 | hdf5_filename=str(hdf5_path), 60 | reporter_filename=str(reporter_path)) 61 | 62 | # runs the simulations and gets the result 63 | rw_profiler.run( 64 | num_runs=n_runs, 65 | num_cycles=n_cycles, 66 | num_walkers=n_walkers, 67 | ) 68 | -------------------------------------------------------------------------------- /info/examples/RandomWalk/source/rw_wexplore.py: -------------------------------------------------------------------------------- 1 | """This is an example of a random walk simulation using the WExplore 2 | resampler.
3 | 4 | """ 5 | import sys 6 | import os 7 | import os.path as osp 8 | from pathlib import Path 9 | 10 | import numpy as np 11 | 12 | from wepy.resampling.resamplers.wexplore import WExploreResampler 13 | from wepy.resampling.distances.randomwalk import RandomWalkDistance 14 | from wepy.runners.randomwalk import RandomWalkRunner, UNIT_NAMES 15 | from wepy.walker import Walker, WalkerState 16 | 17 | from wepy_tools.sim_makers.toys.randomwalk import RandomwalkProfiler 18 | 19 | 20 | ON = True 21 | OFF = False 22 | # the maximum weight allowed for a walker 23 | PMAX = 0.1 24 | # the minimum weight allowed for a walker 25 | PMIN = 1e-100 26 | 27 | # set the value of distance exponent 28 | DIST_EXPONENT = 4 29 | # the merge distance value 30 | MERGE_DIST = 2.5 31 | # field in the HDF5 32 | SAVE_FIELDS = ('positions') 33 | # Name of field's unit in the HDF5 34 | UNITS = UNIT_NAMES 35 | PROBABILITY=0.25 36 | WEIGHTS=ON 37 | 38 | # the maximum number of regions allowed under each parent region 39 | MAX_N_REGIONS = (10, 10, 10, 10) 40 | 41 | # the maximum size of regions, new regions will be created if a walker 42 | # is beyond this distance from each voronoi image unless there is an 43 | # already maximal number of regions 44 | MAX_REGION_SIZES = (16, 4, 1, .25) 45 | 46 | outputs_dir = Path('_output') 47 | if not osp.exists(outputs_dir): 48 | os.makedirs(outputs_dir) 49 | 50 | 51 | # sets the input paths 52 | hdf5_filename = 'rw_results.wepy.h5' 53 | reporter_filename = 'randomwalk_wexplore.org' 54 | 55 | hdf5_path= outputs_dir / hdf5_filename 56 | reporter_path = outputs_dir / reporter_filename 57 | 58 | 59 | 60 | if __name__=="__main__": 61 | if sys.argv[1] == "--help" or sys.argv[1] == '-h': 62 | print("arguments: n_cycles, n_walkers, dimension") 63 | else: 64 | 65 | n_runs = int(sys.argv[1]) 66 | n_cycles = int(sys.argv[2]) 67 | n_walkers = int(sys.argv[3]) 68 | dimension = int(sys.argv[4]) 69 | 70 | 71 | dimension = 5 72 | 73 | # set up initial state for walkers 74 | position_coords = np.zeros((1, dimension)) 75 | 76 | init_state = WalkerState(positions=position_coords, time=0.0) 77 | 78 | # set up the distance function 79 | distance = RandomWalkDistance(); 80 | 81 | 82 | 83 | # set up the WExplore Resampler with the parameters 84 | resampler = WExploreResampler(distance=distance, 85 | init_state=init_state, 86 | max_n_regions=MAX_N_REGIONS, 87 | max_region_sizes=MAX_REGION_SIZES, 88 | pmin=PMIN, pmax=PMAX) 89 | 90 | 91 | # set up a RandomWalkProfilier 92 | rw_profiler = RandomwalkProfiler(resampler, 93 | dimension, 94 | hdf5_filename=str(hdf5_path), 95 | reporter_filename=str(reporter_path)) 96 | 97 | # runs the simulations and gets the result 98 | rw_profiler.run(num_runs=n_runs, num_cycles=n_cycles, 99 | num_walkers=n_walkers) 100 | 101 | #set up the Wexplore Resampler with the parameters 102 | -------------------------------------------------------------------------------- /info/examples/RandomWalk/tasks.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | import os 4 | import os.path as osp 5 | from pathlib import Path 6 | 7 | def tangle_orgfile(cx, file_path): 8 | """Tangle the target file using emacs in batch mode. 
Implicitly dumps 9 | things relative to the file.""" 10 | 11 | cx.run(f"emacs -Q --batch -l org {file_path} -f org-babel-tangle") 12 | 13 | @task 14 | def init(cx): 15 | cx.run("mkdir -p _tangle_source") 16 | cx.run("mkdir -p _output") 17 | 18 | @task 19 | def clean(cx): 20 | cx.run("rm -rf _tangle_source") 21 | cx.run("rm -rf _output") 22 | 23 | @task(pre=[init]) 24 | def tangle(cx): 25 | tangle_orgfile(cx, "README.org") 26 | cx.run(f"chmod ug+x ./_tangle_source/*.bash", warn=True) 27 | cx.run(f"chmod ug+x ./_tangle_source/*.sh", warn=True) 28 | cx.run(f"chmod ug+x ./_tangle_source/*.py", warn=True) 29 | 30 | @task 31 | def clean_env(cx): 32 | cx.run("rm -rf _env") 33 | 34 | @task(pre=[init]) 35 | def env(cx): 36 | """Create the environment from the specs in 'env'. Must have the 37 | entire repository available as it uses the tooling from it. 38 | 39 | """ 40 | 41 | example_name = Path(os.getcwd()).stem 42 | 43 | with cx.cd("../../../"): 44 | cx.run(f"inv docs.env-example -n {example_name}") 45 | -------------------------------------------------------------------------------- /info/general_info.org: -------------------------------------------------------------------------------- 1 | 2 | * General Information 3 | 4 | ** Bugs, issues, and feature requests 5 | 6 | - [[https://github.com/ADicksonLab/wepy/issues][Issue Tracker]] 7 | 8 | ** Builds, Distributions, and Packages 9 | 10 | ~wepy~ releases and packages are available on: 11 | 12 | - [[https://pypi.org/project/wepy/][PyPI]] 13 | - [[https://github.com/ADicksonLab/wepy/releases][Github Releases]] 14 | 15 | There are also historical releases archived on [[https://zenodo.org/badge/latestdoi/101077926][Zenodo]] that also are 16 | assigned DOIs for easy citations. 17 | 18 | DOI: [[https://doi.org/10.5281/zenodo.3973431][10.5281/zenodo.3973431]] 19 | 20 | 21 | 22 | ** Community & Related Projects 23 | 24 | General chat and discussion takes place on [[https://gitter.im/wepy/general][gitter.im]]. 25 | 26 | We will also list any projects using ~wepy~ in the 27 | @@rst::doc:`Introduction `@@. 28 | 29 | Just make a PR or issue to add your own. 30 | 31 | ** Contacting the developers 32 | 33 | You can contact developers directly via their contact info on the 34 | [[https://www.egr.msu.edu/~alexrd/][Dickson Lab website]], using the [[https://github.com/ADicksonLab/wepy/issues][issue tracker]], or on [[https://gitter.im/wepy/general][gitter.im]] chat. 35 | 36 | -------------------------------------------------------------------------------- /info/howtos.org: -------------------------------------------------------------------------------- 1 | 2 | * HOWTOS 3 | 4 | * Tips & Tricks 5 | -------------------------------------------------------------------------------- /info/installation.org: -------------------------------------------------------------------------------- 1 | * Installation Guide 2 | 3 | Welcome to the installation guide for wepy. Follow the steps below to 4 | set up the environment and install wepy. 5 | 6 | ** Prerequisites 7 | 8 | Ensure that you are using *Python 3.10*. It is recommended to set up a 9 | conda environment for managing dependencies and ensuring compatibility. 10 | 11 | #+BEGIN_SRC bash 12 | conda create -n wepy-env python=3.10 13 | conda activate wepy-env 14 | #+END_SRC 15 | 16 | ** Installing wepy 17 | 18 | You can install wepy from the latest git repository. 
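Wepy also defines optional dependency sets ("extras") in its ~pyproject.toml~, for example ~md~ (which pulls in ~mdtraj~ and ~openmm-systems~), ~distributed~, ~prometheus~, and ~graphics~. As a sketch (check ~pyproject.toml~ for the current list of extras), you can request one alongside the git install shown below:

#+BEGIN_SRC bash
pip install "wepy[md] @ git+https://github.com/ADicksonLab/wepy.git"
#+END_SRC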
19 | 20 | Installing from Git (for the latest version): 21 | 22 | #+BEGIN_SRC bash 23 | pip install git+https://github.com/ADicksonLab/wepy.git 24 | #+END_SRC 25 | 26 | ** Installing OpenMM and mdtraj 27 | 28 | To install OpenMM, which is highly recommended for running molecular 29 | dynamics simulations: 30 | 31 | #+BEGIN_SRC bash 32 | conda install -c conda-forge openmm=8.0.0 33 | #+END_SRC 34 | 35 | If the conda version of openmm doesn't work for your compute 36 | environment, consult the 37 | [[http://docs.openmm.org/latest/userguide/application.html#installing-openmm][OpenMM 38 | documentation]]. 39 | 40 | The mdtraj library is used by wepy to create and manipulate system 41 | topologies. It can also be installed via conda: 42 | 43 | #+BEGIN_SRC bash 44 | conda install -c conda-forge mdtraj=1.9.9 45 | #+END_SRC 46 | 47 | These version numbers work correctly at the time of this writing and 48 | avoid some incompatibility issues with numpy and pandas dependencies. 49 | 50 | ** Verifying Installation 51 | 52 | After installation, you should be able to import wepy in python. Verify 53 | it by running: 54 | 55 | #+BEGIN_SRC bash 56 | python -c "import wepy" 57 | #+END_SRC 58 | 59 | If the installation was successful, you should not see any errors. 60 | -------------------------------------------------------------------------------- /info/introduction.org: -------------------------------------------------------------------------------- 1 | * Introduction & Features 2 | 3 | There is an academic paper describing various aspects of the design and 4 | usage of ~wepy~: 5 | 6 | - [[https://doi.org/10.1021/acsomega.0c03892][Wepy: A Flexible Software Framework for Simulating Rare Events with Weighted Ensemble Resampling]] 7 | 8 | ** Weighted Ensemble (WE) 9 | 10 | The weighted ensemble algorithm (WE) is a strategy for simulating rare 11 | or long-timescale events in stochastic systems 12 | ([[http://www.ncbi.nlm.nih.gov/pmc/articles/PMC1224912/][Huber, 1996]]). 13 | It creates several parallel simulations called walkers with individual 14 | weights corresponding to their likelihood. Throughout the simulation, 15 | walkers that exhibit behaviors or reach states that are of particular 16 | relevance or significance to the objectives of the simulation are 17 | cloned. To compensate for cloning, and to manage the computational 18 | expense of the simulation, some of the remaining walkers are merged. 19 | Cloning and merging are together referred to as "resampling". It has 20 | been [[https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2830257/][previously 21 | shown]] that WE resampling is "statistically exact", in that it does not 22 | change the expectation values of the weights at any point in space. 
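To make the resampling bookkeeping concrete, here is a minimal sketch (not wepy's actual API) of how cloning and merging act on walker weights: a clone splits a walker's weight evenly among its copies, while a merge sums the weights and keeps the state of one member, chosen with probability proportional to its weight, which is what leaves expectation values unchanged.

#+BEGIN_SRC python
import random

def clone(walker, n_copies=2):
    # split the weight evenly among the copies; total probability is conserved
    w = walker["weight"] / n_copies
    return [{"state": walker["state"], "weight": w} for _ in range(n_copies)]

def merge(walkers):
    # the merged walker carries the summed weight of the group
    total = sum(w["weight"] for w in walkers)
    # the surviving state is drawn proportionally to weight, so the
    # expected contribution of each state to any observable is unchanged
    keeper = random.choices(walkers, weights=[w["weight"] for w in walkers])[0]
    return {"state": keeper["state"], "weight": total}
#+END_SRC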
23 | 24 | ** Features of Wepy 25 | - State of the art WE resamplers: WExplore 26 | ([[http://www.ncbi.nlm.nih.gov/pmc/articles/PMC4404516/][Dickson, 27 | 2014]]) and REVO 28 | ([[https://pubs.aip.org/aip/jcp/article/150/24/244112/198013/REVO-Resampling-of-ensembles-by-variation][Donyapour, 29 | 2019]]) 30 | - Fast GPU-enabled molecular dynamics via OpenMM 31 | ([[https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3539733/][Eastman, 32 | 2013]]) 33 | - Purpose built HDF5 storage format for WE data with extensive API: 34 | ~WepyHDF5~ 35 | - Analysis routines for: 36 | - free energy profiles 37 | - rate calculations 38 | - computing trajectory observables 39 | - extracting linear trajectories from clone-merge trees 40 | - aggregating multiple runs 41 | - Expert friendly: Fully-featured framework for building and customizing 42 | simulations for exactly what you need. 43 | - Leverage the entire python ecosystem. 44 | - No /ad hoc/ configuration files, everything is python. 45 | 46 | 47 | Once you have wepy @@rst::any:`installed `@@ you can check out the 48 | @@rst::any:`quickstart `@@ to get a rough idea of how it works. 49 | 50 | Then you can head on to the @@rst::any:`tutorials `@@ or execute the 51 | [[https://github.com/ADicksonLab/wepy/tree/master/info/examples][examples]]. 52 | 53 | ** Contributed wepy libraries and other useful resources 54 | 55 | Here is a list of packages that are not in the main Wepy repository but 56 | may be of interest to users of Wepy. 57 | 58 | They are: 59 | 60 | - [[https://github.com/ADicksonLab/geomm][geomm]] :: purely functional 61 | library for common numerical routines in computational biology and 62 | chemistry, with no dependency on specific file or topology formats. 63 | 64 | - [[https://github.com/ADicksonLab/CSNAnalysis][CSNAnalysis]] :: small 65 | library for aiding in the analysis of conformation state networks 66 | (CSNs) which can be generated from Wepy data. 
-------------------------------------------------------------------------------- /info/logo/wepy-icon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/info/logo/wepy-icon.ico -------------------------------------------------------------------------------- /info/logo/wepy-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/info/logo/wepy-icon.png -------------------------------------------------------------------------------- /info/logo/wepy-icon_16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/info/logo/wepy-icon_16x16.png -------------------------------------------------------------------------------- /info/logo/wepy-icon_32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/info/logo/wepy-icon_32x32.png -------------------------------------------------------------------------------- /info/logo/wepy_32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/info/logo/wepy_32x32.png -------------------------------------------------------------------------------- /info/news-articles/2020-04-29_example-writing-guide.org: -------------------------------------------------------------------------------- 1 | * Tutorial for Contributors for Running and Writing Examples 2 | 3 | #+begin_export rst 4 | .. feed-entry:: 5 | :author: salotz 6 | :date: 2020-04-29 7 | #+end_export 8 | 9 | We have a separate repository where we save all the workshops we have 10 | done for the users/contributors in our lab: 11 | 12 | [[https://gitlab.com/ADicksonLab/wepy-developer-resources]] 13 | 14 | These are topics which may change throughout time, so we don't include 15 | them in the main ~wepy~ tree nor do we guarantee they will work in 16 | perpetuity. 17 | 18 | Today we are running a workshop on getting people started running & 19 | writing examples for ~wepy~: 20 | 21 | [[https://gitlab.com/ADicksonLab/wepy-developer-resources/-/tree/master/workshops/2020-04-29_Examples-Tutorials-HOWTO][Examples & Tutorials HOWTO]] 22 | 23 | In the ~wepy~ project we have a number of utilities which we think 24 | will help us in keeping the examples in our docs up to date with the 25 | code. 26 | 27 | Outdated docs are a frequent problem with software 28 | projects. Hopefully, the strategies that we start to outline here can 29 | help you in all your software projects by gaining a discipline of 30 | carefully maintaining and pinning your dependencies for purpose-driven 31 | virtualenvs. 32 | 33 | A common structure for writing examples and tutorials also helps to 34 | allow for automated processes to create and test them in a uniform and 35 | low friction way. 36 | 37 | ~salotz 38 | -------------------------------------------------------------------------------- /info/news-articles/2020-08-13_wepy-preprint.org: -------------------------------------------------------------------------------- 1 | * Wepy Paper Preprint Released 2 | 3 | #+begin_export rst 4 | .. 
feed-entry:: 5 | :author: salotz 6 | :date: 2020-08-13 7 | #+end_export 8 | 9 | A preprint for the academic paper describing ~wepy~ has been published 10 | on [[https://chemrxiv.org/s/369c2530c150add1450b][ChemRxiv]]. 11 | 12 | This paper goes into more detail regarding some of the relationships 13 | of the ~wepy~ architecture to the mathematical descriptions of 14 | Weighted Ensemble (WE) in general. 15 | 16 | It also describes some of the challenges posed by the complexity of 17 | WE trajectory data, which differs from normal linear trajectories, 18 | and the solutions that ~wepy~ provides. 19 | 20 | In these respects it is a great supplementary resource for 21 | understanding some of the higher-level issues ~wepy~ solves. 22 | 23 | 24 | Enjoy, 25 | 26 | ~salotz 27 | -------------------------------------------------------------------------------- /info/news-articles/2020-09-25_news-rss-feed.org: -------------------------------------------------------------------------------- 1 | * RSS Feed Now Available 2 | 3 | #+begin_export rst 4 | .. feed-entry:: 5 | :author: salotz 6 | :date: 2020-09-25 7 | #+end_export 8 | 9 | I have now made an RSS feed for this news section: 10 | https://adicksonlab.github.io/wepy/news.rss 11 | 12 | You can follow releases of packages using the services they are 13 | deployed on: 14 | 15 | - [[https://pypi.org/rss/project/wepy/releases.xml][PyPI]] 16 | - Github: 17 | - [[https://github.com/ADicksonLab/wepy/releases.atom][Releases]] 18 | - [[https://github.com/ADicksonLab/wepy/commits.atom][Commits]] 19 | - [[https://github.com/ADicksonLab/wepy/tags.atom][Tags]] 20 | 21 | 22 | If you aren't familiar, you'll need to get a feed reader application 23 | that fetches the RSS (or Atom) feed pages and 24 | provides you a readable digest of the articles there. 25 | 26 | ~salotz 27 | -------------------------------------------------------------------------------- /info/news.org: -------------------------------------------------------------------------------- 1 | 2 | * News and Updates 3 | 4 | # This needs to be able to be found within the tree of the source 5 | # code. the package I am using makes this a pain and you have to go 6 | # through the HTML, but I would just like to make it able to be found 7 | # in sphinx itself.. so we just make a plaintext link 8 | # would be something like this normally 9 | 10 | [[https://adicksonlab.github.io/wepy/news.rss][RSS Feed]] 11 | 12 | # RSS Feed: ~https://current-domain/news.rss~ 13 | 14 | #+begin_export rst 15 | .. feed:: 16 | :rss: news.rss 17 | :title: wepy News 18 | :description: Various announcements related to the wepy project. 19 | 20 | news-articles/2020-09-25_news-rss-feed 21 | news-articles/2020-08-13_wepy-preprint 22 | news-articles/2020-08-04_wepy-1-0-release 23 | news-articles/2020-04-29_example-writing-guide 24 | 25 | #+end_export 26 | 27 | -------------------------------------------------------------------------------- /info/troubleshooting.org: -------------------------------------------------------------------------------- 1 | * FAQ 2 | 3 | ** HDF5 4 | 5 | *** My results files are large, can I compress them? 6 | 7 | There is an HDF5 tool called ~h5repack~ that allows you to "repack" 8 | and compress your datasets.
A good ratio of compression to time is 9 | using GZIP level 2, but read the documentation to customize it for 10 | yourself: 11 | 12 | #+begin_src bash 13 | h5repack -f GZIP=2 original.wepy.h5 compressed.wepy.h5 14 | #+end_src 15 | 16 | There are other more involved mechanisms, but this should cut your 17 | size significantly. 18 | 19 | *** I get a 'valid identifiers' error when accessing HDF5 data. 20 | 21 | If you get something like this: 22 | 23 | #+begin_example 24 | ValueError: Type names and field names must be valid identifiers: "b'decision_id'" 25 | #+end_example 26 | 27 | When accessing your WepyHDF5 data, it's likely because you have a 28 | version of ~h5py~ that is too new. Simply downgrade to a version 29 | before 3.0. 30 | 31 | You can check this by running: 32 | 33 | #+begin_src sh 34 | pip list | grep h5py 35 | #+end_src 36 | 37 | And install an earlier version: 38 | 39 | #+begin_src sh 40 | pip install 'h5py<3' 41 | #+end_src 42 | 43 | See [[*I think I have the wrong versions of dependencies][I think I have the wrong versions of dependencies]] for more info on 44 | fixing dependencies. 45 | 46 | *** Clearing HDF5 Write Locks 47 | 48 | Sometimes if processes don't shut down correctly there will be open locks on the 49 | dataset. If you are absolutely sure that no processes are still active you can 50 | manually clear these flags using the HDF5 toolkit: 51 | 52 | #+begin_src sh 53 | h5clear -s -m $file 54 | #+end_src 55 | 56 | ** Dependencies 57 | 58 | *** I think I have the wrong versions of dependencies 59 | 60 | All known version constrained dependencies are recorded both in the 61 | ~setup.py~ file (this is what gets installed) as well as the 62 | ~requirements.in~ file. The ~requirements.in~ file allows for more 63 | complex expressions of dependencies and may have notes as to why they 64 | are constrained. 65 | 66 | You can check ~info/changelog.org~ to find when and why a constraint 67 | was added as well. 68 | 69 | 70 | 71 | 72 | 73 | -------------------------------------------------------------------------------- /info/tutorials/data_analysis/README.org: -------------------------------------------------------------------------------- 1 | * Analyzing Wepy Data 2 | :PROPERTIES: 3 | :CUSTOM_ID: data_analysis 4 | :END: 5 | 6 | Data from Wepy runs is mostly stored in HDF5 files. In this section 7 | we will go over the basic tools for working with HDF5 files generated 8 | by Wepy. 9 | 10 | - [[file:./hdf5_access/README.rst][Working with HDF5]] 11 | - [[file:./observable_intro/README.rst][Introducing Observables]] 12 | -------------------------------------------------------------------------------- /info/tutorials/data_analysis/hdf5_access/README.org: -------------------------------------------------------------------------------- 1 | * Working with HDF5 2 | 3 | The wepy.hdf5 module is part of the wepy weighted ensemble framework 4 | and is used for handling HDF5 files that store simulation data. This 5 | module helps in reading, writing, and managing the simulation data 6 | stored in HDF5 format. 7 | 8 | To work with an HDF5 file, you need to create a WepyHDF5 object. It is a 9 | good idea to open the file in "r+" mode, which will append to the file 10 | instead of overwriting it.
11 | 12 | #+BEGIN_SRC python 13 | from wepy.hdf5 import WepyHDF5 14 | 15 | # Open an existing HDF5 file or create a new one 16 | wepy_h5 = WepyHDF5('path_to_your_file.h5', mode='r+') # 'r' for read-only, 'r+' for read/write, 'w' for create/overwrite 17 | #+END_SRC 18 | 19 | You can access the trajectories stored in the HDF5 file: 20 | 21 | #+BEGIN_SRC python 22 | with wepy_h5: 23 | trajs = wepy_h5.get_run_trajs(run_idx=0) 24 | #+END_SRC 25 | 26 | To extract specific data, such as configurations or observables: 27 | 28 | #+BEGIN_SRC python 29 | with wepy_h5: 30 | configurations = wepy_h5.get_run_configurations(run_idx=0) 31 | observables = wepy_h5.get_run_observable_field(run_idx=0, obs_field='your_observable') 32 | #+END_SRC 33 | 34 | To write data into the HDF5 file: 35 | 36 | #+BEGIN_SRC python 37 | with wepy_h5: 38 | wepy_h5.add_run(new_run) 39 | wepy_h5.add_run_configurations(run_idx=0, configurations=new_configurations) 40 | wepy_h5.add_run_observable_field(run_idx=0, obs_field='your_observable', data=new_data) 41 | #+END_SRC 42 | 43 | Always close the HDF5 file after operations: 44 | 45 | #+BEGIN_SRC python 46 | wepy_h5.close() 47 | #+END_SRC 48 | -------------------------------------------------------------------------------- /info/tutorials/data_analysis/observable_intro/README.org: -------------------------------------------------------------------------------- 1 | * Introducing Observables 2 | 3 | Observables are quantities calculated from simulation data that provide 4 | insights into the system's behavior. A common observable is RMSD (Root 5 | Mean Square Deviation). In this guide, we'll walk through the process of 6 | defining and using observables within a simulation. 7 | 8 | #+BEGIN_SRC python 9 | import mdtraj as mdj 10 | 11 | # Load the PDB file 12 | pdb_path = 'path/to/your/file.pdb' 13 | pdb = mdj.load_pdb(pdb_path) 14 | #+END_SRC 15 | 16 | To calculate observables, we often need to select specific residues or 17 | atoms. Here's how to select residues using MDTraj. 18 | 19 | #+BEGIN_SRC python 20 | # Select specific residues based on segment names and residue numbers 21 | active_1_res = pdb.top.select('(segname PROA and (residue 399)) or (segname PROB and (residue 366))') 22 | active_2_res = pdb.top.select('(segname PROB and (residue 399)) or (segname PROC and (residue 366))') 23 | active_3_res = pdb.top.select('(segname PROC and (residue 399)) or (segname PROA and (residue 366))') 24 | 25 | CARA_active_res = pdb.top.select('segname CARA and (residue 24 25)') 26 | CARB_active_res = pdb.top.select('segname CARB and (residue 24 25)') 27 | CARC_active_res = pdb.top.select('segname CARC and (residue 24 25)') 28 | #+END_SRC 29 | 30 | An observable function takes simulation data as input and computes a 31 | specific property. You can define your own functions based on your 32 | research needs.
33 | 34 | Here's an example of how you might define an observable function to 35 | compute centroid distances: 36 | 37 | #+BEGIN_SRC python 38 | import numpy as np 39 | 40 | def centroid_distance(fields_d, *args, **kwargs): 41 | centroid_distances = [] 42 | for i in range(len(fields_d['positions'])): 43 | active_1_centroid = np.mean(fields_d['positions'][i][args[0]['active_1_res']], axis=0) 44 | active_2_centroid = np.mean(fields_d['positions'][i][args[0]['active_2_res']], axis=0) 45 | active_3_centroid = np.mean(fields_d['positions'][i][args[0]['active_3_res']], axis=0) 46 | 47 | CARA_active_centroid = np.mean(fields_d['positions'][i][args[0]['CARA_active_res']], axis=0) 48 | CARB_active_centroid = np.mean(fields_d['positions'][i][args[0]['CARB_active_res']], axis=0) 49 | CARC_active_centroid = np.mean(fields_d['positions'][i][args[0]['CARC_active_res']], axis=0) 50 | 51 | centroid_distances.append(np.array([ 52 | np.linalg.norm(active_1_centroid - CARA_active_centroid), 53 | np.linalg.norm(active_3_centroid - CARB_active_centroid), 54 | np.linalg.norm(active_2_centroid - CARC_active_centroid) 55 | ])) 56 | 57 | return np.array(centroid_distances) 58 | #+END_SRC 59 | 60 | Once we have the observable function, we can use it to compute 61 | properties during the simulation. Wepy provides tools to manage 62 | simulation data and compute observables. 63 | 64 | #+BEGIN_SRC python 65 | from wepy.hdf5 import WepyHDF5 66 | 67 | # Path to the WEPY results files 68 | outputs_dir = ['path/to/output1', 'path/to/output2'] 69 | wepy_results = [WepyHDF5(output_dir + '/wepy.results.h5', mode='r+') for output_dir in outputs_dir] 70 | 71 | for wepy_result in wepy_results: 72 | with wepy_result: 73 | args = [{ 74 | 'active_1_res': active_1_res, 75 | 'active_2_res': active_2_res, 76 | 'active_3_res': active_3_res, 77 | 'CARA_active_res': CARA_active_res, 78 | 'CARB_active_res': CARB_active_res, 79 | 'CARC_active_res': CARC_active_res 80 | }] 81 | 82 | obs = wepy_result.compute_observable( 83 | centroid_distance, 84 | ['positions'], # Specify the required fields 85 | args=(args), # Pass custom arguments 86 | save_to_hdf5='centroid', # Save results to the HDF5 file 87 | return_results=True 88 | ) 89 | #+END_SRC 90 | 91 | Now you have centroid distance data that you can use to analyze the 92 | system's behavior. You can also use the data to visualize the system's 93 | behavior over time. 94 | -------------------------------------------------------------------------------- /info/tutorials/extended_test_drive/tasks.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | import os 4 | import os.path as osp 5 | from pathlib import Path 6 | 7 | def tangle_orgfile(cx, file_path): 8 | """Tangle the target file using emacs in batch mode. 
Implicitly dumps 9 | things relative to the file.""" 10 | 11 | cx.run(f"emacs -Q --batch -l org {file_path} -f org-babel-tangle") 12 | 13 | @task 14 | def init(cx): 15 | cx.run("mkdir -p _tangle_source") 16 | cx.run("mkdir -p _output") 17 | 18 | @task 19 | def clean(cx): 20 | cx.run("rm -rf _tangle_source") 21 | cx.run("rm -rf _output") 22 | 23 | @task(pre=[init]) 24 | def tangle(cx): 25 | tangle_orgfile(cx, "README.org") 26 | cx.run(f"chmod ug+x ./_tangle_source/*.bash", warn=True) 27 | cx.run(f"chmod ug+x ./_tangle_source/*.sh", warn=True) 28 | cx.run(f"chmod ug+x ./_tangle_source/*.py", warn=True) 29 | 30 | @task 31 | def clean_env(cx): 32 | cx.run("rm -rf _env") 33 | 34 | @task(pre=[init]) 35 | def env(cx): 36 | """Create the environment from the specs in 'env'. Must have the 37 | entire repository available as it uses the tooling from it. 38 | 39 | """ 40 | 41 | example_name = Path(os.getcwd()).stem 42 | 43 | with cx.cd("../../../"): 44 | cx.run(f"inv docs.env-tutorial -n {example_name}") 45 | -------------------------------------------------------------------------------- /info/tutorials/multiple_runs/README.org: -------------------------------------------------------------------------------- 1 | * Running Multiple Simulations 2 | 3 | In this section, we will discuss how to run multiple simulations in 4 | parallel, how to continue a simulation from a previous run, and how to 5 | merge data from multiple simulations. 6 | 7 | - [[file:./parallel_runs/README.rst][Starting Parallel Runs]] 8 | - [[file:./continue_simulation/README.rst][Continue the Simulation]] 9 | - [[file:./merge_data/README.rst][Merge Multiple Simulation Data]] 10 | -------------------------------------------------------------------------------- /info/tutorials/multiple_runs/merge_data/README.org: -------------------------------------------------------------------------------- 1 | * Merge Multiple Simulation Data 2 | 3 | In this section, we will cover how to merge multiple simulation data 4 | files into a single HDF5 file using the wepy.hdf5 module. This process 5 | is essential for combining the results of different simulation runs, 6 | making it easier to analyze the overall data. 7 | 8 | When running multiple simulations, it is often useful to merge the 9 | resulting data files into a single file. This allows for a more 10 | streamlined analysis and management of the data. The following example 11 | demonstrates how to use the WepyHDF5 class to accomplish this task. 12 | 13 | The following Python script shows how to initialize, clone, and link 14 | multiple HDF5 files containing simulation results. 15 | 16 | #+BEGIN_SRC python 17 | import os.path as osp 18 | from wepy.hdf5 import WepyHDF5 19 | 20 | print('running...', flush=True) 21 | 22 | # Define the base path where the merged file will be saved 23 | BASE_PATH = '' 24 | 25 | # List of HDF5 files to be merged 26 | hdf5_filenames = [ 27 | 'run1.h5', 28 | 'run2.h5', 29 | 'run3.h5', 30 | ... 
31 | ] 32 | 33 | # Initialization of a single HDF5 file 34 | init_wepy_h5 = WepyHDF5(filename=hdf5_filenames[0], mode='r') 35 | 36 | # Creating a clone (This has to be done every time you link all HDF5 files in as separate runs) 37 | CLONE_FILENAME = osp.join(BASE_PATH, 'merge.h5') 38 | 39 | with init_wepy_h5: 40 | wepy_file = init_wepy_h5.clone(path=CLONE_FILENAME, mode='w') 41 | 42 | # The linking step 43 | with wepy_file: 44 | for hdf5_filename in hdf5_filenames: 45 | wepy_file.link_file_runs(hdf5_filename) 46 | #+END_SRC 47 | 48 | The script performs the following steps: 49 | 50 | 1. *Initialization*: 51 | - We start by importing the necessary modules and defining the base 52 | path where the merged file will be saved. 53 | - A list of HDF5 filenames is provided, which contains the paths to 54 | the individual simulation result files. 55 | 2. *Cloning*: 56 | - We initialize a WepyHDF5 object with the first HDF5 file in read 57 | mode. 58 | - Using the clone method, we create a new HDF5 file that will serve 59 | as the merged output file. 60 | 3. *Linking*: 61 | - We open the newly created HDF5 file in write mode. 62 | - Each of the HDF5 files listed in ~hdf5_filenames~ is linked to the 63 | new file using the ~link_file_runs~ method. 64 | 65 | By following these steps, you can merge multiple HDF5 files into a 66 | single file, making it easier to handle and analyze your simulation 67 | data. 68 | 69 | Merging simulation data is a crucial step in data analysis workflows, 70 | especially when dealing with multiple runs. The wepy.hdf5 module 71 | provides a straightforward way to achieve this, ensuring that you can 72 | efficiently combine and manage your simulation results. 73 | -------------------------------------------------------------------------------- /info/tutorials/multiple_runs/parallel_runs/README.org: -------------------------------------------------------------------------------- 1 | * Starting Parallel Runs 2 | 3 | In Wepy you can run two separate scripts with the same initial 4 | condition, or you can use different initial conditions for parallel 5 | runs. Either way you are simply running two simulation scripts that 6 | build their own simulation objects and run them. 7 | 8 | An example usage for this case would be using a randomized subset of 9 | trajectories for setting up the walkers. This way the walkers have 10 | different starting points. Note that it is good practice to have a 11 | different script for each of these simulation runs. 12 | This way you can keep track of the changes you made to the scripts and 13 | the results you obtained from them. 14 | -------------------------------------------------------------------------------- /info/tutorials/seh_system_prep/README.org: -------------------------------------------------------------------------------- 1 | 2 | * sEH ligand unbinding: A real use-case 3 | 4 | In the course of a real project that was meant to simulate different 5 | ligands unbinding from the protein soluble epoxide hydrolase (sEH), a 6 | small importable package was built called [[https://gitlab.com/salotz/seh_prep][seh_prep]]. 7 | 8 | In this tutorial we will use this package to build a simulation 9 | similar to the one in this paper: TODO. 10 | 11 | #+RST: .. 
TODO add paper citation 12 | 13 | ** Getting Started 14 | 15 | In the package you will find several scripts (in the ~seh_prep~ dir) 16 | for: 17 | 18 | - equilibrating freshly minted systems: ~equilibrate.py~ 19 | - common components and functions used by these types of simulations: ~modules.py~ 20 | - a complete definition of all parameters used for all components used 21 | by the simulation: ~parameters.py~ 22 | - script for running a simple simulation for testing: ~run_without_orch.py~ 23 | - a script for generating an initial orchestrator for running 24 | simulations: ~gen_orchestrator.py~ 25 | 26 | There are others but let's ignore them for now. 27 | 28 | The necessary force field data used for all simulations is in the 29 | ~data~ folder. For these simulations we are using CHARMM 36 with the 30 | CGENFF force fields for parametrizing the ligands. 31 | 32 | There are some example inputs in the ~example_data~ folder we will be 33 | using for the tutorial. 34 | 35 | To use and edit this project it is probably best to just use git to 36 | clone it and install it: 37 | 38 | #+begin_src bash 39 | git clone https://gitlab.com/salotz/seh_prep.git 40 | pip install --no-use-pep517 -e ./seh_prep 41 | #+end_src 42 | 43 | I won't take you through creating virtual environments, but it is 44 | probably good to do this install in one. 45 | 46 | 47 | ** The inputs 48 | 49 | It's worth talking about what the inputs are here since this is a point 50 | of considerable confusion. 51 | 52 | Ultimately we are going to be running MD simulations using OpenMM and 53 | a lot of input files are related to the force fields, topologies, and 54 | initial positions for that. 55 | 56 | For this we can't really recommend any single tool since there are 57 | many different programs and suites of programs that do 58 | this. Furthermore, it is not a really well-defined process and there 59 | is considerable complexity involved (at least in our experience). The 60 | only inputs, besides parameters, we need for our wepy simulations are 61 | the JSON topology of the full MD system and the initial positions for 62 | the full system. 63 | 64 | These are in the ~sEH_lig-X.top.json~ and ~initial_positions.coords.txt~ 65 | files respectively. The latter can be read using the standard numpy 66 | reader for text files. 67 | 68 | For this particular case we used the CHARMM-GUI server to do the 69 | preprocessing of the system. Unfortunately, the topology formats 70 | produced by this service were not easily transformed into mdtraj or 71 | OpenMM topologies (which can easily be translated to the JSON 72 | format). So manual scripting was used to accomplish this, as well 73 | as to extract the positions (in the correct order). I suggest, if 74 | possible, using the model building tools provided by OpenMM itself or 75 | other related programs like PDBFixer. 76 | 77 | In any case, you will ultimately want to get an mdtraj ~Topology~ 78 | object and then use the ~mdtraj_to_json_topology~ function from 79 | ~wepy.util.mdtraj~ to convert it to JSON. 80 | 81 | The other files here are the forcefields for OpenMM in their XML 82 | format. The ~charmm36.xml~, ~charmm36_solvent.xml~ files come with the 83 | OpenMM distribution and are just copied over. The forcefield for the 84 | ligand, ~unl.xml~, was generated from the corresponding 'prm' and 85 | 'rtf' CGENFF files using a utility in the package [[https://github.com/ParmEd/ParmEd][parmed]]. 
86 | Something like (assuming ~import parmed as pmd~): 87 | 88 | #+begin_src python 89 | lig_params = pmd.charmm.CharmmParameterSet("unl.rtf", "unl.prm") 90 | params = pmd.openmm.OpenMMParameterSet.from_parameterset(lig_params) 91 | 92 | # here you have an opportunity to rename the ligand if you choose 93 | params.residues['UNL'] = lig_params.residues["UNL"] 94 | params.write('unl.xml') 95 | #+end_src 96 | 97 | ** Equilibrating the starting structure 98 | 99 | Once you have assembled the necessary components to at least run a 100 | straightforward OpenMM simulation we can start to equilibrate it. I 101 | won't go through the details here, but in brief it first does a 102 | steepest descent minimization followed by a series of MD with 103 | increasing temperatures. If you installed the ~seh_prep~ package with 104 | pip then you can run the modules/scripts anywhere like this: 105 | 106 | #+begin_src bash 107 | lig_id="X" 108 | python -m seh_prep.equilibrate \ 109 | "initial_positions.coords.txt" \ 110 | "sEH_lig-${lig_id}.top.json" \ 111 | "UNL" \ 112 | "charmm36.xml" "charmm36_solvent.xml" "unl.xml" \ 113 | $lig_id 114 | #+end_src 115 | 116 | We are giving the ligand the ID "X", and the residue name for the 117 | ligand in all the topologies is "UNL". 118 | 119 | This requires that you have OpenCL installed and have a GPU to 120 | actually run it on. 121 | 122 | 123 | 124 | -------------------------------------------------------------------------------- /info/tutorials/seh_system_prep/tasks.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | import os 4 | import os.path as osp 5 | from pathlib import Path 6 | 7 | 8 | def tangle_orgfile(cx, file_path): 9 | """Tangle the target file using emacs in batch mode. Implicitly dumps 10 | things relative to the file.""" 11 | 12 | cx.run(f"emacs -Q --batch -l org {file_path} -f org-babel-tangle") 13 | 14 | @task 15 | def init(cx): 16 | cx.run("mkdir -p _tangle_source") 17 | cx.run("mkdir -p _output") 18 | 19 | @task 20 | def clean(cx): 21 | cx.run("rm -rf _tangle_source") 22 | cx.run("rm -rf _output") 23 | 24 | @task(pre=[init]) 25 | def tangle(cx): 26 | tangle_orgfile(cx, "README.org") 27 | 28 | 29 | @task 30 | def clean_env(cx): 31 | cx.run("rm -rf _env") 32 | 33 | @task(pre=[init]) 34 | def env(cx): 35 | """Create the environment from the specs in 'env'. Must have the 36 | entire repository available as it uses the tooling from it. 37 | 38 | """ 39 | 40 | example_name = Path(os.getcwd()).stem 41 | 42 | with cx.cd("../../../"): 43 | cx.run(f"inv docs.env-tutorial -n {example_name}") 44 | -------------------------------------------------------------------------------- /info/tutorials/slurm/README.org: -------------------------------------------------------------------------------- 1 | * Using SLURM to Start a Wepy Simulation 2 | 3 | To start a Wepy simulation using SLURM, you can use the following SLURM 4 | script template. Customize the script according to your specific 5 | resource requirements and environment setup.
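Once you have filled in the template shown below, submit it to the scheduler with ~sbatch~ and monitor it with ~squeue~; a minimal sketch (assuming you saved the script under the hypothetical name ~wepy_run.sb~):

#+BEGIN_SRC bash
sbatch wepy_run.sb   # submit the job script to the queue
squeue -u $USER      # check the status of your jobs
#+END_SRC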
6 | 7 | ** Example SLURM Script 8 | 9 | #+BEGIN_SRC bash 10 | #!/bin/bash --login 11 | ########## SBATCH Lines for Resource Request ########## 12 | 13 | #SBATCH --time=TIME_LIMIT # limit of wall clock time - how long the job will run (e.g., 168:00:00 for 168 hours) 14 | #SBATCH -A YOUR_ALLOCATION_ACCOUNT # replace with your allocation account 15 | #SBATCH --nodes=NUMBER_OF_NODES # the number of nodes requested (e.g., 1) 16 | #SBATCH --ntasks=NUMBER_OF_TASKS # the number of tasks to run (e.g., 1) 17 | #SBATCH --cpus-per-task=CPUS_PER_TASK # the number of CPUs (or cores) per task (e.g., 8) 18 | #SBATCH --gpus-per-task=GPUS_PER_TASK # request GPUs per task (e.g., v100:4) 19 | #SBATCH --mem=MEMORY_PER_NODE # total memory per node (e.g., 64G) 20 | #SBATCH --job-name=JOB_NAME # give your job a name for easier identification (e.g., wepy_run) 21 | #SBATCH --output=%x.out # standard output file (e.g., wepy_run.out) 22 | #SBATCH --error=%x.err # standard error file (e.g., wepy_run.err) 23 | 24 | ########## Command Lines to Run ########## 25 | 26 | # Load necessary modules (customize as needed) 27 | module load Conda/3 # load the Conda module (modify if needed) 28 | 29 | # Initialize Conda (replace with the path to your Conda installation) 30 | eval "$(/path/to/conda/bin/conda shell.bash hook)" # e.g., /mnt/home/username/anaconda3/bin/conda 31 | 32 | # Activate your Conda environment (replace with your environment name) 33 | conda activate your_environment_name # e.g., wepy_env 34 | 35 | # Set your home directory for the project (replace with your home directory) 36 | HOME_DIR="/path/to/your/home/directory" # e.g., /mnt/home/username/project_dir 37 | JOBNAME=${SLURM_JOB_ID} 38 | 39 | # Set your log directory (replace with your log directory) 40 | LOG_DIR="$HOME_DIR/logs" # e.g., $HOME_DIR/logs 41 | 42 | # Log the beginning of the run 43 | echo 'Beginning of the run' 1>> "$LOG_DIR/$JOBNAME.log" 2>> "$LOG_DIR/$JOBNAME.log" 44 | 45 | # Load CUDA module if necessary (customize as needed) 46 | module load centos7/lib/cuda/12 # modify CUDA version if needed 47 | 48 | # Change to the home directory 49 | cd "$HOME_DIR" 50 | 51 | # Log the SLURM_JOB_ID number 52 | echo "SLURM_JOB_ID: $SLURM_JOB_ID" 1>> "$LOG_DIR/$JOBNAME.log" 2>> "$LOG_DIR/$JOBNAME.log" 53 | 54 | # Running the Wepy simulation script (replace with your script name) 55 | echo "Running Wepy simulation script" 1>> "$LOG_DIR/$JOBNAME.log" 2>> "$LOG_DIR/$JOBNAME.log" 56 | python wepy_run.py # replace with your Wepy simulation script name 57 | #+END_SRC 58 | 59 | ** Instructions for Customization 60 | 61 | 1. *Resource Requests:* 62 | - Replace ~TIME_LIMIT~ with the desired wall clock time limit (e.g., 63 | ~168:00:00~ for 168 hours). 64 | - Replace ~YOUR_ALLOCATION_ACCOUNT~ with your specific allocation 65 | account. 66 | - Replace ~NUMBER_OF_NODES~ with the number of nodes you need (e.g., 67 | ~1~). 68 | - Replace ~NUMBER_OF_TASKS~ with the number of tasks to run (e.g., 69 | ~1~). 70 | - Replace ~CPUS_PER_TASK~ with the number of CPUs per task (e.g., ~8~). 71 | - Replace ~GPUS_PER_TASK~ with the type and number of GPUs per task 72 | (e.g., ~v100:4~). 73 | - Replace ~MEMORY_PER_NODE~ with the total memory per node (e.g., 74 | ~64G~). 75 | - Replace ~JOB_NAME~ with a name for your job (e.g., ~wepy_run~). 76 | 2. *Conda Setup:* 77 | - Replace ~/path/to/conda/bin/conda~ with the actual path to your 78 | Conda installation. 79 | - Replace ~your_environment_name~ with the name of your Conda 80 | environment. 81 | 3. 
*Home and Log Directory:* 82 | - Set ~HOME_DIR~ to the directory where your project files are located. 83 | - Ensure ~LOG_DIR~ points to where you want the log files to be saved. 84 | 4. *CUDA Module:* 85 | - Adjust the ~module load centos7/lib/cuda/12~ line depending on your 86 | cuda path. 87 | 5. *Wepy Simulation Script:* 88 | - Replace ~wepy_run.py~ with the name of your Wepy simulation script. 89 | 90 | This template provides flexibility for users to customize the SLURM 91 | script according to their specific needs while maintaining a general 92 | structure for running a Wepy simulation. 93 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling"] 3 | build-backend = "hatchling.build" 4 | 5 | [project] 6 | name = "wepy" 7 | description = "Weighted Ensemble Framework" 8 | readme = {"file" = "README.org", "content-type" = "text/plain"} 9 | license = "MIT" 10 | 11 | authors = [ 12 | { name = "Samuel Lotz", email = "salotz@salotz.info" }, 13 | { name = "Alex Dickson", email = "alexrd@msu.edu" }, 14 | { name = "Tom Dixon" }, 15 | { name = "Robert Hall" }, 16 | { name = "Nicole Roussey" }, 17 | ] 18 | dynamic = ["version"] 19 | 20 | classifiers = [ 21 | "Topic :: Utilities", 22 | "License :: OSI Approved :: MIT License", 23 | "Programming Language :: Python :: 3" 24 | ] 25 | 26 | dependencies = [ 27 | "numpy", 28 | "h5py>=3", 29 | "networkx", 30 | "pandas", 31 | "dill", 32 | "click", 33 | "scipy", 34 | "geomm", 35 | "tabulate", 36 | "jinja2", 37 | "pint", 38 | "multiprocessing_logging", 39 | ] 40 | 41 | [project.optional-dependencies] 42 | 43 | md = [ 44 | "mdtraj", 45 | "openmm_systems==0.0.0", 46 | ] 47 | 48 | distributed = [ 49 | "dask[bag]", 50 | ] 51 | 52 | prometheus = [ 53 | "prometheus_client", 54 | "pympler", 55 | ] 56 | 57 | graphics = [ 58 | "matplotlib", 59 | # NOTE: pillow only needed for matplotlib but it needs to be above this 60 | # version for security reasons. 
If matplotlib also adds these constraints this 61 | # can be removed 62 | "pillow>=10.0.1", 63 | ] 64 | 65 | [project.urls] 66 | 67 | Documentation = "https://adicksonlab.github.io/wepy/index.html" 68 | Source = "https://github.com/ADicksonLab/wepy" 69 | Issues = "https://github.com/ADicksonLab/wepy/issues" 70 | 71 | [project.scripts] 72 | 73 | wepy = "wepy.__main__:cli" 74 | 75 | # NOTE: currently disabled since it requires OpenMM currently to work which is 76 | # not ideal for unit testing 77 | 78 | # [project.entry-points.pytest11] 79 | # pytest-wepy = "pytest_wepy" 80 | 81 | [tool.interrogate] 82 | 83 | fail-under = 100 84 | verbose = 2 85 | 86 | [tool.hatch.version] 87 | path = "src/wepy/__about__.py" 88 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = --verbose 3 | -------------------------------------------------------------------------------- /requirements-distributed.txt: -------------------------------------------------------------------------------- 1 | # frozen requirements generated by pip-deepfreeze 2 | cloudpickle==2.2.1 3 | dask==2023.3.2 4 | fsspec==2023.3.0 5 | importlib-metadata==6.2.0 6 | locket==1.0.0 7 | packaging==23.0 8 | partd==1.3.0 9 | PyYAML==6.0 10 | toolz==0.12.0 11 | zipp==3.15.0 12 | -------------------------------------------------------------------------------- /requirements-graphics.txt: -------------------------------------------------------------------------------- 1 | # frozen requirements generated by pip-deepfreeze 2 | contourpy==1.1.1 3 | cycler==0.12.1 4 | fonttools==4.43.1 5 | importlib-resources==6.1.0 6 | kiwisolver==1.4.5 7 | matplotlib==3.8.0 8 | packaging==23.0 9 | Pillow==10.0.1 10 | pyparsing==3.0.9 11 | zipp==3.15.0 12 | -------------------------------------------------------------------------------- /requirements-md.txt: -------------------------------------------------------------------------------- 1 | # frozen requirements generated by pip-deepfreeze 2 | astunparse==1.6.3 3 | mdtraj==1.9.7 4 | openmm-systems==0.0.0 5 | pyparsing==3.0.9 6 | wheel==0.41.2 7 | -------------------------------------------------------------------------------- /requirements-prometheus.txt: -------------------------------------------------------------------------------- 1 | # frozen requirements generated by pip-deepfreeze 2 | prometheus-client==0.16.0 3 | Pympler==1.0.1 4 | -------------------------------------------------------------------------------- /requirements.in: -------------------------------------------------------------------------------- 1 | # this is an abstract listing of the requirements for other projects 2 | # which are co-developing with this repo can use to compile dependencies 3 | # for 4 | 5 | --index-url https://pypi.python.org/simple/ 6 | 7 | numpy 8 | h5py > 3 9 | networkx == 2.3 10 | pandas 11 | dill 12 | 13 | click 14 | scipy 15 | matplotlib 16 | tabulate 17 | jinja2 18 | pint 19 | 20 | eliot 21 | multiprocessing_logging 22 | 23 | # mdtraj 24 | mdtraj 25 | 26 | # distributed 27 | dask[bag] 28 | 29 | # prometheus 30 | prometheus_client 31 | pympler 32 | 33 | # causes simultaneous dev to fail 34 | # git+https://github.com/ADicksonLab/geomm 35 | # git+https://github.com/ADicksonLab/openmm_systems 36 | 37 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 
1 | # frozen requirements generated by pip-deepfreeze 2 | click==8.1.3 3 | dill==0.3.6 4 | geomm==0.2.0 5 | h5py==3.8.0 6 | Jinja2==3.1.2 7 | MarkupSafe==2.1.2 8 | multiprocessing-logging==0.3.4 9 | networkx==3.1 10 | numpy==1.24.2 11 | pandas==2.0.0 12 | Pint==0.20.1 13 | python-dateutil==2.8.2 14 | pytz==2023.3 15 | scipy==1.10.1 16 | six==1.16.0 17 | tabulate==0.9.0 18 | tzdata==2023.3 19 | -------------------------------------------------------------------------------- /scrapyard/wexplore_image_reporter.py: -------------------------------------------------------------------------------- 1 | import logging 2 | logger = logging.getLogger(__name__) 3 | 4 | import numpy as np 5 | 6 | import mdtraj as mdj 7 | 8 | from wepy.reporter.reporter import ProgressiveFileReporter 9 | from wepy.util.mdtraj import json_to_mdtraj_topology, mdtraj_to_json_topology 10 | from wepy.util.json_top import json_top_subset 11 | 12 | class WExploreAtomImageReporter(ProgressiveFileReporter): 13 | """Reporter for generating 3D molecular structures from WExplore 14 | region images. 15 | 16 | This will only be meaningful for WExplore simulations where the 17 | region images are actually 3D coordinates. 18 | 19 | """ 20 | 21 | FILE_ORDER = ("init_state_path", "image_path") 22 | SUGGESTED_EXTENSIONS = ("image_top.pdb", "wexplore_images.dcd") 23 | 24 | 25 | def __init__(self, 26 | init_image=None, 27 | image_atom_idxs=None, 28 | json_topology=None, 29 | **kwargs): 30 | """Constructor for the WExploreAtomImageReporter. 31 | 32 | Parameters 33 | ---------- 34 | 35 | init_image : numpy.array, optional 36 | The initial region image. Used for generating the topology 37 | as well. If not given will be eventually generated. 38 | (Default = None) 39 | 40 | image_atom_idxs : list of int 41 | The indices of the atoms that are part of the topology 42 | subset that comprises the image. 43 | 44 | json_topology : str 45 | JSON format topology for the whole system. A subset of the 46 | atoms will be taken using the image_atom_idxs. 47 | 48 | """ 49 | 50 | super().__init__(**kwargs) 51 | 52 | assert json_topology is not None, "must give a JSON format topology" 53 | assert image_atom_idxs is not None, \ 54 | "must give the indices of the atoms for the subset of the topology that is the image" 55 | 56 | self.image_atom_idxs = image_atom_idxs 57 | 58 | self.json_main_rep_top = json_top_subset(json_topology, self.image_atom_idxs) 59 | 60 | self.init_image = None 61 | self._top_pdb_written = False 62 | self.image_traj_positions = [] 63 | 64 | # if an initial image was given use it, otherwise just don't 65 | # worry about it, the reason for this is that there is no 66 | # interface for getting image indices from distance metrics as 67 | # of now. 68 | if init_image is not None: 69 | self.init_image = init_image 70 | self.image_traj_positions.append(self.init_image) 71 | 72 | # and times 73 | self.times = [0] 74 | 75 | 76 | def init(self, **kwargs): 77 | 78 | super().init(**kwargs) 79 | 80 | if self.init_image is not None: 81 | 82 | image_mdj_topology = json_to_mdtraj_topology(self.json_main_rep_top) 83 | 84 | # initialize the initial image into the image traj 85 | init_image_traj = mdj.Trajectory([self.init_image], 86 | time=self.times, 87 | topology=image_mdj_topology) 88 | 89 | 90 | 91 | # save this as a PDB for a topology to view in VMD etc. 
to go 92 | # along with the trajectory we will make 93 | logger.info("Writing initial image to {}".format(self.init_state_path)) 94 | init_image_traj.save_pdb(self.init_state_path) 95 | 96 | self._top_pdb_written = True 97 | 98 | def report(self, cycle_idx=None, resampler_data=None, 99 | **kwargs): 100 | 101 | # load the json topology as an mdtraj one 102 | image_mdj_topology = json_to_mdtraj_topology(self.json_main_rep_top) 103 | 104 | # collect the new images defined 105 | new_images = [] 106 | for resampler_rec in resampler_data: 107 | image = resampler_rec['image'] 108 | new_images.append(image) 109 | 110 | times = np.array([cycle_idx + 1 for _ in range(len(new_images))]) 111 | 112 | 113 | # combine the new image positions and times with the old 114 | self.image_traj_positions.extend(new_images) 115 | self.times.extend(times) 116 | 117 | # only save if we have at least one image 118 | if len(self.image_traj_positions) > 0: 119 | 120 | # make a trajectory of the new images, using the cycle_idx as the time 121 | new_image_traj = mdj.Trajectory(self.image_traj_positions, 122 | time=self.times, 123 | topology=image_mdj_topology) 124 | 125 | # if we haven't already written a topology PDB write it now 126 | if not self._top_pdb_written: 127 | new_image_traj[0].save_pdb(self.init_state_path) 128 | self._top_pdb_written = True 129 | 130 | # then write the images to the trajectory file 131 | new_image_traj.save_dcd(self.image_path) 132 | -------------------------------------------------------------------------------- /sphinx/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SOURCEDIR = . 8 | BUILDDIR = _build 9 | 10 | # Put it first so that "make" without argument is like "make help". 11 | help: 12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 13 | 14 | .PHONY: help Makefile 15 | 16 | # Catch-all target: route all unknown targets to Sphinx using the new 17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 18 | %: Makefile 19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 20 | -------------------------------------------------------------------------------- /sphinx/data_analysis_index.rst: -------------------------------------------------------------------------------- 1 | .. _data_analysis: 2 | 3 | Analyzing Wepy Data 4 | =================== 5 | 6 | Data from Wepy runs is mostly stored in HDF5 files. In this section 7 | we will go over the basic tools for working with HDF5 files generated 8 | by Wepy. 9 | 10 | 11 | .. toctree:: 12 | :maxdepth: 1 13 | 14 | hdf5_access/README 15 | observable_intro/README 16 | -------------------------------------------------------------------------------- /sphinx/index.rst: -------------------------------------------------------------------------------- 1 | .. wepy documentation master file, created by 2 | sphinx-quickstart on Mon Dec 10 13:22:52 2018. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | 7 | 8 | wepy: A Weighted Ensemble (WE) simulation framework 9 | =================================================== 10 | 11 | .. optional logo here 12 | .. image:: /_static/logo.svg 13 | :width: 450 pt 14 | :align: center 15 | :target: https://adicksonlab.github.io/wepy/ 16 | 17 | Getting Started 18 | --------------- 19 | 20 | .. 
toctree:: 21 | :maxdepth: 1 22 | 23 | _source/introduction 24 | _source/installation 25 | _source/quick_start/index 26 | 27 | Software Documentation 28 | ---------------------- 29 | 30 | .. toctree:: 31 | :maxdepth: 1 32 | 33 | _source/tutorials/index 34 | _source/troubleshooting 35 | _source/reference 36 | _source/glossary 37 | 38 | 39 | Project & Developer Information 40 | ------------------------------- 41 | 42 | .. toctree:: 43 | :maxdepth: 1 44 | 45 | _source/general_info 46 | _source/news 47 | _source/changelog 48 | _source/dev_guide 49 | 50 | 51 | Metrics, Reports, & Dashboards 52 | ------------------------------ 53 | 54 | `Performance Regressions `_ 55 | 56 | `Code Quality `_ 57 | 58 | `Test Coverage `_ 59 | 60 | 61 | 62 | Indices and tables 63 | ------------------ 64 | 65 | * :ref:`genindex` 66 | * :ref:`modindex` 67 | * :ref:`search` 68 | -------------------------------------------------------------------------------- /sphinx/multiple_runs_index.rst: -------------------------------------------------------------------------------- 1 | Running Multiple Simulations 2 | ============================= 3 | 4 | In this section, we will discuss how to run multiple simulations in 5 | parallel, how to continue a simulation from a previous run, and how to 6 | merge data from multiple simulations. 7 | 8 | .. toctree:: 9 | :maxdepth: 1 10 | 11 | parallel_runs/README 12 | continue_simulation/README 13 | merge_data/README 14 | -------------------------------------------------------------------------------- /sphinx/quick_start_index.rst: -------------------------------------------------------------------------------- 1 | Quick Start 2 | =========== 3 | 4 | In this section, we will provide a quick start guide to get you up and 5 | running with Wepy. This section includes setting up a simulation 6 | environment with a pre-built system, and an introduction to how data is 7 | saved in a Wepy simulation. 8 | 9 | .. 
toctree:: 10 | :maxdepth: 1 11 | 12 | setting_up_simulation/README 13 | reporters/README 14 | introducing_resamplers/README -------------------------------------------------------------------------------- /sphinx/static/custom.css: -------------------------------------------------------------------------------- 1 | a { 2 | color: #027bab; 3 | } 4 | 5 | a:hover { 6 | color: #750076; 7 | } 8 | 9 | th.field-name { 10 | min-width: 100px; 11 | color: #Ee4000; 12 | } 13 | 14 | tt, code { 15 | color: #Ee4000; 16 | border-radius: 0.3em; 17 | padding: 0.0em 0.3em; 18 | } 19 | 20 | a.reference.internal code.xref span.pre { 21 | color: #Ee4000; 22 | border-bottom: none; 23 | border-radius: 0; 24 | padding: 0; 25 | } 26 | 27 | a.reference.internal, a.reference.internal:hover { 28 | border-bottom: none; 29 | } 30 | 31 | a.reference.internal:hover code { 32 | background: #027bab 33 | } 34 | 35 | a.reference.internal:hover code.xref span.pre { 36 | color: #Ee4000; 37 | /* background: #027bab; */ 38 | border-bottom: none; 39 | } 40 | 41 | tt.xref, code.xref, a tt { 42 | background: none; 43 | border-bottom: none; 44 | } 45 | 46 | code.literal { 47 | color: #551a8b; 48 | /* background: #015259; */ 49 | } 50 | 51 | pre { 52 | padding: 20px 30px; 53 | /* background: #003038; */ 54 | } 55 | 56 | div > dl { 57 | /* border-left: 2px solid #00384021; */ 58 | padding-left: 5px; 59 | } 60 | 61 | dt { 62 | color: #027bab; 63 | } 64 | -------------------------------------------------------------------------------- /sphinx/tutorials_index.rst: -------------------------------------------------------------------------------- 1 | Tutorials 2 | ========= 3 | 4 | In this section, we will provide a number of practical tutorials for using Wepy to collect research data. 5 | This will cover how to prepare your own systems for simulation, how to write Wepy scripts to perform ligand binding and unbinding, data analysis, running and analyzing simulation replicates, and submitting Wepy jobs to a compute cluster. 6 | 7 | .. toctree:: 8 | :maxdepth: 1 9 | 10 | prepare/README 11 | unbinding_sim/README 12 | rebinding_sim/README 13 | data_analysis/index 14 | multiple_runs/index 15 | slurm/README 16 | -------------------------------------------------------------------------------- /src/pytest_wepy/__init__.py: -------------------------------------------------------------------------------- 1 | # Third Party Library 2 | import pytest 3 | 4 | 5 | @pytest.fixture(scope="class") 6 | def test_wepy_fixture(): 7 | return "Hello" 8 | -------------------------------------------------------------------------------- /src/pytest_wepy/openmm.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/src/pytest_wepy/openmm.py -------------------------------------------------------------------------------- /src/wepy/__about__.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.1.0" 2 | -------------------------------------------------------------------------------- /src/wepy/__init__.py: -------------------------------------------------------------------------------- 1 | """Top-level package.""" 2 | 3 | # Local Modules 4 | from .__about__ import __version__ 5 | 6 | __author__ = "Samuel D. 
Lotz" 7 | __email__ = "samuel.lotz@salotz.info" 8 | -------------------------------------------------------------------------------- /src/wepy/__main__.py: -------------------------------------------------------------------------------- 1 | """Glue all the CLIs together into one interface.""" 2 | 3 | # First Party Library 4 | from wepy.orchestration.cli import cli as orch_cli 5 | 6 | cli = orch_cli 7 | 8 | 9 | # SNIPPET: I was intending to aggregate multiple command lines other 10 | # than the orchestration, but this never materialized or was 11 | # needed. In the future though this can be the place for that. 12 | 13 | # @click.group() 14 | # def cli(): 15 | # """ """ 16 | # pass 17 | 18 | # # add in the sub-clis 19 | # cli.add_command(orch_cli) 20 | 21 | # # the orchestrator stuff we keep in the top-level still though 22 | # for subgroup in orch_subgroups: 23 | # cli.add_command(subgroup) 24 | 25 | if __name__ == "__main__": 26 | cli() 27 | -------------------------------------------------------------------------------- /src/wepy/_print_version.py: -------------------------------------------------------------------------------- 1 | if __name__ == "__main__": 2 | # Local Modules 3 | from .__about__ import __version__ 4 | 5 | print(__version__) 6 | -------------------------------------------------------------------------------- /src/wepy/analysis/__init__.py: -------------------------------------------------------------------------------- 1 | """Analysis tools for wepy datasets. 2 | 3 | Analysis of weighted ensemble (WE) datasets are complicated by their 4 | nonlinear branching structure, and thus in its raw form cannot be 5 | analyzed by the tools available for linear trajectories. See the 6 | documentation (TODO: link) for a more in depth explanation of the 7 | general structure of weighted ensemble data. 8 | 9 | These analysis modules do not aim to redo any particular analysis 10 | calculation but rather to aid in accessing, summarizing, and 11 | manipulating branched simulation data. 12 | 13 | The starting point for this is the `ContigTree` class in the 14 | `contig_tree` module. This is a general purpose container that 15 | combines the data specifying the branching structure with the 16 | underlying linear data structures. It is necessary for branched data 17 | but can also be used for collections of linear data as well. 18 | 19 | The main anlaysis routines we attempt to facilitate are: 20 | 21 | 1. Generating probability histograms over calculated 22 | observables. (TODO) 23 | 24 | 2. Generating contiguous trajectories of walkers that have reached 25 | target boundary conditions (TODO). 26 | 27 | 3. Visualizing the cloning and merging history of simulations 28 | (i.e. resampling trees) using the `parents` module and 29 | `network_layouts`. 30 | 31 | 4. Generating 'state networks' from frame labels and computing 32 | transition matrices with variable lag times using the `network` and 33 | `transitions` modules. 34 | 35 | To calculate observables on the dataset you need to work directly with 36 | the data storage API in the `wepy.hdf5.WepyHDF5` module using the 37 | `compute_observable` method. 
38 | 
39 | See Also
40 | --------
41 | 
42 | `wepy.hdf5.WepyHDF5.compute_observable`
43 | 
44 | Notes
45 | -----
46 | 
47 | References
48 | ----------
49 | 
50 | Examples
51 | --------
52 | 
53 | """
54 | 
--------------------------------------------------------------------------------
/src/wepy/analysis/network_layouts/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/src/wepy/analysis/network_layouts/__init__.py
--------------------------------------------------------------------------------
/src/wepy/analysis/network_layouts/layout.py:
--------------------------------------------------------------------------------
1 | """General stuff for layouts."""
2 | 
3 | 
4 | class LayoutError(Exception):
5 |     """Error for requirements specific to network layouts."""
6 | 
7 |     pass
8 | 
--------------------------------------------------------------------------------
/src/wepy/boundary_conditions/__init__.py:
--------------------------------------------------------------------------------
1 | """Library of and framework for making boundary conditions in wepy simulations.
2 | 
3 | In many simulations you would like to apply external constraints upon
4 | the dynamics of a simulation. For example in ligand unbinding
5 | simulations you may want to recognize and act on the fact that a
6 | ligand has reached some sort of threshold distance away from its
7 | originally bound protein. The `LigandUnbinding` boundary condition
8 | class does exactly this and modifies the walker state to restart it in
9 | the original starting position instead of continuing the dynamics of
10 | the runner. Another possible way to implement this would be to only
11 | change a single data field in the walker state that tells you the
12 | "color" of the walker which indicates the last boundary it crossed,
13 | without interrupting the dynamical continuity of the
14 | simulation. However, the analysis routines in `wepy` are adapted to
15 | deal with these discontinuities if you desire such non-equilibrium
16 | simulations.
17 | 
18 | Use of boundary conditions is so common that the `wepy` simulation
19 | manager and `WepyHDF5` storage backend provide an interface for
20 | applying and saving results of boundary conditions.
21 | 
22 | Boundary conditions need not be used in a simulation at all, however,
23 | and are strictly optional.
24 | 
25 | This module provides a small library of common boundary conditions and
26 | a reference for the implementation of your own boundary
27 | conditions. The approach of having a microframework for making boundary
28 | conditions was chosen because the actual details of them are highly
29 | domain dependent and it is unlikely that `wepy` could begin to cover
30 | all of them.
31 | 
32 | A boundary condition is simply a duck typed object that follows the
33 | interface called by the simulation manager. This means that you need
34 | not use this microframework at all and can implement one from the
35 | ground up. I.e. no explicit class inheritance checks are done at any
36 | point.
37 | 
38 | However, to make things easy just inherit from the `BoundaryCondition`
39 | base class and override the necessary functions.
40 | 
41 | These methods must be implemented to work correctly in the reference
42 | simulation manager:
43 | - warp_walkers
44 | - progress
45 | - update_bc
46 | 
47 | 
48 | A non-trivial boundary condition (BC for short) will override at least
49 | the `warp_walkers` method. "Warping" in `wepy` terminology means for a
"Warping" in `wepy` terminology means for a 50 | walker to have met the constraints of the BC and have it's state be 51 | modified in response (think warping through space via a wormhole like 52 | in star trek). Warps can be 'continuous' or 'discontinuous' depending 53 | on whether the continuity of dynamics is broken due to this 54 | effect. For example, resetting the positions of a molecular dynamics 55 | to its starting point is discontinuous, and the change of a string 56 | label or some other field not used by the runner to compute dynamics 57 | is continuous. 58 | 59 | For each warping event a single warping record is generated, that 60 | should detail the event. 61 | 62 | The simulation manager will also make calls to `progress` and 63 | `update_bc`, but these are accessory to the `warp_walkers` method. 64 | 65 | The `progress` call returns a continual record (one per cycle) and is 66 | intended to report on some metrics of progress the walkers have 67 | achieved. For example in the ligand unbinding example this can be the 68 | minimum distance to the protein. The progress record can contain 69 | multiple features per walker. It can also be used to make economical 70 | use of calculations used in the BC when determining the warpings of 71 | the walkers. 72 | 73 | Additionally for routines in `wepy.analysis` to work properly the 74 | `warping_discontinuity` classmethod (can be called from the class 75 | without an object instance) must be implemented correctly. 76 | 77 | The `update_bc` call is used to get changes to the BC object 78 | itself. To use the ligand unbinding example one might lower the 79 | threshold for unbinding if so many cycles go by without an event 80 | occuring with perhaps introspection on the progress values. 81 | 82 | Any changes in the state of the BC object should be happen in the call 83 | stack of `update_bc` and reflected in the 'bc_record' returned to the 84 | simulation manager. 85 | 86 | Record groups generated by a BC class are: 87 | - warping 88 | - boundary_conditions 89 | - progress 90 | 91 | It is recommended (but optional) to have all the field names, shapes, 92 | and dtypes for the records in the class definition for these 93 | where possible. 94 | 95 | Notes 96 | ----- 97 | 98 | Boundary conditions in wepy simulations are optional. 99 | 100 | Inherit from the 101 | `wepy.boundary_conditions.boundary.BoundaryConditions` class to get 102 | started developing your own boundary conditions. 103 | 104 | Boundary conditions must implement methods: `warp_walkers`, 105 | `progress`, and `update_bc` for simulation. 106 | 107 | It is the programmers responsibility to handle discontinuous warping 108 | events by implementing the `warping_discontinuity` class method. 109 | 110 | 111 | References 112 | ---------- 113 | 114 | Examples 115 | -------- 116 | 117 | """ 118 | -------------------------------------------------------------------------------- /src/wepy/boundary_conditions/randomwalk.py: -------------------------------------------------------------------------------- 1 | """Boundary conditions for random walk. 
2 | """ 3 | 4 | # Standard Library 5 | import itertools as it 6 | import logging 7 | 8 | logger = logging.getLogger(__name__) 9 | # Standard Library 10 | import time 11 | from collections import defaultdict 12 | 13 | # Third Party Library 14 | import numpy as np 15 | from geomm.centering import center_around 16 | from geomm.distance import minimum_distance 17 | from geomm.grouping import group_pair 18 | from geomm.rmsd import calc_rmsd 19 | from geomm.superimpose import superimpose 20 | 21 | # First Party Library 22 | from wepy.boundary_conditions.boundary import WarpBC 23 | from wepy.util.util import box_vectors_to_lengths_angles 24 | from wepy.walker import WalkerState 25 | 26 | 27 | class RandomWalkBC(WarpBC): 28 | """Boundary condition for a random walk simulation with warping 29 | controlled by the sum of walker positions crossing a threshold. 30 | 31 | Implements the WarpBC superclass. 32 | 33 | This boundary condition will warp walkers to a number of initial 34 | states whenever a walker crosses the threshold distance from the 35 | origin. 36 | """ 37 | 38 | # Records of boundary condition changes (sporadic) 39 | BC_FIELDS = WarpBC.BC_FIELDS + ("threshold_distance",) 40 | 41 | BC_SHAPES = WarpBC.BC_SHAPES + ((1,),) 42 | BC_DTYPES = WarpBC.BC_DTYPES + (int,) 43 | 44 | # warping (sporadic) 45 | WARPING_FIELDS = WarpBC.WARPING_FIELDS + () 46 | WARPING_SHAPES = WarpBC.WARPING_SHAPES + () 47 | WARPING_DTYPES = WarpBC.WARPING_DTYPES + () 48 | 49 | WARPING_RECORD_FIELDS = WarpBC.WARPING_RECORD_FIELDS + () 50 | 51 | # progress towards the boundary conditions (continual) 52 | PROGRESS_FIELDS = WarpBC.PROGRESS_FIELDS + ("distance",) 53 | PROGRESS_SHAPES = WarpBC.PROGRESS_SHAPES + (Ellipsis,) 54 | PROGRESS_DTYPES = WarpBC.PROGRESS_DTYPES + (int,) 55 | 56 | PROGRESS_RECORD_FIELDS = WarpBC.PROGRESS_RECORD_FIELDS + ("distance",) 57 | 58 | def __init__( 59 | self, threshold=None, initial_states=None, initial_weights=None, **kwargs 60 | ): 61 | """Constructor for RandomWalkBC. 62 | 63 | Arguments 64 | --------- 65 | 66 | threshold : int 67 | The threshold distance for recording a warping event. 68 | 69 | initial_states : list of objects implementing the State interface 70 | The list of possible states that warped walkers will assume. 71 | 72 | initial_weights : list of float, optional 73 | List of normalized probabilities of the initial_states 74 | provided. If not given, uniform probabilities will be 75 | used. 76 | 77 | ligand_idxs : arraylike of int 78 | The indices of the atom positions in the state considered 79 | the ligand. 80 | 81 | binding_site_idxs : arraylike of int 82 | The indices of the atom positions in the state considered 83 | the binding site. 84 | 85 | Raises 86 | ------ 87 | AssertionError 88 | If any of the following kwargs are not given: 89 | threshold, initial_states. 90 | """ 91 | 92 | super().__init__( 93 | initial_states=initial_states, initial_weights=initial_weights, **kwargs 94 | ) 95 | 96 | # test inputs 97 | assert threshold is not None, "Must give a threshold distance" 98 | 99 | # save attributes 100 | self._threshold = threshold 101 | 102 | @property 103 | def threshold(self): 104 | """The cutoff RMSD for considering a walker bound.""" 105 | return self._threshold 106 | 107 | def _progress(self, walker): 108 | """Calculate if the walker has bound and provide progress record. 
101 | 
102 |         Parameters
103 |         ----------
104 |         walker : object implementing the Walker interface
105 | 
106 |         Returns
107 |         -------
108 |         crossed : bool
109 |             Whether the walker has crossed the threshold and should be warped.
110 | 
111 |         progress_data : dict of str : value
112 |             Dictionary of the progress record group fields
113 |             for this walker alone.
114 | 
115 |         """
116 | 
117 |         pos = walker.state["positions"]
118 | 
119 |         distance = np.sum(pos)
120 | 
121 |         # test to see if the threshold was crossed
122 |         crossed = False
123 |         if distance >= self._threshold:
124 |             crossed = True
125 | 
126 |         progress_data = {"distance": distance}
127 | 
128 |         return crossed, progress_data
129 | 
--------------------------------------------------------------------------------
/src/wepy/boundary_conditions/unbinding.py:
--------------------------------------------------------------------------------
1 | """Alias for the new receptor.py module for old pickles etc. Will be deprecated."""
2 | 
3 | # First Party Library
4 | from wepy.boundary_conditions.receptor import *
5 | 
--------------------------------------------------------------------------------
/src/wepy/orchestration/__init__.py:
--------------------------------------------------------------------------------
1 | if __name__ == "__main__":
2 |     # entry-point to the orchestration CLI
3 |     # First Party Library
4 |     from wepy.orchestration.cli import cli
5 | 
6 |     cli()
7 | 
--------------------------------------------------------------------------------
/src/wepy/orchestration/snapshot.py:
--------------------------------------------------------------------------------
1 | # Standard Library
2 | from copy import copy, deepcopy
3 | 
4 | 
5 | class SimApparatus:
6 |     """The simulation apparatus is the set of components needed for running a
7 |     simulation without the initial conditions for starting the simulation.
8 | 
9 |     A runner is strictly necessary but a resampler and boundary
10 |     conditions are not.
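
    For example, a rough sketch of bundling components with the wepy
    flavored subclass defined below (the runner, resampler, and boundary
    condition objects are hypothetical placeholders):

        apparatus = WepySimApparatus(
            my_runner,
            resampler=my_resampler,
            boundary_conditions=my_bc,
        )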
11 | 12 | Parameters 13 | ---------- 14 | 15 | Returns 16 | ------- 17 | 18 | """ 19 | 20 | def __init__(self, filters): 21 | self._filters = deepcopy(filters) 22 | 23 | @property 24 | def filters(self): 25 | """ """ 26 | return self._filters 27 | 28 | 29 | class WepySimApparatus(SimApparatus): 30 | """ """ 31 | 32 | RUNNER_IDX = 0 33 | BC_IDX = 1 34 | RESAMPLER_IDX = 2 35 | 36 | def __init__(self, runner, resampler=None, boundary_conditions=None): 37 | if resampler is None: 38 | raise ValueError("must provide a resampler") 39 | 40 | # add them in the order they are done in Wepy 41 | filters = [runner, boundary_conditions, resampler] 42 | 43 | super().__init__(filters) 44 | 45 | @property 46 | def runner(self): 47 | return self.filters[self.RUNNER_IDX] 48 | 49 | @property 50 | def boundary_conditions(self): 51 | return self.filters[self.BC_IDX] 52 | 53 | @property 54 | def resampler(self): 55 | return self.filters[self.RESAMPLER_IDX] 56 | 57 | 58 | class SimSnapshot: 59 | """ """ 60 | 61 | def __init__(self, walkers, apparatus): 62 | self._walkers = deepcopy(walkers) 63 | self._apparatus = deepcopy(apparatus) 64 | 65 | @property 66 | def walkers(self): 67 | """ """ 68 | return self._walkers 69 | 70 | @property 71 | def apparatus(self): 72 | """ """ 73 | return self._apparatus 74 | -------------------------------------------------------------------------------- /src/wepy/reporter/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/src/wepy/reporter/__init__.py -------------------------------------------------------------------------------- /src/wepy/reporter/openmm.py: -------------------------------------------------------------------------------- 1 | # Third Party Library 2 | from pint import UnitRegistry 3 | 4 | # First Party Library 5 | from wepy.reporter.dashboard import RunnerDashboardSection 6 | 7 | # initialize the unit registry 8 | units = UnitRegistry() 9 | 10 | 11 | class OpenMMRunnerDashboardSection(RunnerDashboardSection): 12 | RUNNER_SECTION_TEMPLATE = """ 13 | 14 | Runner: {{ name }} 15 | 16 | Integration Step Size: {{ step_time }} 17 | 18 | Single Walker Sampling Time: {{ walker_total_sampling_time }} 19 | 20 | Total Sampling Time: {{ total_sampling_time }} 21 | """ 22 | 23 | def __init__(self, runner=None, step_time=None, **kwargs): 24 | if "name" not in kwargs: 25 | kwargs["name"] = "OpenMMRunner" 26 | 27 | super().__init__(runner=runner, step_time=step_time, **kwargs) 28 | 29 | if runner is None: 30 | assert ( 31 | step_time is not None 32 | ), "If no complete runner is given must give parameters: step_time" 33 | 34 | # assume it has units 35 | self.step_time = step_time 36 | 37 | else: 38 | simtk_step_time = runner.integrator.getStepSize() 39 | simtk_val = simtk_step_time.value_in_unit(simtk_step_time.unit) 40 | 41 | # convert to a more general purpose pint unit, which will be 42 | # used for the dashboards so we don't have the simtk 43 | # dependency 44 | self.step_time = simtk_val * units(simtk_step_time.unit.get_name()) 45 | 46 | # TODO 47 | 48 | # integrator and params 49 | 50 | # FF and params 51 | 52 | # updatables 53 | self.walker_total_sampling_time = 0.0 * units("microsecond") 54 | self.total_sampling_time = 0.0 * units("microsecond") 55 | 56 | def update_values(self, **kwargs): 57 | super().update_values(**kwargs) 58 | 59 | # amount of new sampling time for each walker 60 | new_walker_sampling_time = self.step_time * 
kwargs["n_segment_steps"] 61 | 62 | # accumulated sampling time for a single walker 63 | self.walker_total_sampling_time += new_walker_sampling_time 64 | 65 | # amount of sampling time for all walkers 66 | new_sampling_time = new_walker_sampling_time * len(kwargs["new_walkers"]) 67 | 68 | # accumulated sampling time for the ensemble 69 | self.total_sampling_time += new_sampling_time 70 | 71 | def gen_fields(self, **kwargs): 72 | fields = super().gen_fields(**kwargs) 73 | 74 | ## formatting 75 | 76 | # units for the different time scales 77 | step_size_unit_spec = "femtosecond" 78 | sampling_time_unit_spec = "microsecond" 79 | 80 | # get the unit objects for them 81 | step_size_unit = units(step_size_unit_spec) 82 | sampling_time_unit = units(sampling_time_unit_spec) 83 | 84 | # step size string 85 | step_size_str = str(self.step_time.to(step_size_unit)) 86 | 87 | # single walker sampling 88 | walker_samp_str = str(self.walker_total_sampling_time.to(sampling_time_unit)) 89 | 90 | # total sampling 91 | total_samp_str = str(self.total_sampling_time.to(sampling_time_unit)) 92 | 93 | new_fields = { 94 | "step_time": step_size_str, 95 | "walker_total_sampling_time": walker_samp_str, 96 | "total_sampling_time": total_samp_str, 97 | } 98 | 99 | fields.update(new_fields) 100 | 101 | return fields 102 | -------------------------------------------------------------------------------- /src/wepy/reporter/receptor/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/src/wepy/reporter/receptor/__init__.py -------------------------------------------------------------------------------- /src/wepy/reporter/receptor/dashboard.py: -------------------------------------------------------------------------------- 1 | # Third Party Library 2 | from jinja2 import Template 3 | from pint import UnitRegistry 4 | 5 | # First Party Library 6 | from wepy.reporter.dashboard import BCDashboardSection 7 | 8 | # initialize the unit registry 9 | units = UnitRegistry() 10 | 11 | 12 | class ReceptorBCDashboardSection(BCDashboardSection): 13 | BC_SECTION_TEMPLATE = """ 14 | 15 | Boundary Condition: {{ name }} 16 | 17 | Total Number of Dynamics segments: {{ total_n_walker_segments }} 18 | 19 | Total Number of Warps: {{ total_crossings }} 20 | 21 | Cumulative Boundary Crossed Weight: {{ total_unbound_weight }} 22 | 23 | Rate (1/τ): {{ rate }} 24 | 25 | Parameters: 26 | 27 | {{ parameters }} 28 | 29 | 30 | ** Warping Log 31 | 32 | {{ warping_log }} 33 | 34 | """ 35 | 36 | def __init__(self, **kwargs): 37 | super().__init__(**kwargs) 38 | 39 | def gen_fields(self, **kwargs): 40 | fields = super().gen_fields(**kwargs) 41 | 42 | # since there is only one boundary to cross here we don't 43 | # really have to do any special reporting for meaningful 44 | # boundaries. So we just use the standard one. 45 | 46 | # we calculate the non-dimensional rate in terms of the cycle 47 | # numbers, then you would just have to multiply that number by 48 | # 1/time_per_cycle. 
In order to handle a variable number of
49 |         # walkers we just keep track of all of the segments that were
50 |         # run in total by keeping a running tally of how many
51 |         # 'new_walkers' are received in the 'total_n_walker_segments'
52 |         # attribute
53 | 
54 |         rate = self.total_crossed_weight / self.total_n_walker_segments
55 | 
56 |         new_fields = {
57 |             "parameters": "",
58 |             "rate": rate,
59 |         }
60 | 
61 |         # combine the superclass fields with the fields here,
62 |         # overwriting them from the superclass if they were redefined
63 |         # explicitly
64 |         fields.update(new_fields)
65 | 
66 |         return fields
67 | 
68 | 
69 | class UnbindingBCDashboardSection(ReceptorBCDashboardSection):
70 |     RECEPTOR_PARAMETERS = """
71 |     Cutoff Distance: {{ cutoff_distance }}
72 |     """
73 | 
74 |     def __init__(self, bc=None, cutoff_distance=None, **kwargs):
75 |         if "name" not in kwargs:
76 |             kwargs["name"] = "UnbindingBC"
77 | 
78 |         super().__init__(bc=bc, cutoff_distance=cutoff_distance, **kwargs)
79 | 
80 |         if bc is not None:
81 |             self.cutoff_distance = bc.cutoff_distance
82 |         else:
83 |             assert (
84 |                 cutoff_distance is not None
85 |             ), "If no bc is given must give parameters: cutoff_distance"
86 |             self.cutoff_distance = cutoff_distance
87 | 
88 |     def gen_fields(self, **kwargs):
89 |         fields = super().gen_fields(**kwargs)
90 | 
91 |         parameters_str = Template(self.RECEPTOR_PARAMETERS).render(
92 |             cutoff_distance=self.cutoff_distance,
93 |         )
94 | 
95 |         new_fields = {
96 |             "parameters": parameters_str,
97 |         }
98 |         fields.update(new_fields)
99 |         return fields
100 | 
101 | 
102 | class RebindingBCDashboardSection(ReceptorBCDashboardSection):
103 |     RECEPTOR_PARAMETERS = """
104 |     Cutoff RMSD: {{ cutoff_rmsd }}
105 |     """
106 | 
107 |     def __init__(self, bc=None, cutoff_rmsd=None, **kwargs):
108 |         if "name" not in kwargs:
109 |             kwargs["name"] = "RebindingBC"
110 | 
111 |         super().__init__(bc=bc, cutoff_rmsd=cutoff_rmsd, **kwargs)
112 | 
113 |         if bc is not None:
114 |             self.cutoff_rmsd = bc.cutoff_rmsd
115 |         else:
116 |             assert (
117 |                 cutoff_rmsd is not None
118 |             ), "If no bc is given must give parameters: cutoff_rmsd"
119 |             self.cutoff_rmsd = cutoff_rmsd
120 | 
121 |     def gen_fields(self, **kwargs):
122 |         fields = super().gen_fields(**kwargs)
123 | 
124 |         parameters_str = Template(self.RECEPTOR_PARAMETERS).render(
125 |             cutoff_rmsd=self.cutoff_rmsd,
126 |         )
127 | 
128 |         new_fields = {
129 |             "parameters": parameters_str,
130 |         }
131 |         fields.update(new_fields)
132 |         return fields
133 | 
--------------------------------------------------------------------------------
/src/wepy/reporter/revo/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/src/wepy/reporter/revo/__init__.py
--------------------------------------------------------------------------------
/src/wepy/reporter/walker_pkl.py:
--------------------------------------------------------------------------------
1 | # Standard Library
2 | import logging
3 | 
4 | logger = logging.getLogger(__name__)
5 | # Standard Library
6 | import os
7 | import os.path as osp
8 | import pickle
9 | 
10 | # First Party Library
11 | from wepy.reporter.reporter import Reporter
12 | 
13 | 
14 | class WalkerPklReporter(Reporter):
15 |     def __init__(self, save_dir="./", freq=100, num_backups=2):
16 |         # the directory in which to save the pickles
17 |         self.save_dir = save_dir
18 |         # the frequency of cycles to backup the walkers as a pickle
19 |         self.backup_freq = freq
20 |         # the number of most recent walker pickles to keep; older ones will be removed
21 | 
        self.num_backups = num_backups
22 | 
23 |     def init(self, *args, **kwargs):
24 |         # make sure the save_dir exists
25 |         if not osp.exists(self.save_dir):
26 |             os.makedirs(self.save_dir)
27 | 
28 |     def report(self, cycle_idx=None, new_walkers=None, **kwargs):
29 |         # total number of cycles completed
30 |         n_cycles = cycle_idx + 1
31 |         # if the cycle is on the frequency backup walkers to a pickle
32 |         if n_cycles % self.backup_freq == 0:
33 |             pkl_name = "walkers_cycle_{}.pkl".format(cycle_idx)
34 |             pkl_path = osp.join(self.save_dir, pkl_name)
35 |             with open(pkl_path, "wb") as wf:
36 |                 pickle.dump(new_walkers, wf)
37 |             # remove old pickles if we have more than the num_backups
38 |             if (cycle_idx // self.backup_freq) >= self.num_backups:
39 |                 old_idx = cycle_idx - self.num_backups * self.backup_freq
40 |                 old_pkl_fname = "walkers_cycle_{}.pkl".format(old_idx)
41 |                 os.remove(osp.join(self.save_dir, old_pkl_fname))
42 | 
--------------------------------------------------------------------------------
/src/wepy/reporter/wexplore/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/src/wepy/reporter/wexplore/__init__.py
--------------------------------------------------------------------------------
/src/wepy/resampling/decisions/__init__.py:
--------------------------------------------------------------------------------
1 | """Classes for encapsulating and providing semantics for resampling
2 | decisions.
3 | 
4 | """
5 | 
--------------------------------------------------------------------------------
/src/wepy/resampling/distances/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/src/wepy/resampling/distances/__init__.py
--------------------------------------------------------------------------------
/src/wepy/resampling/distances/randomwalk.py:
--------------------------------------------------------------------------------
1 | """This module implements the distance metric for the RandomWalk
2 | system as part of the RandomWalk object. This distance metric is a
3 | scaled version of the Manhattan norm.
4 | 
5 | """
6 | 
7 | # Standard Library
8 | import logging
9 | 
10 | logger = logging.getLogger(__name__)
11 | 
12 | # Third Party Library
13 | import numpy as np
14 | 
15 | # First Party Library
16 | from wepy.resampling.distances.distance import Distance
17 | 
18 | 
19 | class RandomWalkDistance(Distance):
20 |     """A class implementing the RandomWalkDistance metric for measuring
21 |     differences between walker states. This is a normalized Manhattan
22 |     distance measured between the positions of the walkers.
23 | 
24 |     """
25 | 
26 |     def __init__(self):
27 |         """Construct a RandomWalkDistance metric."""
28 |         pass
29 | 
30 |     def image(self, state):
31 |         """Transform a state into a random walk image.
32 | 
33 |         A random walk image is just the position of a walker in the
34 |         N-dimensional space.
35 | 
36 |         Parameters
37 |         ----------
38 | 
39 |         state : object implementing WalkerState
40 |             A walker state object with positions in a numpy array
41 |             of shape (N), where N is the dimension of the random
42 |             walk system.
43 | 
44 |         Returns
45 |         -------
46 | 
47 |         randomwalk_image : array of floats of shape (N)
48 |             The positions of a walker in the N-dimensional space.
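
        For example, a small sketch of the full metric on two walker
        states (`state_a` and `state_b` are hypothetical states carrying
        the 'positions' field assumed above):

            distance = RandomWalkDistance()

            image_a = distance.image(state_a)
            image_b = distance.image(state_b)

            d = distance.image_distance(image_a, image_b)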
49 | 
50 |         """
51 |         return state["positions"]
52 | 
53 |     def image_distance(self, image_a, image_b):
54 |         """Compute the distance between the images of two walkers.
55 | 
56 |         Parameters
57 |         ----------
58 | 
59 |         image_a : array of float of shape (1, N)
60 |             Position of the first walker's state.
61 | 
62 |         image_b : array of float of shape (1, N)
63 |             Position of the second walker's state.
64 | 
65 |         Returns
66 |         -------
67 | 
68 |         distance : float
69 |             The normalized Manhattan distance.
70 | 
71 |         """
72 |         return np.average(np.abs(image_a - image_b))
73 | 
--------------------------------------------------------------------------------
/src/wepy/resampling/resamplers/__init__.py:
--------------------------------------------------------------------------------
1 | """Resampler framework and library.
2 | 
3 | This sub-package provides an interface for implementing new resamplers
4 | that are able to reuse existing distance metrics and decision classes.
5 | 
6 | Resamplers minimally must implement a single method 'resample'.
7 | 
8 | """
9 | 
--------------------------------------------------------------------------------
/src/wepy/runners/__init__.py:
--------------------------------------------------------------------------------
1 | """Module for runners that are used to propagate dynamics of
2 | walkers.
3 | 
4 | """
5 | 
--------------------------------------------------------------------------------
/src/wepy/runners/randomwalk.py:
--------------------------------------------------------------------------------
1 | """The random walk dynamics runner.
2 | 
3 | In this system, the state of the walkers is defined as an
4 | N-dimensional vector of non-negative values. The walkers start at
5 | position zero (in N-dimensional space) and randomly move a step either
6 | forward or backward with the given probabilities. This is done in each
7 | dimension at each dynamic step. All moves that result in a negative
8 | position are rejected.
9 | 
10 | One potential use of the random walk system is to test the
11 | performance of different resamplers as seen in these papers:
12 | 
13 | "WExplore: Hierarchical Exploration of High-Dimensional Spaces
14 | Using the Weighted Ensemble Algorithm" and
15 | "REVO: Resampling of Ensembles by Variation Optimization".
16 | 
17 | """
18 | 
19 | # Standard Library
20 | import logging
21 | 
22 | logger = logging.getLogger(__name__)
23 | # Standard Library
24 | import random as rand
25 | 
26 | # Third Party Library
27 | import numpy as np
28 | from pint import UnitRegistry
29 | 
30 | # First Party Library
31 | from wepy.runners.runner import Runner
32 | from wepy.walker import Walker, WalkerState
33 | 
34 | units = UnitRegistry()
35 | 
36 | # the names of the units. We pass them through pint just to validate
37 | # them
38 | UNIT_NAMES = (
39 |     ("positions_unit", str(units("microsecond").units)),
40 |     ("time_unit", str(units("picosecond").units)),
41 | )
42 | 
43 | """Mapping of unit identifiers to the corresponding pint units."""
44 | 
45 | 
46 | class RandomWalkRunner(Runner):
47 |     """RandomWalk runner for random walk simulations."""
48 | 
49 |     def __init__(self, probability=0.25):
50 |         """Constructor for RandomWalkRunner.
51 | 
52 |         Parameters
53 |         ----------
54 | 
55 |         probability : float
56 |             "Probability" is defined here as the forward-move
57 |             probability only. The backward-move probability is
58 |             1 - probability. (Default = 0.25)
59 | 
60 |         """
61 | 
62 |         self._probability = probability
63 | 
64 |     @property
65 |     def probability(self):
66 |         """The probability of a forward move in the N-dimensional space."""
67 |         return self._probability
68 | 
69 |     def _walk(self, positions):
70 |         """Run dynamics for the RandomWalk system for one step.
71 | 
72 |         Parameters
73 |         ----------
74 |         positions : arraylike of shape (1, dimension)
75 |             Current position of the walker.
76 | 
77 |         Returns
78 |         -------
79 |         new_positions : arraylike of shape (1, dimension)
80 |             The positions of the walker after one dynamic step.
81 | 
82 |         """
83 | 
84 |         # make a deep copy of the current position
85 |         new_positions = positions.copy()
86 | 
87 |         # get the dimension of the random walk space
88 |         dimension = new_positions.shape[1]
89 | 
90 |         # iterate over each dimension
91 |         for dim_idx in range(dimension):
92 |             # generate a uniform random number to choose between
93 |             # moving forward or backward
94 |             rand_num = rand.uniform(0, 1)
95 | 
96 |             # make a forward movement
97 |             if rand_num < self.probability:
98 |                 new_positions[0][dim_idx] += 1
99 |             # make a backward movement
100 |             else:
101 |                 new_positions[0][dim_idx] -= 1
102 | 
103 |             # implement the boundary condition for movement, movements
104 |             # to -1 are rejected
105 |             if new_positions[0][dim_idx] < 0:
106 |                 new_positions[0][dim_idx] = 0
107 | 
108 |         return new_positions
109 | 
110 |     def run_segment(self, walker, segment_length, **kwargs):
111 |         """Runs a random walk simulation for the given number of steps.
112 | 
113 |         Parameters
114 |         ----------
115 |         walker : object implementing the Walker interface
116 |             The walker for which dynamics will be propagated.
117 | 
118 | 
119 |         segment_length : int
120 |             The numerical value that specifies how many dynamics steps
121 |             are to be run.
122 | 
123 |         Returns
124 |         -------
125 |         new_walker : object implementing the Walker interface
126 |             Walker after dynamics was run, only the state should be modified.
127 | 
128 |         """
129 | 
130 |         # get the current position of the RandomWalk walker
131 |         positions = walker.state["positions"]
132 | 
133 |         # make movements for segment_length steps
134 |         for _ in range(segment_length):
135 |             # call the walk function for a one-step movement
136 |             new_positions = self._walk(positions)
137 |             positions = new_positions
138 | 
139 |         # make a new state from the new positions
140 |         new_state = WalkerState(positions=new_positions, time=0.0)
141 | 
142 |         # create the new walker from the new state and current weight
143 |         new_walker = Walker(new_state, walker.weight)
144 | 
145 |         return new_walker
146 | 
--------------------------------------------------------------------------------
/src/wepy/runners/runner.py:
--------------------------------------------------------------------------------
1 | """Abstract Base classes implementing the Runner interface.
2 | 
3 | Runner Interface
4 | ----------------
5 | 
6 | All a runner needs to implement is the 'run_segment' method which
7 | should accept a walker and a spec for the length of the segment to run
8 | (e.g. number of dynamics steps).
9 | 
10 | Additionally, any number of optional key word arguments should be given.
11 | 
12 | As a matter of convention, classes accessory to a runner (such as
13 | State, Walker, Worker, etc.) should also be put in the same module as
14 | the runner.
15 | 
16 | See the openmm.py module for an example.
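
As an illustration, a minimal toy runner satisfying this interface (a
sketch only; the Gaussian noise model and the 'positions' state field
are assumptions for the example):

    import numpy as np

    from wepy.runners.runner import Runner
    from wepy.walker import Walker, WalkerState

    class JitterRunner(Runner):
        '''Toy runner that adds Gaussian noise to the positions.'''

        def run_segment(self, walker, segment_length, **kwargs):
            positions = walker.state['positions']
            for _ in range(segment_length):
                positions = positions + np.random.normal(
                    scale=0.1, size=positions.shape)
            new_state = WalkerState(positions=positions, time=0.0)
            return Walker(new_state, walker.weight)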
17 | 
18 | """
19 | 
20 | # Third Party Library
21 | 
22 | 
23 | class Runner(object):
24 |     """Abstract base class for the Runner interface."""
25 | 
26 |     def pre_cycle(self, **kwargs):
27 |         """Perform pre-cycle behavior. run_segment will be called for each
28 |         walker so this allows you to perform changes of state on a
29 |         per-cycle basis.
30 | 
31 |         Parameters
32 |         ----------
33 | 
34 |         kwargs : key-word arguments
35 |             Key-value pairs to be interpreted by each runner implementation.
36 | 
37 |         """
38 | 
39 |         # by default just pass since subclasses need not implement this
40 |         pass
41 | 
42 |     def post_cycle(self, **kwargs):
43 |         """Perform post-cycle behavior. run_segment will be called for each
44 |         walker so this allows you to perform changes of state on a
45 |         per-cycle basis.
46 | 
47 |         Parameters
48 |         ----------
49 | 
50 |         kwargs : key-word arguments
51 |             Key-value pairs to be interpreted by each runner implementation.
52 | 
53 |         """
54 | 
55 |         # by default just pass since subclasses need not implement this
56 |         pass
57 | 
58 |     def run_segment(self, walker, segment_length, **kwargs):
59 |         """Run dynamics for the walker.
60 | 
61 |         Parameters
62 |         ----------
63 |         walker : object implementing the Walker interface
64 |             The walker for which dynamics will be propagated.
65 |         segment_length : int or float
66 |             The numerical value that specifies how much dynamics is to be run.
67 | 
68 |         Returns
69 |         -------
70 |         new_walker : object implementing the Walker interface
71 |             Walker after dynamics was run, only the state should be modified.
72 | 
73 |         """
74 | 
75 |         raise NotImplementedError
76 | 
77 | 
78 | class NoRunner(Runner):
79 |     """Stub Runner that just returns the walkers back with the same state.
80 | 
81 |     May be useful for testing.
82 |     """
83 | 
84 |     def run_segment(self, walker, segment_length, **kwargs):
85 |         # documented in superclass
86 |         return walker
87 | 
--------------------------------------------------------------------------------
/src/wepy/util/__init__.py:
--------------------------------------------------------------------------------
1 | """Miscellaneous functions used in wepy that don't fit elsewhere.
2 | 
3 | Warnings
4 | --------
5 | 
6 | Deprecation warning!
7 | 
8 | Try not to rely on these functions as part of your stable code. These
9 | were included here in wepy because other libraries do not provide this
10 | functionality, although they probably should. That is, these functions
11 | are outside of the purview of the goals of wepy and are not guaranteed
12 | to be included in wepy indefinitely.
13 | 
14 | 
15 | Please do not rely on anything in the util module to exist
16 | indefinitely.
17 | 
18 | Any function is subject to deprecation at any release.
19 | 
20 | If you use these functions, consider copying the source code.
21 | 
22 | If any of these functions or classes become reified and officially
23 | part of wepy they will go in a sub-module dedicated to their purposes.
24 | 
25 | But these will likely (and should) be moved to other libraries as they
26 | are more general purpose than wepy.
27 | 
28 | """
29 | 
--------------------------------------------------------------------------------
/src/wepy/work_mapper/__init__.py:
--------------------------------------------------------------------------------
1 | """Classes used for efficiently performing work at different stages of
2 | the wepy simulation cycle.
3 | 
4 | Wepy simulations have four sequential stages of computation:
5 | 
6 | 1. running segments
7 | 2. boundary conditions
8 | 3. resampling
9 | 4. reporting
10 | 
11 | Stage 1 involves propagating dynamics for each walker in a completely
12 | independent manner using the same runner function (i.e. `run_segment`)
13 | and can always be completely parallelized.
14 | 
15 | Stages 2 and 4, with a few more assumptions of independence (which are
16 | most likely satisfied without overly complex behavior), are probably
17 | also completely parallelizable over walkers and reporters
18 | respectively.
19 | 
20 | Resampling may also have a degree of parallelizability but will vary
21 | considerably between resamplers and so cannot be treated outside of
22 | any individual framework for resamplers and so will not be treated
23 | generally in this submodule.
24 | 
25 | Currently, wepy supports a worker-based task queue implementation
26 | (wepy.work_mapper.mapper.WorkerMapper) for running parallel segments
27 | in stage 1, which is very useful for utilizing GPU compute resources.
28 | It is pretty general and utilizes the multiprocessing library for
29 | starting worker processes. Specific code for running OpenMM GPU
30 | calculations is contained within the openmm runner module.
31 | 
32 | Currently general solutions for parallelizing stages 2 and 3 are not
33 | provided or supported in the simulation manager (to avoid premature
34 | optimization) but these are easy targets for improving the efficiency
35 | of wepy code.
36 | 
37 | If you don't care about parallelizing your segment running (say for
38 | simple test systems) you can use the reference implementation
39 | (wepy.work_mapper.mapper.Mapper) which is basically just a wrapper
40 | around a for-loop.
41 | 
42 | This sub-module provides reference implementations and/or abstract
43 | base classes for a few interfaces.
44 | 
45 | The WorkMapper class interface has the following methods:
46 | 
47 | - init
48 | - cleanup
49 | - map
50 | 
51 | A single attribute for the segment_function that is set at runtime
52 | with a call to `init`:
53 | 
54 | - segment_func
55 | 
56 | And an optional attribute to enable reporting on segment running performance:
57 | 
58 | - worker_segment_times : dict of int : list of float
59 | 
60 | This should be a dictionary mapping a worker index to a list of float
61 | values of time (in seconds) for each task (i.e. segment of walker
62 | dynamics) it ran in the last cycle only (not cumulative over
63 | consecutive cycles).
64 | 
65 | The `init` method is called at runtime by the simulation manager at
66 | the beginning of the simulation and allows for performing such actions
67 | as opening file handles or starting worker processes (as is the case
68 | with the WorkerMapper).
69 | 
70 | The only necessary key-word argument to this method is 'segment_func'
71 | which will be provided by the simulation manager (and is derived at
72 | runtime from the runner).
73 | 
74 | The `cleanup` method likewise is called either at the end of a
75 | successful simulation or when an error occurs in the call to
76 | `run_cycle` in the simulation manager (e.g. to allow killing of live
77 | processes).
78 | 
79 | The `map` function acts similarly to the python builtin except it does
80 | not accept the function to map over the data. This is instead set
81 | during the call to `init` from the 'segment_func' key-word argument.
82 | 
83 | The WorkerMapper class and related Worker and Task classes are
84 | provided as-is and are not really intended to be subclassed, although
85 | you are free to do so.
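
As a rough sketch of driving a mapper by hand (the exact arguments the
simulation manager passes to `map` are simplified here, and `runner`,
`walkers`, and `segment_lengths` are hypothetical):

    mapper = Mapper()

    # the simulation manager normally does this at simulation start
    mapper.init(segment_func=runner.run_segment)

    new_walkers = list(mapper.map(walkers, segment_lengths))

    mapper.cleanup()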
86 | 87 | The only interfacing that facilitates their usage is that the 88 | simulation manager will pass a required keyword argument 'num_workers' 89 | to the call to `init`. 90 | 91 | See the simulation manager module to see what fields are passed to the 92 | mappers. These will likely not be removed in the future, although more 93 | may be added. 94 | 95 | """ 96 | -------------------------------------------------------------------------------- /src/wepy/work_mapper/worker.py: -------------------------------------------------------------------------------- 1 | """Classes for workers and tasks for use with WorkerMapper. """ 2 | 3 | # Standard Library 4 | import logging 5 | 6 | logger = logging.getLogger(__name__) 7 | # Standard Library 8 | import multiprocessing as mp 9 | import time 10 | 11 | # First Party Library 12 | # we can't move the WorkerMapper here until some of the pickles I have 13 | # laying around don't expect it to be here. In the meantime, new 14 | # software can expect it to be here so we import it here. 15 | from wepy.work_mapper.mapper import ( 16 | ABCWorkerMapper, 17 | TaskException, 18 | Worker, 19 | WorkerException, 20 | WorkerMapper, 21 | WrapperException, 22 | ) 23 | 24 | # this whole thing should get refactored into a better name which 25 | # should be something like ConsumerMapper because our workers act like 26 | # consumers 27 | -------------------------------------------------------------------------------- /src/wepy_test_drive.py: -------------------------------------------------------------------------------- 1 | # Standard Library 2 | import logging 3 | 4 | logger = logging.getLogger(__name__) 5 | 6 | # Third Party Library 7 | import click 8 | import simtk.unit as unit 9 | from multiprocessing_logging import install_mp_handler 10 | 11 | # First Party Library 12 | # import all of the sim makers we have available 13 | from wepy_tools.sim_makers.openmm import * 14 | 15 | SYSTEM_SIM_MAKERS = { 16 | "LennardJonesPair": LennardJonesPairOpenMMSimMaker, 17 | "LysozymeImplicit": LysozymeImplicitOpenMMSimMaker, 18 | } 19 | 20 | 21 | def parse_system_spec(spec): 22 | sys_spec, runner_platform = spec.split("/") 23 | 24 | runner, platform = runner_platform.split("-") 25 | 26 | return sys_spec, runner, platform 27 | 28 | 29 | @click.option("-v", "--verbose", is_flag=True) 30 | @click.option( 31 | "-W", "--work-mapper", default="WorkerMapper", help="Work mapper for doing work." 32 | ) 33 | @click.option("-R", "--resampler", default="WExplore", help="Resampling algorithm.") 34 | @click.argument("n_workers", type=int) 35 | @click.argument("tau", type=float) 36 | @click.argument("n_cycles", type=int) 37 | @click.argument("n_walkers", type=int) 38 | @click.argument("system") 39 | @click.command() 40 | def cli( 41 | verbose, 42 | work_mapper, 43 | resampler, 44 | n_workers, 45 | tau, 46 | n_cycles, 47 | n_walkers, 48 | system, 49 | ): 50 | """Run a pre-parametrized wepy simulation. 
51 | 
52 |     \b
53 |     Parameters
54 |     ----------
55 | 
56 |     \b
57 |     SYSTEM : str
58 |         Which pre-parametrized simulation to run; should have the format: System/Runner-Platform
59 | 
60 |     \b
61 |     N_WALKERS : int
62 |         Number of parallel trajectories to run
63 | 
64 |     \b
65 |     N_CYCLES : int
66 |         How many cycles to run the simulation for
67 | 
68 |     \b
69 |     TAU : float
70 |         Cycle simulation time in picoseconds
71 | 
72 |     \b
73 |     N_WORKERS : int
74 |         Number of worker processes to run on
75 | 
76 |     \b
77 |     Available Systems
78 |     -----------------
79 | 
80 |     LennardJonesPair : A pair of Lennard-Jones particles
81 | 
82 |     LysozymeImplicit : Lysozyme-xylene receptor ligand in implicit solvent (2621 atoms)
83 | 
84 |     \b
85 |     Available Runners/Platforms
86 |     ---------------------------
87 | 
88 |     \b
89 |     OpenMM-
90 |         Reference
91 |         CPU
92 |         OpenCL (GPU)
93 |         CUDA (GPU)
94 | 
95 | 
96 |     \b
97 |     Available Work Mappers
98 |     ----------------------
99 | 
100 |     WorkerMapper (default) : parallel python multiprocessing based
101 |     worker-consumer concurrency model
102 | 
103 |     WIP (not available in test drive yet):
104 | 
105 |     TaskMapper : parallel python multiprocessing based task-process
106 |     based concurrency model
107 | 
108 |     Mapper : non-parallel single-process implementation
109 | 
110 | 
111 |     \b
112 |     Available Resamplers
113 |     --------------------
114 | 
115 |     No : Doesn't do any resampling. Simply runs an ensemble of walkers.
116 | 
117 |     WExplore : Hierarchical History Dependent Voronoi Binning
118 | 
119 |     REVO : Stateless and Binless algorithm that rewards in-ensemble novelty.
120 | 
121 |     \b
122 |     Examples
123 |     --------
124 | 
125 |     python -m wepy_test_drive LennardJonesPair/OpenMM-CPU 20 10 2 4
126 | 
127 |     \b
128 |     Notes
129 |     -----
130 | 
131 |     When using a GPU platform your number of workers should be the
132 |     number of GPUs you want to use.
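
    \b
    For instance, a hypothetical two-GPU invocation (same argument order
    as the example above):

    python -m wepy_test_drive LysozymeImplicit/OpenMM-CUDA 48 100 20 2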
133 | 
134 |     """
135 | 
136 |     if verbose:
137 |         logging.getLogger(__name__).setLevel(logging.DEBUG)
138 |         install_mp_handler()
139 |         logger.debug("Starting the test")
140 | 
141 |     resampler_fullname = resampler + "Resampler"
142 | 
143 |     sys_spec, runner, platform = parse_system_spec(system)
144 | 
145 |     # choose which sim_maker to use
146 |     sim_maker = SYSTEM_SIM_MAKERS[sys_spec]()
147 | 
148 |     apparatus = sim_maker.make_apparatus(
149 |         platform=platform,
150 |         resampler=resampler_fullname,
151 |     )
152 | 
153 |     # compute the number of steps to take from tau
154 |     tau = tau * unit.picosecond
155 |     n_steps = round(tau / apparatus.filters[0].integrator.getStepSize())
156 | 
157 |     config = sim_maker.make_configuration(
158 |         apparatus, work_mapper_spec=work_mapper, platform=platform
159 |     )
160 | 
161 |     sim_manager = sim_maker.make_sim_manager(n_walkers, apparatus, config)
162 | 
163 |     # run the simulation
164 |     sim_manager.run_simulation(n_cycles, n_steps, num_workers=n_workers)
165 | 
166 | 
167 | if __name__ == "__main__":
168 |     cli()
169 | 
--------------------------------------------------------------------------------
/src/wepy_tools/__init__.py:
--------------------------------------------------------------------------------
1 | 
2 | 
--------------------------------------------------------------------------------
/src/wepy_tools/monitoring/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/src/wepy_tools/monitoring/__init__.py
--------------------------------------------------------------------------------
/src/wepy_tools/sim_makers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/src/wepy_tools/sim_makers/__init__.py
--------------------------------------------------------------------------------
/src/wepy_tools/sim_makers/openmm/__init__.py:
--------------------------------------------------------------------------------
1 | # First Party Library
2 | from wepy_tools.sim_makers.openmm.lennard_jones import LennardJonesPairOpenMMSimMaker
3 | from wepy_tools.sim_makers.openmm.lysozyme import LysozymeImplicitOpenMMSimMaker
4 | from wepy_tools.sim_makers.openmm.sim_maker import (
5 |     OpenMMSimMaker,
6 |     OpenMMToolsTestSysSimMaker,
7 | )
8 | 
9 | __all__ = [
10 |     "LennardJonesPairOpenMMSimMaker",
11 |     "LysozymeImplicitOpenMMSimMaker",
12 | ] + [  # the base classes
13 |     "OpenMMSimMaker",
14 |     "OpenMMToolsTestSysSimMaker",
15 | ]
16 | 
--------------------------------------------------------------------------------
/src/wepy_tools/sim_makers/openmm/lennard_jones.py:
--------------------------------------------------------------------------------
1 | # Third Party Library
2 | import numpy as np
3 | from openmm_systems.test_systems import LennardJonesPair
4 | from scipy.spatial.distance import euclidean
5 | 
6 | # First Party Library
7 | from wepy.boundary_conditions.receptor import UnbindingBC
8 | from wepy.resampling.distances.distance import Distance
9 | from wepy.runners.openmm import GET_STATE_KWARG_DEFAULTS
10 | from wepy_tools.sim_makers.openmm import OpenMMToolsTestSysSimMaker
11 | 
12 | 
13 | ## Distance Metric
14 | # we define a simple distance metric for this system, assuming the
15 | # positions are in a 'positions' field
16 | class PairDistance(Distance):
17 |     def __init__(self, metric=euclidean):
18 |         self.metric = metric
19 | 
20 | 
def image(self, state): 21 | return state["positions"] 22 | 23 | def image_distance(self, image_a, image_b): 24 | dist_a = self.metric(image_a[0], image_a[1]) 25 | dist_b = self.metric(image_b[0], image_b[1]) 26 | 27 | return np.abs(dist_a - dist_b) 28 | 29 | 30 | # class PairUnbinding(BoundaryCondition): 31 | 32 | # pass 33 | 34 | 35 | class LennardJonesPairOpenMMSimMaker(OpenMMToolsTestSysSimMaker): 36 | TEST_SYS = LennardJonesPair 37 | 38 | BCS = OpenMMToolsTestSysSimMaker.BCS + [UnbindingBC] 39 | 40 | LIGAND_IDXS = [0] 41 | RECEPTOR_IDXS = [1] 42 | 43 | UNBINDING_BC_DEFAULTS = { 44 | "cutoff_distance": 1.0, # nm 45 | "periodic": False, 46 | } 47 | 48 | DEFAULT_BC_PARAMS = OpenMMToolsTestSysSimMaker.DEFAULT_BC_PARAMS 49 | DEFAULT_BC_PARAMS.update( 50 | { 51 | "UnbindingBC": UNBINDING_BC_DEFAULTS, 52 | } 53 | ) 54 | 55 | def make_bc(self, bc_class, bc_params): 56 | if bc_class == UnbindingBC: 57 | bc_params.update( 58 | { 59 | "distance": self.distance, 60 | "initial_state": self.init_state, 61 | "topology": self.json_top(), 62 | "ligand_idxs": self.LIGAND_IDXS, 63 | "receptor_idxs": self.RECEPTOR_IDXS, 64 | } 65 | ) 66 | 67 | bc = bc_class(**bc_params) 68 | 69 | return bc 70 | 71 | def __init__(self): 72 | # must set this here since we need it to generate the state, 73 | # will get called again in the superclass method 74 | self.getState_kwargs = dict(GET_STATE_KWARG_DEFAULTS) 75 | if self.GET_STATE_KWARGS is not None: 76 | self.getState_kwargs.update(self.GET_STATE_KWARGS) 77 | 78 | test_sys = LennardJonesPair() 79 | 80 | init_state = self.make_state(test_sys.system, test_sys.positions) 81 | 82 | super().__init__( 83 | distance=PairDistance(), 84 | init_state=init_state, 85 | system=test_sys.system, 86 | topology=test_sys.topology, 87 | ) 88 | -------------------------------------------------------------------------------- /src/wepy_tools/sim_makers/toys/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/src/wepy_tools/sim_makers/toys/__init__.py -------------------------------------------------------------------------------- /src/wepy_tools/systems/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/src/wepy_tools/systems/__init__.py -------------------------------------------------------------------------------- /src/wepy_tools/systems/lennard_jones.py: -------------------------------------------------------------------------------- 1 | # Third Party Library 2 | import numpy as np 3 | from scipy.spatial.distance import euclidean 4 | 5 | # First Party Library 6 | from wepy.resampling.distances.distance import Distance 7 | 8 | 9 | class PairDistance(Distance): 10 | def __init__(self, metric=euclidean): 11 | self.metric = metric 12 | 13 | def image(self, state): 14 | return state["positions"] 15 | 16 | def image_distance(self, image_a, image_b): 17 | dist_a = self.metric(image_a[0], image_a[1]) 18 | dist_b = self.metric(image_b[0], image_b[1]) 19 | 20 | return np.abs(dist_a - dist_b) 21 | -------------------------------------------------------------------------------- /src/wepy_tools/systems/mock.py: -------------------------------------------------------------------------------- 1 | """Mock systems""" 2 | -------------------------------------------------------------------------------- 
/src/wepy_tools/systems/openmm/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/src/wepy_tools/systems/openmm/__init__.py -------------------------------------------------------------------------------- /src/wepy_tools/systems/openmm/base.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path 3 | import numpy as np 4 | 5 | import scipy 6 | import scipy.special 7 | import scipy.integrate 8 | 9 | import openmm 10 | import openmm.unit as unit 11 | import openmm.app as omma 12 | 13 | class TestSystem(object): 14 | 15 | """Abstract base class for test systems, demonstrating how to implement a test system. 16 | 17 | Parameters 18 | ---------- 19 | 20 | Attributes 21 | ---------- 22 | system : openmm.System 23 | System object for the test system 24 | positions : openmm.unit.Quantity 25 | positions of the test system 26 | topology : openmm.app.Topology 27 | topology of the test system 28 | 29 | Notes 30 | ----- 31 | 32 | Unimplemented methods will default to the base class methods, which raise a NotImplementedError. 33 | 34 | Examples 35 | -------- 36 | 37 | Create a test system. 38 | 39 | >>> testsystem = TestSystem() 40 | 41 | Retrieve the System object. 42 | 43 | >>> system = testsystem.system 44 | 45 | Retrieve the positions. 46 | 47 | >>> positions = testsystem.positions 48 | 49 | Retrieve the topology. 50 | 51 | >>> topology = testsystem.topology 52 | 53 | Serialize system and positions to XML (to aid in debugging). 54 | 55 | >>> (system_xml, positions_xml) = testsystem.serialize() 56 | 57 | """ 58 | 59 | def __init__(self, **kwargs): 60 | """Abstract base class for test system. 61 | 62 | Parameters 63 | ---------- 64 | 65 | """ 66 | 67 | # Create an empty system object. 68 | self._system = openmm.System() 69 | 70 | # Store positions. 71 | self._positions = unit.Quantity(np.zeros([0, 3], float), unit.nanometers) 72 | 73 | # Empty topology. 74 | self._topology = omma.Topology() 75 | # MDTraj Topology is built on demand. 
76 | self._mdtraj_topology = None 77 | 78 | @property 79 | def system(self): 80 | """The openmm.System object corresponding to the test system.""" 81 | return self._system 82 | 83 | @system.setter 84 | def system(self, value): 85 | self._system = value 86 | 87 | @system.deleter 88 | def system(self): 89 | del self._system 90 | 91 | @property 92 | def positions(self): 93 | """The openmm.unit.Quantity object containing the particle positions, with units compatible with openmm.unit.nanometers.""" 94 | return self._positions 95 | 96 | @positions.setter 97 | def positions(self, value): 98 | self._positions = value 99 | 100 | @positions.deleter 101 | def positions(self): 102 | del self._positions 103 | 104 | @property 105 | def topology(self): 106 | """The openmm.app.Topology object corresponding to the test system.""" 107 | return self._topology 108 | 109 | @topology.setter 110 | def topology(self, value): 111 | self._topology = value 112 | self._mdtraj_topology = None 113 | 114 | @topology.deleter 115 | def topology(self): 116 | del self._topology 117 | 118 | @property 119 | def mdtraj_topology(self): 120 | """The mdtraj.Topology object corresponding to the test system (read-only).""" 121 | import mdtraj as md 122 | if self._mdtraj_topology is None: 123 | self._mdtraj_topology = md.Topology.from_openmm(self._topology) 124 | return self._mdtraj_topology 125 | 126 | 127 | def construct_restraining_potential(self, particle_indices, K): 128 | """Make a CustomExternalForce that puts an origin-centered spring on the chosen particles""" 129 | 130 | # Add a restraining potential centered at the origin. 131 | energy_expression = '(K/2.0) * (x^2 + y^2 + z^2);' 132 | energy_expression += 'K = %f;' % (K / (unit.kilojoules_per_mole / unit.nanometers ** 2)) # in OpenMM units 133 | force = openmm.CustomExternalForce(energy_expression) 134 | for particle_index in particle_indices: 135 | force.addParticle(particle_index, []) 136 | 137 | return force -------------------------------------------------------------------------------- /src/wepy_tools/systems/receptor.py: -------------------------------------------------------------------------------- 1 | # Third Party Library 2 | import mdtraj as mdj 3 | import numpy as np 4 | import simtk.unit as unit 5 | 6 | # First Party Library 7 | from wepy.util.mdtraj import json_to_mdtraj_topology, mdtraj_to_json_topology 8 | from wepy.util.util import box_vectors_to_lengths_angles 9 | 10 | 11 | def binding_site_idxs( 12 | json_topology, 13 | ligand_idxs, 14 | receptor_idxs, 15 | coords, 16 | box_vectors, 17 | cutoff, 18 | periodic=True, 19 | ): 20 | """Parameters 21 | ---------- 22 | 23 | json_topology : str 24 | 25 | ligand_idxs : arraylike (1,) 26 | 27 | receptor_idxs : arraylike (1,) 28 | 29 | coords : N x 3 arraylike of float or simtk.Quantity 30 | If not a quantity will implicitly be treated as being in 31 | nanometers. 32 | 33 | box_vectors : simtk.Quantity 34 | If not a quantity will implicitly be treated as being in 35 | nanometers. 36 | 37 | cutoff : float or simtk.Quantity 38 | If not a quantity will implicitly be treated as being in 39 | nanometers. 
40 | 41 | Returns 42 | ------- 43 | 44 | binding_site_idxs : arraylike (1,) 45 | 46 | """ 47 | 48 | # if they are simtk.units convert quantities to numbers in 49 | # nanometers 50 | if unit.is_quantity(cutoff): 51 | cutoff = cutoff.value_in_unit(unit.nanometer) 52 | 53 | if unit.is_quantity(coords): 54 | coords = coords.value_in_unit(unit.nanometer) 55 | 56 | if unit.is_quantity(box_vectors): 57 | box_vectors = box_vectors.value_in_unit(unit.nanometer) 58 | 59 | box_lengths, box_angles = box_vectors_to_lengths_angles(box_vectors) 60 | 61 | # make a trajectory to compute the neighbors from 62 | traj = mdj.Trajectory( 63 | np.array([coords]), 64 | unitcell_lengths=[box_lengths], 65 | unitcell_angles=[box_angles], 66 | topology=json_to_mdtraj_topology(json_topology), 67 | ) 68 | 69 | neighbors_idxs = mdj.compute_neighbors( 70 | traj, cutoff, ligand_idxs, periodic=periodic 71 | )[0] 72 | 73 | # select the receptor atoms from the neighbors list 74 | binding_selection_idxs = np.intersect1d(neighbors_idxs, receptor_idxs) 75 | 76 | return binding_selection_idxs 77 | -------------------------------------------------------------------------------- /tasks/__init__.py: -------------------------------------------------------------------------------- 1 | from invoke import Collection, Task, task 2 | 3 | import inspect 4 | 5 | 6 | ## Utilities 7 | 8 | # these helper functions are for automatically listing all of the 9 | # functions defined in the tasks module 10 | 11 | def _is_mod_task(mod, func): 12 | return issubclass(type(func), Task) and inspect.getmodule(func) == mod 13 | 14 | def _get_functions(mod): 15 | """Get the invoke Task objects that are defined in the given 16 | module itself (i.e. not imported from another module).""" 17 | 18 | return {func.__name__ : func for func in mod.__dict__.values() 19 | if _is_mod_task(mod, func) } 20 | 21 | 22 | ## Namespace 23 | 24 | # add all of the modules to the CLI 25 | ns = Collection() 26 | 27 | ## Top-level 28 | 29 | from . import toplevel 30 | for func in _get_functions(toplevel).values(): 31 | ns.add_task(func) 32 | 33 | 34 | ## STUB: User-added modules 35 | # from user_modules import MODULES as user_modules 36 | 37 | # for module in user_modules: 38 | # ns.add_collection(module) 39 | 40 | ## Upstream 41 | 42 | from .modules import MODULES as modules 43 | 44 | for module in modules: 45 | ns.add_collection(module) 46 | 47 | ## Plugins 48 | 49 | try: 50 | # import all the user defined stuff and override 51 | from .plugins import PLUGIN_MODULES as plugins 52 | 53 | for module in plugins: 54 | ns.add_collection(module) 55 | 56 | except Exception as e: 57 | print("Loading plugins failed with an error; ignoring it:") 58 | print(e) 59 | -------------------------------------------------------------------------------- /tasks/config.py: -------------------------------------------------------------------------------- 1 | """User settings for a project.""" 2 | 3 | # load the system configuration. You can override these settings in 4 | # this module, but beware that it might break things 5 | from .sysconfig import * 6 | 7 | ## Customize these for all features 8 | 9 | PROJECT_SLUG = "wepy" 10 | 11 | VERSION = '1.1.0' 12 | 13 | 14 | ENV_METHOD = 'conda' 15 | -------------------------------------------------------------------------------- /tasks/modules/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | # SNIPPET: add this to import modules 3 | 4 | # should be copied in by the installation process 5 | from . import core 6 | from . import clean 7 | from . 
import env 8 | from . import git 9 | from . import py 10 | from . import docs 11 | from . import lxd 12 | from . import containers 13 | 14 | MODULES = [ 15 | core, 16 | clean, 17 | env, 18 | git, 19 | py, 20 | docs, 21 | lxd, 22 | containers, 23 | ] 24 | -------------------------------------------------------------------------------- /tasks/modules/clean.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | from ..config import ( 4 | CLEAN_EXPRESSIONS, 5 | ) 6 | 7 | ### User config examples 8 | 9 | # SNIPPET: expecting something like this 10 | # CLEAN_EXPRESSIONS = [ 11 | # "\"*~\"", 12 | # ] 13 | 14 | @task 15 | def ls(cx): 16 | 17 | for clean_expr in CLEAN_EXPRESSIONS: 18 | cx.run('find . -type f -name {} -print'.format(clean_expr)) 19 | 20 | @task(pre=[ls], default=True) 21 | def clean(cx): 22 | 23 | print("Deleting Targets") 24 | for clean_expr in CLEAN_EXPRESSIONS: 25 | cx.run('find . -type f -name {} -delete'.format(clean_expr)) 26 | 27 | -------------------------------------------------------------------------------- /tasks/modules/containers.py: -------------------------------------------------------------------------------- 1 | """Tasks for managing containers and clusters. 2 | 3 | While much of this might be done with special-purpose tools, we try 4 | to cover the approaches that we have actually used. 5 | 6 | Warning 7 | ------- 8 | 9 | This does not cover best practices at this time. 10 | 11 | """ 12 | import os 13 | from pathlib import Path 14 | 15 | from invoke import task 16 | 17 | 18 | from ..config import ( 19 | PROJECT_SLUG, 20 | CONTAINER_TOOL, 21 | ) 22 | 23 | ## Container definitions 24 | 25 | @task 26 | def build(cx, root=None): 27 | """Build all containers in dir `containers` using Dockerfiles.""" 28 | 29 | assert root is not None, \ 30 | "Must provide a root directory with expected structure." 31 | 32 | jig_name = Path(root).stem 33 | 34 | containers_dir = Path(root) / "input/containers" 35 | 36 | cx.run(f"mkdir -p {root}/_output/containers") 37 | 38 | print(containers_dir) 39 | for container in os.listdir(containers_dir): 40 | 41 | container_dir = containers_dir / container 42 | 43 | image_name = f"{PROJECT_SLUG}-{jig_name}-{container}" 44 | 45 | print(f"making: {image_name}") 46 | 47 | # remove if already in there 48 | cx.run(f"{CONTAINER_TOOL} rmi {image_name}", warn=True) 49 | 50 | # rebuild 51 | cx.run(f"{CONTAINER_TOOL} build -t {image_name} {container_dir}") 52 | 53 | cx.run(f"{CONTAINER_TOOL} image save {image_name} > {root}/_output/containers/{image_name}.tar") 54 | 55 | # remove from the index 56 | cx.run(f"{CONTAINER_TOOL} rmi {image_name}") 57 | 58 | @task 59 | def list_built(cx, root=None): 60 | """List the built containers in dirs (not container tool memory).""" 61 | 62 | assert root is not None, \ 63 | "Must provide a root directory with expected structure." 64 | 65 | images_dir = Path(root) / "_output/containers" 66 | 67 | image_names = [] 68 | for image_fname in os.listdir(images_dir): 69 | print(image_fname) 70 | 71 | image_name = Path(image_fname).stem 72 | 73 | image_names.append(image_name) 74 | 75 | return image_names 76 | 77 | 78 | @task 79 | def load(cx, root=None): 80 | """Load the containers into container tool local memory.""" 81 | 82 | assert root is not None, \ 83 | "Must provide a root directory with expected structure."
84 | 85 | 86 | 87 | jig_name = Path(root).stem 88 | 89 | images_dir = Path(root) / "_output/containers" 90 | 91 | image_names = list_built(cx, root=root) 92 | 93 | for image_name in image_names: 94 | cx.run(f"{CONTAINER_TOOL} load < {images_dir}/{image_name}.tar") 95 | 96 | @task 97 | def unload(cx, root=None): 98 | 99 | raise NotImplementedError 100 | 101 | assert root is not None, \ 102 | "Must provide a root directory with expected structure." 103 | 104 | 105 | 106 | jig_name = Path(root).stem 107 | 108 | images_dir = Path(root) / "_output/containers" 109 | 110 | image_names = list_built(cx, root=root) 111 | 112 | for image_name in image_names: 113 | cx.run(f"{CONTAINER_TOOL} rm {image_name}", warn=True) 114 | 115 | 116 | -------------------------------------------------------------------------------- /tasks/modules/core.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | import os.path as osp 4 | import os 5 | from pathlib import Path 6 | 7 | @task 8 | def sanity(cx): 9 | """Perform sanity check for jubeo""" 10 | 11 | print("All systems go!") 12 | 13 | 14 | @task 15 | def pin_tool_deps(cx): 16 | """Pins or upgrades the requirements.txt for the jubeo tooling from 17 | the requirements.in (from the upstream repo) and the 18 | local.requirements.in (for project specific tooling dependencies) 19 | files.""" 20 | 21 | req_in = Path('.jubeo') / "requirements.in" 22 | local_req_in = Path('.jubeo') / "local.requirements.in" 23 | req_txt = Path('.jubeo') / "requirements.txt" 24 | 25 | assert osp.exists(req_in), "No 'requirements.in' file" 26 | 27 | # add the local reqs if given 28 | if osp.exists(local_req_in): 29 | req_str = f"{req_in} {local_req_in}" 30 | else: 31 | req_str = req_in 32 | 33 | cx.run(f"pip-compile --upgrade --output-file={req_txt} {req_str}") 34 | -------------------------------------------------------------------------------- /tasks/modules/git.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | from ..config import ( 4 | INITIAL_VERSION, 5 | GIT_LFS_TARGETS, 6 | VERSION, 7 | ) 8 | 9 | ## Constants 10 | 11 | VCS_RELEASE_TAG_TEMPLATE = "v{}" 12 | 13 | @task 14 | def lfs_track(cx): 15 | """Update all the files that need tracking via git-lfs.""" 16 | 17 | for lfs_target in GIT_LFS_TARGETS: 18 | cx.run("git lfs track {}".format(lfs_target)) 19 | 20 | 21 | @task 22 | def init(cx): 23 | 24 | tag_string = VCS_RELEASE_TAG_TEMPLATE.format(INITIAL_VERSION) 25 | 26 | cx.run("git init && " 27 | "git add -A && " 28 | "git commit -m 'initial commit' && " 29 | f"git tag -a {tag_string} -m 'initialization release'") 30 | 31 | 32 | 33 | @task 34 | def publish(cx): 35 | 36 | tag_string = VCS_RELEASE_TAG_TEMPLATE.format(VERSION) 37 | 38 | cx.run(f"git push origin {tag_string}") 39 | 40 | 41 | @task 42 | def release(cx): 43 | 44 | tag_string = VCS_RELEASE_TAG_TEMPLATE.format(VERSION) 45 | 46 | print("Releasing: ", VERSION, "with tag: ", tag_string) 47 | 48 | cx.run(f"git tag -a {tag_string} -m 'See the changelog for details'") 49 | -------------------------------------------------------------------------------- /tasks/modules/lxd.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | from ..config import ( 4 | PROJECT_SLUG, 5 | ) 6 | 7 | @task 8 | def copy_ssh(cx, name='dev'): 9 | """Copy SSH keys to a container.""" 10 | 11 | cx.run(f'ssh-keygen -f "$HOME/.ssh/known_hosts"
-R "{PROJECT_SLUG}.dev.lxd"') 12 | cx.run(f"ssh-copy-id {PROJECT_SLUG}.{name}.lxd") 13 | 14 | @task 15 | def push_profile(cx, name='dev'): 16 | """Update your dotfiles in a container.""" 17 | 18 | cx.run(f"fab -H {PROJECT_SLUG}.{name} push-profile") 19 | 20 | @task 21 | def bootstrap(cx, name='dev'): 22 | """Bootstrap the container from a bare image. 23 | 24 | Not necessary if you started from a premade dev env container. 25 | 26 | """ 27 | 28 | cx.run(f"fab -H {PROJECT_SLUG}.{name} bootstrap") 29 | 30 | @task 31 | def push(cx, name='dev'): 32 | """Push the files for this project 33 | 34 | Ignores according to the gitignore file 35 | 36 | """ 37 | 38 | cx.run(f"fab -H {PROJECT_SLUG}.{name} push-project") 39 | 40 | @task 41 | def pull(cx, name='dev'): 42 | """Pull the files for this project 43 | 44 | Ignores according to the gitignore file 45 | 46 | """ 47 | 48 | cx.run(f"fab -H {PROJECT_SLUG}.{name} pull-project") 49 | -------------------------------------------------------------------------------- /tasks/plugins/__init__.py: -------------------------------------------------------------------------------- 1 | """Specify which plugins to load""" 2 | 3 | # import plugins: 4 | 5 | from . import custom 6 | from . import tests 7 | 8 | # specify which plugins to install, the custom one is included by 9 | # default to get users going 10 | PLUGIN_MODULES = [ 11 | custom, 12 | tests, 13 | ] 14 | -------------------------------------------------------------------------------- /tasks/plugins/custom.py: -------------------------------------------------------------------------------- 1 | """Put user defined tasks in the plugins folder. You can start with 2 | some customizations in this file which is included by default.""" 3 | 4 | from invoke import task 5 | 6 | 7 | -------------------------------------------------------------------------------- /tasks/plugins/tests.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | from ..config import ( 4 | REPORTS_DIR, 5 | ) 6 | 7 | import sys 8 | import os 9 | import os.path as osp 10 | from pathlib import Path 11 | 12 | import pytest 13 | 14 | 15 | # TODO: this should be done better 16 | @task 17 | def integration(cx, tag=None, node='node_minor'): 18 | """Run the integration tests. 19 | 20 | This is a large test suite and needs specific hardware resources 21 | in order to run all of them. For this reason there are different 22 | test objects which are tagged as different grades of nodes. The 23 | idea is that depending on the machine you are able to test on you 24 | will still be able to run some of the tests to test pure-python 25 | code paths or code paths that involve GPUs etc. 26 | 27 | The node types are: 28 | 29 | - minor :: no GPUs 30 | - dev :: at least 1 GPU 31 | - production :: more than 1 GPU 32 | 33 | You can use these as a 'mark' selection when running pytest or use 34 | it as the option for this command. 
35 | 36 | """ 37 | 38 | lines = [ 39 | f"coverage run -m pytest ", 40 | f"-m 'not interactive' ", 41 | f"tests/test_integration", 42 | ] 43 | 44 | if node == 'minor': 45 | node = '' 46 | 47 | options = { 48 | "html" : ( 49 | "--html=reports/pytest/{tag}/integration/report.html" 50 | if tag is not None 51 | else "" 52 | ), 53 | 54 | "node" : node, 55 | } 56 | 57 | if tag is None: 58 | cx.run('heerr', 59 | warn=True) 60 | else: 61 | cx.run(f"coverage run -m pytest -m 'not interactive' tests/test_integration", 62 | warn=True) 63 | -------------------------------------------------------------------------------- /tasks/sysconfig.py: -------------------------------------------------------------------------------- 1 | """Configuration managed by the system. All changes here will be 2 | overwrote upon update. 3 | 4 | Typically gives a collection of good defaults. Override in config.py 5 | 6 | """ 7 | 8 | ### Cleaning 9 | 10 | CLEAN_EXPRESSIONS = [ 11 | "\"*~\"", 12 | ] 13 | 14 | 15 | ### Envs 16 | 17 | # which virtual environment tool to use: venv or conda 18 | ENV_METHOD = 'venv' 19 | 20 | # which env spec to use by default 21 | DEFAULT_ENV = 'dev' 22 | 23 | # directory where env specs are read from 24 | ENVS_DIR = 'envs' 25 | 26 | # Python version source, this is how we get the different python 27 | # versions. This is a keyword not a path 28 | PYTHON_VERSION_SOURCE = "pyenv" 29 | 30 | # which versions will be requested to be installed, in the order of 31 | # precendence for interactive work 32 | PYTHON_VERSIONS = ( 33 | '3.8.1', 34 | '3.7.6', 35 | '3.6.10', 36 | ) 37 | 38 | 39 | ### Git 40 | 41 | INITIAL_VERSION = '0.0.0a0.dev0' 42 | GIT_LFS_TARGETS = [] 43 | VERSION = '0.0.0a0.dev0' 44 | 45 | 46 | ### Python Code base 47 | 48 | REPORTS_DIR = "reports" 49 | 50 | ## docs 51 | 52 | LOGO_DIR = "info/logo" 53 | 54 | ORG_DOCS_SOURCES = [ 55 | 'changelog', 56 | 'dev_guide', 57 | 'general_info', 58 | 'installation', 59 | 'introduction', 60 | 'news', 61 | 'quick_start', 62 | 'troubleshooting', 63 | 'users_guide', 64 | 'reference', 65 | 'news-articles', 66 | ] 67 | 68 | RST_DOCS_SOURCES = [ 69 | 'glossary', 70 | 'api', 71 | ] 72 | 73 | BIB_DOCS_SOURCES = [ 74 | 'docs', 75 | ] 76 | 77 | PYPIRC="$HOME/.pypirc" 78 | TESTING_PYPIRC="$HOME/.pypirc" 79 | 80 | # this is the name of the pyenv "version" to use for creating and 81 | # activating conda 82 | PYENV_CONDA_NAME = 'miniconda3-latest' 83 | 84 | ## tests 85 | TESTS_DIR = "tests" 86 | 87 | ## benchmarks 88 | BENCHMARKS_DIR = "benchmarks" 89 | 90 | # the range of commits to use for running all of the asv regression 91 | # benchmarks. See documentation in `asv run --help` for details. 92 | # Defaults to using the HASHFILE. 
93 | ASV_RANGE = "HASHFILE:benchmark_selection.list" 94 | 95 | 96 | ### Containers 97 | 98 | # choose the container tool, options really are just: docker or podman 99 | # since these two are compatible 100 | CONTAINER_TOOL = "podman" 101 | -------------------------------------------------------------------------------- /tasks/toplevel.py: -------------------------------------------------------------------------------- 1 | """User editable top-level commands""" 2 | 3 | from invoke import task 4 | 5 | from .config import * 6 | 7 | -------------------------------------------------------------------------------- /templates/envs/conda_blank/dev.requirements.list: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /templates/envs/conda_blank/env.yaml: -------------------------------------------------------------------------------- 1 | name: wepy-dev 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | 7 | -------------------------------------------------------------------------------- /templates/envs/conda_blank/pyversion.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /templates/envs/conda_blank/requirements.in: -------------------------------------------------------------------------------- 1 | --index-url https://pypi.python.org/simple/ 2 | -------------------------------------------------------------------------------- /templates/envs/conda_blank/self.requirements.txt: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /templates/examples/org/README.org: -------------------------------------------------------------------------------- 1 | * Example of an example 2 | -------------------------------------------------------------------------------- /templates/examples/org/input/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/templates/examples/org/input/.keep -------------------------------------------------------------------------------- /templates/examples/org/source/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/templates/examples/org/source/.keep -------------------------------------------------------------------------------- /templates/examples/org/tasks.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | import os 4 | import os.path as osp 5 | from pathlib import Path 6 | 7 | def tangle_orgfile(cx, file_path): 8 | """Tangle the target file using emacs in batch mode. 
Implicitly dumps 9 | things relative to the file.""" 10 | 11 | cx.run(f"emacs -Q --batch -l org {file_path} -f org-babel-tangle") 12 | 13 | @task 14 | def init(cx): 15 | cx.run("mkdir -p _tangle_source") 16 | cx.run("mkdir -p _output") 17 | 18 | @task 19 | def clean(cx): 20 | cx.run("rm -rf _tangle_source") 21 | cx.run("rm -rf _output") 22 | 23 | @task(pre=[init]) 24 | def tangle(cx): 25 | tangle_orgfile(cx, "README.org") 26 | cx.run(f"chmod ug+x ./_tangle_source/*.bash", warn=True) 27 | cx.run(f"chmod ug+x ./_tangle_source/*.sh", warn=True) 28 | cx.run(f"chmod ug+x ./_tangle_source/*.py", warn=True) 29 | 30 | @task 31 | def clean_env(cx): 32 | cx.run("rm -rf _env") 33 | 34 | @task(pre=[init]) 35 | def env(cx): 36 | """Create the environment from the specs in 'env'. Must have the 37 | entire repository available as it uses the tooling from it. 38 | 39 | """ 40 | 41 | example_name = Path(os.getcwd()).stem 42 | 43 | with cx.cd("../../../"): 44 | cx.run(f"inv docs.env-example -n {example_name}") 45 | -------------------------------------------------------------------------------- /templates/jigs/org/README.org: -------------------------------------------------------------------------------- 1 | * Example of an example 2 | -------------------------------------------------------------------------------- /templates/jigs/org/input/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/templates/jigs/org/input/.keep -------------------------------------------------------------------------------- /templates/jigs/org/source/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/templates/jigs/org/source/.keep -------------------------------------------------------------------------------- /templates/jigs/org/tasks.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | import os 4 | import os.path as osp 5 | from pathlib import Path 6 | 7 | def tangle_orgfile(cx, file_path): 8 | """Tangle the target file using emacs in batch mode. Implicitly dumps 9 | things relative to the file.""" 10 | 11 | cx.run(f"emacs -Q --batch -l org {file_path} -f org-babel-tangle") 12 | 13 | @task 14 | def init(cx): 15 | cx.run("mkdir -p _tangle_source") 16 | cx.run("mkdir -p _output") 17 | 18 | @task 19 | def clean(cx): 20 | cx.run("rm -rf _tangle_source") 21 | cx.run("rm -rf _output") 22 | 23 | @task(pre=[init]) 24 | def tangle(cx): 25 | tangle_orgfile(cx, "README.org") 26 | cx.run(f"chmod ug+x ./_tangle_source/*.bash", warn=True) 27 | cx.run(f"chmod ug+x ./_tangle_source/*.sh", warn=True) 28 | cx.run(f"chmod ug+x ./_tangle_source/*.py", warn=True) 29 | 30 | @task 31 | def clean_env(cx): 32 | cx.run("rm -rf _env") 33 | 34 | @task(pre=[init]) 35 | def env(cx): 36 | """Create the environment from the specs in 'env'. Must have the 37 | entire repository available as it uses the tooling from it. 
38 | 39 | """ 40 | 41 | jig_name = Path(os.getcwd()).stem 42 | 43 | with cx.cd("../../../"): 44 | cx.run(f"inv py.env-jig -n {jig_name}") 45 | -------------------------------------------------------------------------------- /templates/tutorials/jupyter/README.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [] 9 | } 10 | ], 11 | "metadata": { 12 | "kernelspec": { 13 | "display_name": "Python 3", 14 | "language": "python", 15 | "name": "python3" 16 | }, 17 | "language_info": { 18 | "codemirror_mode": { 19 | "name": "ipython", 20 | "version": 3 21 | }, 22 | "file_extension": ".py", 23 | "mimetype": "text/x-python", 24 | "name": "python", 25 | "nbconvert_exporter": "python", 26 | "pygments_lexer": "ipython3", 27 | "version": "3.8.1" 28 | } 29 | }, 30 | "nbformat": 4, 31 | "nbformat_minor": 4 32 | } 33 | -------------------------------------------------------------------------------- /templates/tutorials/jupyter/input/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/templates/tutorials/jupyter/input/.keep -------------------------------------------------------------------------------- /templates/tutorials/jupyter/tasks.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | import os 4 | import os.path as osp 5 | from pathlib import Path 6 | 7 | @task 8 | def init(cx): 9 | cx.run("mkdir -p _output") 10 | cx.run("mkdir -p _tangle_source") 11 | 12 | @task 13 | def clean(cx): 14 | cx.run("rm -rf _output/*") 15 | cx.run("rm -rf _tangle_source/*") 16 | 17 | @task(pre=[init]) 18 | def tangle(cx): 19 | cx.run("jupyter-nbconvert --to 'python' --output-dir=_tangle_source README.ipynb") 20 | 21 | 22 | @task 23 | def clean_env(cx): 24 | cx.run("rm -rf _env") 25 | 26 | @task(pre=[init]) 27 | def env(cx): 28 | """Create the environment from the specs in 'env'. Must have the 29 | entire repository available as it uses the tooling from it. 
30 | 31 | """ 32 | 33 | example_name = Path(os.getcwd()).stem 34 | 35 | with cx.cd("../../../"): 36 | cx.run(f"inv docs.env-tutorial -n {example_name}") 37 | -------------------------------------------------------------------------------- /templates/tutorials/org/README.org: -------------------------------------------------------------------------------- 1 | 2 | * Your Tutorial Here 3 | -------------------------------------------------------------------------------- /templates/tutorials/org/input/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/templates/tutorials/org/input/.keep -------------------------------------------------------------------------------- /templates/tutorials/org/source/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/templates/tutorials/org/source/.keep -------------------------------------------------------------------------------- /templates/tutorials/org/tasks.py: -------------------------------------------------------------------------------- 1 | from invoke import task 2 | 3 | import os 4 | import os.path as osp 5 | from pathlib import Path 6 | 7 | def tangle_orgfile(cx, file_path): 8 | """Tangle the target file using emacs in batch mode. Implicitly dumps 9 | things relative to the file.""" 10 | 11 | cx.run(f"emacs -Q --batch -l org {file_path} -f org-babel-tangle") 12 | 13 | @task 14 | def init(cx): 15 | cx.run("mkdir -p _tangle_source") 16 | cx.run("mkdir -p _output") 17 | 18 | @task 19 | def clean(cx): 20 | cx.run("rm -rf _tangle_source") 21 | cx.run("rm -rf _output") 22 | 23 | @task(pre=[init]) 24 | def tangle(cx): 25 | tangle_orgfile(cx, "README.org") 26 | cx.run(f"chmod ug+x ./_tangle_source/*.bash", warn=True) 27 | cx.run(f"chmod ug+x ./_tangle_source/*.sh", warn=True) 28 | cx.run(f"chmod ug+x ./_tangle_source/*.py", warn=True) 29 | 30 | @task 31 | def clean_env(cx): 32 | cx.run("rm -rf _env") 33 | 34 | @task(pre=[init]) 35 | def env(cx): 36 | """Create the environment from the specs in 'env'. Must have the 37 | entire repository available as it uses the tooling from it. 
38 | 39 | """ 40 | 41 | example_name = Path(os.getcwd()).stem 42 | 43 | with cx.cd("../../../"): 44 | cx.run(f"inv docs.env-tutorial -n {example_name}") 45 | -------------------------------------------------------------------------------- /tests/docs/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/tests/docs/.keep -------------------------------------------------------------------------------- /tests/docs/test_examples/test_Lennard_Jones_Pair.py: -------------------------------------------------------------------------------- 1 | # Standard Library 2 | import os 3 | import os.path as osp 4 | from pathlib import Path 5 | 6 | # Third Party Library 7 | import pytest 8 | from pytest_shutil.cmdline import chdir 9 | from pytest_shutil.run import run, run_as_main 10 | 11 | ### Tests 12 | 13 | 14 | def test_dir(datadir_factory, printer): 15 | example = "Lennard_Jones_Pair" 16 | 17 | datadir = datadir_factory.mkdatadir(f"../_examples/{example}") 18 | 19 | assert (datadir / "README.org").is_file() 20 | assert (datadir / "source").is_dir() 21 | 22 | 23 | def test_trivial_run(datadir_factory, printer): 24 | example = "Lennard_Jones_Pair" 25 | 26 | datadir = datadir_factory.mkdatadir(f"../_examples/{example}") 27 | 28 | with chdir(datadir): 29 | run( 30 | [ 31 | "python", 32 | "source/trivial_run.py", 33 | ], 34 | ) 35 | 36 | 37 | def test_sim_maker_run(datadir_factory, printer): 38 | example = "Lennard_Jones_Pair" 39 | 40 | datadir = datadir_factory.mkdatadir(f"../_examples/{example}") 41 | 42 | with chdir(datadir): 43 | run( 44 | [ 45 | "python", 46 | "source/sim_maker_run.py", 47 | "2", 48 | "100", 49 | "10", 50 | "1", 51 | "Reference", 52 | "NoResampler", 53 | ], 54 | ) 55 | 56 | run( 57 | [ 58 | "python", 59 | "source/sim_maker_run.py", 60 | "2", 61 | "100", 62 | "10", 63 | "1", 64 | "Reference", 65 | "WExploreResampler", 66 | ], 67 | ) 68 | run( 69 | [ 70 | "python", 71 | "source/sim_maker_run.py", 72 | "2", 73 | "100", 74 | "10", 75 | "1", 76 | "Reference", 77 | "REVOResampler", 78 | ], 79 | ) 80 | run( 81 | [ 82 | "python", 83 | "source/sim_maker_run.py", 84 | "2", 85 | "100", 86 | "10", 87 | "1", 88 | "CPU", 89 | "NoResampler", 90 | ], 91 | ) 92 | run( 93 | [ 94 | "python", 95 | "source/sim_maker_run.py", 96 | "2", 97 | "100", 98 | "10", 99 | "1", 100 | "CPU", 101 | "WExploreResampler", 102 | ], 103 | ) 104 | run( 105 | [ 106 | "python", 107 | "source/sim_maker_run.py", 108 | "2", 109 | "100", 110 | "10", 111 | "1", 112 | "CPU", 113 | "REVOResampler", 114 | ], 115 | ) 116 | 117 | 118 | def test_we_analysis(datadir_factory, printer): 119 | example = "Lennard_Jones_Pair" 120 | 121 | datadir = datadir_factory.mkdatadir(f"../_examples/{example}") 122 | 123 | printer("Testing from inside test_we_analysis") 124 | 125 | with chdir(datadir): 126 | printer(f"Datadir: {datadir}") 127 | printer(f"Current dir: {os.getcwd()}") 128 | 129 | out = run(["python", "source/we.py", "10", "100", "10"]) 130 | 131 | printer(out) 132 | 133 | assert (datadir / "_output/we/results.wepy.h5").is_file() 134 | assert (datadir / "_output/we/wepy.dash.org").is_file() 135 | 136 | out = run( 137 | [ 138 | "python", 139 | "source/compute_distance_observable.py", 140 | ] 141 | ) 142 | 143 | printer(out) 144 | 145 | out = run( 146 | [ 147 | "python", 148 | "source/state_network.py", 149 | ] 150 | ) 151 | 152 | printer(out) 153 | 154 | assert (datadir / "_output/state.dcd").is_file() 155 | assert 
(datadir / "_output/random_macrostates.csn.gexf").is_file() 156 | 157 | ### Tangled sources 158 | 159 | out = run( 160 | [ 161 | "python", 162 | "_tangle_source/inspect_observable.py", 163 | ] 164 | ) 165 | 166 | printer(out) 167 | 168 | out = run( 169 | [ 170 | "bash", 171 | "./_tangle_source/run0.bash", 172 | ] 173 | ) 174 | 175 | printer(out) 176 | 177 | out = run( 178 | [ 179 | "bash", 180 | "./_tangle_source/run1.bash", 181 | ] 182 | ) 183 | -------------------------------------------------------------------------------- /tests/docs/test_examples/test_Lysozyme.py: -------------------------------------------------------------------------------- 1 | # Standard Library 2 | import os 3 | import os.path as osp 4 | from pathlib import Path 5 | 6 | # Third Party Library 7 | from pytest_check import check 8 | from pytest_shutil.cmdline import chdir 9 | from pytest_shutil.run import run, run_as_main 10 | 11 | ### Tests 12 | 13 | EXAMPLE = "Lysozyme" 14 | 15 | 16 | def test_dir(datadir_factory, printer): 17 | datadir = datadir_factory.mkdatadir(f"../_examples/{EXAMPLE}") 18 | 19 | assert (datadir / "README.org").is_file() 20 | assert (datadir / "input").is_dir() 21 | assert (datadir / "source").is_dir() 22 | 23 | 24 | def test_runs(datadir_factory, printer): 25 | datadir = datadir_factory.mkdatadir(f"../_examples/{EXAMPLE}") 26 | 27 | with chdir(datadir): 28 | run( 29 | [ 30 | "bash", 31 | "_tangle_source/run0.bash", 32 | ], 33 | ) 34 | 35 | 36 | def test_we(datadir_factory, printer): 37 | datadir = datadir_factory.mkdatadir(f"../_examples/{EXAMPLE}") 38 | 39 | with chdir(datadir): 40 | with check: 41 | printer("running CPU-NoResampler") 42 | run( 43 | ["python", "source/we.py", "2", "2", "10", "1", "CPU", "NoResampler"], 44 | ) 45 | 46 | printer("running CPU-REVOResampler") 47 | with check: 48 | print("CPU-REVOResampler") 49 | run( 50 | ["python", "source/we.py", "2", "2", "10", "1", "CPU", "REVOResampler"], 51 | ) 52 | 53 | printer("running CPU-WExploreResampler") 54 | with check: 55 | print("CPU_WExploreResampler") 56 | run( 57 | [ 58 | "python", 59 | "source/we.py", 60 | "2", 61 | "2", 62 | "10", 63 | "1", 64 | "CPU", 65 | "WExploreResampler", 66 | ], 67 | ) 68 | -------------------------------------------------------------------------------- /tests/docs/test_examples/test_RandomWalk.py: -------------------------------------------------------------------------------- 1 | # Standard Library 2 | import os 3 | import os.path as osp 4 | from pathlib import Path 5 | 6 | # Third Party Library 7 | from pytest_shutil.cmdline import chdir 8 | from pytest_shutil.run import run, run_as_main 9 | 10 | ### Tests 11 | 12 | EXAMPLE = "RandomWalk" 13 | 14 | 15 | def test_dir(datadir_factory, printer): 16 | datadir = datadir_factory.mkdatadir(f"../_examples/{EXAMPLE}") 17 | 18 | assert (datadir / "README.org").is_file() 19 | assert (datadir / "input").is_dir() 20 | assert (datadir / "source").is_dir() 21 | 22 | 23 | def test_runs(datadir_factory, printer): 24 | datadir = datadir_factory.mkdatadir(f"../_examples/{EXAMPLE}") 25 | 26 | with chdir(datadir): 27 | run( 28 | [ 29 | "bash", 30 | "_tangle_source/run0.bash", 31 | ], 32 | ) 33 | 34 | 35 | def test_scripts(datadir_factory, printer): 36 | datadir = datadir_factory.mkdatadir(f"../_examples/{EXAMPLE}") 37 | 38 | with chdir(datadir): 39 | run( 40 | ["python", "source/rw_conventional.py", "1", "10", "10", "3"], 41 | ) 42 | 43 | run( 44 | ["python", "source/rw_revo.py", "1", "10", "10", "3"], 45 | ) 46 | 47 | run( 48 | ["python", "source/rw_wexplore.py", "1", 
"10", "10", "3"], 49 | ) 50 | -------------------------------------------------------------------------------- /tests/docs/test_pages/test_pages.py: -------------------------------------------------------------------------------- 1 | """Test the main documentation pages.""" 2 | 3 | # Standard Library 4 | import os 5 | import os.path as osp 6 | from pathlib import Path 7 | 8 | # Third Party Library 9 | from pytest_shutil.cmdline import chdir 10 | from pytest_shutil.run import run, run_as_main 11 | 12 | 13 | def test_dir_structure(datadir_factory): 14 | datadir = Path(datadir_factory.mkdatadir("../_tangled_docs")) 15 | 16 | assert (datadir / "README").is_dir() 17 | assert (datadir / "info").is_dir() 18 | 19 | assert (datadir / "README/README.org").is_file() 20 | assert (datadir / "info/README/README.org").is_file() 21 | 22 | 23 | def test_readme(datadir_factory): 24 | datadir = Path(datadir_factory.mkdatadir("../_tangled_docs/README")) 25 | 26 | with chdir(datadir): 27 | out = run( 28 | [ 29 | "bash", 30 | "check_installation.bash", 31 | ], 32 | ) 33 | 34 | 35 | def test_installation(datadir_factory): 36 | datadir = Path(datadir_factory.mkdatadir("../_tangled_docs/info/installation")) 37 | 38 | with chdir(datadir): 39 | out = run( 40 | [ 41 | "bash", 42 | "check_installation.bash", 43 | ], 44 | ) 45 | 46 | 47 | def test_quick_start(datadir_factory): 48 | datadir = Path(datadir_factory.mkdatadir("../_tangled_docs/info/quick_start")) 49 | 50 | with chdir(datadir): 51 | out = run( 52 | [ 53 | "bash", 54 | "test_drive.bash", 55 | ], 56 | ) 57 | 58 | out = run( 59 | [ 60 | "python", 61 | "noresampler_example.py", 62 | ], 63 | ) 64 | 65 | out = run( 66 | [ 67 | "bash", 68 | "noresampler_example.bash", 69 | ], 70 | ) 71 | 72 | 73 | def test_introduction(datadir_factory): 74 | # STUB 75 | # datadir = Path(datadir_factory.mkdatadir('../_tangled_docs/info/introduction')) 76 | # with chdir(datadir): 77 | # pass 78 | 79 | pass 80 | 81 | 82 | def test_users_guide(datadir_factory): 83 | # STUB 84 | # datadir = Path(datadir_factory.mkdatadir('../_tangled_docs/info/users_guide')) 85 | # with chdir(datadir): 86 | # pass 87 | 88 | pass 89 | 90 | 91 | def test_howtos(datadir_factory): 92 | # STUB 93 | # datadir = Path(datadir_factory.mkdatadir('../_tangled_docs/info/howtos')) 94 | # with chdir(datadir): 95 | # pass 96 | 97 | pass 98 | 99 | 100 | def test_reference(datadir_factory): 101 | datadir = Path(datadir_factory.mkdatadir("../_tangled_docs/info/reference")) 102 | with chdir(datadir): 103 | out = run( 104 | [ 105 | "python", 106 | "decision_fields_0.py", 107 | ], 108 | ) 109 | 110 | out = run( 111 | [ 112 | "python", 113 | "record_fields_0.py", 114 | ], 115 | ) 116 | 117 | 118 | def test_troubleshooting(datadir_factory): 119 | # STUB 120 | # datadir = Path(datadir_factory.mkdatadir('../_tangled_docs/info/troubleshooting')) 121 | # with chdir(datadir): 122 | # pass 123 | 124 | pass 125 | -------------------------------------------------------------------------------- /tests/docs/test_tutorials/test_Orchestrator.py: -------------------------------------------------------------------------------- 1 | # Standard Library 2 | import os 3 | import os.path as osp 4 | from pathlib import Path 5 | 6 | # Third Party Library 7 | from pytest_shutil.cmdline import chdir 8 | from pytest_shutil.run import run, run_as_main 9 | 10 | ### Tests 11 | 12 | EXAMPLE = "Orchestrator" 13 | 14 | 15 | def test_dir(datadir_factory, printer): 16 | datadir = datadir_factory.mkdatadir(f"../_examples/{EXAMPLE}") 17 | 18 | assert (datadir 
/ "README.org").is_file() 19 | assert (datadir / "input").is_dir() 20 | assert (datadir / "source").is_dir() 21 | 22 | 23 | def test_runs(datadir_factory, printer): 24 | datadir = datadir_factory.mkdatadir(f"../_examples/{EXAMPLE}") 25 | 26 | with chdir(datadir): 27 | run( 28 | [ 29 | "bash", 30 | "_tangle_source/run0.bash", 31 | ], 32 | ) 33 | 34 | 35 | def test_scripts(datadir_factory, printer): 36 | datadir = datadir_factory.mkdatadir(f"../_examples/{EXAMPLE}") 37 | 38 | with chdir(datadir): 39 | run( 40 | ["python", "source/make_orchestrator.py", "1", "10", "10", "3"], 41 | ) 42 | -------------------------------------------------------------------------------- /tests/docs/test_tutorials/test_data_analysis.py: -------------------------------------------------------------------------------- 1 | # Standard Library 2 | import os 3 | import os.path as osp 4 | from pathlib import Path 5 | 6 | # Third Party Library 7 | from pytest_shutil.cmdline import chdir 8 | from pytest_shutil.run import run, run_as_main 9 | 10 | ### Tests 11 | 12 | 13 | def test_tutorial(datadir_factory, printer): 14 | tutorial = "data_analysis" 15 | 16 | datadir = datadir_factory.mkdatadir(f"../_tutorials/{tutorial}") 17 | 18 | assert (datadir / "README.ipynb").is_file() 19 | assert (datadir / "input").is_dir() 20 | 21 | with chdir(datadir): 22 | run( 23 | [ 24 | "python", 25 | "_tangle_source/README.py", 26 | ], 27 | ) 28 | 29 | assert (datadir / "_output/results_run1.wepy.h5").exists() 30 | assert (datadir / "_output/results_run2.wepy.h5").exists() 31 | assert (datadir / "_output/results_run3.wepy.h5").exists() 32 | 33 | assert (datadir / "_output/lj-pair.pdb").exists() 34 | assert (datadir / "_output/lj-pair_walker_lineage").exists() 35 | -------------------------------------------------------------------------------- /tests/docs/test_tutorials/test_extended_test_drive.py: -------------------------------------------------------------------------------- 1 | # Standard Library 2 | import os 3 | import os.path as osp 4 | from pathlib import Path 5 | 6 | # Third Party Library 7 | from pytest_check import check 8 | from pytest_shutil.cmdline import chdir 9 | from pytest_shutil.run import run, run_as_main 10 | 11 | ### Tests 12 | 13 | 14 | def test_tutorial(datadir_factory, printer): 15 | tutorial = "extended_test_drive" 16 | 17 | datadir = datadir_factory.mkdatadir(f"../_tutorials/{tutorial}") 18 | 19 | assert (datadir / "README.org").is_file() 20 | assert (datadir / "input").is_dir() 21 | 22 | 23 | def test_run0(datadir_factory, printer): 24 | tutorial = "extended_test_drive" 25 | 26 | datadir = datadir_factory.mkdatadir(f"../_tutorials/{tutorial}") 27 | 28 | with chdir(datadir): 29 | # check that the help message runs 30 | 31 | run( 32 | [ 33 | "bash", 34 | "_tangle_source/run-help.bash", 35 | ], 36 | ) 37 | 38 | # default Run with WExplore 39 | 40 | run0_out = run( 41 | [ 42 | "bash", 43 | "_tangle_source/run0.bash", 44 | ], 45 | ) 46 | 47 | with check: 48 | assert ( 49 | datadir / "_tangle_source/expected_run0_ls.txt" 50 | ).read_text() == run0_out 51 | 52 | assert (datadir / "_output/run0/root.wepy.h5").exists() 53 | assert (datadir / "_output/run0/root.dash.org").exists() 54 | assert (datadir / "_output/run0/root.init_top.pdb").exists() 55 | assert (datadir / "_output/run0/root.walkers.dcd").exists() 56 | 57 | # REVO run 58 | 59 | run( 60 | [ 61 | "bash", 62 | "_tangle_source/revo_run.bash", 63 | ], 64 | ) 65 | 66 | assert (datadir / "_output/revo_run/root.wepy.h5").exists() 67 | assert (datadir / 
"_output/revo_run/root.dash.org").exists() 68 | assert (datadir / "_output/revo_run/root.init_top.pdb").exists() 69 | assert (datadir / "_output/revo_run/root.walkers.dcd").exists() 70 | 71 | # No run 72 | 73 | run( 74 | [ 75 | "bash", 76 | "_tangle_source/no_run.bash", 77 | ], 78 | ) 79 | 80 | assert (datadir / "_output/no_run/root.wepy.h5").exists() 81 | assert (datadir / "_output/no_run/root.dash.org").exists() 82 | assert (datadir / "_output/no_run/root.init_top.pdb").exists() 83 | assert (datadir / "_output/no_run/root.walkers.dcd").exists() 84 | 85 | ## analysis 86 | 87 | # part 0 88 | analysis0_out = run( 89 | [ 90 | "python", 91 | "_tangle_source/analysis0.py", 92 | ], 93 | ) 94 | 95 | assert (datadir / "_output/run0/traj0.dcd").exists() 96 | assert (datadir / "_output/run0/last_cycle.dcd").exists() 97 | 98 | with check: 99 | assert ( 100 | datadir / "_tangle_source/test_analysis_0.txt" 101 | ).read_text() == analysis0_out.strip() 102 | -------------------------------------------------------------------------------- /tests/docs/test_tutorials/test_tutorials.py: -------------------------------------------------------------------------------- 1 | """Test the examples library.""" 2 | 3 | # Standard Library 4 | import os 5 | import os.path as osp 6 | from pathlib import Path 7 | 8 | # Third Party Library 9 | import delegator 10 | 11 | # the helper modules for testing 12 | from myutils import cd 13 | 14 | ## write one test per example 15 | -------------------------------------------------------------------------------- /tests/integration/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/tests/integration/.keep -------------------------------------------------------------------------------- /tests/integration/conftest.py: -------------------------------------------------------------------------------- 1 | # Third Party Library 2 | import pytest 3 | 4 | 5 | # using this to get rid of the warning without having to put it in my 6 | # config file 7 | def pytest_configure(config): 8 | config.addinivalue_line("markers", "interactive: tests which give you the debugger") 9 | -------------------------------------------------------------------------------- /tests/unit/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ADicksonLab/wepy/0bed04e0adcaa81ef4ea670588874285b2670db1/tests/unit/.keep -------------------------------------------------------------------------------- /tests/unit/test_work_mapper/test_mapper.py: -------------------------------------------------------------------------------- 1 | # Standard Library 2 | import logging 3 | 4 | logger = logging.getLogger(__name__) 5 | # Standard Library 6 | import multiprocessing as mp 7 | import time 8 | from copy import deepcopy 9 | 10 | # Third Party Library 11 | import pytest 12 | 13 | # First Party Library 14 | from wepy.walker import Walker, WalkerState 15 | from wepy.work_mapper.mapper import Mapper, TaskException 16 | from wepy.work_mapper.task_mapper import ( 17 | TaskMapper, 18 | TaskProcessException, 19 | WalkerTaskProcess, 20 | ) 21 | from wepy.work_mapper.worker import Worker, WorkerException, WorkerMapper 22 | 23 | ARGS = (0, 1, 2) 24 | 25 | 26 | def gen_walkers(): 27 | return [Walker(WalkerState(**{"num": arg}), 1 / len(ARGS)) for arg in ARGS] 28 | 29 | 30 | # test basic functionality 31 | def task_pass(walker): 32 | # simulate it 
doing work: increment the walker's state by one 33 | n = walker.state["num"] 34 | return Walker(WalkerState(**{"num": n + 1}), walker.weight) 35 | 36 | 37 | TASK_PASS_ANSWER = [n + 1 for n in ARGS] 38 | 39 | 40 | class TestWorkMappers: 41 | def test_mapper(self): 42 | mapper = Mapper(segment_func=task_pass) 43 | 44 | mapper.init() 45 | 46 | results = mapper.map(gen_walkers()) 47 | 48 | assert all( 49 | [res.state["num"] == TASK_PASS_ANSWER[i] for i, res in enumerate(results)] 50 | ) 51 | 52 | mapper.cleanup() 53 | 54 | def test_worker_mapper(self): 55 | mapper = WorkerMapper(segment_func=task_pass, num_workers=3, worker_type=Worker) 56 | 57 | mapper.init() 58 | 59 | results = mapper.map(gen_walkers()) 60 | 61 | assert all( 62 | [res.state["num"] == TASK_PASS_ANSWER[i] for i, res in enumerate(results)] 63 | ) 64 | 65 | mapper.cleanup() 66 | 67 | def test_task_mapper(self): 68 | mapper = TaskMapper( 69 | segment_func=task_pass, num_workers=3, walker_task_type=WalkerTaskProcess 70 | ) 71 | 72 | mapper.init() 73 | 74 | results = mapper.map(gen_walkers()) 75 | 76 | assert all( 77 | [res.state["num"] == TASK_PASS_ANSWER[i] for i, res in enumerate(results)] 78 | ) 79 | 80 | mapper.cleanup() 81 | 82 | time.sleep(1) 83 | 84 | 85 | # test that task failures are passed up properly 86 | def task_fail(walker): 87 | n = walker.state["num"] 88 | if n == 1: 89 | raise ValueError("No soup for you!!") 90 | else: 91 | return Walker(WalkerState(**{"num": n + 1}), walker.weight) 92 | 93 | 94 | class TestTaskFail: 95 | ARGS = (0, 1, 2) 96 | 97 | def test_mapper(self): 98 | mapper = Mapper(segment_func=task_fail) 99 | 100 | mapper.init() 101 | 102 | with pytest.raises(TaskException) as task_exc_info: 103 | results = mapper.map(gen_walkers()) 104 | 105 | mapper.cleanup() 106 | 107 | def test_worker_mapper(self): 108 | mapper = WorkerMapper(segment_func=task_fail, num_workers=3, worker_type=Worker) 109 | 110 | mapper.init() 111 | 112 | with pytest.raises(TaskException) as task_exc_info: 113 | results = mapper.map(gen_walkers()) 114 | 115 | mapper.cleanup() 116 | 117 | def test_task_mapper(self): 118 | mapper = TaskMapper( 119 | segment_func=task_fail, num_workers=3, walker_task_type=WalkerTaskProcess 120 | ) 121 | 122 | mapper.init() 123 | 124 | with pytest.raises(TaskException) as task_exc_info: 125 | results = mapper.map(gen_walkers()) 126 | 127 | mapper.cleanup() 128 | -------------------------------------------------------------------------------- /tests/utils/README.org: -------------------------------------------------------------------------------- 1 | Put individual modules here that can be imported into tests. Choose 2 | names that don't clash. Typically you can just add a 'my' to the 3 | beginning of them. 4 | -------------------------------------------------------------------------------- /tests/utils/myutils.py: -------------------------------------------------------------------------------- 1 | """Generic modules that help with running tests more smoothly.""" 2 | 3 | # Standard Library 4 | import os 5 | import os.path as osp 6 | from contextlib import contextmanager 7 | 8 | 9 | @contextmanager 10 | def cd(newdir): 11 | """Change directories; use as a context manager.""" 12 | prevdir = os.getcwd() 13 | os.chdir(osp.expanduser(newdir)) 14 | try: 15 | yield 16 | finally: 17 | os.chdir(prevdir) 18 | --------------------------------------------------------------------------------
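For reference, a short usage sketch of the `cd` helper above as it would appear inside a test; the `~/scratch` directory is hypothetical and only for illustration:

import os

from myutils import cd

print("before:", os.getcwd())

# enter a (hypothetical) directory; the '~' is expanded by cd() itself
with cd("~/scratch"):
    print("inside:", os.getcwd())

# the previous working directory is restored on exit, even if the body raised
print("after:", os.getcwd())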