├── .coveragerc ├── .gitattributes ├── pytram ├── _xtram │ ├── __init__.py │ └── xtram.py ├── _dtram │ ├── __init__.py │ └── dtram.py ├── __init__.py ├── reader.py ├── estimator.py ├── api.py ├── tramdata.py └── _version.py ├── examples └── three-state-model │ ├── three-states-P_0_ij.dat │ ├── three-states-P_1_ij.dat │ ├── three-states-f_i.dat │ ├── three-states-pi_i.dat │ ├── three-states-b_K_i.dat │ └── README.rst ├── MANIFEST.in ├── .gitignore ├── conda-recipe ├── bld.bat ├── build.sh ├── meta.yaml └── run_test.py ├── doc ├── reader.rst ├── tramdata.rst ├── api.rst ├── index.rst ├── install.rst ├── user.rst ├── Makefile └── conf.py ├── setup.cfg ├── ext ├── lse │ ├── _lse.h │ ├── lse.pyx │ └── _lse.c ├── xtram │ ├── _xtram.h │ ├── xtram.pyx │ └── _xtram.c └── dtram │ ├── _dtram.h │ ├── dtram.pyx │ └── _dtram.c ├── .travis.yml ├── tools └── install_miniconda.sh ├── GETTING_STARTED ├── test ├── test_exceptions.py ├── test_tramdata.py ├── test_estimator.py ├── test_lse.py ├── test_dtram.py ├── test_xtram.py └── test_three_state_model.py ├── CHANGELOG ├── setup.py ├── README.rst ├── bin ├── dtram.py └── xtram.py ├── xTRAM_example.ipynb └── dTRAM_example.ipynb /.coveragerc: -------------------------------------------------------------------------------- 1 | [report] 2 | omit = */_version.py 3 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | pytram/_version.py export-subst 2 | -------------------------------------------------------------------------------- /pytram/_xtram/__init__.py: -------------------------------------------------------------------------------- 1 | # xTRAM estimator class 2 | from .xtram import XTRAM 3 | -------------------------------------------------------------------------------- /examples/three-state-model/three-states-P_0_ij.dat: -------------------------------------------------------------------------------- 
1 | 0.750 0.250 0.000 2 | 0.500 0.000 0.500 3 | 0.000 0.025 0.975 4 | -------------------------------------------------------------------------------- /examples/three-state-model/three-states-P_1_ij.dat: -------------------------------------------------------------------------------- 1 | 0.500 0.500 0.000 2 | 0.500 0.000 0.500 3 | 0.000 0.500 0.500 4 | -------------------------------------------------------------------------------- /pytram/_dtram/__init__.py: -------------------------------------------------------------------------------- 1 | # raise the dTRAM estimator class onto pytram.dtram level 2 | from .dtram import DTRAM 3 | -------------------------------------------------------------------------------- /examples/three-state-model/three-states-f_i.dat: -------------------------------------------------------------------------------- 1 | 2.442347035369235364e+00 2 | 3.135494215929169659e+00 3 | 1.397619423751545187e-01 4 | -------------------------------------------------------------------------------- /examples/three-state-model/three-states-pi_i.dat: -------------------------------------------------------------------------------- 1 | 8.695652173912775396e-02 2 | 4.347826086956435576e-02 3 | 8.695652173913079874e-01 4 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst 2 | include CHANGELOG 3 | include ext/lse/_lse.h 4 | include ext/dtram/_dtram.h 5 | include ext/xtram/_xtram.h 6 | include versioneer.py 7 | include pytram/_version.py 8 | -------------------------------------------------------------------------------- /examples/three-state-model/three-states-b_K_i.dat: -------------------------------------------------------------------------------- 1 | 0.0000000000000000e+00 6.9314718055994529e-01 2 | 0.0000000000000000e+00 0.0000000000000000e+00 3 | 0.0000000000000000e+00 2.9957322735539909e+00 4 | 
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.so 3 | build 4 | ext/lse/lse.c 5 | ext/dtram/dtram.c 6 | ext/xtram/xtram.c 7 | .ipynb_checkpoints 8 | pytram.egg-info 9 | doc/_build 10 | dist/ 11 | .DS_Store 12 | .coverage 13 | -------------------------------------------------------------------------------- /conda-recipe/bld.bat: -------------------------------------------------------------------------------- 1 | "%PYTHON%" setup.py install 2 | if errorlevel 1 exit 1 3 | 4 | :: Add more build steps here, if they are necessary. 5 | 6 | :: See 7 | :: http://docs.continuum.io/conda/build.html 8 | :: for a list of environment variables that are set during the build process. 9 | -------------------------------------------------------------------------------- /conda-recipe/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | $PYTHON setup.py install 4 | 5 | # Add more build steps here, if they are necessary. 6 | 7 | # See 8 | # http://docs.continuum.io/conda/build.html 9 | # for a list of environment variables that are set during the build process. 10 | -------------------------------------------------------------------------------- /doc/reader.rst: -------------------------------------------------------------------------------- 1 | .. _ref_reader: 2 | 3 | ==================== 4 | Reader documentation 5 | ==================== 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | 10 | 11 | 12 | Reader 13 | ====== 14 | 15 | The reader allows to read trajectory data in the predefined pytram data format. 16 | 17 | .. 
autoclass:: pytram.Reader 18 | :members: 19 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [easy_install] 2 | 3 | 4 | # See the docstring in versioneer.py for instructions. Note that you must 5 | # re-run 'versioneer.py setup' after changing this section, and commit the 6 | # resulting files. 7 | 8 | [versioneer] 9 | VCS = git 10 | style = pep440 11 | versionfile_source = pytram/_version.py 12 | #versionfile_build = 13 | tag_prefix = 14 | parentdir_prefix = pytram- 15 | 16 | -------------------------------------------------------------------------------- /doc/tramdata.rst: -------------------------------------------------------------------------------- 1 | .. _ref_tramdata: 2 | 3 | ====================== 4 | TRAMData documentation 5 | ====================== 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | 10 | 11 | 12 | TRAMData 13 | ======== 14 | 15 | Has the function of a data container and converter. It stores and converts all mathematical objects needed for any 16 | tram estimation. 17 | In particular it is best used in conjunction with the data preparation pipeline. 18 | 19 | .. 
autoclass:: pytram.TRAMData 20 | :members: 21 | -------------------------------------------------------------------------------- /ext/lse/_lse.h: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | lse.h - logsumexp implementation in C (header file) 4 | 5 | author: Christoph Wehmeyer 6 | 7 | */ 8 | 9 | #ifndef PYTRAM_LSE 10 | #define PYTRAM_LSE 11 | 12 | #include 13 | #include 14 | 15 | /* _sort()is based on examples from http://www.linux-related.de (2004) */ 16 | void _sort( double *array, int L, int R ); 17 | 18 | double _logsumexp( double *array, int length ); 19 | double _logsumexp_pair( double a, double b ); 20 | 21 | #endif 22 | -------------------------------------------------------------------------------- /conda-recipe/meta.yaml: -------------------------------------------------------------------------------- 1 | package: 2 | name: pytram 3 | version: !!str dev 4 | 5 | source: 6 | path: .. 7 | 8 | requirements: 9 | build: 10 | - python 11 | - setuptools 12 | - numpy x.x 13 | - cython >=0.20 14 | 15 | run: 16 | - python 17 | - numpy x.x 18 | 19 | test: 20 | requires: 21 | - nose 22 | - coverage ==4 23 | imports: 24 | - pytram 25 | - pytram._dtram 26 | - pytram._xtram 27 | 28 | about: 29 | home: https://github.com/markovmodel/pytram 30 | license: BSD License 31 | summary: 'The TRAM package' 32 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: c 2 | sudo: false 3 | env: 4 | global: 5 | - PATH=$HOME/miniconda/bin:$PATH 6 | matrix: 7 | include: 8 | - os: linux 9 | python : "2.7" 10 | env: CONDA_PY=27 CONDA_NPY=19 11 | - os: linux 12 | python : "2.7" 13 | env: CONDA_PY=27 CONDA_NPY=110 14 | before_install: 15 | - tools/install_miniconda.sh 16 | - conda config --set always_yes true 17 | - conda config --add channels omnia 18 | - conda install conda-build nose 19 | script: 20 | - 
conda build -q conda-recipe 21 | after_success: 22 | - pip install coveralls 23 | - coveralls 24 | -------------------------------------------------------------------------------- /tools/install_miniconda.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # make TARGET overrideable with env 4 | : ${TARGET:=$HOME/miniconda} 5 | 6 | function install_miniconda { 7 | if [ -d $TARGET ]; then echo "file exists"; return; fi 8 | echo "installing miniconda to $TARGET" 9 | if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then 10 | platform="Linux" 11 | elif [[ "$TRAVIS_OS_NAME" == "osx" ]]; then 12 | platform="MacOSX" 13 | fi 14 | wget http://repo.continuum.io/miniconda/Miniconda-latest-${platform}-x86_64.sh -O mc.sh -o /dev/null 15 | bash mc.sh -b -f -p $TARGET 16 | } 17 | 18 | install_miniconda 19 | export PATH=$TARGET/bin:$PATH 20 | -------------------------------------------------------------------------------- /GETTING_STARTED: -------------------------------------------------------------------------------- 1 | 2 | Getting started with pytram 3 | =========================== 4 | 5 | 6 | STEP 0) Get the package from https://github.com/markovmodel/pytram 7 | 8 | 9 | STEP 1) Build the extensions using the command 10 | 11 | python setup.py build_ext --inplace 12 | 13 | 14 | STEP 2) Test the package using the command 15 | 16 | python setup.py test 17 | 18 | 19 | STEP 3) Run the dTRAM example iPython notebook using the command 20 | 21 | ipython notebook dTRAM_example.ipynb 22 | 23 | and have a look how the API works for the given examples 24 | 25 | 26 | STEP 4) [OPTIONAL] Install and test the package; use 27 | 28 | python setup.py install 29 | 30 | or 31 | 32 | pip install . 
33 | 34 | 35 | STEP 5) [OPTIONAL] Follow these examples and try TRAM with your own application 36 | -------------------------------------------------------------------------------- /pytram/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | from ._version import get_versions 3 | __version__ = get_versions()['version'] 4 | del get_versions 5 | 6 | # raise custom exceptions onto the pytram package level 7 | from .estimator import ExpressionError, NotConvergedWarning 8 | 9 | # raise the TRAMData class onto the pytram package level 10 | from .reader import Reader 11 | 12 | # raise the TRAMData class onto the pytram package level 13 | from .tramdata import TRAMData 14 | 15 | # raise the dTRAM estimator class onto the pytram package level 16 | from ._dtram import DTRAM 17 | 18 | # raise the xTRAM estimator class onto the pytram package level 19 | from ._xtram import XTRAM 20 | 21 | # raise the API function onto the pytram level 22 | from .api import dtram, dtram_from_matrix, xtram, xtram_from_matrix 23 | 24 | import warnings 25 | warnings.warn("pytram is no longer supported; we recommend to use the PyEMMA package instead.") 26 | del warnings 27 | -------------------------------------------------------------------------------- /ext/lse/lse.pyx: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # 3 | # lse.pyx - logsumexp implementation in C (cython wrapper) 4 | # 5 | # author: Christoph Wehmeyer 6 | # 7 | ################################################################################ 8 | 9 | import numpy as np 10 | cimport numpy as np 11 | 12 | cdef extern from "_lse.h": 13 | void _sort(double *array, int L, int R) 14 | double _logsumexp(double *array, int length) 15 | double _logsumexp_pair(double a, double b) 16 | 17 | # _sort()is based on examples from http://www.linux-related.de (2004) 18 | def 
sort(np.ndarray[double, ndim=1, mode="c"] array not None): 19 | _sort( np.PyArray_DATA(array), 0, array.shape[0]) 20 | 21 | def logsumexp(np.ndarray[double, ndim=1, mode="c"] array not None): 22 | return _logsumexp( np.PyArray_DATA(array), array.shape[0]) 23 | 24 | def logsumexp_pair(a, b): 25 | return _logsumexp_pair(a, b) 26 | -------------------------------------------------------------------------------- /test/test_exceptions.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # 3 | # test_exceptions.py - testing the basic estimator class 4 | # 5 | # author: Christoph Wehmeyer 6 | # 7 | ################################################################################ 8 | 9 | from nose.tools import assert_true 10 | from pytram.estimator import ExpressionError, NotConvergedWarning 11 | 12 | def test_expression_error(): 13 | """test ExpressionError's attributes ad __str__""" 14 | ee = ExpressionError("Expression", "MSG") 15 | assert_true(ee.expression == "Expression") 16 | assert_true(ee.msg == "MSG") 17 | assert_true(ee.__str__() == "[Expression] MSG") 18 | 19 | def test_not_converged_warning(): 20 | """test NotConvergedWarning's attributes ad __str__""" 21 | ncw = NotConvergedWarning("Estimator", 0.1) 22 | assert_true(ncw.estimator == "Estimator") 23 | assert_true(ncw.increment == 0.1) 24 | assert_true(ncw.__str__() == "[Estimator] only reached increment %.3e" % 0.1) 25 | -------------------------------------------------------------------------------- /doc/api.rst: -------------------------------------------------------------------------------- 1 | .. _ref_api: 2 | 3 | ================= 4 | API documentation 5 | ================= 6 | 7 | .. 
toctree:: 8 | :maxdepth: 2 9 | 10 | 11 | 12 | The dTRAM estimator 13 | =================== 14 | 15 | For running dTRAM, the following API functions are available on the package level, i.e., :: 16 | 17 | from pytram import dtram, dtram_from_matrix 18 | 19 | dtram 20 | ----- 21 | 22 | Run dTRAM using the TRAMData object as input: 23 | 24 | .. autofunction:: pytram.api.dtram 25 | 26 | dtram_from_matrix 27 | ----------------- 28 | 29 | Run dTRAM using the mathematical expressions as input: 30 | 31 | .. autofunction:: pytram.api.dtram_from_matrix 32 | 33 | The xTRAM estimator 34 | =================== 35 | 36 | For running xTRAM, the following API functions are available on the package level, i.e., :: 37 | 38 | from pytram import xtram, xtram_from_matrix 39 | 40 | xtram 41 | ----- 42 | 43 | Run xTRAM using the TRAMData object as input: 44 | 45 | .. autofunction:: pytram.api.xtram 46 | 47 | xtram_from_matrix 48 | ----------------- 49 | 50 | Run xTRAM using the mathematical expressions as input: 51 | 52 | .. autofunction:: pytram.api.xtram_from_matrix 53 | -------------------------------------------------------------------------------- /examples/three-state-model/README.rst: -------------------------------------------------------------------------------- 1 | ********************************** 2 | pytram example - three state model 3 | ********************************** 4 | 5 | This example addresses a model with three discrete states (0,1,2), i.e., two metastable 6 | states (0,2) and a transition state (1). 7 | 8 | The states have energy levels as given in the 9 | file ``three-states-f_i.dat`` and, thus, stationary weights as given in the file 10 | ``three-states-pi_i.dat``. The transition probabilities are given in the file 11 | ``three-states-P_0_ij.dat``. 12 | 13 | Further, we apply a bias (energy shifts) to increase the number of transitions when sampling 14 | a Markov chain. 
The biased transition matrix is given in the file ``three-states-P_1_ij.dat``, 15 | the full set of bias energies is given in the file ``three-states-b_K_i.dat``. 16 | 17 | To test your pytram installation with this example, use one of the runscripts on the trajecories, 18 | i.e., the files ``three-states-traj-0.dat`` and ``three-states-traj-1.dat``, or write your own 19 | analysis script using the pytram API functions. Note that dTRAM requires the ``b_K_i`` values. 20 | 21 | The estimated stationary distribution and free ebergy profile can be compared with the exact 22 | reference values. 23 | -------------------------------------------------------------------------------- /ext/xtram/_xtram.h: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | _xtram.h - xTRAM implementation in C (header file) 4 | 5 | author: Antonia Mey 6 | 7 | */ 8 | 9 | #ifndef PYTRAM_XTRAM 10 | #define PYTRAM_XTRAM 11 | 12 | #include 13 | #include 14 | #include 15 | 16 | 17 | typedef struct 18 | { 19 | int i; 20 | int j; 21 | double value; 22 | }sparse_x; 23 | 24 | 25 | 26 | void _b_i_IJ_equation( 27 | int T_length, 28 | int n_therm_states, 29 | int n_markov_states, 30 | int *T_x, 31 | int *M_x, 32 | int *N, 33 | double *f, 34 | double *w, 35 | double *u, 36 | double *b_i_IJ); 37 | 38 | double _iterate_x( 39 | long n_entries, 40 | long pi_length, 41 | long maxiter, 42 | double ftol, 43 | int *C_i, 44 | int *C_j, 45 | double *C_ij, 46 | double *C_ji, 47 | double *x_row, 48 | double *c_column, 49 | double *pi); 50 | 51 | void update_x( 52 | double *x_row, 53 | sparse_x *x, 54 | int *C_i, 55 | int *C_j, 56 | double *C_ij, 57 | double *C_ji, 58 | double *c_column, 59 | int L); 60 | void update_x_row(int L, sparse_x *x, double *x_row, int x_row_l); 61 | void compute_pi(double *pi, double *x_row, int l_pi); 62 | double converged(double *pi_old, double *pi_new, int l_pi); 63 | void printpi(double *pi, int l); 64 | 65 | 66 | #endif 67 | 
-------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | .. pytram documentation master file, created by 2 | sphinx-quickstart on Sat Nov 29 23:53:07 2014. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | ================== 7 | The pytram package 8 | ================== 9 | 10 | The pytram package is decprecated and no longer supported. We recommend to switch to `PyEMMA `_. 11 | 12 | This software offers the python interface to the TRAM methods family. TRAM (transition-based reweighting analysis method) is a collection of Markov state model (MSM) estimators for analysing multi-ensemble simulations. The pytram package is implemented mostly in `NumPy `_ and `SciPy `_, with `Cython `_-based C-extensions for numerically demanding tasks. For documentation of the API, please have a look at the :ref:`ref_api`. To install this software and additional dependencies refer to the :ref:`Installation Guide `. The :ref:`User Guide ` is a good starting point to learn how to use TRAM. For support/bug reports/sugguestions/complains, please visit us at `GitHub `_. 13 | 14 | Contents: 15 | 16 | .. 
toctree:: 17 | :maxdepth: 2 18 | 19 | install 20 | user 21 | api 22 | reader 23 | tramdata 24 | -------------------------------------------------------------------------------- /conda-recipe/run_test.py: -------------------------------------------------------------------------------- 1 | 2 | import subprocess 3 | import os 4 | import sys 5 | import shutil 6 | import re 7 | 8 | src_dir = os.getenv('SRC_DIR') 9 | 10 | def coverage_report(): 11 | fn = '.coverage' 12 | assert os.path.exists(fn) 13 | build_dir = os.getenv('TRAVIS_BUILD_DIR') 14 | dest = os.path.join(build_dir, fn) 15 | print( "copying coverage report to", dest) 16 | shutil.copy(fn, dest) 17 | assert os.path.exists(dest) 18 | 19 | # fix paths in .coverage file 20 | with open(dest, 'r') as fh: 21 | data = fh.read() 22 | match= '"/home/travis/miniconda/envs/_test/lib/python.+?/site-packages/.+?/(pytram/.+?)"' 23 | repl = '"%s/\\1"' % build_dir 24 | data = re.sub(match, repl, data) 25 | os.unlink(dest) 26 | with open(dest, 'w+') as fh: 27 | fh.write(data) 28 | 29 | nose_run = [ 30 | "nosetests", 31 | os.path.join(src_dir, "test"), 32 | "-vv", 33 | "--with-coverage", 34 | "--cover-inclusive", 35 | "--cover-package=pytram", 36 | "--with-doctest", 37 | "--doctest-options=+NORMALIZE_WHITESPACE,+ELLIPSIS"] 38 | 39 | shutil.copyfile( 40 | os.path.join(src_dir, ".coveragerc"), 41 | os.path.join(os.getcwd(), ".coveragerc")) 42 | 43 | res = subprocess.call(nose_run) 44 | 45 | # move .coverage file to git clone on Travis CI 46 | if os.getenv('TRAVIS', False): 47 | coverage_report() 48 | 49 | sys.exit(res) 50 | -------------------------------------------------------------------------------- /ext/dtram/_dtram.h: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | _dtram.h - dTRAM implementation in C (header file) 4 | 5 | author: Christoph Wehmeyer 6 | 7 | */ 8 | 9 | #ifndef PYTRAM_DTRAM 10 | #define PYTRAM_DTRAM 11 | 12 | #include 13 | #include "../lse/_lse.h" 14 | 15 | 
#define PYTRAM_DTRAM_PRIOR 1.0E-10 16 | #define PYTRAM_DTRAM_LOG_PRIOR -23.025850929940457 17 | 18 | void _log_nu_K_i_setter( 19 | double *log_nu_K_i, 20 | int *C_K_ij, 21 | int n_therm_states, 22 | int n_markov_states 23 | ); 24 | 25 | void _log_nu_K_i_equation( 26 | double *log_nu_K_i, 27 | double *b_K_i, 28 | double *f_i, 29 | int *C_K_ij, 30 | int n_therm_states, 31 | int n_markov_states, 32 | double *scratch_j, 33 | double *new_log_nu_K_i 34 | ); 35 | 36 | void _f_i_equation( 37 | double *log_nu_K_i, 38 | double *b_K_i, 39 | double *f_i, 40 | int *C_K_ij, 41 | int n_therm_states, 42 | int n_markov_states, 43 | double *scratch_K_j, 44 | double *scratch_j, 45 | double *new_f_i 46 | ); 47 | 48 | void _p_K_ij_equation( 49 | double *log_nu_K_i, 50 | double *b_K_i, 51 | double *f_i, 52 | int *C_K_ij, 53 | int n_therm_states, 54 | int n_markov_states, 55 | double *scratch_j, 56 | double *p_K_ij 57 | ); 58 | 59 | void _f_K_equation( 60 | double *b_K_i, 61 | double *f_i, 62 | int n_therm_states, 63 | int n_markov_states, 64 | double *scratch_j, 65 | double *f_K 66 | ); 67 | 68 | #endif 69 | -------------------------------------------------------------------------------- /doc/install.rst: -------------------------------------------------------------------------------- 1 | .. _ref_install: 2 | 3 | ================== 4 | Installation guide 5 | ================== 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | 10 | 11 | There are different ways to install the pytram package. 12 | 13 | From the python package index 14 | ============================= 15 | 16 | Go to your shell and type 17 | 18 | .. code-block:: bash 19 | 20 | pip install pytram 21 | 22 | or 23 | 24 | .. code-block:: bash 25 | 26 | easy_install pytram 27 | 28 | Possible pitfalls can be the 'Cython' dependency. In case the installation fails, 29 | try to install Cython via the package index as well, by typing into your shell: 30 | 31 | .. 
code-block:: bash 32 | 33 | pip install cython 34 | 35 | 36 | via pip 37 | ------- 38 | 39 | Go to the repository's root directory and type 40 | 41 | .. code-block:: bash 42 | 43 | pip install . 44 | 45 | 46 | via setup 47 | --------- 48 | 49 | Go to the repository's root directory and type 50 | 51 | .. code-block:: bash 52 | 53 | python setup.py install [--user] 54 | 55 | To build the C-extensions in place, you can also run 56 | 57 | .. code-block:: bash 58 | 59 | python setup.py build_ext --inplace 60 | 61 | From the repository 62 | =================== 63 | 64 | First step: get the repository! 65 | 66 | Go to your shell and type 67 | 68 | .. code-block:: bash 69 | 70 | git clone https://github.com/markovmodel/pytram.git 71 | 72 | Then, install the package from source. 73 | 74 | If you expereince any other difficulties with the installation, please email us using the mailing list pytram@lists.fu-berlin.de. 75 | Ideally provide us with some basic information about your operating system/python distribution, so that we can try and reproduce your issues. 76 | 77 | 78 | 79 | -------------------------------------------------------------------------------- /ext/lse/_lse.c: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | lse.c - logsumexp implementation in C 4 | 5 | author: Christoph Wehmeyer 6 | 7 | */ 8 | 9 | #include "_lse.h" 10 | 11 | // old m$ visual studio is not c99 compliant (vs2010 eg. 
is not) 12 | #ifdef _MSC_VER 13 | #include 14 | #include 15 | #define INFINITY (DBL_MAX+DBL_MAX) 16 | #define NAN (INFINITY-INFINITY) 17 | #endif 18 | 19 | /* _sort()is based on examples from http://www.linux-related.de (2004) */ 20 | void _sort( double *array, int L, int R ) 21 | { 22 | int l, r; 23 | double swap; 24 | if( R-L > 25 ) /* use quicksort */ 25 | { 26 | l = L-1; 27 | r = R; 28 | for(;;) 29 | { 30 | while( array[++l] < array[R] ); 31 | while( array[--r] > array[R] && r > l ); 32 | if( l >= r ) break; 33 | swap = array[l]; 34 | array[l] = array[r]; 35 | array[r] = swap; 36 | } 37 | swap = array[l]; 38 | array[l] = array[R]; 39 | array[R] = swap; 40 | _sort( array, L, l-1 ); 41 | _sort( array, l+1, R ); 42 | } 43 | else /* use insertion sort */ 44 | { 45 | for( l=L+1; l<=R; ++l ) 46 | { 47 | swap = array[l]; 48 | for( r=l-1; r>=L && swap a ) 72 | return b + log( 1.0 + exp( a - b ) ); 73 | return a + log( 1.0 + exp( b - a ) ); 74 | } 75 | -------------------------------------------------------------------------------- /ext/xtram/xtram.pyx: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | cimport numpy as np 3 | 4 | cdef extern from "_xtram.h": 5 | void _b_i_IJ_equation( 6 | int T_length, 7 | int n_therm_states, 8 | int n_markov_states, 9 | int *T_x, 10 | int *M_x, 11 | int *N, 12 | double *f, 13 | double *w, 14 | double *u, 15 | double *b_i_IJ) 16 | 17 | double _iterate_x( 18 | int n_entries, 19 | int pi_length, 20 | int maxiter, 21 | double ftol, 22 | int *C_i, 23 | int *C_j, 24 | double *C_ij, 25 | double *C_ji, 26 | double *x_row, 27 | double *c_column, 28 | double *pi) 29 | 30 | def b_i_IJ_equation( 31 | np.ndarray[int, ndim=1, mode="c"] T_x not None, 32 | np.ndarray[int, ndim=1, mode="c"] M_x not None, 33 | np.ndarray[int, ndim=1, mode="c"] N_K not None, 34 | np.ndarray[double, ndim=1, mode="c"] f_K not None, 35 | np.ndarray[double, ndim=1, mode="c"] w_K not None, 36 | np.ndarray[double, 
ndim=2, mode="c"] u_K_x not None, 37 | np.ndarray[double, ndim=3, mode="c"] b_i_IJ not None 38 | ): 39 | _b_i_IJ_equation( 40 | T_x.shape[0], 41 | N_K.shape[0], 42 | b_i_IJ.shape[0], 43 | np.PyArray_DATA( T_x ), 44 | np.PyArray_DATA( M_x ), 45 | np.PyArray_DATA( N_K ), 46 | np.PyArray_DATA( f_K ), 47 | np.PyArray_DATA( w_K ), 48 | np.PyArray_DATA( u_K_x ), 49 | np.PyArray_DATA( b_i_IJ ) 50 | ) 51 | 52 | def iterate_x( 53 | long n_entries, 54 | long pi_length, 55 | long maxiter, 56 | double ftol, 57 | np.ndarray[int, ndim=1, mode="c"] C_i not None, 58 | np.ndarray[int, ndim=1, mode="c"] C_j not None, 59 | np.ndarray[double, ndim=1, mode="c"] C_ij not None, 60 | np.ndarray[double, ndim=1, mode="c"] C_ji not None, 61 | np.ndarray[double, ndim=1, mode="c"] x_row not None, 62 | np.ndarray[double, ndim=1, mode="c"] c_column not None, 63 | np.ndarray[double, ndim=1, mode="c"] pi not None 64 | ): 65 | return _iterate_x( 66 | n_entries, 67 | pi_length, 68 | maxiter, 69 | ftol, 70 | np.PyArray_DATA( C_i ), 71 | np.PyArray_DATA( C_j ), 72 | np.PyArray_DATA( C_ij ), 73 | np.PyArray_DATA( C_ji ), 74 | np.PyArray_DATA( x_row ), 75 | np.PyArray_DATA( c_column ), 76 | np.PyArray_DATA( pi ) 77 | ) 78 | 79 | -------------------------------------------------------------------------------- /test/test_tramdata.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # 3 | # test_tramdata.py - testing the TRAMData class 4 | # 5 | # author: Christoph Wehmeyer 6 | # 7 | ################################################################################ 8 | 9 | from nose.tools import assert_true 10 | from pytram import TRAMData 11 | import numpy as np 12 | 13 | def test_Mx_Tx_nMTstates(): 14 | """test M_x and T_x state assignments""" 15 | seq = np.array(range(20), dtype=np.intc) 16 | trajs = [{'m': seq[:10], 't': seq[:10]}, {'m': seq[10:], 't': seq[10:]}] 17 | td = 
TRAMData(trajs) 18 | assert_true(np.all((seq - td.T_x) == 0)) 19 | assert_true(np.all((seq - td.M_x) == 0)) 20 | assert_true(td.n_markov_states == 20) 21 | assert_true(td.n_therm_states == 20) 22 | 23 | def test_N_K_i(): 24 | """test states counts""" 25 | trajs = [] 26 | for K in xrange(3): 27 | trajs.append({ 28 | 'm': np.array(range(4), dtype=np.intc), 29 | 't': np.ones(shape=(4,), dtype=np.intc) * K}) 30 | td = TRAMData(trajs) 31 | assert_true(np.all(td.N_K_i == 1)) 32 | assert_true(np.all(td.N_K == 4)) 33 | assert_true(td.N_K.sum() == 12) 34 | 35 | def test_get_C_K_ij_lag1(): 36 | """test transition counts at lag time 1""" 37 | trajs = [{'m': np.array([0, 1, 2, 0], dtype=np.intc), 't': np.zeros(shape=(4,), dtype=np.intc)}] 38 | td = TRAMData(trajs) 39 | C_K_ij = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]]], dtype=np.intc) 40 | assert_true(np.all(C_K_ij == td.get_C_K_ij(1))) 41 | 42 | def test_get_C_K_ij_lag2(): 43 | """test transition counts at lag time 2""" 44 | trajs = [{'m': np.array([0, 1, 2, 0], dtype=np.intc), 't': np.zeros(shape=(4,), dtype=np.intc)}] 45 | td = TRAMData(trajs) 46 | C_K_ij = np.array([[[0, 0, 1], [1, 0, 0], [0, 0, 0]]], dtype=np.intc) 47 | assert_true(np.all(C_K_ij == td.get_C_K_ij(2))) 48 | 49 | def test_get_C_K_ij_lag3(): 50 | """test transition counts at lag time 3""" 51 | trajs = [{'m': np.array([0, 1, 2, 0], dtype=np.intc), 't': np.zeros(shape=(4,), dtype=np.intc)}] 52 | td = TRAMData(trajs) 53 | C_K_ij = np.array([[[1, 0, 0], [0, 0, 0], [0, 0, 0]]], dtype=np.intc) 54 | assert_true(np.all(C_K_ij == td.get_C_K_ij(3))) 55 | -------------------------------------------------------------------------------- /test/test_estimator.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # 3 | # test_estimator.py - testing the basic estimator class 4 | # 5 | # author: Christoph Wehmeyer 6 | # 7 | 
################################################################################ 8 | 9 | from nose.tools import assert_raises, assert_true, assert_equal 10 | from pytram.estimator import Estimator, ExpressionError 11 | import numpy as np 12 | 13 | def test_expression_error_None(): 14 | """test Estimator throws ExpressionError with None""" 15 | assert_raises(ExpressionError, Estimator, None) 16 | 17 | def test_expression_error_int(): 18 | """test Estimator throws ExpressionError with number""" 19 | assert_raises(ExpressionError, Estimator, 5) 20 | 21 | def test_expression_error_list(): 22 | """test Estimator throws ExpressionError with list""" 23 | assert_raises(ExpressionError, Estimator, [1, 2]) 24 | 25 | def test_expression_error_dim(): 26 | """test Estimator throws ExpressionError with wrong dimension""" 27 | assert_raises(ExpressionError, Estimator, np.ones(shape=(2, 2), dtype=np.intc)) 28 | 29 | def test_expression_error_markov(): 30 | """test Estimator throws ExpressionError with wrong Markov state count""" 31 | assert_raises(ExpressionError, Estimator, np.ones(shape=(2, 2, 3), dtype=np.intc)) 32 | 33 | def test_expression_error_float32(): 34 | """test Estimator throws ExpressionError with wrong dtype""" 35 | assert_raises(ExpressionError, Estimator, np.ones(shape=(2, 3, 3), dtype=np.float32)) 36 | 37 | def test_expression_error_zeros(): 38 | """test Estimator throws ExpressionError with zero counts""" 39 | assert_raises(ExpressionError, Estimator, np.zeros(shape=(2, 3, 3), dtype=np.intc)) 40 | 41 | def test_number_of_states(): 42 | """test Estimator calculates state numbers""" 43 | estimator = Estimator(np.ones(shape=(2, 3, 3), dtype=np.intc)) 44 | assert_equal(estimator.n_markov_states, 3) 45 | assert_equal(estimator.n_therm_states, 2) 46 | 47 | def test_not_implemented_error(): 48 | """test Estimator throws NotImplementedError when accessing f_K attribute""" 49 | estimator = Estimator(np.ones(shape=(2, 3, 3), dtype=np.intc)) 50 | try: 51 | estimator.f_K 52 
| assert_true(False) 53 | except NotImplementedError: 54 | assert_true(True) 55 | -------------------------------------------------------------------------------- /test/test_lse.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # 3 | # test_lse.py - testing the logsumexp implementation 4 | # 5 | # author: Christoph Wehmeyer 6 | # 7 | ################################################################################ 8 | 9 | from nose.tools import assert_true 10 | from pytram.lse import sort, logsumexp, logsumexp_pair 11 | import numpy as np 12 | 13 | def test_sort_urnd(): 14 | """test sort() for 10^5 uniform random numbers from [0,1]""" 15 | data = np.random.rand(100000).astype(np.float64) 16 | sort(data) 17 | for i in xrange(1, data.shape[0]): 18 | assert_true(data[i-1] <= data[i]) 19 | 20 | def test_sort_grnd(): 21 | """test sort() for 10^5 random numbers from N(0,1)""" 22 | data = np.random.randn(100000).astype(np.float64) 23 | sort(data) 24 | for i in xrange(1, data.shape[0]): 25 | assert_true(data[i-1] <= data[i]) 26 | 27 | def test_logsumexp_converged_geometric_series(): 28 | """test logsumexp() for a converging 10^4 element geometric series""" 29 | data = np.arange(10000).astype(np.float64) 30 | assert_true(np.abs(logsumexp(-data) - 0.45867514538708193) < 1.0E-15) 31 | 32 | def test_logsumexp_truncated_diverging_gemoetric_series(): 33 | """test logsumexp() for a truncated, non-converging 10^4 element geometric series""" 34 | data = np.arange(10000).astype(np.float64) 35 | assert_true(np.abs(logsumexp(data) - 9999.4586751453862) < 1.0E-15) 36 | 37 | def test_logsumexp_pair(): 38 | """test logsumexp_pair() for various pairs""" 39 | assert_true(np.abs(logsumexp_pair(0.0, 0.0) - np.log(2.0)) < 1.0E-15) 40 | assert_true(np.abs(logsumexp_pair(1.0, 1.0) - (1.0 + np.log(2.0))) < 1.0E-15) 41 | assert_true(np.abs(logsumexp_pair(10.0, 10.0) - (10.0 + 
np.log(2.0))) < 1.0E-15) 42 | assert_true(np.abs(logsumexp_pair(100.0, 100.0) - (100.0 + np.log(2.0))) < 1.0E-15) 43 | assert_true(np.abs(logsumexp_pair(1000.0, 1000.0) - (1000.0 + np.log(2.0))) < 1.0E-15) 44 | assert_true(np.abs(logsumexp_pair(10.0, 0.0) - 10.000045398899218) < 1.0E-15) 45 | assert_true(np.abs(logsumexp_pair(0.0, 10.0) - 10.000045398899218) < 1.0E-15) 46 | assert_true(np.abs(logsumexp_pair(100.0, 0.0) - 100.0) < 1.0E-15) 47 | assert_true(np.abs(logsumexp_pair(0.0, 100.0) - 100.0) < 1.0E-15) 48 | assert_true(np.abs(logsumexp_pair(1000.0, 0.0) - 1000.0) < 1.0E-15) 49 | assert_true(np.abs(logsumexp_pair(0.0, 1000.0) - 1000.0) < 1.0E-15) 50 | -------------------------------------------------------------------------------- /CHANGELOG: -------------------------------------------------------------------------------- 1 | 2 | pytram changelog 3 | ================ 4 | 5 | Version 0.1.0 (Alpha): 6 | 7 | Implemented the dTRAM estimator, nosetests, and a dTRAM example (iPython notebook). 8 | 9 | Version 0.1.1 (Alpha): 10 | 11 | Added a second example for dTRAM. Added docstrings for DTRAM and the API function. 12 | 13 | Version 0.1.2 (Alpha): 14 | 15 | Added the TRAMData class for easy input data preparation. Adapted API and dTRAM example. 16 | Added new dTRAM API function for simple use of TRAMData. Changed dTRAM dependency from 17 | reweighting factors to bias energies. Added method for transition matrix of a given 18 | thermodynamic state. 19 | 20 | Version 0.1.3 (Alpha): 21 | 22 | Added the Reader class for easy input data import. Added run script for dTRAM. 23 | Added u_I_x generators. in TRAMData for use with xTRAM. 24 | 25 | Version 0.1.4 (Alpha): 26 | 27 | Added xTRAM estimator, API, runscript, tests 28 | Added documentation. 29 | Minor bug fixes. 30 | 31 | Version 0.1.5 (Alpha): 32 | 33 | Preparation for pypi 34 | 35 | Version 0.1.6 (Beta) 36 | 37 | Added verbosity to TRAMData. Binning bugfix in xTRAM. Fixed None comparisons in dTRAM. 
38 | 39 | Version 0.1.7 (Beta) 40 | 41 | Added stationary property getters to Estimator. Removed unsafe setters from Estimator classes. 42 | Added kT switch in TRAMData. Moved sanity checks to Estimator classes' constructors. 43 | Standardised verbose printouts. 44 | 45 | Version 0.2.0 (Beta) 46 | 47 | Introducing logsumexp summation scheme for dTRAM. Interface cleanup (uniform getters, citation, 48 | naming conventions). 49 | 50 | Version 0.2.1 (Beta) 51 | 52 | Fix compilation under MS Visual Studio. Refactoring API functions: use _from_matrix instead 53 | of _me versions. 54 | 55 | Version 0.2.2 (Beta) 56 | 57 | Fix for sign error in f_K calculations. Partially fixing bad code style. 58 | 59 | Version 0.2.3 (Beta) 60 | 61 | Fix iteration variables of iterat_x in constructor of XTRAM 62 | 63 | Version 0.2.4 (Beta) 64 | 65 | Fix normalisation in biased thermodynamic states. Extended the test suite. Renaming the dtram 66 | xtram modules (pytram/dtram -> pytram/_dtram & pytram/xtram -> pytram/_xtram) to prevent a name 67 | space clash with the pytram.dtram and pytram.xtram API functions (fixes #43). 68 | 69 | Version 0.2.5 (Beta) 70 | 71 | Fixes broken __version__ on PyPI (#45). Migration to container-based infrastructure on Travis. 72 | Covering f_K in unit tests. 
73 | 74 | Version 0.2.6 (Beta) 75 | 76 | Fixes error in xTRAM (#44) 77 | 78 | Version 0.3 (Inactive) 79 | 80 | Fix for #26 81 | pytram is no longer supported; use PyEMMA instead 82 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from distutils.core import Extension 3 | from sys import exit as sys_exit 4 | import versioneer 5 | 6 | try: 7 | from Cython.Distutils import build_ext 8 | except ImportError: 9 | print "ERROR - please install the cython dependency manually:" 10 | print "pip install cython" 11 | sys_exit( 1 ) 12 | 13 | try: 14 | from numpy import get_include as np_get_include 15 | except ImportError: 16 | print "ERROR - please install the numpy dependency manually:" 17 | print "pip install numpy" 18 | sys_exit( 1 ) 19 | 20 | ext_lse = Extension( 21 | "pytram.lse", 22 | sources=["ext/lse/lse.pyx", "ext/lse/_lse.c" ], 23 | include_dirs=[np_get_include()], 24 | extra_compile_args=["-O3"] 25 | ) 26 | ext_dtram = Extension( 27 | "pytram._dtram.ext", 28 | sources=["ext/dtram/dtram.pyx", "ext/dtram/_dtram.c", "ext/lse/_lse.c" ], 29 | include_dirs=[np_get_include()], 30 | extra_compile_args=["-O3"] 31 | ) 32 | ext_xtram = Extension( 33 | "pytram._xtram.ext", 34 | sources=["ext/xtram/xtram.pyx", "ext/xtram/_xtram.c" ], 35 | include_dirs=[np_get_include()], 36 | extra_compile_args=["-O3"] 37 | ) 38 | 39 | cmd_class = versioneer.get_cmdclass() 40 | cmd_class.update({'build_ext': build_ext}) 41 | 42 | setup( 43 | cmdclass=cmd_class, 44 | ext_modules=[ 45 | ext_lse, 46 | ext_dtram, 47 | ext_xtram 48 | ], 49 | name='pytram', 50 | version=versioneer.get_version(), 51 | description='The TRAM package', 52 | long_description='The python interface to the TRAM framework for estimating Markov state models from biased MD simulations.', 53 | classifiers=[ 54 | 'Development Status :: 7 - Inactive', 55 | 'Environment :: 
Console', 56 | 'Intended Audience :: Science/Research', 57 | 'License :: OSI Approved :: BSD License', 58 | 'Natural Language :: English', 59 | 'Operating System :: MacOS :: MacOS X', 60 | 'Operating System :: POSIX :: Linux', 61 | 'Programming Language :: C', 62 | 'Programming Language :: Cython', 63 | 'Programming Language :: Python :: 2.7', 64 | 'Topic :: Scientific/Engineering :: Bio-Informatics', 65 | 'Topic :: Scientific/Engineering :: Chemistry', 66 | 'Topic :: Scientific/Engineering :: Mathematics', 67 | 'Topic :: Scientific/Engineering :: Physics' 68 | ], 69 | keywords=[ 70 | 'free energy', 71 | 'Markov state model', 72 | 'TRAM', 73 | 'dTRAM', 74 | 'xTRAM' 75 | ], 76 | url='https://github.com/markovmodel/pytram', 77 | author='The pytram team', 78 | author_email='pytram@lists.fu-berlin.de', 79 | license='Simplified BSD License', 80 | setup_requires=[ 81 | 'numpy>=1.7.1', 82 | 'cython>=0.15', 83 | 'setuptools>=0.6' 84 | ], 85 | tests_require=[ 'numpy>=1.7.1', 'nose>=1.3' ], 86 | install_requires=[ 'numpy>=1.7.1' ], 87 | packages=[ 88 | 'pytram', 89 | 'pytram._dtram', 90 | 'pytram._xtram' 91 | ], 92 | test_suite='nose.collector', 93 | scripts=[ 94 | 'bin/dtram.py', 95 | 'bin/xtram.py' 96 | ] 97 | ) 98 | -------------------------------------------------------------------------------- /test/test_dtram.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # 3 | # test_dtram.py - testing the dTRAM estimator 4 | # 5 | # author: Christoph Wehmeyer 6 | # 7 | ################################################################################ 8 | 9 | from nose.tools import assert_raises, assert_true 10 | from pytram import DTRAM, ExpressionError, NotConvergedWarning 11 | import numpy as np 12 | 13 | def test_expression_error_None(): 14 | """test DTRAM throws ExpressionError with None""" 15 | assert_raises( 16 | ExpressionError, 17 | DTRAM, 18 | 
np.ones(shape=(2, 3, 3), dtype=np.intc), None) 19 | 20 | def test_expression_error_int(): 21 | """test DTRAM throws ExpressionError with number""" 22 | assert_raises( 23 | ExpressionError, 24 | DTRAM, 25 | np.ones(shape=(2, 3, 3), dtype=np.intc), 5) 26 | 27 | def test_expression_error_list(): 28 | """test DTRAM throws ExpressionError with list""" 29 | assert_raises( 30 | ExpressionError, 31 | DTRAM, 32 | np.ones(shape=(2, 3, 3), dtype=np.intc), [1, 2]) 33 | 34 | def test_expression_error_dim(): 35 | """test DTRAM throws ExpressionError with wrong dimension""" 36 | assert_raises( 37 | ExpressionError, 38 | DTRAM, 39 | np.ones(shape=(2, 3, 3), dtype=np.intc), 40 | np.ones(shape=(2, 2, 2), dtype=np.float64)) 41 | 42 | def test_expression_error_markov(): 43 | """test DTRAM throws ExpressionError with wrong Markov state count""" 44 | assert_raises( 45 | ExpressionError, 46 | DTRAM, 47 | np.ones(shape=(2, 3, 3), dtype=np.intc), 48 | np.ones(shape=(2, 2), dtype=np.float64)) 49 | 50 | def test_expression_error_therm(): 51 | """test DTRAM throws ExpressionError with wrong thermodynamic state count""" 52 | assert_raises( 53 | ExpressionError, 54 | DTRAM, 55 | np.ones(shape=(2, 3, 3), dtype=np.intc), 56 | np.ones(shape=(1, 3), dtype=np.float64)) 57 | 58 | def test_expression_error_int16(): 59 | """test DTRAM throws ExpressionError with wrong dtype (int16)""" 60 | assert_raises( 61 | ExpressionError, 62 | DTRAM, 63 | np.ones(shape=(2, 3, 3), dtype=np.intc), 64 | np.ones(shape=(2, 3), dtype=np.int16)) 65 | 66 | def test_expression_error_float32(): 67 | """test DTRAM throws ExpressionError with wrong dtype (float32)""" 68 | assert_raises( 69 | ExpressionError, 70 | DTRAM, 71 | np.ones(shape=(2, 3, 3), dtype=np.intc), 72 | np.ones(shape=(2, 3), dtype=np.float32)) 73 | 74 | def test_toy_model(): 75 | """test DTRAM with toy model""" 76 | C_K_ij = np.array([ 77 | [[2358, 29, 0], [29, 0, 32], [0, 32, 197518]], 78 | [[16818, 16763, 0], [16763, 0, 16510], [0, 16510, 16635]]], 
dtype=np.intc) 79 | b_K_i = np.array([[0.0, 0.0, 0.0], [4.0, 0.0, 8.0]], dtype=np.float64) 80 | dtram = DTRAM(C_K_ij, b_K_i) 81 | assert_raises(NotConvergedWarning, dtram.sc_iteration, maxiter=1, ftol=1.0E-80, verbose=False) 82 | dtram.sc_iteration(maxiter=200000, ftol=1.0E-15, verbose=True) 83 | pi = np.array([1.82026887e-02, 3.30458960e-04, 9.81466852e-01], dtype=np.float64) 84 | T = np.array([ 85 | [9.90504397e-01, 9.49560284e-03, 0.0], 86 | [5.23046803e-01, 0.0, 4.76953197e-01], 87 | [0.0, 1.60589690e-04, 9.99839410e-01]], dtype=np.float64) 88 | print pi 89 | print dtram.pi_i 90 | assert_true(np.max(np.abs(dtram.pi_i - pi)) < 1.0E-8) 91 | assert_true(np.max(np.abs(dtram.estimate_transition_matrix(0) - T)) < 1.0E-8) 92 | -------------------------------------------------------------------------------- /test/test_xtram.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # 3 | # test_xtram.py - testing the xTRAM estimator 4 | # 5 | # author: Antonia Mey 6 | # 7 | ################################################################################ 8 | 9 | from nose.tools import assert_raises, assert_true 10 | from pytram import XTRAM, ExpressionError, NotConvergedWarning 11 | import numpy as np 12 | 13 | 14 | def test_expression_error_None(): 15 | assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), None, np.ones( shape =(10), dtype=np.intc), np.ones( shape =(10), dtype=np.intc),np.ones( shape =(2,3), dtype=np.intc) ) 16 | assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64 ),None, np.ones( shape =(10), dtype=np.intc), np.ones( shape =(2,3), dtype=np.intc) ) 17 | assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64), np.ones( shape =(10), dtype=np.intc),None, np.ones( shape =(2,3), 
dtype=np.intc) ) 18 | assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64), np.ones( shape =(10), dtype=np.intc),np.ones( shape =(10), dtype=np.intc), None ) 19 | 20 | def test_expression_error_dim(): 21 | assert_raises( 22 | ExpressionError, 23 | XTRAM, 24 | np.ones((2,3), dtype=np.intc), 25 | np.ones((2,10), dtype=np.float64), 26 | np.ones(10, dtype=np.intc), 27 | np.ones(10, dtype=np.intc), 28 | np.ones((2,3), dtype=np.intc) ) 29 | 30 | assert_raises( 31 | ExpressionError, 32 | XTRAM, 33 | np.ones((2,3,3), dtype=np.intc), 34 | np.ones((10), dtype=np.float64), 35 | np.ones(10, dtype=np.intc), 36 | np.ones(10, dtype=np.intc), 37 | np.ones((2,3), dtype=np.intc) ) 38 | 39 | assert_raises( 40 | ExpressionError, 41 | XTRAM, 42 | np.ones((2,3,3), dtype=np.intc), 43 | np.ones((2,10), dtype=np.float64), 44 | np.ones(10, dtype=np.intc), 45 | np.ones(10, dtype=np.intc), 46 | np.ones(3, dtype=np.intc) ) 47 | 48 | def test_expression_error_markov(): 49 | assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64), np.ones( shape =(10), dtype=np.intc),np.ones( shape =(10), dtype=np.intc), np.ones( shape =(2,4), dtype=np.intc) ) 50 | def test_expression_error_therm(): 51 | assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(3,10), dtype=np.float64), np.ones( shape =(10), dtype=np.intc),np.ones( shape =(10), dtype=np.intc), np.ones( shape =(2,3), dtype=np.intc) ) 52 | assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64), np.ones( shape =(10), dtype=np.intc),np.ones( shape =(10), dtype=np.intc), np.ones( shape =(3,4), dtype=np.intc) ) 53 | 54 | '''def test_expression_error_int16(): 55 | assert_raises( ExpressionError, XTRAM, np.ones( shape=(2,3,3), dtype=np.intc ), np.ones( shape=(2,3), dtype=np.int16 ) ) 56 | 57 | def 
test_expression_error_float32(): 58 | assert_raises( ExpressionError, XTRAM, np.ones( shape=(2,3,3), dtype=np.intc ), np.ones( shape=(2,3), dtype=np.float32 ) ) 59 | 60 | ( self, C_K_ij, u_I_x, T_x, M_x, N_K_i, N_K, target = 0, verbose = False ): 61 | 62 | assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(3,10), dtype=np.float64), np.ones( shape =(10), dtype=np.intc),np.ones( shape =(10), dtype=np.intc) np.ones( shape =(3,3), dtype=np.intc), np.ones( shape =(3), dtype=np.intc) ) 63 | ''' 64 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ****** 2 | pytram 3 | ****** 4 | 5 | The pytram package is deprecated and no longer supported. We recommend to switch to `PyEMMA `_. 6 | 7 | .. image:: http://badges.github.io/stability-badges/dist/deprecated.svg 8 | :target: http://github.com/badges/stability-badges 9 | .. image:: https://travis-ci.org/markovmodel/pytram.svg?branch=devel 10 | :target: https://travis-ci.org/markovmodel/pytram 11 | .. image:: https://coveralls.io/repos/markovmodel/pytram/badge.svg?branch=devel 12 | :target: https://coveralls.io/r/markovmodel/pytram?branch=devel 13 | .. image:: https://badge.fury.io/py/pytram.svg 14 | :target: https://pypi.python.org/pypi/pytram 15 | 16 | This python package implements the transition-based reweighting analysis method (TRAM) estimators. 
17 | 18 | 19 | 20 | Installation 21 | ============ 22 | 23 | Using conda:: 24 | 25 | conda install -c https://conda.binstar.org/omnia pytram 26 | 27 | 28 | Using pip from PyPI:: 29 | 30 | # you might have to install these dependencies manually 31 | pip install cython 32 | pip install numpy 33 | 34 | # install pytram 35 | pip install pytram 36 | 37 | 38 | Using pip from github (this will install the latest development version):: 39 | 40 | # you might have to install these dependencies manually 41 | pip install cython 42 | pip install numpy 43 | 44 | # install pytram - this might be slow 45 | pip install git+https://github.com/markovmodel/pytram.git@devel 46 | 47 | 48 | Authors 49 | ======= 50 | 51 | - Christoph Wehmeyer 52 | - Antonia Mey 53 | - Fabian Paul 54 | - Hao Wu 55 | - Frank Noé 56 | 57 | 58 | 59 | References 60 | ========== 61 | 62 | * **dTRAM**: *Statistically optimal analysis of state-discretized trajectory data from multiple thermodynamic states*, Hao Wu, Antonia S.J.S. Mey, Edina Rosta, and Frank Noé, **J. Chem. Phys.** 141, 214106 (2014). 63 | 64 | Download: 65 | 66 | * **xTRAM**: *Estimating Equilibrium Expectations from Time-Correlated Simulation Data at Multiple Thermodynamic States*, Antonia S.J.S. Mey, Hao Wu, and Frank Noé, **Phys. Rev. X** 4, 041018 (2014). 67 | 68 | Download: 69 | 70 | 71 | 72 | Copyright notice 73 | ================ 74 | 75 | Copyright (c) 2014, Computational Molecular Biology Group, FU Berlin, 14195 Berlin, Germany. 76 | 77 | Redistribution and use in source and binary forms, with or without 78 | modification, are permitted provided that the following conditions 79 | are met: 80 | 81 | 1. Redistributions of source code must retain the above copyright notice, 82 | this list of conditions and the following disclaimer. 83 | 84 | 2. 
Redistributions in binary form must reproduce the above copyright 85 | notice, this list of conditions and the following disclaimer in the 86 | documentation and/or other materials provided with the distribution. 87 | 88 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 89 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 90 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 91 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 92 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 93 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 94 | TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 95 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 96 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 97 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 98 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
99 | -------------------------------------------------------------------------------- /ext/xtram/_xtram.c: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | _xtram.c - xTRAM implementation in C 4 | 5 | author: Antonia Mey 6 | 7 | */ 8 | 9 | #include "_xtram.h" 10 | 11 | 12 | void _b_i_IJ_equation( 13 | int T_length, 14 | int n_therm_states, 15 | int n_markov_states, 16 | int *T_x, 17 | int *M_x, 18 | int *N, 19 | double *f, 20 | double *w, 21 | double *u, 22 | double *b_i_IJ) 23 | { 24 | 25 | int x, i, I, J, TT; 26 | double delta, metropolis; 27 | 28 | TT=n_therm_states*n_therm_states; 29 | for(i = 0;i 6 | # 7 | ################################################################################ 8 | 9 | from nose.tools import assert_raises, assert_true, assert_equal 10 | from pytram import TRAMData, dtram, xtram 11 | import numpy as np 12 | from numpy.testing import assert_allclose 13 | 14 | def tower_sample(distribution): 15 | """draws random integers from the given distribution""" 16 | cdf = np.cumsum(distribution) 17 | rnd = np.random.rand() * cdf[-1] 18 | ind = (cdf > rnd) 19 | idx = np.where(ind == True) 20 | return np.min(idx) 21 | 22 | def evolve_chain(x, P, length): 23 | """generates a discrete Markov chain""" 24 | chain = np.zeros(length, dtype=np.intc) 25 | chain[0] = x 26 | for i in xrange(1, length): 27 | chain[i] = tower_sample(P[chain[i-1]]) 28 | return chain 29 | 30 | def assign_bias(dtraj, b_K_i): 31 | """assigns bias energies to discrete trajectories""" 32 | b = np.zeros(shape=(dtraj.shape[0], b_K_i.shape[0]), dtype=np.float64) 33 | for i in xrange(b_K_i.shape[1]): 34 | b[(dtraj == i), :] = (b_K_i[:, i])[np.newaxis, :] 35 | return b 36 | 37 | def generate_data(P, b_K_i): 38 | """generates pyram compatible input data""" 39 | dtraj_0 = [evolve_chain(1, P[0, :, :], 100) for i in xrange(100)] 40 | dtraj_1 = [evolve_chain(1, P[1, :, :], 100) for i in xrange(100)] 41 | inp = [{ 42 | 'm': d, 43 | 't': 
np.zeros(shape=d.shape, dtype=np.intc), 44 | 'b': assign_bias(d, b_K_i)} for d in dtraj_0] 45 | inp += [{ 46 | 'm': d, 47 | 't': np.ones(shape=d.shape, dtype=np.intc), 48 | 'b': assign_bias(d, b_K_i)} for d in dtraj_1] 49 | return inp 50 | 51 | class TestThreeStateModel(object): 52 | @classmethod 53 | def setup_class(cls): 54 | cls.energy = np.array([1.0, 2.0, 0.0], dtype=np.float64) 55 | cls.b_K_i = np.array([[0.0, 0.0, 0.0], 2.0 - cls.energy], dtype=np.float64) 56 | cls.pi_i = np.exp(-cls.energy) / np.exp(-cls.energy).sum() 57 | cls.f_i = -np.log(cls.pi_i) 58 | cls.F_K = 1.0 / (np.exp(-cls.b_K_i) * cls.pi_i[np.newaxis, :]).sum(axis=1) 59 | cls.f_K = np.log(cls.F_K) 60 | cls.pi_K_i = cls.F_K[:, np.newaxis] * np.exp(-cls.b_K_i) * cls.pi_i[np.newaxis, :] 61 | cls.f_K_i = -np.log(cls.pi_K_i) 62 | metropolis = cls.energy[np.newaxis, :] - cls.energy[:, np.newaxis] 63 | metropolis[(metropolis < 0.0)] = 0.0 64 | selection = np.array([[0.5, 0.5, 0.0], [0.5, 0.0, 0.5], [0.0, 0.5, 0.5]], dtype=np.float64) 65 | metr_hast = selection * np.exp(-metropolis) 66 | for i in xrange(metr_hast.shape[0]): 67 | metr_hast[i, i] = 0.0 68 | metr_hast[i, i] = 1.0 - metr_hast[i, :].sum() 69 | cls.tmat = np.array([metr_hast, selection]) 70 | cls.inp = generate_data(cls.tmat, cls.b_K_i) 71 | @classmethod 72 | def teardown_class(cls): 73 | pass 74 | def setup(self): 75 | pass 76 | def teardown(self): 77 | pass 78 | def test_dtram_api(self): 79 | """testing the dTRAM API""" 80 | tramdata = TRAMData(self.inp, b_K_i=self.b_K_i, verbose=True) 81 | dtram_obj = dtram(tramdata, lag=1, maxiter=1, ftol=1.0E-14, verbose=True) 82 | dtram_obj = dtram(tramdata, lag=1, maxiter=100000, ftol=1.0E-14, verbose=True) 83 | maxerr = 1.0E-1 84 | assert_allclose(dtram_obj.f_K, self.f_K, atol=maxerr) 85 | assert_allclose(dtram_obj.f_i, self.f_i, atol=maxerr) 86 | assert_allclose(dtram_obj.pi_i, self.pi_i, atol=maxerr) 87 | assert_allclose(dtram_obj.f_K_i, self.f_K_i, atol=maxerr) 88 | 
assert_allclose(dtram_obj.pi_K_i, self.pi_K_i, atol=maxerr) 89 | assert_allclose(dtram_obj.estimate_transition_matrices(), self.tmat, atol=maxerr) 90 | def test_xtram_api(self): 91 | """testing the xTRAM API""" 92 | tramdata = TRAMData(self.inp, verbose=True) 93 | xtram_obj = xtram(tramdata, lag=1, maxiter=1, ftol=1.0E-13, verbose=True) 94 | xtram_obj = xtram(tramdata, lag=1, maxiter=10000, ftol=1.0E-13, verbose=True) 95 | maxerr = 1.0E-1 96 | assert_allclose(xtram_obj.f_K, self.f_K, atol=maxerr) 97 | assert_allclose(xtram_obj.f_i, self.f_i, atol=maxerr) 98 | assert_allclose(xtram_obj.pi_i, self.pi_i, atol=maxerr) 99 | assert_allclose(xtram_obj.f_K_i, self.f_K_i, atol=maxerr) 100 | assert_allclose(xtram_obj.pi_K_i, self.pi_K_i, atol=maxerr) 101 | -------------------------------------------------------------------------------- /pytram/reader.py: -------------------------------------------------------------------------------- 1 | r""" 2 | .. moduleauthor:: Christoph Wehmeyer , Antonia Mey 3 | 4 | """ 5 | 6 | import numpy as np 7 | 8 | 9 | 10 | #################################################################################################### 11 | # 12 | # READER CLASS FOR IMPORTING SEQUENTIAL SIMULATION DATA 13 | # 14 | #################################################################################################### 15 | 16 | class Reader(object): 17 | r""" 18 | Parameters 19 | ---------- 20 | files : array_like 21 | list of filenames of the to-be-imported trajectory files 22 | b_K_i_file : string (optional) 23 | name of the file with the discretised reduced bias energies (b_K_i) 24 | kT_file : string (optional) 25 | name of the file with kT values from a multi-temperature simulation 26 | skiprows : int (default=0) 27 | skip the leading lines 28 | maxlength : int (optional) 29 | limit the maximal number of samples to use 30 | verbose : boolean (default=False) 31 | show import progress 32 | 33 | Notes 34 | ----- 35 | I import trajectories from a list of files 36 | 
""" 37 | def __init__( 38 | self, files, b_K_i_file=None, kT_file=None, skiprows=0, maxlength=None, verbose=False): 39 | self.files = files 40 | self.b_K_i_file = b_K_i_file 41 | self.kT_file = kT_file 42 | self.maxlength = maxlength 43 | self.skiprows = skiprows 44 | self.verbose = verbose 45 | self._trajs = None 46 | self._b_K_i = None 47 | self._kT_K = None 48 | 49 | ############################################################################ 50 | # 51 | # trajs getter 52 | # 53 | ############################################################################ 54 | 55 | @property 56 | def trajs(self): 57 | if self._trajs is None: 58 | self._trajs = [] 59 | for f in self.files: 60 | if self.verbose: 61 | print "# Reading file <%s>" % f 62 | try: 63 | content = np.loadtxt(f, dtype=np.float64, skiprows=self.skiprows) 64 | except IOError: 65 | print "# ... cannot read file <%s> (ignored)" % f 66 | continue 67 | length = content.shape[0] 68 | if self.verbose: 69 | print "# ... length=%d" % length 70 | if (self.maxlength is not None) and (self.maxlength < length): 71 | length = self.maxlength 72 | if self.verbose: 73 | print "# ... truncating to length=%d" % self.maxlength 74 | m = content[:length, 0].astype(np.intc, copy=True) 75 | t = content[:length, 1].astype(np.intc, copy=True) 76 | b = None 77 | if content.shape[1] > 2: 78 | b = np.copy(content[:length, 2:]) 79 | if self.verbose: 80 | if None == b: 81 | print "# ... no energy data" 82 | else: 83 | print "# ... 
%d energy column(s)" % (content.shape[1] - 2) 84 | self._trajs.append({'m': m, 't': t, 'b': b}) 85 | return self._trajs 86 | 87 | ############################################################################ 88 | # 89 | # b_K_i getter 90 | # 91 | ############################################################################ 92 | 93 | @property 94 | def b_K_i(self): 95 | if self._b_K_i is None: 96 | if self.b_K_i_file is None: 97 | return None 98 | if self.verbose: 99 | print "# Reading b_K_i_file <%s>" % self.b_K_i_file 100 | try: 101 | self._b_K_i = np.loadtxt(self.b_K_i_file, dtype=np.float64).transpose().copy() 102 | if self.verbose: 103 | print "# ... found %d markov and %d thermodynamic states" \ 104 | % (self._b_K_i.shape[1], self._b_K_i.shape[0]) 105 | except IOError: 106 | print "# ... cannot read file <%s>" % self.b_K_i_file 107 | return self._b_K_i 108 | 109 | ############################################################################ 110 | # 111 | # kT getter 112 | # 113 | ############################################################################ 114 | 115 | @property 116 | def kT_K(self): 117 | if self._kT_K is None: 118 | if self.kT_file is None: 119 | return None 120 | if self.verbose: 121 | print "# Reading kT_file <%s>" % self.kT_file 122 | try: 123 | self._kT_K = np.loadtxt(self.kT_file, dtype=np.float64) 124 | if self._kT_K.ndim > 1: 125 | if self.verbose: 126 | print "# ... found %d columns - restricting to first column" \ 127 | % self._kT_K.shape[1] 128 | self._kT_K = self._kT_K[:, 0].copy() 129 | if self.verbose: 130 | print "# ... found %d thermodynamic states" % self._kT_K.shape[0] 131 | except IOError: 132 | print "# ... 
cannot read file <%s>" % self.kT_file 133 | return self._kT_K 134 | 135 | 136 | -------------------------------------------------------------------------------- /pytram/estimator.py: -------------------------------------------------------------------------------- 1 | r""" 2 | 3 | ============================== 4 | Basic estimator class for TRAM 5 | ============================== 6 | 7 | .. moduleauthor:: Christoph Wehmeyer 8 | """ 9 | 10 | import numpy as np 11 | 12 | 13 | 14 | #################################################################################################### 15 | # 16 | # BASIC TRAM ESTIMATOR CLASS 17 | # 18 | #################################################################################################### 19 | 20 | class Estimator(object): 21 | 22 | r""" 23 | Estimator is the parent class for all estimators 24 | 25 | Parameters 26 | ---------- 27 | C_K_ij : numpy.ndarray( shape=(T,M,M), dtype=numpy.intc ) 28 | transition counts between the M discrete Markov states 29 | for each of the T thermodynamic ensembles 30 | """ 31 | def __init__(self, C_K_ij): 32 | # this check raises an exception if C_K_ij is not usable 33 | if self._check_C_K_ij(C_K_ij): 34 | self._C_K_ij = C_K_ij 35 | # if we reach this point, C_K_ij is save 36 | self._n_therm_states = C_K_ij.shape[0] 37 | self._n_markov_states = C_K_ij.shape[1] 38 | self.citation = [] 39 | 40 | def cite(self, pre=""): 41 | r""" 42 | Parameters 43 | ---------- 44 | pre : string (default="") 45 | prepend string for printing citation string 46 | """ 47 | for line in self.citation: 48 | print "%s%s" % (pre, line) 49 | 50 | ############################################################################ 51 | # 52 | # C_K_ij sanity checks and getter 53 | # 54 | ############################################################################ 55 | 56 | def _check_C_K_ij( self, C_K_ij ): 57 | if C_K_ij is None: 58 | raise ExpressionError("C_K_ij", "is None") 59 | if not isinstance(C_K_ij, (np.ndarray,)): 60 | 
raise ExpressionError("C_K_ij", "invalid type (%s)" % str(type(C_K_ij))) 61 | if 3 != C_K_ij.ndim: 62 | raise ExpressionError("C_K_ij", "invalid number of dimensions (%d)" % C_K_ij.ndim) 63 | if C_K_ij.shape[1] != C_K_ij.shape[2]: 64 | raise ExpressionError( 65 | "C_K_ij", 66 | "unmatching number of markov states (%d,%d)" % (C_K_ij.shape[1], C_K_ij.shape[2])) 67 | if np.intc != C_K_ij.dtype: 68 | raise ExpressionError("C_K_ij", "invalid dtype (%s)" % str(C_K_ij.dtype)) 69 | if not np.all(C_K_ij.sum(axis=(0, 2)) > 0): 70 | raise ExpressionError("C_K_ij", "contains unvisited states") 71 | # TODO: strong connectivity check? 72 | return True 73 | 74 | @property 75 | def C_K_ij(self): 76 | return self._C_K_ij 77 | 78 | ############################################################################ 79 | # 80 | # compute the TRAM log likelihood 81 | # 82 | ############################################################################ 83 | 84 | def estimate_log_L_TRAM(self, C_K_ij, p_K_ij): 85 | nonzero = C_K_ij.nonzero() 86 | return np.sum(C_K_ij[nonzero] * np.log(p_K_ij[nonzero])) 87 | 88 | ############################################################################ 89 | # 90 | # getters for stationary properties 91 | # 92 | ############################################################################ 93 | 94 | @property 95 | def n_therm_states(self): 96 | return self._n_therm_states 97 | 98 | @property 99 | def n_markov_states(self): 100 | return self._n_markov_states 101 | 102 | @property 103 | def pi_i(self): 104 | return np.exp(-self.f_i) 105 | 106 | @property 107 | def pi_K_i(self): 108 | return np.exp(-self.f_K_i) 109 | 110 | @property 111 | def f_K(self): 112 | raise NotImplementedError("Override in derived class!") 113 | 114 | @property 115 | def f_K_i(self): 116 | return -np.log(self.pi_K_i) 117 | 118 | @property 119 | def f_i( self ): 120 | return -np.log(self.pi_i) 121 | 122 | 123 | 124 | 
#################################################################################################### 125 | # 126 | # ERROR CLASS FOR MALFORMED ESTIMATOR ARGUMENTS 127 | # 128 | #################################################################################################### 129 | 130 | class ExpressionError(Exception): 131 | r""" 132 | Exception class for malformed expressions in the input 133 | """ 134 | def __init__(self, expression, msg): 135 | self.expression = expression 136 | self.msg = msg 137 | def __str__(self): 138 | return "[%s] %s" % (self.expression, self.msg) 139 | 140 | 141 | 142 | #################################################################################################### 143 | # 144 | # WARNING CLASS FOR PREMATURELY TERMINATED SCF ITERATIONS 145 | # 146 | #################################################################################################### 147 | 148 | class NotConvergedWarning(Exception): 149 | r""" 150 | Exception class for non-convergence of estimators 151 | """ 152 | def __init__(self, estimator, increment): 153 | self.estimator = estimator 154 | self.increment = increment 155 | def __str__(self): 156 | return "[%s] only reached increment %.3e" % (self.estimator, self.increment) 157 | -------------------------------------------------------------------------------- /ext/dtram/dtram.pyx: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # 3 | # dtram.pyx - dTRAM implementation in C (cython wrapper) 4 | # 5 | # author: Christoph Wehmeyer 6 | # 7 | ################################################################################ 8 | 9 | import numpy as np 10 | cimport numpy as np 11 | 12 | cdef extern from "_dtram.h": 13 | void _log_nu_K_i_setter( 14 | double *log_nu_K_i, 15 | int *C_K_ij, 16 | int n_therm_states, 17 | int n_markov_states 18 | ) 19 | void _log_nu_K_i_equation( 20 | double *log_nu_K_i, 21 | 
double *b_K_i, 22 | double *f_i, 23 | int *C_K_ij, 24 | int n_therm_states, 25 | int n_markov_states, 26 | double *scratch_j, 27 | double *new_log_nu_K_i 28 | ) 29 | void _f_i_equation( 30 | double *log_nu_K_i, 31 | double *b_K_i, 32 | double *f_i, 33 | int *C_K_ij, 34 | int n_therm_states, 35 | int n_markov_states, 36 | double *scratch_K_j, 37 | double *scratch_j, 38 | double *new_f_i 39 | ) 40 | void _p_K_ij_equation( 41 | double *log_nu_K_i, 42 | double *b_K_i, 43 | double *f_i, 44 | int *C_K_ij, 45 | int n_therm_states, 46 | int n_markov_states, 47 | double *scratch_j, 48 | double *p_K_ij 49 | ) 50 | 51 | void _f_K_equation( 52 | double *b_K_i, 53 | double *f_i, 54 | int n_therm_states, 55 | int n_markov_states, 56 | double *scratch_j, 57 | double *f_K 58 | ) 59 | 60 | def log_nu_K_i_setter( 61 | np.ndarray[double, ndim=2, mode="c"] log_nu_K_i not None, 62 | np.ndarray[int, ndim=3, mode="c"] C_K_ij not None 63 | ): 64 | _log_nu_K_i_setter( 65 | np.PyArray_DATA( log_nu_K_i ), 66 | np.PyArray_DATA( C_K_ij ), 67 | log_nu_K_i.shape[0], 68 | log_nu_K_i.shape[1] 69 | ) 70 | 71 | def log_nu_K_i_equation( 72 | np.ndarray[double, ndim=2, mode="c"] log_nu_K_i not None, 73 | np.ndarray[double, ndim=2, mode="c"] b_K_i not None, 74 | np.ndarray[double, ndim=1, mode="c"] f_i not None, 75 | np.ndarray[int, ndim=3, mode="c"] C_K_ij not None, 76 | np.ndarray[double, ndim=1, mode="c"] scratch_j not None, 77 | np.ndarray[double, ndim=2, mode="c"] new_log_nu_K_i not None 78 | ): 79 | _log_nu_K_i_equation( 80 | np.PyArray_DATA( log_nu_K_i ), 81 | np.PyArray_DATA( b_K_i ), 82 | np.PyArray_DATA( f_i ), 83 | np.PyArray_DATA( C_K_ij ), 84 | log_nu_K_i.shape[0], 85 | log_nu_K_i.shape[1], 86 | np.PyArray_DATA( scratch_j ), 87 | np.PyArray_DATA( new_log_nu_K_i ) 88 | ) 89 | 90 | def f_i_equation( 91 | np.ndarray[double, ndim=2, mode="c"] log_nu_K_i not None, 92 | np.ndarray[double, ndim=2, mode="c"] b_K_i not None, 93 | np.ndarray[double, ndim=1, mode="c"] f_i not None, 94 | 
np.ndarray[int, ndim=3, mode="c"] C_K_ij not None, 95 | np.ndarray[double, ndim=2, mode="c"] scratch_K_j not None, 96 | np.ndarray[double, ndim=1, mode="c"] scratch_j not None, 97 | np.ndarray[double, ndim=1, mode="c"] new_f_i not None 98 | ): 99 | _f_i_equation( 100 | np.PyArray_DATA( log_nu_K_i ), 101 | np.PyArray_DATA( b_K_i ), 102 | np.PyArray_DATA( f_i ), 103 | np.PyArray_DATA( C_K_ij ), 104 | log_nu_K_i.shape[0], 105 | log_nu_K_i.shape[1], 106 | np.PyArray_DATA( scratch_K_j ), 107 | np.PyArray_DATA( scratch_j ), 108 | np.PyArray_DATA( new_f_i ) 109 | ) 110 | 111 | def p_K_ij_equation( 112 | np.ndarray[double, ndim=2, mode="c"] log_nu_K_i not None, 113 | np.ndarray[double, ndim=2, mode="c"] b_K_i not None, 114 | np.ndarray[double, ndim=1, mode="c"] f_i not None, 115 | np.ndarray[int, ndim=3, mode="c"] C_K_ij not None, 116 | np.ndarray[double, ndim=1, mode="c"] scratch_j not None, 117 | np.ndarray[double, ndim=3, mode="c"] p_K_ij not None 118 | ): 119 | _p_K_ij_equation( 120 | np.PyArray_DATA( log_nu_K_i ), 121 | np.PyArray_DATA( b_K_i ), 122 | np.PyArray_DATA( f_i ), 123 | np.PyArray_DATA( C_K_ij ), 124 | log_nu_K_i.shape[0], 125 | log_nu_K_i.shape[1], 126 | np.PyArray_DATA( scratch_j ), 127 | np.PyArray_DATA( p_K_ij ) 128 | ) 129 | 130 | def f_K_equation( 131 | np.ndarray[double, ndim=2, mode="c"] b_K_i not None, 132 | np.ndarray[double, ndim=1, mode="c"] f_i not None, 133 | np.ndarray[double, ndim=1, mode="c"] scratch_j not None, 134 | np.ndarray[double, ndim=1, mode="c"] f_K not None 135 | ): 136 | _f_K_equation( 137 | np.PyArray_DATA( b_K_i ), 138 | np.PyArray_DATA( f_i ), 139 | b_K_i.shape[0], 140 | b_K_i.shape[1], 141 | np.PyArray_DATA( scratch_j ), 142 | np.PyArray_DATA( f_K ) 143 | ) 144 | -------------------------------------------------------------------------------- /doc/user.rst: -------------------------------------------------------------------------------- 1 | .. _ref_user: 2 | 3 | ========== 4 | User guide 5 | ========== 6 | 7 | .. 
toctree:: 8 | :maxdepth: 2 9 | 10 | 11 | Getting started 12 | =============== 13 | 14 | The pytram package comes with two examples for dTRAM and xTRAM in the form of ipython notebooks. In order to run run them, go the root directory of the pytram repositroy and type 15 | 16 | .. code-block:: bash 17 | 18 | ipython notebook dTRAM_example.ipynb 19 | 20 | in your shell for dTRAM or 21 | 22 | .. code-block:: bash 23 | 24 | ipython notebook xTRAM_example.ipynb 25 | 26 | for xTRAM, respectively. These examples will illustrate the basic usage of the API functions and the necessary data preparation. 27 | 28 | Another way is a file-based approach which allows you to store simulation results for later analysis. To this aim, we have specified the required file format in the next section and implemented a reader function to import such data into pytram. Using file-based input data allows you to run pytram 29 | 30 | * from the API using a ``Reader`` and the ``TRAMData`` converter or 31 | * using run scripts directly from the shell. 32 | 33 | The latter option requires the least effort, however, also offers only limited flexibility. 34 | 35 | 36 | File format 37 | =========== 38 | 39 | The standard file format assumes text files with the following layout. :: 40 | 41 | # This is a comment line, you can several of those. 42 | # The next lines indicates the meaning of the columns, 43 | # [M] denotes Markov state indices (starting from zero), 44 | # [T] denotes thermodynamic state indices (starting from zero), 45 | # and [b_K] denotes the reduced bias energies b_K/kT 46 | # [M] [T] [b_0] [b_1] ... 47 | 0 0 3.1 18.2 48 | 1 0 3.2 18.3 49 | 2 0 4.8 19.9 50 | 3 0 7.4 22.5 51 | . . . . 52 | . . . . 53 | . . . . 54 | 55 | The minimal layout only requires the ``[M]`` and ``[T]`` columns and can only be used for dTRAM. These two columns contain the sequences of the Markov and generating thermodynamic states. 
For example, the entry ``3 0`` denotes that the actual sample corresponds to the Markov state ``3`` and was generated at thermodynamic state ``0``. 56 | 57 | **Important note**: in order to run dTRAM successfully, you need an additional ``b_K_i`` file as explained in the dTRAM section. 58 | 59 | The other TRAM estimators require at least one energy column. For this, we distinguish two different cases: 60 | temperature as the only thermodynamic variable, and all other thermodynamic conditions, i.e., different Hamiltonians, umbrella potentials, ... 61 | 62 | 63 | Temperature as only thermodynamic variable 64 | ------------------------------------------ 65 | 66 | In this case, you need the ``[M]`` and ``[T]`` columns, and one energy column ``[b_K]``; this column contains the reduced energy sequence. The energy is reduced according to the generating thermodynamic state. For example, the entry ``2 5 20.5`` denotes that the actual sample corresponds to the Markov state ``2``, was generated at temperature ``kT_5``, and the corresponding energy was reduced with ``kT_5``. 67 | 68 | **Important note**: for temperature-dependent simulations, you need an additional single column ``kT`` file wich indicates all generating temperatures times the Boltzmann constant (consistent with your energy units). Note that the order of ``kT`` values must be constistent with the numbering of the thermodynamic states. 69 | 70 | 71 | Hamiltonian replica exchange, umbrella sampling, etc 72 | ---------------------------------------------------- 73 | 74 | This is the most general application. Here, each sample must be evaluated at all thermodynamic states which means that you need as many energy columns as you have thermodynamic states. 
For example, the line ``2 1 3.0 2.9 1.0 0.3`` indicates that the actual sample corresponds to the Markov state ``2``, has been generated at thermodynamic state ``1``, the reduced energy is 75 | 76 | * ``3.0 kT`` at thermodynamic state ``0``, 77 | * ``2.9 kT`` at thermodynamic state ``1``, 78 | * ``1.0 kT`` at thermodynamic state ``2``, and 79 | * ``0.3 kT`` at thermodynamic state ``3``. 80 | 81 | This example also requires you to have exactly four thermodynamic states. 82 | 83 | 84 | Running dTRAM 85 | ============= 86 | 87 | from files 88 | ---------- 89 | 90 | Assume that we have two files ``file_1.dat`` and ``file_2.dat`` with simulation data. In addition to that, the dTRAM method requires the user to specify the reduced bias energies of all Markov states in each of the thermodynamic states. The corresponding file format is given by :: 91 | 92 | # we store the reduced bias energies b_K(x)/kT 93 | # at the discrete states x_i 94 | # [b_0] [b_1] ... 95 | 0.0 4.0 96 | 0.0 0.0 97 | 0.0 8.0 98 | 99 | In this example, we have three Markov states which are evaluated at two different thermodynamic states. 100 | 101 | Using the API, we can run dTRAM via the following code: 102 | 103 | .. 
code-block:: python 104 | 105 | # import the Reader, TRAMData and the dtram API function 106 | from pytram import Reader, TRAMData, dtram 107 | 108 | # specify your input data files 109 | files = [ 110 | 'path/to/file_1.dat', 111 | 'path/to/file_2.dat' 112 | ] 113 | b_K_i_file = 'path/to/b_K_i_file.dat' 114 | 115 | # import the files using the Reader 116 | reader = Reader( files, b_K_i_file=b_K_i_file, verbose=True ) 117 | 118 | # convert the input data using TRAMData 119 | data = TRAMData( reader.trajs, b_K_i=reader.b_K_i ) 120 | 121 | # run dTRAM using the API function 122 | dtram_obj = dtram( data, maxiter=1000, ftol=1.0E-10, verbose=True ) 123 | 124 | # show unbiased stationary distribution 125 | print dtram_obj.pi_i 126 | 127 | # get transition matrix for thermodynamic state 0 128 | T_0 = dtram_obj.estimate_transtition_matrix( 0 ) 129 | print T_0 130 | 131 | # show thermodynamic free energies 132 | print dtram_obj.f_K 133 | 134 | Optionally, we can use the dTRAM run script from shell. Simply type 135 | 136 | .. code-block:: bash 137 | 138 | dtram.py --b_K_i_file=path/to/b_K_i_file.dat --verbose path/to/file_1.dat path/to/file_2.dat 139 | 140 | and enjoy the show. Note that the run script will not work if the pytram package has not been properly installed! 141 | 142 | You can run 143 | 144 | .. code-block:: bash 145 | 146 | dtram.py --help 147 | 148 | for additional information on available parameters. 149 | 150 | 151 | from seqential data 152 | ------------------- 153 | 154 | The data preparation and the API usage is shown in the ipython example. 155 | 156 | 157 | 158 | 159 | 160 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 
5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo 
files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | 49 | clean: 50 | rm -rf $(BUILDDIR)/* 51 | 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | dirhtml: 58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 59 | @echo 60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 61 | 62 | singlehtml: 63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 64 | @echo 65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 66 | 67 | pickle: 68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 69 | @echo 70 | @echo "Build finished; now you can process the pickle files." 71 | 72 | json: 73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 74 | @echo 75 | @echo "Build finished; now you can process the JSON files." 76 | 77 | htmlhelp: 78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 79 | @echo 80 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 81 | ".hhp project file in $(BUILDDIR)/htmlhelp." 
82 | 83 | qthelp: 84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 85 | @echo 86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pytram.qhcp" 89 | @echo "To view the help file:" 90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pytram.qhc" 91 | 92 | devhelp: 93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 94 | @echo 95 | @echo "Build finished." 96 | @echo "To view the help file:" 97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/pytram" 98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pytram" 99 | @echo "# devhelp" 100 | 101 | epub: 102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 103 | @echo 104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 105 | 106 | latex: 107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 108 | @echo 109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 111 | "(use \`make latexpdf' here to do that automatically)." 112 | 113 | latexpdf: 114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 115 | @echo "Running LaTeX files through pdflatex..." 116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 118 | 119 | latexpdfja: 120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 121 | @echo "Running LaTeX files through platex and dvipdfmx..." 122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 124 | 125 | text: 126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 127 | @echo 128 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 
129 | 130 | man: 131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 132 | @echo 133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 134 | 135 | texinfo: 136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 137 | @echo 138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 139 | @echo "Run \`make' in that directory to run these through makeinfo" \ 140 | "(use \`make info' here to do that automatically)." 141 | 142 | info: 143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 144 | @echo "Running Texinfo files through makeinfo..." 145 | make -C $(BUILDDIR)/texinfo info 146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 147 | 148 | gettext: 149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 150 | @echo 151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 152 | 153 | changes: 154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 155 | @echo 156 | @echo "The overview file is in $(BUILDDIR)/changes." 157 | 158 | linkcheck: 159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 160 | @echo 161 | @echo "Link check complete; look for any errors in the above output " \ 162 | "or in $(BUILDDIR)/linkcheck/output.txt." 163 | 164 | doctest: 165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 166 | @echo "Testing of doctests in the sources finished, look at the " \ 167 | "results in $(BUILDDIR)/doctest/output.txt." 168 | 169 | xml: 170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 171 | @echo 172 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 173 | 174 | pseudoxml: 175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 176 | @echo 177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
178 | -------------------------------------------------------------------------------- /bin/dtram.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | #################################################################################################### 4 | # # 5 | # RUN SCRIPT FOR THE DTRAM METHOD WITHIN THE PYTRAM package # 6 | # # 7 | # author: Christoph Wehmeyer # 8 | # # 9 | #################################################################################################### 10 | 11 | 12 | 13 | #################################################################################################### 14 | # 15 | # IMPORTS 16 | # 17 | #################################################################################################### 18 | 19 | from pytram import Reader, TRAMData, DTRAM, ExpressionError, NotConvergedWarning 20 | from argparse import ArgumentParser, FileType 21 | from sys import exit 22 | import numpy as np 23 | 24 | 25 | 26 | #################################################################################################### 27 | # 28 | # MAIN PART 29 | # 30 | #################################################################################################### 31 | 32 | if '__main__' == __name__: 33 | 34 | ############################################################################ 35 | # 36 | # capture the command line arguments 37 | # 38 | ############################################################################ 39 | parser = ArgumentParser() 40 | parser.add_argument( 41 | 'files', 42 | help='pytram compatible files for evaluation', 43 | nargs='*', 44 | metavar='FILE' 45 | ) 46 | parser.add_argument( 47 | "--b_K_i_file", 48 | help="specify a pytram compatible file with discretised bias energies", 49 | metavar="FILE" 50 | ) 51 | parser.add_argument( 52 | "--lag", 53 | help="specify a lag time for evaluation", 54 | type=int, 55 | default=1, 56 | metavar='INT' 57 | ) 58 | 
parser.add_argument( 59 | "--maxlength", 60 | help="limit the number of trajectory frames", 61 | type=int, 62 | default=None, 63 | metavar='INT' 64 | ) 65 | parser.add_argument( 66 | "--skiprows", 67 | help="Number of initial frames skipped", 68 | type=int, 69 | default=0, 70 | metavar='INT' 71 | ) 72 | parser.add_argument( 73 | "--maxiter", 74 | help="limit the number of fixed point iterations", 75 | type=int, 76 | default=1000, 77 | metavar='INT' 78 | ) 79 | parser.add_argument( 80 | "--ftol", 81 | help="limit the requested convergence level", 82 | type=float, 83 | default=1.0E-10, 84 | metavar='FLOAT' 85 | ) 86 | parser.add_argument( 87 | "--verbose", 88 | help="show the progress during the self-consistent-iteration", 89 | action='store_true' 90 | ) 91 | args = parser.parse_args() 92 | 93 | 94 | 95 | ############################################################################ 96 | # 97 | # check mandatory command line arguments 98 | # 99 | ############################################################################ 100 | if args.b_K_i_file is None: 101 | print "ERROR: you must set the --b_K_i_file option!" 102 | exit( 1 ) 103 | if 1 > len( args.files ): 104 | print "ERROR: you must give at least one pytram compatible trajectory file!" 
105 | exit( 1 ) 106 | 107 | 108 | ############################################################################ 109 | # 110 | # write header 111 | # 112 | ############################################################################ 113 | print "\n\n###################################### PYTRAM ######################################" 114 | print "#\n# Invoking the dTRAM estimator" 115 | print "#\n### PARAMETERS\n#" 116 | print "# %25s %24d" % ( "[--lag]", args.lag ) 117 | print "# %25s %24d" % ( "[--maxiter]", args.maxiter ) 118 | print "# %25s %24.5e" % ( "[--ftol]", args.ftol ) 119 | 120 | 121 | 122 | ############################################################################ 123 | # 124 | # import the data 125 | # 126 | ############################################################################ 127 | print "#\n################################## IMPORTING DATA ##################################\n#" 128 | reader = Reader( 129 | args.files, 130 | b_K_i_file=args.b_K_i_file, 131 | maxlength=args.maxlength, 132 | skiprows=args.skiprows, 133 | verbose=True 134 | ) 135 | tramdata = TRAMData( reader.trajs, b_K_i=reader.b_K_i ) 136 | try: 137 | dtram_obj = DTRAM( tramdata.get_C_K_ij( args.lag ), tramdata.b_K_i ) 138 | except ExpressionError, e: 139 | print "#\n### ERROR\n#" 140 | print "# Your input was faulty!" 
141 | print "# The < %s > object is malformed: %s" % ( e.expression, e.msg ) 142 | print "#\n### ABORTING\n\n" 143 | exit( 1 ) 144 | print "#\n### SYSTEM INFORMATION\n#" 145 | print "# %25s %24d" % ( "[markov states]", tramdata.n_markov_states ) 146 | print "# %25s %24d" % ( "[thermodynamic states]", tramdata.n_therm_states ) 147 | 148 | 149 | 150 | ############################################################################ 151 | # 152 | # run the self-consistent-iteration 153 | # 154 | ############################################################################ 155 | print "#\n#################################### RUN DTRAM #####################################\n#" 156 | try: 157 | print "# Run self-consistent-iteration" 158 | dtram_obj.sc_iteration( maxiter=args.maxiter, ftol=args.ftol, verbose=args.verbose ) 159 | print "# ... converged!" 160 | except NotConvergedWarning, e: 161 | print "#\n### WARNING\n#\n# dTRAM is not converged - use these results carefuly!" 162 | print "#\n### RECOMMENDATION\n#\n# Run dtram.py again and increase --maxiter" 163 | 164 | 165 | 166 | ############################################################################ 167 | # 168 | # print out the results 169 | # 170 | ############################################################################ 171 | print "#\n##################################### RESULTS ######################################" 172 | print "#\n### UNBIASED STATIONARY VECTOR\n#" 173 | print "# %25s %25s" % ( "[markov state]", "[stationary probability]" ) 174 | for i in xrange( dtram_obj.pi_i.shape[0] ): 175 | print " %25d %25.12e" % ( i, dtram_obj.pi_i[i] ) 176 | print "#\n### UNBIASED FREE ENERGY\n#" 177 | print "# %25s %25s" % ( "[markov state]", "[reduced free energy]" ) 178 | for i in xrange( dtram_obj.f_i.shape[0] ): 179 | print " %25d %25.12e" % ( i, dtram_obj.f_i[i] ) 180 | print "#\n### THERMODYNAMIC FREE ENERGY\n#" 181 | print "# %25s %25s" % ( "[thermodynamic state]", "[reduced free energy]" ) 182 | for i in 
xrange( dtram_obj.f_K.shape[0] ): 183 | print " %25d %25.12e" % ( i, dtram_obj.f_K[i] ) 184 | 185 | 186 | 187 | ############################################################################ 188 | # 189 | # say good bye 190 | # 191 | ############################################################################ 192 | print "#\n#################################### THAT'S IT #####################################\n#" 193 | print "#\n# Thank you for using the pytram package!\n#\n#" 194 | print "### CITATION\n#" 195 | dtram_obj.cite( pre="# " ) 196 | print "#\n####################################################################################\n\n" 197 | 198 | 199 | 200 | 201 | 202 | 203 | 204 | 205 | 206 | 207 | 208 | -------------------------------------------------------------------------------- /pytram/_dtram/dtram.py: -------------------------------------------------------------------------------- 1 | r""" 2 | 3 | ====================== 4 | dTRAM estimator module 5 | ====================== 6 | 7 | .. 
moduleauthor:: Christoph Wehmeyer 8 | 9 | """ 10 | 11 | import numpy as np 12 | from ..estimator import Estimator, NotConvergedWarning, ExpressionError 13 | from .ext import log_nu_K_i_setter, log_nu_K_i_equation, f_i_equation, p_K_ij_equation, f_K_equation 14 | import warnings 15 | 16 | 17 | 18 | #################################################################################################### 19 | # 20 | # DTRAM ESTIMATOR CLASS 21 | # 22 | #################################################################################################### 23 | 24 | class DTRAM(Estimator): 25 | r""" 26 | This is the DTRAM class 27 | 28 | Parameters 29 | ---------- 30 | C_K_ij : numpy.ndarray( shape=(T,M,M), dtype=numpy.intc ) 31 | transition counts between the M discrete Markov states for each of the T 32 | thermodynamic ensembles 33 | b_K_i : numpy.ndarray( shape=(T,M), dtype=numpy.float64 ) 34 | bias energies in the T thermodynamic and M discrete Markov states 35 | """ 36 | 37 | def __init__(self, C_K_ij, b_K_i): 38 | warnings.warn("This Feature is deprecated and will be removed in the future; we recommend to use pyemma.thermo.DTRAM") 39 | super(DTRAM, self).__init__(C_K_ij) 40 | # this check raises an exception if b_K_i is not usable 41 | if self._check_b_K_i(b_K_i): 42 | self._b_K_i = b_K_i 43 | # hard-coded initial guess for pi_i and nu_K_i 44 | self._f_i = np.zeros(shape=(self.n_markov_states,), dtype=np.float64) 45 | self._log_nu_K_i = np.zeros( 46 | shape=(self.n_therm_states, self.n_markov_states), dtype=np.float64) 47 | log_nu_K_i_setter(self._log_nu_K_i, self.C_K_ij) 48 | # citation information 49 | self.citation = [ 50 | "Statistically optimal analysis of state-discretized trajectory data", 51 | "from multiple thermodynamic states;", 52 | "Hao Wu, Antonia S.J.S. Mey, Edina Rosta, and Frank Noe", 53 | "J. Chem. Phys. 
141, 214106 (2014)"] 54 | 55 | ############################################################################ 56 | # 57 | # override getters for stationary properties 58 | # 59 | ############################################################################ 60 | 61 | @property 62 | def f_i(self): 63 | return self._f_i 64 | 65 | @property 66 | def f_K_i(self): 67 | return -self.f_K[:, np.newaxis] + self._b_K_i + self._f_i[np.newaxis, :] 68 | 69 | @property 70 | def f_K(self): 71 | _f_K = np.zeros(shape=(self.n_therm_states,), dtype=np.float64) 72 | scratch_j = np.zeros(shape=(self.n_markov_states,), dtype=np.float64) 73 | f_K_equation(self._b_K_i, self._f_i, scratch_j, _f_K) 74 | return -_f_K 75 | 76 | ############################################################################ 77 | # 78 | # getters for dTRAM-specific properties 79 | # 80 | ############################################################################ 81 | 82 | @property 83 | def b_K_i(self): 84 | return self._b_K_i 85 | 86 | @property 87 | def gamma_K_i(self): 88 | return np.exp(-self.b_K_i) 89 | 90 | @property 91 | def nu_K_i(self): 92 | return np.exp(self._log_nu_K_i) 93 | 94 | ############################################################################ 95 | # 96 | # self-consistent-iteration to converge pi_i 97 | # 98 | ############################################################################ 99 | 100 | def sc_iteration(self, maxiter=100, ftol=1.0E-5, verbose=False): 101 | r""" 102 | Run the self-consistent-iteration cycle to optimise the unbiased stationary 103 | probabilities (and Langrange multipliers) 104 | 105 | Parameters 106 | ---------- 107 | maxiter : int (default=100) 108 | maximum number of self-consistent-iteration steps 109 | ftol : float (default=1.0E-5) 110 | convergence criterion based on the max relative change in an 111 | self-consistent-iteration step 112 | verbose : boolean (default=False) 113 | writes convergence information to stdout during the self-consistent-iteration cycle 
114 | """ 115 | scratch_K_j = np.zeros(shape=(self.n_therm_states, self.n_markov_states), dtype=np.float64) 116 | scratch_j = np.zeros(shape=(self.n_markov_states,), dtype=np.float64) 117 | if verbose: 118 | print "# %25s %25s" % ("[iteration step]", "[increment]") 119 | # start the iteration loop 120 | for i in xrange(maxiter): 121 | # iterate log_nu_K_i 122 | tmp_log_nu_K_i = np.copy(self._log_nu_K_i) 123 | log_nu_K_i_equation( 124 | tmp_log_nu_K_i, self._b_K_i, self._f_i, self.C_K_ij, scratch_j, self._log_nu_K_i) 125 | # iterate f_i 126 | tmp_f_i = np.copy(self._f_i) 127 | f_i_equation( 128 | self._log_nu_K_i, 129 | self._b_K_i, 130 | tmp_f_i, 131 | self.C_K_ij, 132 | scratch_K_j, 133 | scratch_j, 134 | self._f_i) 135 | # compute the absolute change of f_i 136 | finc = np.max(np.abs(tmp_f_i - self._f_i)) 137 | # write out progress if requested 138 | if verbose: 139 | print " %25d %25.12e" % (i+1, finc) 140 | # break loop if we're converged 141 | if finc < ftol: 142 | break 143 | # complain if we're not yet converged 144 | if finc > ftol: 145 | raise NotConvergedWarning("DTRAM", finc) 146 | 147 | ############################################################################ 148 | # 149 | # transition matrix estimation 150 | # 151 | ############################################################################ 152 | 153 | def estimate_transition_matrices(self): 154 | r""" 155 | Estimate the transition matrices for all thermodynamic states 156 | 157 | Returns 158 | ------- 159 | p_K_ij : numpy.ndarray( shape=(T,M,M), dtype=numpy.float64 ) 160 | the transition matrices for all thermodynamic states 161 | """ 162 | p_K_ij = np.zeros(shape=self.C_K_ij.shape, dtype=np.float64) 163 | scratch_j = np.zeros(shape=(self.n_markov_states,), dtype=np.float64) 164 | p_K_ij_equation(self._log_nu_K_i, self._b_K_i, self._f_i, self.C_K_ij, scratch_j, p_K_ij) 165 | return p_K_ij 166 | 167 | def estimate_transition_matrix(self, I): 168 | r""" 169 | Estimate the transition matrices for one 
thermodynamic state 170 | 171 | Parameters 172 | ---------- 173 | I : int 174 | target thermodynamic state 175 | 176 | Returns 177 | ------- 178 | p_K_ij[I] : numpy.ndarray( shape=(M,M), dtype=numpy.float64 ) 179 | the transition matrix for the Ith thermodynamic state 180 | """ 181 | return self.estimate_transition_matrices()[I, :, :] 182 | 183 | ############################################################################ 184 | # 185 | # gamma_K_i sanity checks 186 | # 187 | ############################################################################ 188 | 189 | def _check_b_K_i(self, b_K_i): 190 | if b_K_i is None: 191 | raise ExpressionError("b_K_i", "is None") 192 | if not isinstance(b_K_i, (np.ndarray,)): 193 | raise ExpressionError("b_K_i", "invalid type (%s)" % str(type(b_K_i))) 194 | if 2 != b_K_i.ndim: 195 | raise ExpressionError("b_K_i", "invalid number of dimensions (%d)" % b_K_i.ndim) 196 | if b_K_i.shape[0] != self.n_therm_states: 197 | raise ExpressionError("b_K_i", "not matching number of thermodynamic states (%d,%d)" \ 198 | % (b_K_i.shape[0], self.n_therm_states)) 199 | if b_K_i.shape[1] != self.n_markov_states: 200 | raise ExpressionError("b_K_i", "not matching number of markov states (%d,%d)" \ 201 | % (b_K_i.shape[1], self.n_markov_states)) 202 | if np.float64 != b_K_i.dtype: 203 | raise ExpressionError("b_K_i", "invalid dtype (%s)" % str(b_K_i.dtype)) 204 | return True 205 | 206 | -------------------------------------------------------------------------------- /ext/dtram/_dtram.c: -------------------------------------------------------------------------------- 1 | /* 2 | 3 | _dtram.c - dTRAM implementation in C 4 | 5 | author: Christoph Wehmeyer 6 | 7 | */ 8 | 9 | #include "_dtram.h" 10 | 11 | // old m$ visual studio is not c99 compliant (vs2010 eg. 
is not) 12 | #ifdef _MSC_VER 13 | #include 14 | #include 15 | #define INFINITY (DBL_MAX+DBL_MAX) 16 | #define NAN (INFINITY-INFINITY) 17 | #endif 18 | 19 | 20 | void _log_nu_K_i_setter( 21 | double *log_nu_K_i, 22 | int *C_K_ij, 23 | int n_therm_states, 24 | int n_markov_states 25 | ) 26 | { 27 | int i, j, K; 28 | int MM = n_markov_states * n_markov_states, KMM; 29 | int sum; 30 | for( K=0; K # 8 | # author: Antonia Mey # 9 | # # 10 | #################################################################################################### 11 | 12 | 13 | 14 | #################################################################################################### 15 | # 16 | # IMPORTS 17 | # 18 | #################################################################################################### 19 | 20 | from pytram import Reader, TRAMData, XTRAM, ExpressionError, NotConvergedWarning 21 | from argparse import ArgumentParser, FileType 22 | from sys import exit 23 | import numpy as np 24 | 25 | 26 | 27 | #################################################################################################### 28 | # 29 | # MAIN PART 30 | # 31 | #################################################################################################### 32 | 33 | if '__main__' == __name__: 34 | 35 | ############################################################################ 36 | # 37 | # capture the command line arguments 38 | # 39 | ############################################################################ 40 | parser = ArgumentParser() 41 | parser.add_argument( 42 | 'files', 43 | help='pytram compatible files for evaluation (trajectory files)', 44 | nargs='*', 45 | metavar='FILE' 46 | ) 47 | parser.add_argument( 48 | "--kT_file", 49 | help="specify a pytram compatible file containing kT information", 50 | metavar="FILE" 51 | ) 52 | parser.add_argument( 53 | "--lag", 54 | help="specify a lag time for evaluation", 55 | type=int, 56 | default=1, 57 | metavar='INT' 58 | ) 59 | 
# remaining optional command-line arguments ---------------------------------
parser.add_argument(
    "--maxlength", metavar="INT", type=int, default=None,
    help="limit the number of trajectory frames")
parser.add_argument(
    "--skiprows", metavar="INT", type=int, default=0,
    help="Number of initial frames skipped")
parser.add_argument(
    "--maxiter", metavar="INT", type=int, default=1000,
    help="limit the number of fixed point iterations")
parser.add_argument(
    "--ftol", metavar="FLOAT", type=float, default=1.0E-10,
    help="limit the requested convergence level")
parser.add_argument(
    "--kT_target", metavar="INT", type=int, default=0,
    help="The kT value for which the free energy and probabilities should be calculated")
parser.add_argument(
    "--verbose", action='store_true',
    help="show the progress during the self-consistent-iteration")
args = parser.parse_args()



############################################################################
#
# check mandatory command line arguments
#
############################################################################
# at least one trajectory file is mandatory
if len(args.files) < 1:
    print("ERROR: you must give at least one pytram compatible trajectory file!")
110 | exit( 1 ) 111 | 112 | 113 | ############################################################################ 114 | # 115 | # write header 116 | # 117 | ############################################################################ 118 | print "\n\n###################################### PYTRAM ######################################" 119 | print "#\n# Invoking the xTRAM estimator" 120 | print "#\n### PARAMETERS\n#" 121 | print "# %25s %24d" % ( "[--lag]", args.lag ) 122 | print "# %25s %24d" % ( "[--maxiter]", args.maxiter ) 123 | print "# %25s %24.5e" % ( "[--ftol]", args.ftol ) 124 | 125 | 126 | 127 | ############################################################################ 128 | # 129 | # import the data 130 | # 131 | ############################################################################ 132 | print "#\n################################## IMPORTING DATA ##################################\n#" 133 | reader = Reader( 134 | args.files, 135 | kT_file=args.kT_file, 136 | maxlength=args.maxlength, 137 | skiprows=args.skiprows, 138 | verbose=True 139 | ) 140 | tramdata = TRAMData( reader.trajs, kT_K=reader.kT_K, kT_target = args.kT_target) 141 | try: 142 | xtram_obj = XTRAM( tramdata.get_C_K_ij( args.lag ), tramdata.u_I_x, tramdata.T_x, tramdata.M_x, tramdata.N_K_i, target = tramdata.kT_target ) 143 | except ExpressionError, e: 144 | print "#\n### ERROR\n#" 145 | print "# Your input was faulty!" 
146 | print "# The < %s > object is malformed: %s" % ( e.expression, e.msg ) 147 | print "#\n### ABORTING\n\n" 148 | exit( 1 ) 149 | print "#\n### SYSTEM INFORMATION\n#" 150 | print "# %25s %24d" % ( "[markov states]", tramdata.n_markov_states ) 151 | print "# %25s %24d" % ( "[thermodynamic states]", tramdata.n_therm_states ) 152 | 153 | 154 | 155 | ############################################################################ 156 | # 157 | # run the self-consistent-iteration 158 | # 159 | ############################################################################ 160 | print "#\n#################################### RUN XTRAM #####################################\n#" 161 | try: 162 | print "# Run self-consistent-iteration" 163 | xtram_obj.sc_iteration( maxiter=args.maxiter, ftol=args.ftol, verbose=args.verbose ) 164 | print "# ... converged!" 165 | except NotConvergedWarning, e: 166 | print "#\n### WARNING\n#\n# xTRAM is not converged - use these results carefuly!" 167 | print "#\n### RECOMMENDATION\n#\n# Run xtram.py again and increase --maxiter" 168 | 169 | 170 | 171 | ############################################################################ 172 | # 173 | # print out the results 174 | # 175 | ############################################################################ 176 | print "#\n##################################### RESULTS ######################################" 177 | print "#\n### UNBIASED STATIONARY VECTOR\n#" 178 | print "# %25s %25s" % ( "[markov state]", "[stationary probability]" ) 179 | for i in xrange( xtram_obj.pi_i.shape[0] ): 180 | print " %25d %25.12e" % ( i, xtram_obj.pi_i[i] ) 181 | print "#\n### UNBIASED FREE ENERGY\n#" 182 | print "# %25s %25s" % ( "[markov state]", "[reduced free energy]" ) 183 | for i in xrange( xtram_obj.f_i.shape[0] ): 184 | print " %25d %25.12e" % ( i, xtram_obj.f_i[i] ) 185 | print "#\n### THERMODYNAMIC FREE ENERGY\n#" 186 | print "# %25s %25s" % ( "[thermodynamic state]", "[reduced free energy]" ) 187 | for i in 
xrange( xtram_obj.f_K.shape[0] ): 188 | print " %25d %25.12e" % ( i, xtram_obj.f_K[i] ) 189 | 190 | 191 | 192 | ############################################################################ 193 | # 194 | # say good bye 195 | # 196 | ############################################################################ 197 | print "#\n#################################### THAT'S IT #####################################\n#" 198 | print "#\n# Thank you for using the pytram package!\n#\n#" 199 | print "### CITATION\n#" 200 | xtram_obj.cite( pre="# " ) 201 | print "#\n####################################################################################\n\n" 202 | 203 | 204 | 205 | 206 | 207 | 208 | 209 | 210 | 211 | 212 | 213 | -------------------------------------------------------------------------------- /pytram/api.py: -------------------------------------------------------------------------------- 1 | r""" 2 | 3 | ======================== 4 | API for the TRAM package 5 | ======================== 6 | 7 | .. moduleauthor:: Christoph Wehmeyer 8 | .. moduleauthor:: Antonia Mey 9 | """ 10 | 11 | from ._dtram import DTRAM 12 | from ._xtram import XTRAM 13 | from . 
import NotConvergedWarning, ExpressionError 14 | 15 | 16 | 17 | #################################################################################################### 18 | # 19 | # dTRAM API function using the mathematical expressions at input 20 | # 21 | #################################################################################################### 22 | 23 | def dtram_from_matrix(C_K_ij, b_K_i, maxiter=100, ftol=1.0E-5, verbose=False): 24 | r""" 25 | The dTRAM API function 26 | 27 | Parameters 28 | ---------- 29 | C_K_ij : numpy.ndarray( shape=(T,M,M), dtype=numpy.intc ) 30 | transition counts between the M discrete Markov states for each of the T 31 | thermodynamic ensembles 32 | b_K_i : numpy.ndarray( shape=(T,M), dtype=numpy.float64 ) 33 | reduced bias energies at the T thermodynamic and M discrete Markov states 34 | maxiter : int (default=100) 35 | maximum number of SCF iteration steps during the optimisation of the 36 | stationary probabilities 37 | ftol : float (default=1.0E-5) 38 | convergence criterion based on the max change in an self-consistent-iteration step 39 | verbose : boolean (default=False) 40 | writes convergence information to stdout during the self-consistent-iteration cycle 41 | 42 | Returns 43 | ------- 44 | dtram_obj : object 45 | dTRAM estimator object with optimised unbiased stationary probabilities 46 | """ 47 | # try to create the DTRAM object 48 | try: 49 | dtram_obj = DTRAM(C_K_ij, b_K_i) 50 | except ExpressionError, e: 51 | print "# ERROR ############################################################################" 52 | print "# Your input was faulty!" 
53 | print "# The < %s > object is malformed: %s" % (e.expression, e.msg) 54 | print "# ABORTING #########################################################################" 55 | raise 56 | # try to converge the stationary probabilities 57 | try: 58 | dtram_obj.sc_iteration(maxiter=maxiter, ftol=ftol, verbose=verbose) 59 | except NotConvergedWarning, e: 60 | print "# WARNING ##########################################################################" 61 | print "# dTRAM did not converge within %d steps!" % maxiter 62 | print "# The last increment was %.6e." % e.increment 63 | print "# You should run the < sc_iteration > method again." 64 | print "# USE RESULTS WITH CARE ############################################################" 65 | finally: 66 | return dtram_obj 67 | 68 | 69 | 70 | #################################################################################################### 71 | # 72 | # dTRAM API function from TRAMData 73 | # 74 | #################################################################################################### 75 | 76 | def dtram(tramdata, lag=1, sliding_window=True, maxiter=100, ftol=1.0E-5, verbose=False): 77 | r""" 78 | The dTRAM API function 79 | 80 | Parameters 81 | ---------- 82 | tramdata : object 83 | container/converter for TRAM input data 84 | lag : int (default=1) 85 | specify the lag time for C_K_ij calculation 86 | sliding_window : boolean (default=true) 87 | use sliding windows to calculate C_K_ij 88 | maxiter : int (default=100) 89 | maximum number of SCF iteration steps during the optimisation of the 90 | stationary probabilities 91 | ftol : float (default=1.0E-5) 92 | convergence criterion based on the max change in an self-consistent-iteration step 93 | verbose : boolean (default=False) 94 | writes convergence information to stdout during the self-consistent-iteration cycle 95 | 96 | Returns 97 | ------- 98 | dtram_obj : object 99 | dTRAM estimator object with optimised unbiased stationary probabilities 100 | """ 101 | 
return dtram_from_matrix( 102 | tramdata.get_C_K_ij(lag), tramdata.b_K_i, maxiter=maxiter, ftol=ftol, verbose=verbose) 103 | 104 | 105 | #################################################################################################### 106 | # 107 | # xTRAM API function using expressions at input 108 | # 109 | #################################################################################################### 110 | 111 | def xtram_from_matrix( 112 | C_K_ij, b_K_x, T_x, M_x, N_K_i, maxiter=100, ftol=1.0E-5, target=0, verbose=False): 113 | r""" 114 | The xTRAM API function 115 | 116 | Parameters 117 | ---------- 118 | C_K_ij : numpy.ndarray( shape=(T,M,M), dtype=numpy.intc ) 119 | transition counts between the M discrete Markov states for each of the T 120 | thermodynamic ensembles 121 | b_K_x : numpy.ndarray( shape=(T,L), dtype=numpy.float64 ) 122 | bias energy evaluated at thermodynamic states T over all sampled data of length L 123 | T_x : numpy.ndarray( shape=(L), dtype=numpy.intc ) 124 | thermodynamic states over all sampled data of length L 125 | M_x : numpy.ndarray( shape=(L), dytpe=numpy.intc ) 126 | Markov states over all sampled data of length L 127 | N_K_i : numpy.ndarray( shape=(T,M), dtype=numpy.intc ) 128 | number of times a markov state (M) is seen in a thermodynamic state (T) 129 | maxiter : int (default=100) 130 | maximum number of self consistent iteration steps during the optimisation 131 | of the stationary probabilities 132 | ftol : float (> 0.0) (default=1.0E-5) 133 | convergence criterion based on the max change in an self-consistent-iteration step 134 | target : int (default=0) 135 | integer of the thermodynamic state for which results should be estiamted 136 | verbose : boolean (default=False) 137 | writes convergence information to stdout during the self-consistent-iteration cycle 138 | 139 | Returns 140 | ------- 141 | xtram_obj : object 142 | xTRAM estimator object with optimised unbiased stationary probabilities 143 | """ 144 | # try to 
create the XTRAM object 145 | try: 146 | xtram_obj = XTRAM(C_K_ij, b_K_x, T_x, M_x, N_K_i, target) 147 | except ExpressionError, e: 148 | print "# ERROR ############################################################################" 149 | print "# Your input was faulty!" 150 | print "# The < %s > object is malformed: %s" % (e.expression, e.msg) 151 | print "# ABORTING #########################################################################" 152 | raise 153 | # try to converge the stationary probabilities 154 | try: 155 | xtram_obj.sc_iteration(maxiter=maxiter, ftol=ftol, verbose=verbose) 156 | except NotConvergedWarning, e: 157 | print "# WARNING ##########################################################################" 158 | print "# xTRAM did not converge within %d steps!" % maxiter 159 | print "# The last increment was %.6e." % e.increment 160 | print "# You should run the < sc_iteration > method again." 161 | print "# USE RESULTS WITH CARE ############################################################" 162 | finally: 163 | return xtram_obj 164 | 165 | 166 | #################################################################################################### 167 | # 168 | # xTRAM API function from TRAMData 169 | # 170 | #################################################################################################### 171 | 172 | def xtram(tramdata, lag=1, sliding_window=True, maxiter=100, ftol=1.0E-5, target=0, verbose=False): 173 | r""" 174 | The xTRAM API function 175 | 176 | Parameters 177 | ---------- 178 | tramdata : object 179 | container/converter for TRAM input data 180 | lag : int (default=1) 181 | specify the lag time for C_K_ij calculation 182 | sliding_window : boolean (default=True) 183 | use sliding windows to calculate C_K_ij 184 | maxiter : int (default=100) 185 | maximum number of SCF iteration steps during the optimisation of the 186 | stationary probabilities 187 | ftol : float (default=1.0E-5) 188 | convergence criterion based on the max 
change in an self-consistent-iteration step 189 | target : int (default=0) 190 | integer of the thermodynamic state for which results should be estiamted 191 | verbose : boolean (default=False) 192 | writes convergence information to stdout during the self-consistent-iteration cycle. 193 | 194 | Returns 195 | ------- 196 | xtram_obj : object 197 | xTRAM estimator object with optimised unbiased stationary probabilities 198 | """ 199 | return xtram_from_matrix( 200 | tramdata.get_C_K_ij(lag), 201 | tramdata.b_K_x, 202 | tramdata.T_x, 203 | tramdata.M_x, 204 | tramdata.N_K_i, 205 | maxiter=maxiter, 206 | ftol=ftol, 207 | target=target, 208 | verbose=verbose) 209 | -------------------------------------------------------------------------------- /pytram/tramdata.py: -------------------------------------------------------------------------------- 1 | r""" 2 | .. moduleauthor:: Antonia Mey , Christoph Wehmeyer 3 | 4 | """ 5 | 6 | from .estimator import ExpressionError 7 | import numpy as np 8 | 9 | 10 | 11 | #################################################################################################### 12 | # 13 | # TRAMDATA CLASS FOR STORING SEQUENTIAL SIMULATION DATA AND CONVERSION TO TRAM INPUT EXPRESSIONS 14 | # 15 | #################################################################################################### 16 | 17 | class TRAMData(object): 18 | r""" 19 | Parameters 20 | ---------- 21 | trajs : list of dictionaries 22 | each dictionary contains the following entries: 23 | 'm' markov sequence in a 1-D numpy array of integers 24 | 't' thermodynamic sequence in a 1-D numpy array of integers 25 | 'b' reduced bias energy sequences in a 2-D numpy array of floats 26 | b_K_i : numpy.ndarray( shape=(T,M), dtype=numpy.float64 ) (optional) 27 | reduced bias energies at the T thermodynamic and M discrete Markov states 28 | kT_K : numpy.ndarray( shape=(T), dtype=numpy.float64 ) (optional) 29 | array of reduced temperatures of each thermodynamic state K 30 | kT_target 
: int (optional) 31 | integer of the thermodynamic state K of the kT_K array at which estimators 32 | observables should be estimated 33 | verbose : boolean (default=False) 34 | be loud and noisy 35 | 36 | Notes 37 | ----- 38 | I convert/process the list of trajectory dictionaries into data types that 39 | are need for pytram estimators. 40 | """ 41 | def __init__(self, trajs, b_K_i=None, kT_K=None, kT_target=None, verbose=False): 42 | self.trajs = trajs 43 | self._n_therm_states = None 44 | self._n_markov_states = None 45 | self._N_K_i = None 46 | self._N_K = None 47 | self._M_x = None 48 | self._T_x = None 49 | self._b_K_x = None 50 | self.b_K_i = b_K_i 51 | self.kT_K = kT_K 52 | self.kT_target = kT_target 53 | self.verbose = verbose 54 | if (kT_K is not None) and (kT_target is None): 55 | self.kT_target = 0 56 | 57 | ############################################################################ 58 | # 59 | # n_markov_states / n_therm_states getters 60 | # 61 | ############################################################################ 62 | 63 | @property 64 | def n_markov_states(self): 65 | if self._n_markov_states is None: 66 | if self.verbose: 67 | print "# Counting Markov states" 68 | self._n_markov_states = 0 69 | for traj in self.trajs: 70 | max_state = np.max(traj['m']) 71 | if max_state > self._n_markov_states: 72 | self._n_markov_states = max_state 73 | self._n_markov_states += 1 74 | if self.verbose: 75 | print "# ... found %d Markov states" % self._n_markov_states 76 | return self._n_markov_states 77 | 78 | @property 79 | def n_therm_states(self): 80 | if self._n_therm_states is None: 81 | if self.verbose: 82 | print "# Counting thermodynamic states" 83 | self._n_therm_states = 0 84 | for traj in self.trajs: 85 | max_state = np.max(traj['t']) 86 | if max_state > self._n_therm_states: 87 | self._n_therm_states = max_state 88 | self._n_therm_states += 1 89 | if self.verbose: 90 | print "# ... 
found %d thermodynamic states" % self._n_therm_states 91 | return self._n_therm_states 92 | 93 | ############################################################################ 94 | # 95 | # N_K_i / N_K getters 96 | # 97 | ############################################################################ 98 | 99 | @property 100 | def N_K_i(self): 101 | if self._N_K_i is None: 102 | if self.verbose: 103 | print "# Counting visited Markov states" 104 | self._N_K_i = np.zeros( 105 | shape=(self.n_therm_states, self.n_markov_states), dtype=np.intc) 106 | for traj in self.trajs: 107 | for K in xrange(self.n_therm_states): 108 | inc_K = (traj['t'] == K) 109 | for i in xrange(self.n_markov_states): 110 | inc_i = (traj['m'][inc_K] == i) 111 | self._N_K_i[K, i] += inc_i.sum() 112 | if self.verbose: 113 | print "# ... done" 114 | return self._N_K_i 115 | 116 | @property 117 | def N_K(self): 118 | if self._N_K is None: 119 | self._N_K = self.N_K_i.sum(axis=1) 120 | return self._N_K.astype(np.intc) 121 | 122 | ############################################################################ 123 | # 124 | # M_x / T_x getters 125 | # 126 | ############################################################################ 127 | 128 | @property 129 | def M_x(self): 130 | if self._M_x is None: 131 | if self.verbose: 132 | print "# Copying Markov state sequences" 133 | self._M_x = np.zeros(shape=(self.N_K.sum(),), dtype=np.intc) 134 | a = 0 135 | for traj in self.trajs: 136 | b = a + traj['m'].shape[0] 137 | self._M_x[a:b] = traj['m'][:] 138 | a = b 139 | if self.verbose: 140 | print "# ... done" 141 | return self._M_x 142 | 143 | @property 144 | def T_x(self): 145 | if self._T_x is None: 146 | if self.verbose: 147 | print "# Copying thermodynamic state sequences" 148 | self._T_x = np.zeros(shape=(self.N_K.sum(),), dtype=np.intc) 149 | a = 0 150 | for traj in self.trajs: 151 | b = a + traj['t'].shape[0] 152 | self._T_x[a:b] = traj['t'][:] 153 | a = b 154 | if self.verbose: 155 | print "# ... 
done" 156 | return self._T_x 157 | 158 | @property 159 | def b_K_x(self): 160 | if self._b_K_x is None: 161 | if self.verbose: 162 | print "# Copying bias energy sequences" 163 | if not self.kT_K is None: 164 | self._gen_b_K_x_from_kT_K() 165 | else: 166 | self._gen_b_K_x() 167 | if self.verbose: 168 | print "# ... done" 169 | return self._b_K_x 170 | 171 | 172 | def get_C_K_ij( self, lag, sliding_window=True ): 173 | r""" 174 | Parameters 175 | ---------- 176 | lag : int 177 | lagtime tau, at which the countmatrix should be evaluated 178 | sliding_window : boolean (default=True) 179 | lag is applied by mean of a sliding window or skipping data entries. 180 | 181 | Returns 182 | ------- 183 | C_K_ij : numpy.ndarray(shape=(T,M,M)) 184 | count matrices C_ij at each termodynamic state K 185 | """ 186 | C_K_ij = np.zeros( 187 | shape=(self.n_therm_states, self.n_markov_states, self.n_markov_states), 188 | dtype=np.intc) 189 | for traj in self.trajs: 190 | t = 0 191 | while t < traj['m'].shape[0]-lag: 192 | K = traj['t'][t] 193 | if np.all(traj['t'][t:t+lag+1] == K): 194 | C_K_ij[K, traj['m'][t], traj['m'][t+lag]] += 1 195 | if sliding_window: 196 | t += 1 197 | else: 198 | t += lag 199 | return C_K_ij 200 | 201 | ############################################################################ 202 | # 203 | # b_K_x getter and helper functions 204 | # 205 | ############################################################################ 206 | 207 | 208 | def _gen_b_K_x(self): 209 | self._b_K_x = np.zeros(shape=(self.n_therm_states, self.N_K.sum()), dtype=np.float64) 210 | a = 0 211 | for traj in self.trajs: 212 | if traj['b'].shape[1] == 1: 213 | raise ExpressionError( 214 | "b_K_x", 215 | "Trajectory with single energy columns detected - use kT file and kT target") 216 | if traj['b'].shape[1] != self.n_therm_states: 217 | raise ExpressionError( 218 | "b_K_x", 219 | "Trajectory with wrong number of energy columns detected (%d!=%d)" \ 220 | % (traj['b'].shape[1], 
self.n_therm_states)) 221 | b = a + traj['b'].shape[0] 222 | self._b_K_x[:, a:b] = traj['b'][:, :].transpose().copy() 223 | a = b 224 | 225 | def _gen_b_K_x_from_kT_K(self): 226 | b_x = np.zeros(shape=(self.N_K.sum(),), dtype=np.float64) 227 | a = 0 228 | for traj in self.trajs: 229 | b = a + traj['b'].shape[0] 230 | b_x[a:b] = traj['b'][:, 0] 231 | a = b 232 | for K in xrange(self.kT_K.shape[0]): 233 | b_x[(self.T_x == K)] *= self.kT_K[K] 234 | self._b_K_x = np.zeros(shape=(self.kT_K.shape[0], self.N_K.sum()), dtype=np.float64) 235 | for K in xrange(self.kT_K.shape[0]): 236 | self._b_K_x[K, :] = (1.0/self.kT_K[K] - 1.0/self.kT_K[self.kT_target]) * b_x[:] 237 | -------------------------------------------------------------------------------- /doc/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # pytram documentation build configuration file, created by 4 | # sphinx-quickstart on Sat Nov 29 23:53:07 2014. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | import sys 16 | import os 17 | 18 | # If extensions (or modules to document with autodoc) are in another directory, 19 | # add these directories to sys.path here. If the directory is relative to the 20 | # documentation root, use os.path.abspath to make it absolute, like shown here. 21 | sys.path.insert(0, os.path.abspath("../../")) 22 | 23 | # -- General configuration ------------------------------------------------ 24 | 25 | # If your documentation needs a minimal Sphinx version, state it here. 26 | #needs_sphinx = '1.0' 27 | 28 | # Add any Sphinx extension module names here, as strings. 
They can be 29 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 30 | # ones. 31 | extensions = [ 32 | 'sphinx.ext.autodoc', 33 | 'sphinx.ext.autosummary', 34 | 'sphinx.ext.doctest', 35 | 'sphinx.ext.pngmath', 36 | 'numpydoc' 37 | ] 38 | 39 | # Add any paths that contain templates here, relative to this directory. 40 | templates_path = ['_templates'] 41 | 42 | # The suffix of source filenames. 43 | source_suffix = '.rst' 44 | 45 | # The encoding of source files. 46 | #source_encoding = 'utf-8-sig' 47 | 48 | # The master toctree document. 49 | master_doc = 'index' 50 | 51 | # General information about the project. 52 | project = u'pytram' 53 | copyright = u'2014-2015, Christoph Wehmeyer, Antonia Mey, Fabian Paul, Hao Wu, Frank Noé' 54 | 55 | # The version info for the project you're documenting, acts as replacement for 56 | # |version| and |release|, also used in various other places throughout the 57 | # built documents. 58 | # 59 | # The short X.Y version. 60 | version = '0.2' 61 | # The full version, including alpha/beta/rc tags. 62 | release = '0.2.0' 63 | 64 | # The language for content autogenerated by Sphinx. Refer to documentation 65 | # for a list of supported languages. 66 | #language = None 67 | 68 | # There are two options for replacing |today|: either, you set today to some 69 | # non-false value, then it is used: 70 | #today = '' 71 | # Else, today_fmt is used as the format for a strftime call. 72 | #today_fmt = '%B %d, %Y' 73 | 74 | # List of patterns, relative to source directory, that match files and 75 | # directories to ignore when looking for source files. 76 | exclude_patterns = ['_build'] 77 | 78 | # The reST default role (used for this markup: `text`) to use for all 79 | # documents. 80 | #default_role = None 81 | 82 | # If true, '()' will be appended to :func: etc. cross-reference text. 
83 | add_function_parentheses = True 84 | 85 | # If true, the current module name will be prepended to all description 86 | # unit titles (such as .. function::). 87 | #add_module_names = True 88 | 89 | # If true, sectionauthor and moduleauthor directives will be shown in the 90 | # output. They are ignored by default. 91 | show_authors = True 92 | 93 | # The name of the Pygments (syntax highlighting) style to use. 94 | pygments_style = 'sphinx' 95 | 96 | # A list of ignored prefixes for module index sorting. 97 | #modindex_common_prefix = [] 98 | 99 | # If true, keep warnings as "system message" paragraphs in the built documents. 100 | #keep_warnings = False 101 | 102 | 103 | # -- Options for HTML output ---------------------------------------------- 104 | 105 | # The theme to use for HTML and HTML Help pages. See the documentation for 106 | # a list of builtin themes. 107 | try: 108 | import sphinx_rtd_theme 109 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 110 | html_theme = 'sphinx_rtd_theme' 111 | except ImportError: 112 | html_theme = 'default' 113 | 114 | # Theme options are theme-specific and customize the look and feel of a theme 115 | # further. For a list of options available for each theme, see the 116 | # documentation. 117 | #html_theme_options = {} 118 | 119 | # Add any paths that contain custom themes here, relative to this directory. 120 | #html_theme_path = [] 121 | 122 | # The name for this set of Sphinx documents. If None, it defaults to 123 | # " v documentation". 124 | #html_title = None 125 | 126 | # A shorter title for the navigation bar. Default is the same as html_title. 127 | #html_short_title = None 128 | 129 | # The name of an image file (relative to this directory) to place at the top 130 | # of the sidebar. 131 | #html_logo = None 132 | 133 | # The name of an image file (within the static path) to use as favicon of the 134 | # docs. 
This file should be a Windows icon file (.ico) being 16x16 or 32x32 135 | # pixels large. 136 | #html_favicon = None 137 | 138 | # Add any paths that contain custom static files (such as style sheets) here, 139 | # relative to this directory. They are copied after the builtin static files, 140 | # so a file named "default.css" will overwrite the builtin "default.css". 141 | html_static_path = ['_static'] 142 | 143 | # Add any extra paths that contain custom files (such as robots.txt or 144 | # .htaccess) here, relative to this directory. These files are copied 145 | # directly to the root of the documentation. 146 | #html_extra_path = [] 147 | 148 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 149 | # using the given strftime format. 150 | html_last_updated_fmt = '%b %d, %Y' 151 | 152 | # If true, SmartyPants will be used to convert quotes and dashes to 153 | # typographically correct entities. 154 | html_use_smartypants = True 155 | 156 | # Custom sidebar templates, maps document names to template names. 157 | #html_sidebars = {} 158 | 159 | # Additional templates that should be rendered to pages, maps page names to 160 | # template names. 161 | #html_additional_pages = {} 162 | 163 | # If false, no module index is generated. 164 | #html_domain_indices = True 165 | 166 | # If false, no index is generated. 167 | #html_use_index = True 168 | 169 | # If true, the index is split into individual pages for each letter. 170 | #html_split_index = False 171 | 172 | # If true, links to the reST sources are added to the pages. 173 | #html_show_sourcelink = True 174 | 175 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 176 | html_show_sphinx = True 177 | 178 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 179 | html_show_copyright = True 180 | 181 | # If true, an OpenSearch description file will be output, and all pages will 182 | # contain a tag referring to it. 
The value of this option must be the 183 | # base URL from which the finished HTML is served. 184 | #html_use_opensearch = '' 185 | 186 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 187 | #html_file_suffix = None 188 | 189 | # Output file base name for HTML help builder. 190 | htmlhelp_basename = 'pytramdoc' 191 | 192 | 193 | # -- Options for LaTeX output --------------------------------------------- 194 | 195 | latex_elements = { 196 | # The paper size ('letterpaper' or 'a4paper'). 197 | #'papersize': 'letterpaper', 198 | 199 | # The font size ('10pt', '11pt' or '12pt'). 200 | #'pointsize': '10pt', 201 | 202 | # Additional stuff for the LaTeX preamble. 203 | #'preamble': '', 204 | } 205 | 206 | # Grouping the document tree into LaTeX files. List of tuples 207 | # (source start file, target name, title, 208 | # author, documentclass [howto, manual, or own class]). 209 | latex_documents = [ 210 | ('index', 'pytram.tex', u'pytram Documentation', 211 | u'Christoph Wehmeyer, Antonia Mey, Fabian Paul, Hao Wu, Frank Noé', 'manual'), 212 | ] 213 | 214 | # The name of an image file (relative to this directory) to place at the top of 215 | # the title page. 216 | #latex_logo = None 217 | 218 | # For "manual" documents, if this is true, then toplevel headings are parts, 219 | # not chapters. 220 | #latex_use_parts = False 221 | 222 | # If true, show page references after internal links. 223 | #latex_show_pagerefs = False 224 | 225 | # If true, show URL addresses after external links. 226 | #latex_show_urls = False 227 | 228 | # Documents to append as an appendix to all manuals. 229 | #latex_appendices = [] 230 | 231 | # If false, no module index is generated. 232 | #latex_domain_indices = True 233 | 234 | 235 | # -- Options for manual page output --------------------------------------- 236 | 237 | # One entry per manual page. List of tuples 238 | # (source start file, name, description, authors, manual section). 
239 | man_pages = [ 240 | ('index', 'pytram', u'pytram Documentation', 241 | [u'Christoph Wehmeyer, Antonia Mey, Fabian Paul, Hao Wu, Frank Noé'], 1) 242 | ] 243 | 244 | # If true, show URL addresses after external links. 245 | #man_show_urls = False 246 | 247 | 248 | # -- Options for Texinfo output ------------------------------------------- 249 | 250 | # Grouping the document tree into Texinfo files. List of tuples 251 | # (source start file, target name, title, author, 252 | # dir menu entry, description, category) 253 | texinfo_documents = [ 254 | ('index', 'pytram', u'pytram Documentation', 255 | u'Christoph Wehmeyer, Antonia Mey, Fabian Paul, Hao Wu, Frank Noé', 'pytram', 'One line description of project.', 256 | 'Miscellaneous'), 257 | ] 258 | 259 | #numpydoc_show_class_members = False 260 | 261 | # Documents to append as an appendix to all manuals. 262 | #texinfo_appendices = [] 263 | 264 | # If false, no module index is generated. 265 | #texinfo_domain_indices = True 266 | 267 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 268 | #texinfo_show_urls = 'footnote' 269 | 270 | # If true, do not generate a @detailmenu in the "Top" node's menu. 
271 | #texinfo_no_detailmenu = False 272 | -------------------------------------------------------------------------------- /xTRAM_example.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "", 4 | "signature": "sha256:979341b2a17200789b17ee3d676dc8858ea7b417d41622926f7082c45e447d5e" 5 | }, 6 | "nbformat": 3, 7 | "nbformat_minor": 0, 8 | "worksheets": [ 9 | { 10 | "cells": [ 11 | { 12 | "cell_type": "heading", 13 | "level": 1, 14 | "metadata": {}, 15 | "source": [ 16 | "How to use the xTRAM estimator" 17 | ] 18 | }, 19 | { 20 | "cell_type": "markdown", 21 | "metadata": {}, 22 | "source": [ 23 | "First we will import all we need from the pytram package" 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "collapsed": false, 29 | "input": [ 30 | "%pylab inline\n", 31 | "from pytram import TRAMData, xtram\n", 32 | "import matplotlib.pylab as plt" 33 | ], 34 | "language": "python", 35 | "metadata": {}, 36 | "outputs": [] 37 | }, 38 | { 39 | "cell_type": "heading", 40 | "level": 2, 41 | "metadata": {}, 42 | "source": [ 43 | "Simulated tempering example: asymmetric double well potential" 44 | ] 45 | }, 46 | { 47 | "cell_type": "markdown", 48 | "metadata": {}, 49 | "source": [ 50 | "Below we construct a Brownian dynamics simulation of an asymmetric double well potential similar to the one used in the xTRAM paper [1]. We will generate a single simulated tempering trajectory in a form that pytram will understand. All energies will be reduced, i.e. u/kT. 
" 51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "collapsed": false, 56 | "input": [ 57 | "# assymetric double well potential \n", 58 | "def U( x ):\n", 59 | " return 2*(x-2) - 6*(x-2)**2 + (x-2)**4\n", 60 | "# gradient of the potential\n", 61 | "def G( x ):\n", 62 | " return 4*x**3-24*x**2+36*x-6\n", 63 | "# simulated tempering temperature exchange step\n", 64 | "def change_temp( ct, E, temp_array,f):\n", 65 | " newt = np.random.randint(temp_array.shape[0])\n", 66 | " b_c = 1.0/temp_array[ct]\n", 67 | " b_n = 1.0/temp_array[newt]\n", 68 | " if (-(b_n-b_c)*E+(f[newt]-f[ct])) >=0:\n", 69 | " return newt\n", 70 | " elif np.random.rand() < np.exp( (-(b_n-b_c)*E+(f[newt]-f[ct])) ):\n", 71 | " return newt\n", 72 | " else:\n", 73 | " return ct\n", 74 | " \n", 75 | "#Brownian Dynamics simulation\n", 76 | "def bd( x0, L, nex, kT_idx = None, kT = None, f = None):\n", 77 | " dt =0.005\n", 78 | " mass = 1\n", 79 | " damping = 1\n", 80 | " beta = 1.0/kT[kT_idx]\n", 81 | " coeff_A = dt / ( mass * damping )\n", 82 | " coeff_B = np.sqrt( 2.0 * dt / ( beta * mass * damping ) )\n", 83 | " x = x0\n", 84 | " P = [x0]\n", 85 | " E = U( x0 )\n", 86 | " T = [kT_idx]\n", 87 | " u = [E]\n", 88 | " for ex in xrange( nex ):\n", 89 | " for t in xrange( L ):\n", 90 | " g = G( x )\n", 91 | " x = x - coeff_A * g + coeff_B * np.random.normal()\n", 92 | " P.append( x )\n", 93 | " T.append( kT_idx )\n", 94 | " e = U( x )\n", 95 | " u.append( e*beta )\n", 96 | " kT_idx = change_temp( kT_idx, e, kT, f )\n", 97 | " beta = 1.0/kT[kT_idx]\n", 98 | " coeff_B = np.sqrt( 2.0 * dt / ( beta * mass * damping ) )\n", 99 | " \n", 100 | " return ( np.array( P, dtype=np.float64 ),np.array( T, dtype=np.intc ),np.array( u, dtype=np.float64 ).reshape(len(u),1) )\n", 101 | "\n", 102 | "# discretisation scheme\n", 103 | "def discretise( T, centers ):\n", 104 | " D = np.zeros( shape=T.shape, dtype=np.int32 )\n", 105 | " half_width = (centers[2]-centers[1])*0.5\n", 106 | " for i in xrange( T.shape[0] ):\n", 107 | 
" if T[i] <= centers[0]:\n", 108 | " D[i] = 0\n", 109 | " elif T[i] >= centers[-1]:\n", 110 | " D[i] = centers.shape[0]-1\n", 111 | " else:\n", 112 | " for j in xrange( centers.shape[0] ):\n", 113 | " if T[i] < centers[j]+half_width:\n", 114 | " D[i] = j\n", 115 | " break\n", 116 | " return D\n", 117 | "# generate trajectories\n", 118 | "def get_trajectories( centers, L, nex, kT_idx=0, kT = None ,f=None):\n", 119 | " r = [] #trajectory list\n", 120 | " #run the Brownian dynamics integrator\n", 121 | " traj,therm, b = bd( -1.0, L ,nex, kT_idx = kT_idx, kT= kT, f=f)\n", 122 | " dtraj = discretise( traj, centers ) #discretise the trajectory into the defined bins\n", 123 | " r.append( { 'm':dtraj, 't': therm, 'b': b } ) # list of dictionaries in the correct pytram format\n", 124 | " return r\n", 125 | "\n", 126 | "\n", 127 | "# set the discretisation and simulation parameters\n", 128 | "# kT array\n", 129 | "TEMPS = np.array([2.0,4.0,7.0, 15.0] )\n", 130 | "# factor used to make sure temperatures are roughly sampled equally\n", 131 | "f = np.array([-5.95994519, -3.37600304, -2.50314141, -2.04173675] ) \n", 132 | "#definition of the binning\n", 133 | "NBINS = 50\n", 134 | "centers = np.linspace(-0.6,4.2,NBINS)\n", 135 | "L = 100 #trajectoy length before an exchange is attempted \n", 136 | "nex = 2000 #number of times a temperature exchange is attempted\n", 137 | "# run the simulations\n", 138 | "trajs = get_trajectories(centers, L, nex, kT_idx=0, kT=TEMPS, f=f)" 139 | ], 140 | "language": "python", 141 | "metadata": {}, 142 | "outputs": [] 143 | }, 144 | { 145 | "cell_type": "markdown", 146 | "metadata": {}, 147 | "source": [ 148 | "With the generated list of trajectories we can now use xTRAM to estimate the free energy for each bin for the lowest temperature we sampled at. 
This means, rather than just giving a list of trajectory dictionaries to the TRAMdata object, we also have to indicate the temperatures the trajectory was sampled at and the target temperature for which we want to know the final probability distribution/ free energy profile. " 149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "collapsed": false, 154 | "input": [ 155 | "tramdata = TRAMData(trajs, kT_K=TEMPS, kT_target=0)" 156 | ], 157 | "language": "python", 158 | "metadata": {}, 159 | "outputs": [] 160 | }, 161 | { 162 | "cell_type": "markdown", 163 | "metadata": {}, 164 | "source": [ 165 | "Now we can just pass the TRAMdata object to the API function ```python xtram()``` indicate the lag at which the count matrix should be estimated and set the number of iterations and tolerance of the estimate. This will run the estimator." 166 | ] 167 | }, 168 | { 169 | "cell_type": "code", 170 | "collapsed": false, 171 | "input": [ 172 | "xtram_obj = xtram(tramdata, 1, maxiter=100, ftol=1.0e-15, verbose=True)" 173 | ], 174 | "language": "python", 175 | "metadata": {}, 176 | "outputs": [] 177 | }, 178 | { 179 | "cell_type": "markdown", 180 | "metadata": {}, 181 | "source": [ 182 | "Here we compute the exact solution which can then be used to our estimate" 183 | ] 184 | }, 185 | { 186 | "cell_type": "code", 187 | "collapsed": false, 188 | "input": [ 189 | "Z = np.exp(-f)\n", 190 | "def prob( x, kT, Z ):\n", 191 | " return exp(-U( x )/kT[0])/Z[0]\n", 192 | "exact = []\n", 193 | "for x in centers:\n", 194 | " exact.append( prob( x, TEMPS, Z ) )\n", 195 | "exact = exact/np.sum(exact)" 196 | ], 197 | "language": "python", 198 | "metadata": {}, 199 | "outputs": [] 200 | }, 201 | { 202 | "cell_type": "markdown", 203 | "metadata": {}, 204 | "source": [ 205 | "Now let us plot the results of the estimator and compare it to the exact solution. 
The central object from the estimator is xtram_obj.pi_i, which contains the stationary probability of each bin defined at the target temperature indicated. " 206 | ] 207 | }, 208 | { 209 | "cell_type": "code", 210 | "collapsed": false, 211 | "input": [ 212 | "plt.figure(figsize=(8,6))\n", 213 | "plt.plot(centers,xtram_obj.f_i, color = 'b', linewidth =1.5, label ='xtram', marker = 'o')\n", 214 | "plt.plot(centers,-np.log(exact), color = 'k', linewidth = 1.5, label = 'exact')\n", 215 | "plt.plot(centers,-np.log(tramdata.N_K_i[0]/float(tramdata.N_K[0])), color = 'r', linestyle = '--', linewidth = 1.5, label = 'counts')\n", 216 | "plt.xlabel('x in [a.u.]', fontsize = 20)\n", 217 | "plt.ylabel('F in [kT]', fontsize = 20)\n", 218 | "plt.legend( loc =4)\n", 219 | "plt.tick_params(axis='both', which='major', labelsize=20)" 220 | ], 221 | "language": "python", 222 | "metadata": {}, 223 | "outputs": [] 224 | }, 225 | { 226 | "cell_type": "heading", 227 | "level": 2, 228 | "metadata": {}, 229 | "source": [ 230 | "Concluding remark" 231 | ] 232 | }, 233 | { 234 | "cell_type": "markdown", 235 | "metadata": {}, 236 | "source": [ 237 | "We hope that this example helps you to understand the basic usage of the xTRAM method in the pytram package. Feel free to contact us via:\n", 238 | "\n", 239 | " pytram@lists.fu-berlin.de\n", 240 | "\n", 241 | "if any problems or questions arise." 
242 | ] 243 | }, 244 | { 245 | "cell_type": "code", 246 | "collapsed": false, 247 | "input": [ 248 | "print \"If you find xTRAM useful, please cite\\n\"\n", 249 | "xtram_obj.cite()" 250 | ], 251 | "language": "python", 252 | "metadata": {}, 253 | "outputs": [] 254 | } 255 | ], 256 | "metadata": {} 257 | } 258 | ] 259 | } -------------------------------------------------------------------------------- /dTRAM_example.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "", 4 | "signature": "sha256:32cf7fd884672fd482711c6d5701c3ac6bb65f00402928831be84bea85cdb445" 5 | }, 6 | "nbformat": 3, 7 | "nbformat_minor": 0, 8 | "worksheets": [ 9 | { 10 | "cells": [ 11 | { 12 | "cell_type": "heading", 13 | "level": 1, 14 | "metadata": {}, 15 | "source": [ 16 | "How to use the dTRAM estimator" 17 | ] 18 | }, 19 | { 20 | "cell_type": "markdown", 21 | "metadata": {}, 22 | "source": [ 23 | "In this short tutorial, we will learn use the dTRAM method to estimate stationary distributions and transition matrices from multi-ensemble simulations.\n", 24 | "\n", 25 | "We start by importing the TRAMData class and dTRAM's API function from the pytram package." 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "collapsed": false, 31 | "input": [ 32 | "%pylab inline\n", 33 | "from pytram import TRAMData, dtram # this is the dTRAM API function" 34 | ], 35 | "language": "python", 36 | "metadata": {}, 37 | "outputs": [] 38 | }, 39 | { 40 | "cell_type": "heading", 41 | "level": 2, 42 | "metadata": {}, 43 | "source": [ 44 | "First example: three discrete states with piecewise constant potentials" 45 | ] 46 | }, 47 | { 48 | "cell_type": "markdown", 49 | "metadata": {}, 50 | "source": [ 51 | "The first example uses a system with three discrete states (L,C,R), where each state corresponds to a certain energy. We allow transitions between states L and C and between C and R. 
Furthermore, the state C corresponds to a higher potential energy than states L and R. Thus, state C acts as a transition state between the metastable states L and R.\n", 52 | "\n", 53 | "We perform two unbiased Metropolis Monte Carlo simulations starting in each metastable state and an additional biased simulation starting from the transition state. The bias raises the energies of the metastable states and allows for free diffusion between L and C, and C and R." 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "collapsed": false, 59 | "input": [ 60 | "# piecewise contant potentials\n", 61 | "u_i = np.array( [ 4.0, 8.0, 0.0 ], dtype=np.float64 )\n", 62 | "# bias energies\n", 63 | "b_K_i = np.array( [ [ 0.0, 0.0, 0.0 ], [ 4.0, 0.0, 8.0 ] ], dtype=np.float64 )\n", 64 | "# proposal probabilities\n", 65 | "S = np.ones( shape=(3,3), dtype=np.float64 ) * 0.5\n", 66 | "S[0,2] = 0.0\n", 67 | "S[1,1] = 0.0\n", 68 | "S[2,0] = 0.0\n", 69 | "# transition matrix generator\n", 70 | "def tmat( s, e ):\n", 71 | " a = np.zeros( shape=s.shape, dtype=np.float64 )\n", 72 | " for i in xrange( e.shape[0] ):\n", 73 | " for j in xrange( e.shape[0] ):\n", 74 | " a[i,j] = e[j] - e[i]\n", 75 | " idx = ( a < 0.0 )\n", 76 | " a[idx] = 0.0\n", 77 | " a = np.exp( -a )\n", 78 | " T = s*a\n", 79 | " for i in xrange( T.shape[0] ):\n", 80 | " T[i,i] = 0.0\n", 81 | " T[i,i] = 1.0 - T[i,:].sum()\n", 82 | " return T\n", 83 | "# unbiased transition matrix at kT=1.0\n", 84 | "Tu = tmat( S, u_i+b_K_i[0,:] )\n", 85 | "# biased transition matrix at kT=1.0\n", 86 | "Tb = tmat( S, u_i+b_K_i[1,:] )\n", 87 | "# throw an index dice according to the distribution T_i\n", 88 | "def n_dice( T_i ):\n", 89 | " rnd = np.random.rand()\n", 90 | " cdf = np.cumsum( T_i )\n", 91 | " ind = ( cdf > rnd )\n", 92 | " idx = np.where( ind == True )\n", 93 | " return np.min( idx )\n", 94 | "# get a markov chain according to a transition matrix T, staring from state i with length L\n", 95 | "def m_chain( T, i, L ):\n", 96 | " 
dtraj = np.zeros( shape=(L,), dtype=np.intc )\n", 97 | " dtraj[0] = i\n", 98 | " for l in xrange( 1, L ):\n", 99 | " dtraj[l] = n_dice( T[dtraj[l-1],:] )\n", 100 | " return dtraj\n", 101 | "# get the sequential data from a single simulation\n", 102 | "def get_sequence( T, i, L, therm_state ):\n", 103 | " m = m_chain( T, i, L )\n", 104 | " t = np.ones( shape=m.shape, dtype=np.intc ) * therm_state\n", 105 | " return { 'm': m, 't': t }\n", 106 | "# run the simulations of length L\n", 107 | "L = 10000\n", 108 | "T0 = get_sequence( Tu, 0, L, 0 )\n", 109 | "T1 = get_sequence( Tu, 2, L, 0 )\n", 110 | "T2 = get_sequence( Tb, 1, L, 1 )" 111 | ], 112 | "language": "python", 113 | "metadata": {}, 114 | "outputs": [] 115 | }, 116 | { 117 | "cell_type": "markdown", 118 | "metadata": {}, 119 | "source": [ 120 | "In the next step, we prepare our input data for dTRAM, i.e., we let the TRAMData object handle the preparation." 121 | ] 122 | }, 123 | { 124 | "cell_type": "code", 125 | "collapsed": false, 126 | "input": [ 127 | "tramdata = TRAMData( [T0,T1,T2], b_K_i=b_K_i )" 128 | ], 129 | "language": "python", 130 | "metadata": {}, 131 | "outputs": [] 132 | }, 133 | { 134 | "cell_type": "markdown", 135 | "metadata": {}, 136 | "source": [ 137 | "Now, we call the dTRAM API function which estimates the unbiased staionary probabilities of the three states L, C, and R." 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "collapsed": false, 143 | "input": [ 144 | "dtram_obj = dtram( tramdata, 1, maxiter=100000, ftol=1.0E-15, verbose=False )" 145 | ], 146 | "language": "python", 147 | "metadata": {}, 148 | "outputs": [] 149 | }, 150 | { 151 | "cell_type": "markdown", 152 | "metadata": {}, 153 | "source": [ 154 | "If we have not made any mistakes, the dtram_obj should now carry the stationary distribution, which we will compare against the exact solution." 
155 | ] 156 | }, 157 | { 158 | "cell_type": "code", 159 | "collapsed": false, 160 | "input": [ 161 | "pi_exact = np.exp( -u_i )\n", 162 | "pi_exact /= pi_exact.sum()\n", 163 | "print \"pi_estimated :\", dtram_obj.pi_i\n", 164 | "print \"pi_exact :\", pi_exact\n", 165 | "print \"pi_estimated normalization :\", dtram_obj.pi_i.sum()\n", 166 | "print \"||pi_estimated - pi_exact||:\", np.linalg.norm( dtram_obj.pi_i - pi_exact )" 167 | ], 168 | "language": "python", 169 | "metadata": {}, 170 | "outputs": [] 171 | }, 172 | { 173 | "cell_type": "markdown", 174 | "metadata": {}, 175 | "source": [ 176 | "Furthermore, we can also estimate a transition matrix for every thermodynamic state." 177 | ] 178 | }, 179 | { 180 | "cell_type": "code", 181 | "collapsed": false, 182 | "input": [ 183 | "T_estimated = dtram_obj.estimate_transition_matrices()" 184 | ], 185 | "language": "python", 186 | "metadata": {}, 187 | "outputs": [] 188 | }, 189 | { 190 | "cell_type": "markdown", 191 | "metadata": {}, 192 | "source": [ 193 | "Finally, we compare the estimated transition matrices against the exact ones." 
194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "collapsed": false, 199 | "input": [ 200 | "print \"T_estimated_unbiased - Tu:\"\n", 201 | "print (T_estimated[0,:,:] - Tu)\n", 202 | "print \"||T_estimated_unbiased - Tu||:\", np.linalg.norm( T_estimated[0,:,:] - Tu )\n", 203 | "print \"T_estimated_biased - Tb:\"\n", 204 | "print (T_estimated[1,:,:] - Tb)\n", 205 | "print \"||T_estimated_biased - Tb||:\", np.linalg.norm( T_estimated[1,:,:] - Tb )" 206 | ], 207 | "language": "python", 208 | "metadata": {}, 209 | "outputs": [] 210 | }, 211 | { 212 | "cell_type": "heading", 213 | "level": 2, 214 | "metadata": {}, 215 | "source": [ 216 | "Second example: symmetric double well potential" 217 | ] 218 | }, 219 | { 220 | "cell_type": "markdown", 221 | "metadata": {}, 222 | "source": [ 223 | "The second example addresses a double well potential in one dimension, i.e., we have still two metastable states which are separated by a potential barrier, but we do not have piecewise constant potentials anymore.\n", 224 | "\n", 225 | "Again, we use the Metropolis Monte Carlo approach to generate trajectories, which have additional harmonic restraints centered at several points across the potential barrier to increase the number of transition events. This is called the umbrella sampling approach." 
226 | ] 227 | }, 228 | { 229 | "cell_type": "code", 230 | "collapsed": false, 231 | "input": [ 232 | "# harmonic bias potential\n", 233 | "def B( x, xk ):\n", 234 | " if None == xk:\n", 235 | " return 0.0\n", 236 | " return 4.0*( x - xk )**2\n", 237 | "# double well potential (with otiopnal bias)\n", 238 | "def U( x, xk=None ):\n", 239 | " return 0.25*x**4 - 5.0*x**2 - 9.9874 + B( x, xk )\n", 240 | "# Metropolis Monte Carlo\n", 241 | "def mmc( x0, L, xk=None ):\n", 242 | " T = [x0]\n", 243 | " E = U( x0, xk )\n", 244 | " for t in xrange( L ):\n", 245 | " x = T[-1] + 0.4*( np.random.rand() - 0.5 )\n", 246 | " e = U( x, xk )\n", 247 | " accept = False\n", 248 | " if e < E:\n", 249 | " accept = True\n", 250 | " elif np.random.rand() < np.exp( -( e - E ) ):\n", 251 | " accept = True\n", 252 | " if accept:\n", 253 | " T.append( x )\n", 254 | " E = e\n", 255 | " else:\n", 256 | " T.append( T[-1] )\n", 257 | " return np.array( T, dtype=np.float64 )\n", 258 | "# discretisation scheme\n", 259 | "def discretise( T, centers ):\n", 260 | " D = np.zeros( shape=T.shape, dtype=np.int32 )\n", 261 | " for i in xrange( T.shape[0] ):\n", 262 | " if T[i] <= centers[0]:\n", 263 | " D[i] = 0\n", 264 | " elif T[i] >= centers[-1]:\n", 265 | " D[i] = centers.shape[0]-1\n", 266 | " else:\n", 267 | " for j in xrange( centers.shape[0] ):\n", 268 | " if T[i] < centers[j]+0.05:\n", 269 | " D[i] = j\n", 270 | " break\n", 271 | " return D\n", 272 | "# generate trajectories\n", 273 | "def get_trajectories( centers, xk, L ):\n", 274 | " r = []\n", 275 | " for i in xrange( xk.shape[0] ):\n", 276 | " traj = mmc( xk[i], L, xk=xk[i] )\n", 277 | " dtraj = discretise( traj, centers )\n", 278 | " r.append( { 'm': dtraj, 't': np.ones( shape=dtraj.shape, dtype=np.intc ) * i } )\n", 279 | " N_K_i = np.zeros( shape=(xk.shape[0],centers.shape[0]), dtype=np.intc )\n", 280 | " for K in xrange( xk.shape[0] ):\n", 281 | " for i in xrange( centers.shape[0] ):\n", 282 | " N_K_i[K,i] = ( r[K]['m'] == i ).sum()\n", 
283 | " N_i = N_K_i.sum( axis=0 )\n", 284 | " idx = ( N_i > 0 )\n", 285 | " NSTATES = idx.sum()\n", 286 | " for i in reversed( xrange( centers.shape[0] ) ):\n", 287 | " if idx[i]:\n", 288 | " continue\n", 289 | " for K in xrange( xk.shape[0] ):\n", 290 | " idx2 = ( r[K]['m'] > i )\n", 291 | " r[K]['m'][idx2] -= 1\n", 292 | " return r , idx\n", 293 | "# set the discretisation and simulation parameters\n", 294 | "NBINS = 101\n", 295 | "centers = np.linspace( -5.0, 5.0, NBINS )\n", 296 | "NBIAS = 11\n", 297 | "xk = np.linspace( -5.0, 5.0, NBIAS )\n", 298 | "L = 10000\n", 299 | "# run the simulations\n", 300 | "trajs , idx = get_trajectories( centers, xk, L )\n", 301 | "# calculate the number of visisted states\n", 302 | "NSTATES = idx.sum()\n", 303 | "# calculate the bias energies for the visited states\n", 304 | "b_K_i = np.zeros( shape=(xk.shape[0],NSTATES), dtype=np.float64 )\n", 305 | "for K in xrange( xk.shape[0] ):\n", 306 | " b_K_i[K,:] = B( centers[idx], xk[K] )" 307 | ], 308 | "language": "python", 309 | "metadata": {}, 310 | "outputs": [] 311 | }, 312 | { 313 | "cell_type": "markdown", 314 | "metadata": {}, 315 | "source": [ 316 | "In the next step, we prepare our input data for dTRAM, i.e., we let the TRAMData object handle the preparation." 317 | ] 318 | }, 319 | { 320 | "cell_type": "code", 321 | "collapsed": false, 322 | "input": [ 323 | "tramdata = TRAMData( trajs, b_K_i=b_K_i )" 324 | ], 325 | "language": "python", 326 | "metadata": {}, 327 | "outputs": [] 328 | }, 329 | { 330 | "cell_type": "markdown", 331 | "metadata": {}, 332 | "source": [ 333 | "We call the API function..." 
334 | ] 335 | }, 336 | { 337 | "cell_type": "code", 338 | "collapsed": false, 339 | "input": [ 340 | "dtram_obj = dtram( tramdata, 1, maxiter=100000, ftol=1.0E-4, verbose=False )" 341 | ], 342 | "language": "python", 343 | "metadata": {}, 344 | "outputs": [] 345 | }, 346 | { 347 | "cell_type": "markdown", 348 | "metadata": {}, 349 | "source": [ 350 | "...and compare our estimation of the unbiased stationary probabilities via the corresponding free energies against the exactly known double well potential." 351 | ] 352 | }, 353 | { 354 | "cell_type": "code", 355 | "collapsed": false, 356 | "input": [ 357 | "nz = dtram_obj.pi_i.nonzero()\n", 358 | "cnz = (centers[idx])[nz]\n", 359 | "u1 = U( cnz )\n", 360 | "u2 = dtram_obj.f_i[nz]\n", 361 | "u2 -= u2.min() - u1.min()\n", 362 | "plt.plot( cnz, u2, '--o', color='green', label=\"dTRAM\" )\n", 363 | "plt.plot( cnz, u1, label=\"exact\" )\n", 364 | "plt.legend( loc=1, fontsize=10 )\n", 365 | "plt.xlabel( r\"$x$ / a.u.\", fontsize=15 )\n", 366 | "plt.ylabel( r\"$U(x)$ / kT\", fontsize=15 )" 367 | ], 368 | "language": "python", 369 | "metadata": {}, 370 | "outputs": [] 371 | }, 372 | { 373 | "cell_type": "heading", 374 | "level": 2, 375 | "metadata": {}, 376 | "source": [ 377 | "Concluding remark" 378 | ] 379 | }, 380 | { 381 | "cell_type": "markdown", 382 | "metadata": {}, 383 | "source": [ 384 | "We hope that these examples help to understand the basic usage of the dTRAM method in the pytram package. Feel free to contact us via\n", 385 | "\n", 386 | " pytram@lists.fu-berlin.de\n", 387 | "\n", 388 | "if any problems or questions arise." 
389 | ] 390 | }, 391 | { 392 | "cell_type": "code", 393 | "collapsed": false, 394 | "input": [ 395 | "print \"If you find dTRAM useful, please cite\\n\"\n", 396 | "dtram_obj.cite()" 397 | ], 398 | "language": "python", 399 | "metadata": {}, 400 | "outputs": [] 401 | } 402 | ], 403 | "metadata": {} 404 | } 405 | ] 406 | } -------------------------------------------------------------------------------- /pytram/_version.py: -------------------------------------------------------------------------------- 1 | 2 | # This file helps to compute a version number in source trees obtained from 3 | # git-archive tarball (such as those provided by githubs download-from-tag 4 | # feature). Distribution tarballs (built by setup.py sdist) and build 5 | # directories (produced by setup.py build) will contain a much shorter file 6 | # that just contains the computed version number. 7 | 8 | # This file is released into the public domain. Generated by 9 | # versioneer-0.14+dev (https://github.com/warner/python-versioneer) 10 | 11 | import errno 12 | import os 13 | import re 14 | import subprocess 15 | import sys 16 | 17 | 18 | def get_keywords(): 19 | # these strings will be replaced by git during git-archive. 20 | # setup.py/versioneer.py will grep for the variable names, so they must 21 | # each be defined on a line of their own. _version.py will just call 22 | # get_keywords(). 
23 | git_refnames = " (HEAD -> master, tag: 0.3.0)" 24 | git_full = "5e888e81990f5bd2bc2596523dc44611b0b78529" 25 | keywords = {"refnames": git_refnames, "full": git_full} 26 | return keywords 27 | 28 | 29 | class VersioneerConfig: 30 | pass 31 | 32 | 33 | def get_config(): 34 | # these strings are filled in when 'setup.py versioneer' creates 35 | # _version.py 36 | cfg = VersioneerConfig() 37 | cfg.VCS = "git" 38 | cfg.style = "pep440" 39 | cfg.tag_prefix = "" 40 | cfg.parentdir_prefix = "pytram-" 41 | cfg.versionfile_source = "pytram/_version.py" 42 | cfg.verbose = False 43 | return cfg 44 | 45 | 46 | class NotThisMethod(Exception): 47 | pass 48 | 49 | 50 | def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): 51 | assert isinstance(commands, list) 52 | p = None 53 | for c in commands: 54 | try: 55 | # remember shell=False, so use git.cmd on windows, not just git 56 | p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, 57 | stderr=(subprocess.PIPE if hide_stderr 58 | else None)) 59 | break 60 | except EnvironmentError: 61 | e = sys.exc_info()[1] 62 | if e.errno == errno.ENOENT: 63 | continue 64 | if verbose: 65 | print("unable to run %s" % args[0]) 66 | print(e) 67 | return None 68 | else: 69 | if verbose: 70 | print("unable to find command, tried %s" % (commands,)) 71 | return None 72 | stdout = p.communicate()[0].strip() 73 | if sys.version_info[0] >= 3: 74 | stdout = stdout.decode() 75 | if p.returncode != 0: 76 | if verbose: 77 | print("unable to run %s (error)" % args[0]) 78 | return None 79 | return stdout 80 | 81 | 82 | def versions_from_parentdir(parentdir_prefix, root, verbose): 83 | # Source tarballs conventionally unpack into a directory that includes 84 | # both the project name and a version string. 
85 | dirname = os.path.basename(root) 86 | if not dirname.startswith(parentdir_prefix): 87 | if verbose: 88 | print("guessing rootdir is '%s', but '%s' doesn't start with " 89 | "prefix '%s'" % (root, dirname, parentdir_prefix)) 90 | raise NotThisMethod("rootdir doesn't start with parentdir_prefix") 91 | return {"version": dirname[len(parentdir_prefix):], 92 | "full-revisionid": None, 93 | "dirty": False, "error": None} 94 | 95 | 96 | def git_get_keywords(versionfile_abs): 97 | # the code embedded in _version.py can just fetch the value of these 98 | # keywords. When used from setup.py, we don't want to import _version.py, 99 | # so we do it with a regexp instead. This function is not used from 100 | # _version.py. 101 | keywords = {} 102 | try: 103 | f = open(versionfile_abs, "r") 104 | for line in f.readlines(): 105 | if line.strip().startswith("git_refnames ="): 106 | mo = re.search(r'=\s*"(.*)"', line) 107 | if mo: 108 | keywords["refnames"] = mo.group(1) 109 | if line.strip().startswith("git_full ="): 110 | mo = re.search(r'=\s*"(.*)"', line) 111 | if mo: 112 | keywords["full"] = mo.group(1) 113 | f.close() 114 | except EnvironmentError: 115 | pass 116 | return keywords 117 | 118 | 119 | def git_versions_from_keywords(keywords, tag_prefix, verbose): 120 | if not keywords: 121 | raise NotThisMethod("no keywords at all, weird") 122 | refnames = keywords["refnames"].strip() 123 | if refnames.startswith("$Format"): 124 | if verbose: 125 | print("keywords are unexpanded, not using") 126 | raise NotThisMethod("unexpanded keywords, not a git-archive tarball") 127 | refs = set([r.strip() for r in refnames.strip("()").split(",")]) 128 | # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of 129 | # just "foo-1.0". If we see a "tag: " prefix, prefer those. 130 | TAG = "tag: " 131 | tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) 132 | if not tags: 133 | # Either we're using git < 1.8.3, or there really are no tags. 
We use 134 | # a heuristic: assume all version tags have a digit. The old git %d 135 | # expansion behaves like git log --decorate=short and strips out the 136 | # refs/heads/ and refs/tags/ prefixes that would let us distinguish 137 | # between branches and tags. By ignoring refnames without digits, we 138 | # filter out many common branch names like "release" and 139 | # "stabilization", as well as "HEAD" and "master". 140 | tags = set([r for r in refs if re.search(r'\d', r)]) 141 | if verbose: 142 | print("discarding '%s', no digits" % ",".join(refs-tags)) 143 | if verbose: 144 | print("likely tags: %s" % ",".join(sorted(tags))) 145 | for ref in sorted(tags): 146 | # sorting will prefer e.g. "2.0" over "2.0rc1" 147 | if ref.startswith(tag_prefix): 148 | r = ref[len(tag_prefix):] 149 | if verbose: 150 | print("picking %s" % r) 151 | return {"version": r, 152 | "full-revisionid": keywords["full"].strip(), 153 | "dirty": False, "error": None 154 | } 155 | # no suitable tags, so version is "0+unknown", but full hex is still there 156 | if verbose: 157 | print("no suitable tags, using unknown + full revision id") 158 | return {"version": "0+unknown", 159 | "full-revisionid": keywords["full"].strip(), 160 | "dirty": False, "error": "no suitable tags"} 161 | 162 | 163 | def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): 164 | # this runs 'git' from the root of the source tree. This only gets called 165 | # if the git-archive 'subst' keywords were *not* expanded, and 166 | # _version.py hasn't already been rewritten with a short version string, 167 | # meaning we're inside a checked out source tree. 
168 | 169 | if not os.path.exists(os.path.join(root, ".git")): 170 | if verbose: 171 | print("no .git in %s" % root) 172 | raise NotThisMethod("no .git directory") 173 | 174 | GITS = ["git"] 175 | if sys.platform == "win32": 176 | GITS = ["git.cmd", "git.exe"] 177 | # if there is a tag, this yields TAG-NUM-gHEX[-dirty] 178 | # if there are no tags, this yields HEX[-dirty] (no NUM) 179 | describe_out = run_command(GITS, ["describe", "--tags", "--dirty", 180 | "--always", "--long"], 181 | cwd=root) 182 | # --long was added in git-1.5.5 183 | if describe_out is None: 184 | raise NotThisMethod("'git describe' failed") 185 | describe_out = describe_out.strip() 186 | full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) 187 | if full_out is None: 188 | raise NotThisMethod("'git rev-parse' failed") 189 | full_out = full_out.strip() 190 | 191 | pieces = {} 192 | pieces["long"] = full_out 193 | pieces["short"] = full_out[:7] # maybe improved later 194 | pieces["error"] = None 195 | 196 | # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] 197 | # TAG might have hyphens. 198 | git_describe = describe_out 199 | 200 | # look for -dirty suffix 201 | dirty = git_describe.endswith("-dirty") 202 | pieces["dirty"] = dirty 203 | if dirty: 204 | git_describe = git_describe[:git_describe.rindex("-dirty")] 205 | 206 | # now we have TAG-NUM-gHEX or HEX 207 | 208 | if "-" in git_describe: 209 | # TAG-NUM-gHEX 210 | mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) 211 | if not mo: 212 | # unparseable. Maybe git-describe is misbehaving? 
213 | pieces["error"] = ("unable to parse git-describe output: '%s'" 214 | % describe_out) 215 | return pieces 216 | 217 | # tag 218 | full_tag = mo.group(1) 219 | if not full_tag.startswith(tag_prefix): 220 | if verbose: 221 | fmt = "tag '%s' doesn't start with prefix '%s'" 222 | print(fmt % (full_tag, tag_prefix)) 223 | pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" 224 | % (full_tag, tag_prefix)) 225 | return pieces 226 | pieces["closest-tag"] = full_tag[len(tag_prefix):] 227 | 228 | # distance: number of commits since tag 229 | pieces["distance"] = int(mo.group(2)) 230 | 231 | # commit: short hex revision ID 232 | pieces["short"] = mo.group(3) 233 | 234 | else: 235 | # HEX: no tags 236 | pieces["closest-tag"] = None 237 | count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], 238 | cwd=root) 239 | pieces["distance"] = int(count_out) # total number of commits 240 | 241 | return pieces 242 | 243 | 244 | def plus_or_dot(pieces): 245 | if "+" in pieces.get("closest-tag", ""): 246 | return "." 247 | return "+" 248 | 249 | 250 | def render_pep440(pieces): 251 | # now build up version string, with post-release "local version 252 | # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you 253 | # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty 254 | 255 | # exceptions: 256 | # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] 257 | 258 | if pieces["closest-tag"]: 259 | rendered = pieces["closest-tag"] 260 | if pieces["distance"] or pieces["dirty"]: 261 | rendered += plus_or_dot(pieces) 262 | rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) 263 | if pieces["dirty"]: 264 | rendered += ".dirty" 265 | else: 266 | # exception #1 267 | rendered = "0+untagged.%d.g%s" % (pieces["distance"], 268 | pieces["short"]) 269 | if pieces["dirty"]: 270 | rendered += ".dirty" 271 | return rendered 272 | 273 | 274 | def render_pep440_pre(pieces): 275 | # TAG[.post.devDISTANCE] . 
def render_pep440_post(pieces):
    """Render TAG[.postDISTANCE[.dev0]+gHEX].

    The ".dev0" suffix marks a dirty tree.  Note that .dev0 sorts backwards
    (a dirty tree appears "older" than the corresponding clean one), but you
    shouldn't be releasing software with -dirty anyway.
    Without any tag the result is 0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
            version += plus_or_dot(pieces)
            version += "g%s" % pieces["short"]
        return version
    # no tag at all
    version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version + "+g%s" % pieces["short"]


def render_pep440_old(pieces):
    """Render TAG[.postDISTANCE[.dev0]] -- the ".dev0" means dirty.

    Without any tag the result is 0.postDISTANCE[.dev0].
    """
    tag = pieces["closest-tag"]
    if not tag:
        version = "0.post%d" % pieces["distance"]
    else:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
    # ".dev0" is appended exactly when the tree is dirty, in both branches
    if pieces["dirty"]:
        version += ".dev0"
    return version


def render_git_describe(pieces):
    """Render TAG[-DISTANCE-gHEX][-dirty], mimicking
    'git describe --tags --dirty --always'.

    Without any tag the result is HEX[-dirty] (note: no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"]:
            version += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version
def render_git_describe_long(pieces):
    """Render TAG-DISTANCE-gHEX[-dirty], mimicking
    'git describe --tags --dirty --always --long'.

    The distance/hash part is unconditional.  Without any tag the result is
    HEX[-dirty] (note: no 'g' prefix).
    """
    tag = pieces["closest-tag"]
    if tag:
        version = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        version = pieces["short"]
    if pieces["dirty"]:
        version += "-dirty"
    return version


def render(pieces, style):
    """Dispatch *pieces* to the renderer selected by *style* and wrap the
    result in the standard version dict.

    An error recorded in pieces short-circuits to a "version unknown" dict.
    Raises ValueError for an unrecognized style name.
    """
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}

    # empty/None/"default" all mean the pep440 style
    if not style or style == "default":
        style = "pep440"

    if style == "pep440":
        version = render_pep440(pieces)
    elif style == "pep440-pre":
        version = render_pep440_pre(pieces)
    elif style == "pep440-post":
        version = render_pep440_post(pieces)
    elif style == "pep440-old":
        version = render_pep440_old(pieces)
    elif style == "git-describe":
        version = render_git_describe(pieces)
    elif style == "git-describe-long":
        version = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % style)

    return {"version": version,
            "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"],
            "error": None}


def get_versions():
    """Compute the version dict, trying each strategy in turn.

    Order: expanded git keywords, then 'git describe' on the source tree,
    then the parent-directory name; falls back to "0+unknown".
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose

    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree"}

    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose),
                      cfg.style)
    except NotThisMethod:
        pass

    try:
        return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version"}
class XTRAM(Estimator):
    r"""
    This is the XTRAM estimator class

    Parameters
    ----------
    C_K_ij : numpy.ndarray( shape=(T,M,M), dtype=numpy.intc )
        transition counts between the M discrete Markov states for each of the T
        thermodynamic ensembles
    b_K_x : numpy.ndarray( shape=(T,nsamples), dtype=numpy.float64 )
        Biasing tensor
    T_x : numpy.ndarray( shape=(nsamples), dtype=numpy.intc )
        Thermodynamic state trajectory (``_check_T_x`` enforces dtype intc)
    M_x : numpy.ndarray( shape=(nsamples), dtype=numpy.intc )
        Markov state trajectories
    N_K_i : numpy.ndarray( shape=(T,M), dtype=float )
        Number of markov samples (M) in each thermodynamic state (T)
    target : int (default=0)
        target state for which pi_i should be computed
    """
    def __init__(self, C_K_ij, b_K_x, T_x, M_x, N_K_i, target=0):
        super(XTRAM, self).__init__(C_K_ij)
        # every _check_* validator raises ExpressionError on bad input and
        # returns True otherwise, so each assignment below only runs on
        # validated data
        if self._check_b_K_x(b_K_x):
            self._b_K_x = b_K_x
        if self._check_M_x(M_x):
            self._M_x = M_x
        if self._check_T_x(T_x):
            self._T_x = T_x
        if self._check_N_K_i(N_K_i):
            self._N_K_i = N_K_i.astype(np.intc)
        self._N_K = np.sum(N_K_i, axis=1).astype(np.intc)
        self._N = np.sum(self._N_K)
        self.n_samples = len(M_x)
        # w_K/f_K/pi_K_i initialisation order matters: _compute_w_K and
        # _init_f_K read _N_K and the raw trajectories set above
        self.w_K = self._compute_w_K()
        self._f_K = self._init_f_K()
        self._pi_K_i = self._compute_pi_K_i()
        self.target = target
        # inner iteration settings passed to the compiled iterate_x kernel
        self._maxiter_inner = 100000
        self._ftol_inner = 1.0e-15
        # citation information
        self.citation = [
            "xTRAM: Estimating Equilibrium Expectations from Time-Correlated Simulation",
            "Data at Multiple Thermodynamic States;",
            "Antonia S.J.S. Mey, Hao Wu, and Frank Noe",
            "Phys. Rev. X 4, 041018 (2014)"]

    ############################################################################
    #
    #   override getters for stationary properties
    #
    ############################################################################

    @property
    def pi_i(self):
        # stationary distribution of the requested target ensemble
        return self.pi_K_i[self.target]

    @property
    def pi_K_i(self):
        # row-normalise so each thermodynamic state's distribution sums to 1
        return self._pi_K_i / self._pi_K_i.sum(axis=1)[:, np.newaxis]

    @property
    def f_K(self):
        # free energies of the thermodynamic states
        return self._f_K

    ############################################################################
    #
    #   self-consistent-iteration
    #
    ############################################################################

    def sc_iteration(self, maxiter=100, ftol=1.0E-5, verbose=False):
        r"""
        sc_iteration function

        Parameters
        ----------
        maxiter : int (default=100)
            maximum number of self-consistent-iteration steps
        ftol : float (default=1.0E-5)
            convergence criterion based on the max relative change in a
            self-consistent-iteration step
        verbose : boolean (default=False)
            Be loud and noisy
        """
        finc = 0.0
        f_old = np.zeros(self.f_K.shape[0])
        self.b_i_IJ = np.zeros(
            (self.n_markov_states, self.n_therm_states, self.n_therm_states), dtype=np.float64)
        if verbose:
            # print() with a single pre-formatted argument works on both
            # Python 2 and 3 (the original 'print' statement was py2-only)
            print("# %25s %25s" % ("[Step]", "[rel. Increment]"))
        for i in range(maxiter):
            f_old[:] = self.f_K[:]
            # compute thermodynamic count matrix
            self.b_i_IJ = self._count_matrices_thermo()
            # TODO: should be fixed in C code that is currently skipped
            # b_i_IJ_equation(
            #     self.T_x, self.M_x, self.N_K, self.f_K, self.w_K, self.b_K_x, self.b_i_IJ)
            N_tilde = self._compute_sparse_N()
            C_i, C_j, C_ij, C_ji = self._compute_individual_N()
            x_row, c_column = self._initialise_X_and_N(N_tilde)
            ferr = iterate_x(
                N_tilde.shape[0],
                x_row.shape[0],
                self._maxiter_inner,
                self._ftol_inner,
                C_i,
                C_j,
                C_ij,
                C_ji,
                x_row,
                c_column,
                x_row / x_row.sum())
            #print 'ferr'+str( ferr )
            pi_curr = x_row / np.sum(x_row)
            self._update_pi_K_i(pi_curr)
            self._update_free_energies()
            finc = np.sum(np.abs(f_old - self.f_K))
            if verbose:
                print(" %25d %25.12e" % (i + 1, finc))
            if finc < ftol:
                break
        if finc > ftol:
            warnings.warn("XTRAM only reached increment %.3e" % finc)

    def _initialise_X_and_N(self, N_tilde):
        r"""
        sets default values for x_i and N_i

        Parameters
        ----------
        N_tilde : numpy.ndarray( shape=(n_entries, 4) )
            sparse count matrix rows (s1, s2, n_ij, n_ji) as produced by
            _compute_sparse_N

        Returns
        -------
        (X_row, N_column) : tuple of numpy 1d-arrays indexed by combined state
        """
        X_row = np.zeros(int(np.max(N_tilde[:, 0]) + 1))
        N_column = np.zeros(int(np.max(N_tilde[:, 0]) + 1))
        for i in range(len(N_tilde)):
            entry = N_tilde[i]
            if entry[0] == entry[1]:
                # diagonal entry: symmetric average goes to X, counts to N
                X_row[int(entry[0])] += (entry[2] + entry[3]) * 0.5
                N_column[int(entry[0])] += entry[2]
            else:
                N_column[int(entry[0])] += entry[2]  # Check that this is the right summation!
                N_column[int(entry[1])] += entry[3]
                X_row[int(entry[0])] += (entry[2] + entry[3]) * 0.5
                X_row[int(entry[1])] += (entry[2] + entry[3]) * 0.5
        return (X_row, N_column)

    def _update_pi_K_i(self, pi_curr):
        r"""
        copies the current iteration pi_curr into the pi_K_i variable and normalises it as required
        """
        for K in range(self.n_therm_states):
            initial = K * self.n_markov_states
            final = K * self.n_markov_states + self.n_markov_states
            self._pi_K_i[K][:] = pi_curr[initial:final] / np.sum(pi_curr[:])

    def _update_free_energies(self):
        r"""
        computes the free energies based on the current pi_K_i
        """
        for K in range(self.f_K.shape[0]):
            self._f_K[K] = self._f_K[K] - np.log(
                (np.sum(self.N_K).astype(float) / self.N_K[K]) * (np.sum(self._pi_K_i[K, :])))

    ####################################################################
    #                                                                  #
    #   Computes the extended count matrix                             #
    #                                                                  #
    ####################################################################

    #def _count_matrices_conf(ttrajs, dtrajs, lag):
    #    import msmtools.estimation as msmest
    #    nthermo = msmest.number_of_states(ttrajs)
    #    nstates = msmest.number_of_states(dtrajs)
    #    ntrajs = len(ttrajs)
    #    Cs = np.zeros((nthermo, nstates, nstates), dtype=np.intc)
    #    for i in range(ntrajs):
    #        ttraj = ttrajs[i]
    #        dtraj = dtrajs[i]
    #        for t in range(len(ttraj)-lag):
    #            Cs[ttraj[t], dtraj[t], dtraj[t+lag]] += 1
    #    return Cs

    def _count_matrices_thermo(self, metropolis=True):
        """Computes count matrix between thermodynamic states

        Parameters
        ----------
        metropolis : bool (default=True)
            use Metropolis acceptance probabilities; any other scheme is
            not implemented and raises NotImplementedError

        Returns
        -------
        Bs : numpy.ndarray( shape=(M,T,T), dtype=numpy.float64 )
            per-Markov-state transition counts between thermodynamic states
        """
        N_k = self._N_K.astype(float)
        N = np.sum(N_k)
        Bs = np.zeros(
            (self.n_markov_states, self.n_therm_states, self.n_therm_states), dtype=np.float64)
        for I in range(self.n_therm_states):
            # get samples starting from I
            indI = np.where(self._T_x == I)[0]
            # look at all targets
            p_IJ = np.zeros((self.n_therm_states, len(indI)))
            for J in range(self.n_therm_states):
                if I != J:
                    if metropolis:
                        p_IJ[J] = np.minimum(
                            1.0,
                            (N_k[J] / N_k[I]) * np.exp(
                                self._f_K[J] - self._b_K_x[J, indI]
                                - self._f_K[I] + self._b_K_x[I, indI]))
                        p_IJ[J] *= N_k[I] / N
                    else:
                        raise NotImplementedError()
            # diagonal: remaining probability mass of staying in I
            p_IJ[I] = np.ones(len(indI)) - p_IJ.sum(axis=0)
            # accumulate counts by discrete state
            d_arr_i = self._M_x[indI]
            for i in range(self.n_markov_states):
                indi = np.where(d_arr_i == i)[0]
                Bs[i, I, :] = p_IJ[:, indi].sum(axis=1)
        return Bs

    def _compute_individual_N(self, factor=1.0):
        """Builds the sparse extended count matrix as four parallel arrays
        (row index, column index, forward count, backward count).
        """
        C_i = []
        C_j = []
        C_ij = []
        C_ji = []
        for I in range(self.n_therm_states):
            for i in range(self.n_markov_states):
                for j in range(i, self.n_markov_states):
                    s1 = i + I * self.n_markov_states
                    s2 = j + I * self.n_markov_states
                    if i == j:
                        n_ij = (self.C_K_ij[I, i, j] * factor + self.b_i_IJ[i, I, I])
                        n_ji = (self.C_K_ij[I, i, j] * factor + self.b_i_IJ[i, I, I])
                        C_i.append(s1)
                        C_j.append(s2)
                        C_ij.append(n_ij)
                        C_ji.append(n_ji)
                    else:
                        n_ij = self.C_K_ij[I, i, j] * factor
                        n_ji = self.C_K_ij[I, j, i] * factor
                        # keep the pair if either direction has counts
                        # (explicit form of the original 'n_ij or n_ji != 0',
                        # which is behaviourally identical for float counts)
                        if n_ij != 0 or n_ji != 0:
                            C_i.append(s1)
                            C_j.append(s2)
                            C_ij.append(n_ij)
                            C_ji.append(n_ji)
        for I in range(self.n_therm_states):
            for J in range(I, self.n_therm_states):
                for i in range(self.n_markov_states):
                    s1 = self.n_markov_states * I + i
                    s2 = self.n_markov_states * J + i
                    if I != J:
                        n_ij = self.b_i_IJ[i, I, J]
                        n_ji = self.b_i_IJ[i, J, I]
                        C_i.append(s1)
                        C_j.append(s2)
                        C_ij.append(n_ij)
                        C_ji.append(n_ji)
        return (
            np.array(C_i).astype(np.intc),
            np.array(C_j).astype(np.intc),
            np.array(C_ij),
            np.array(C_ji))

    def _compute_sparse_N(self, factor=1.0):
        r"""Computes a Nx4 array containing the count matrix in a sparse format

        Parameters
        ----------
        factor : float
            multiplication factor default of 1 is fine

        Returns
        -------
        N_tilde : numpy 2d-array
            N-4 numpy array containing the count matrix N-tilde
        """
        N_tilde = []
        for I in range(self.n_therm_states):
            for i in range(self.n_markov_states):
                for j in range(i, self.n_markov_states):
                    s1 = i + I * self.n_markov_states
                    s2 = j + I * self.n_markov_states
                    if i == j:
                        # diagonal: configurational counts plus thermodynamic
                        # self-transition counts
                        n_ij = (self.C_K_ij[I, i, j] * factor + self.b_i_IJ[i, I, I])
                        n_ji = (self.C_K_ij[I, i, j] * factor + self.b_i_IJ[i, I, I])
                        entry = np.zeros(4)
                        entry[0] = s1
                        entry[1] = s2
                        entry[2] = n_ij
                        entry[3] = n_ji
                        N_tilde.append(entry)
                    else:
                        n_ij = self.C_K_ij[I, i, j] * factor
                        n_ji = self.C_K_ij[I, j, i] * factor
                        # explicit form of the original 'n_ij or n_ji != 0'
                        if n_ij != 0 or n_ji != 0:
                            entry = np.zeros(4)
                            entry[0] = s1
                            entry[1] = s2
                            entry[2] = n_ij
                            entry[3] = n_ji
                            N_tilde.append(entry)
        for I in range(self.n_therm_states):
            for J in range(I, self.n_therm_states):
                for i in range(self.n_markov_states):
                    s1 = self.n_markov_states * I + i
                    s2 = self.n_markov_states * J + i
                    if I != J:
                        n_ij = self.b_i_IJ[i, I, J]
                        n_ji = self.b_i_IJ[i, J, I]
                        entry = np.zeros(4)
                        entry[0] = s1
                        entry[1] = s2
                        entry[2] = n_ij
                        entry[3] = n_ji
                        N_tilde.append(entry)
        return np.array(N_tilde)

    def _init_f_K(self):
        """ Computes the initial guess of free energies via bar ratios
        """
        I_plus_one = np.zeros(self.n_therm_states)
        I_minus_one = np.zeros(self.n_therm_states)
        for x in range(self.n_samples):
            I = self._T_x[x]
            if I > 0:
                I_minus_one[I] += self._metropolis(self._b_K_x[I, x], self._b_K_x[I - 1, x])
            if I < self.n_therm_states - 1:
                I_plus_one[I] += self._metropolis(self._b_K_x[I, x], self._b_K_x[I + 1, x])
        # compute BAR free energies between neighbouring ensembles
        f_K = np.zeros(self.n_therm_states)
        for I in range(1, self.n_therm_states):
            bar_ratio = (I_plus_one[I - 1] / float(self._N_K[I - 1])) \
                / (I_minus_one[I] / float(self._N_K[I]))
            f_K[I] = f_K[I - 1] - np.log(bar_ratio)
        return f_K

    def _metropolis(self, u_1, u_2):
        """ Metropolis function

        Returns the acceptance probability min(1, exp(u_1 - u_2)).
        """
        if (u_1 - u_2) > 0:
            return 1.0
        else:
            return np.exp(u_1 - u_2)

    def _compute_pi_K_i(self):
        """Initializes the stationary probabilities
        """
        # uniform initial guess over all (therm state, markov state) pairs
        _pi_K_i = np.ones(
            self.n_therm_states * self.n_markov_states).reshape(
            self.n_therm_states, self.n_markov_states)
        return _pi_K_i

    def _compute_w_K(self):
        """Computes the the weight at each thermodynamic state """
        # weight array based on thermodynamic sample counts
        return self.N_K.astype(np.float64) / np.sum(self.N_K)

    ####################################################################
    #                                                                  #
    #   Getters and setters and checks                                 #
    #                                                                  #
    ####################################################################

    @property
    def b_K_x(self):
        return self._b_K_x

    def _check_b_K_x(self, b_K_x):
        # validates shape (T, nsamples) and dtype float64; raises on failure
        if b_K_x is None:
            raise ExpressionError("b_K_x", "is None")
        if not isinstance(b_K_x, (np.ndarray,)):
            raise ExpressionError("b_K_x", "invalid type (%s)" % str(type(b_K_x)))
        if 2 != b_K_x.ndim:
            raise ExpressionError("b_K_x", "invalid number of dimensions (%d)" % b_K_x.ndim)
        if b_K_x.shape[0] != self.n_therm_states:
            raise ExpressionError("b_K_x", "not matching number of thermodynamic states (%d,%d)" \
                % (b_K_x.shape[0], self.n_therm_states))
        if np.float64 != b_K_x.dtype:
            raise ExpressionError("b_K_x", "invalid dtype (%s)" % str(b_K_x.dtype))
        return True

    @property
    def M_x(self):
        return self._M_x

    def _check_M_x(self, M_x):
        # validates 1d intc trajectory with one entry per sample
        if M_x is None:
            raise ExpressionError("M_x", "is None")
        if not isinstance(M_x, (np.ndarray,)):
            raise ExpressionError("M_x", "invalid type (%s)" % str(type(M_x)))
        if 1 != M_x.ndim:
            raise ExpressionError("M_x", "invalid number of dimensions (%d)" % M_x.ndim)
        if M_x.shape[0] != self.b_K_x.shape[1]:
            raise ExpressionError("M_x", "not matching number thermodynamic samples (%d,%d)" \
                % (M_x.shape[0], self.b_K_x.shape[1]))
        if np.intc != M_x.dtype:
            raise ExpressionError("M_x", "invalid dtype (%s)" % str(M_x.dtype))
        return True

    @property
    def T_x(self):
        return self._T_x

    def _check_T_x(self, T_x):
        # validates 1d intc trajectory with one entry per sample
        if T_x is None:
            raise ExpressionError("T_x", "is None")
        if not isinstance(T_x, (np.ndarray,)):
            raise ExpressionError("T_x", "invalid type (%s)" % str(type(T_x)))
        if 1 != T_x.ndim:
            raise ExpressionError("T_x", "invalid number of dimensions (%d)" % T_x.ndim)
        if T_x.shape[0] != self.b_K_x.shape[1]:
            raise ExpressionError("T_x", "not matching number thermodynamic samples (%d,%d)" \
                % (T_x.shape[0], self.b_K_x.shape[1]))
        if np.intc != T_x.dtype:
            raise ExpressionError("T_x", "invalid dtype (%s)" % str(T_x.dtype))
        return True

    @property
    def N_K_i(self):
        return self._N_K_i

    def _check_N_K_i(self, N_K_i):
        # validates shape (T, M); dtype is not enforced here because
        # __init__ casts to intc via astype
        if N_K_i is None:
            raise ExpressionError("N_K_i", "is None")
        if not isinstance(N_K_i, (np.ndarray,)):
            raise ExpressionError("N_K_i", "invalid type (%s)" % str(type(N_K_i)))
        if 2 != N_K_i.ndim:
            raise ExpressionError("N_K_i", "invalid number of dimensions (%d)" % N_K_i.ndim)
        if N_K_i.shape[0] != self.n_therm_states:
            raise ExpressionError("N_K_i", "not matching number of thermodynamic states (%d,%d)" \
                % (N_K_i.shape[0], self.n_therm_states))
        if N_K_i.shape[1] != self.n_markov_states:
            raise ExpressionError("N_K_i", "not matching number of Markov states (%d,%d)" \
                % (N_K_i.shape[1], self.n_markov_states))
        return True

    @property
    def N_K(self):
        return self._N_K