├── test ├── test_api.py ├── test_forge.py ├── test_mbar.py ├── test_tram.py ├── test_reader.py ├── .DS_Store ├── test_wham.py ├── test_xtram.py ├── test_dtram.py └── test_three_state_model.py ├── .coveragerc ├── .gitattributes ├── pyfeat ├── forge │ ├── __init__.py │ └── forge.py ├── reader │ ├── __init__.py │ └── reader.py ├── estimator │ ├── __init__.py │ ├── dtram.py │ ├── tram.py │ ├── xtram.py │ ├── mbar.py │ └── wham.py ├── api │ ├── __init__.py │ └── api.py ├── __init__.py └── _version.py ├── .gitignore ├── MANIFEST.in ├── setup.cfg ├── doc ├── source │ ├── index.rst │ ├── install.rst │ ├── usr.rst │ └── conf.py └── Makefile ├── examples ├── README.md ├── trajectory_factory.py └── double_well_example.ipynb ├── .travis.yml ├── CHANGELOG ├── LICENSE ├── setup.py ├── README.rst ├── bin └── run_pyfeat.py └── versioneer.py /test/test_api.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/test_forge.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/test_mbar.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/test_tram.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /test/test_reader.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [report] 2 | omit = */_version.py 3 | -------------------------------------------------------------------------------- 
/.gitattributes: -------------------------------------------------------------------------------- 1 | pyfeat/_version.py export-subst 2 | -------------------------------------------------------------------------------- /pyfeat/forge/__init__.py: -------------------------------------------------------------------------------- 1 | from .forge import Forge 2 | -------------------------------------------------------------------------------- /pyfeat/reader/__init__.py: -------------------------------------------------------------------------------- 1 | from .reader import Reader 2 | -------------------------------------------------------------------------------- /test/.DS_Store: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/markovmodel/pyfeat/master/test/.DS_Store -------------------------------------------------------------------------------- /pyfeat/estimator/__init__.py: -------------------------------------------------------------------------------- 1 | from .wham import WHAM 2 | from .xtram import XTRAM 3 | from .dtram import DTRAM 4 | from .tram import TRAM 5 | #from .mbar import MBAR 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.so 3 | build 4 | .ipynb_checkpoints 5 | pyfeat.egg-info 6 | doc/_build 7 | .eggs/ 8 | *.dat 9 | .DS_Store 10 | *.egg 11 | dist 12 | .coverage 13 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.rst 2 | include CHANGELOG 3 | include examples/double_well_example.ipynb 4 | include examples/trajectory_factory.py 5 | include examples/README.md 6 | include versioneer.py 7 | include pyfeat/_version.py 8 | -------------------------------------------------------------------------------- 
/pyfeat/api/__init__.py: -------------------------------------------------------------------------------- 1 | from .api import wham 2 | from .api import wham_from_matrix 3 | from .api import dtram 4 | from .api import dtram_from_matrix 5 | from .api import xtram 6 | from .api import xtram_from_matrix 7 | from .api import read_files 8 | from .api import convert_data 9 | #from .api import mbar 10 | #from .api import mbar_me 11 | 12 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | 2 | # See the docstring in versioneer.py for instructions. Note that you must 3 | # re-run 'versioneer.py setup' after changing this section, and commit the 4 | # resulting files. 5 | 6 | [versioneer] 7 | VCS = git 8 | style = pep440 9 | versionfile_source = pyfeat/_version.py 10 | #versionfile_build = 11 | tag_prefix = 12 | parentdir_prefix = pyfeat- 13 | 14 | -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | .. pyfeat documentation master file, created by 2 | sphinx-quickstart on Wed Feb 4 14:05:25 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | ================================ 7 | The pyfeat package documentation 8 | ================================ 9 | 10 | Contents: 11 | 12 | .. 
toctree:: 13 | :maxdepth: 2 14 | 15 | install 16 | usr 17 | 18 | 19 | -------------------------------------------------------------------------------- /pyfeat/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | # raise the Reader class onto the pyfeat package level 3 | from .reader import Reader 4 | 5 | # raise the Forge class onto the pyfeat package level 6 | from .forge import Forge 7 | 8 | #raise API functions onto package level 9 | from .api import xtram, xtram_from_matrix, dtram, dtram_from_matrix, wham, wham_from_matrix, read_files, convert_data 10 | 11 | from ._version import get_versions 12 | __version__ = get_versions()['version'] 13 | del get_versions 14 | -------------------------------------------------------------------------------- /pyfeat/reader/reader.py: -------------------------------------------------------------------------------- 1 | r""" 2 | ====== 3 | Reader 4 | ====== 5 | 6 | .. moduleauthor:: Antonia Mey 7 | 8 | """ 9 | 10 | import pytram as pt 11 | import numpy as np 12 | 13 | class Reader( pt.Reader ): 14 | """I am the pyfeat reader and read all trajectory information 15 | """ 16 | def __init__( self, files, b_K_i_file=None, kT_file=None, skiprows=0, maxlength=None, verbose=False ): 17 | super( Reader, self ).__init__( files, b_K_i_file=b_K_i_file, kT_file=kT_file, skiprows=skiprows, maxlength=maxlength, verbose=verbose ) 18 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | pyfeat-examples 2 | =============== 3 | 4 | We have comprised a series of examples to illustrate the functionality of 5 | pyfeat and how it can be used for different stationary estimators to compute 6 | free energies and probabilties. 
7 | 8 | Content 9 | ------- 10 | 11 | * Asymetric double well potential - simulated with 2 different simulation 12 | protocols: Temperature replica exchange and umbrella sampling 13 | 14 | 15 | How to start the examples 16 | ------------------------- 17 | 18 | Make sure you have ipython notebooks installed (if not you can get it via 19 | the python anaconda distribution for example) 20 | Then go to your terminal and type: 21 | 22 |
ipython notebook double_well_example.ipynb
23 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - 2.7 4 | before_install: 5 | - wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O miniconda.sh 6 | - bash miniconda.sh -b 7 | - export PATH=$HOME/miniconda/bin:$PATH 8 | - conda update --yes conda 9 | - sudo rm -rf /dev/shm 10 | - sudo ln -s /run/shm /dev/shm 11 | install: 12 | - conda install --yes python=$TRAVIS_PYTHON_VERSION numpy scipy cython nose 13 | - pip install pytram 14 | - python setup.py develop 15 | - pip install coveralls 16 | script: 17 | - coverage run --source=pyfeat setup.py test 18 | after_success: 19 | - coveralls 20 | deploy: 21 | provider: pypi 22 | user: pyfeat-user 23 | password: 24 | secure: henjeFXpv8u7fDzIDOg62eQ6jvfmECD5bKC38Lz1ztVURlKJFIX1yIgALylmaed6jZFf2IA7Kgmv5QeT68bAGuchWE9as2TZnUCsfRDldoEWKu3P0ccNiIyIISFV5PlkB509tmizvTx9dohnXgX8mFHBXx3YYy/Z3nFDLDhbAA8= 25 | on: 26 | tags: true 27 | repo: markovmodel/pyfeat 28 | branch: master 29 | -------------------------------------------------------------------------------- /doc/source/install.rst: -------------------------------------------------------------------------------- 1 | .. _ref_install: 2 | 3 | ================== 4 | Installation guide 5 | ================== 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | 10 | 11 | There are different ways to install the pyfeat package. 12 | 13 | 14 | 15 | From the repository 16 | =================== 17 | 18 | First step: get the repository! 19 | 20 | Go to your shell and type 21 | 22 | .. code-block:: bash 23 | 24 | git clone https://github.com/markovmodel/pyfeat.git 25 | 26 | Then, install the package from source. 27 | 28 | 29 | via pip 30 | ------- 31 | 32 | Go to the repository's root directory and type 33 | 34 | .. code-block:: bash 35 | 36 | pip install . 
37 | 38 | 39 | via setup 40 | --------- 41 | 42 | Go to the repository's root directory and type: 43 | 44 | .. code-block:: bash 45 | 46 | python setup.py install [--user] 47 | 48 | For a development installation run: 49 | 50 | .. code-block:: bash 51 | 52 | python setup.py develop --user --verbose 53 | 54 | 55 | 56 | From the python package index 57 | ============================= 58 | 59 | Go to your shell and type 60 | 61 | .. code-block:: bash 62 | 63 | pip install pyfeat 64 | 65 | or 66 | 67 | .. code-block:: bash 68 | 69 | easy_install pyfeat 70 | -------------------------------------------------------------------------------- /CHANGELOG: -------------------------------------------------------------------------------- 1 | ********* 2 | CHANGELOG 3 | ********* 4 | 5 | 6 | 0.1.x 7 | ===== 8 | 9 | Implemented WHAM and MBAR (internal Alpha version - not released) 10 | 11 | 12 | 0.2.0 13 | ===== 14 | 15 | WHAM + MBAR functional (Beta version) 16 | 17 | 0.2.1 18 | ----- 19 | 20 | Switching dependencies to numpy>=1.7.1; adding missing files. 21 | 22 | 0.2.2 23 | ----- 24 | 25 | Bugfix regarding the thermodynamic free energy array in WHAM 26 | 27 | 0.2.3 28 | ----- 29 | 30 | Restructured the MBAR implementation for speed up and reduced the cython dependency to 0.15 31 | 32 | 0.3.0 33 | ===== 34 | 35 | Restructured API 36 | Implentation of new Reader 37 | Implementation of new Data Converter (Forge) 38 | new implementation of a runscript. 39 | Complete new implementation of WHAM 40 | Removed MBAR legacy code to include pymbar in futrue release 41 | Added pytram dependency with implementation of DTRAM and XTRAM 42 | Added example ipython notebook for asymatric double-well potential 43 | f_K now retrun the same quantities for all estimators 44 | 45 | 0.3.1 46 | ----- 47 | Fix Sign error in WHAM f_K. 48 | Fix Not converged warning. 49 | Added versioneering. 50 | Fixing typos. 51 | 52 | 0.3.2 53 | ----- 54 | Fix normailsation in higher thermodynamic states (WHAM). 
55 | Extended the test suite. 56 | Renaming the *_me API functions to *_from_matrix. 57 | 58 | DEVEL 59 | ----- 60 | 61 | 62 | 63 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015, Markov model 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright notice, this 8 | list of conditions and the following disclaimer. 9 | 10 | * Redistributions in binary form must reproduce the above copyright notice, 11 | this list of conditions and the following disclaimer in the documentation 12 | and/or other materials provided with the distribution. 13 | 14 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 15 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 17 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 18 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 20 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 21 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 22 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 23 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24 | 25 | -------------------------------------------------------------------------------- /test/test_wham.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # 3 | # test_wham.py - testing the pyfeat wham class 4 | # 5 | # author: Christoph Wehmeyer 6 | # author: Antonia Mey 7 | # 8 | ################################################################################ 9 | 10 | from nose.tools import assert_raises, assert_true 11 | from pyfeat.estimator import WHAM 12 | from pytram import ExpressionError, NotConvergedWarning 13 | import numpy as np 14 | 15 | #WHAM testing 16 | def test_expression_error_None(): 17 | assert_raises( ExpressionError, WHAM, np.ones( shape=(2,3,3), dtype=np.intc ), None ) 18 | def test_expression_error_int(): 19 | assert_raises( ExpressionError, WHAM, np.ones( shape=(2,3,3), dtype=np.intc ), 5 ) 20 | def test_expression_error_list(): 21 | assert_raises( ExpressionError, WHAM, np.ones( shape=(2,3,3), dtype=np.intc ), [1,2] ) 22 | def test_expression_error_dim(): 23 | assert_raises( ExpressionError, WHAM, np.ones( shape=(2,3,3), dtype=np.intc ), np.ones( shape=(2,2,2), dtype=np.float64 ) ) 24 | def test_expression_error_markov(): 25 | assert_raises( ExpressionError, WHAM, np.ones( shape=(2,3,3), dtype=np.intc ), np.ones( shape=(2,2), dtype=np.float64 ) ) 26 | def test_expression_error_therm(): 27 | assert_raises( ExpressionError, WHAM, np.ones( shape=(2,3,3), dtype=np.intc ), np.ones( shape=(1,3), dtype=np.float64 ) ) 28 | def test_expression_error_int16(): 29 | assert_raises( ExpressionError, WHAM, np.ones( shape=(2,3,3), dtype=np.intc ), np.ones( shape=(2,3), dtype=np.int16 ) ) 30 | def test_expression_error_float32(): 31 | assert_raises( ExpressionError, WHAM, np.ones( shape=(2,3,3), dtype=np.intc ), np.ones( shape=(2,3), dtype=np.float32 ) ) 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 
48 | 49 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from os.path import join, dirname 3 | import versioneer 4 | 5 | setup( 6 | cmdclass=versioneer.get_cmdclass(), 7 | name='pyfeat', 8 | version=versioneer.get_version(), 9 | description='The python free energy analysis toolkit', 10 | long_description='Commandline toolkit that allows the use of different free energy estimators using a single format', 11 | classifiers=[ 12 | 'Development Status :: 3 - Alpha', 13 | 'Environment :: Console', 14 | 'Intended Audience :: Science/Research', 15 | 'License :: OSI Approved :: BSD License', 16 | 'Natural Language :: English', 17 | 'Operating System :: MacOS :: MacOS X', 18 | 'Operating System :: POSIX :: Linux', 19 | 'Programming Language :: C', 20 | 'Programming Language :: Cython', 21 | 'Programming Language :: Python :: 2.7', 22 | 'Topic :: Scientific/Engineering :: Bio-Informatics', 23 | 'Topic :: Scientific/Engineering :: Chemistry', 24 | 'Topic :: Scientific/Engineering :: Mathematics', 25 | 'Topic :: Scientific/Engineering :: Physics' 26 | ], 27 | keywords=[ 'TRAM', 'WHAM', 'free energy' ], 28 | url='http://github.com/markovmodel/pyfeat', 29 | author='The pyfeat team', 30 | author_email='pyfeat@lists.fu-berlin.de', 31 | license='Simplified BSD License', 32 | setup_requires=[ 'numpy>=1.7.1', 'setuptools>=0.6', 'pytram>=0.2.0' ], 33 | tests_require=[ 'numpy>=1.7.1', 'nose>=1.3' ], 34 | install_requires=[ 'numpy>=1.7.1' ], 35 | packages=[ 36 | 'pyfeat', 37 | 'pyfeat.reader', 38 | 'pyfeat.forge', 39 | 'pyfeat.estimator', 40 | 'pyfeat.api' 41 | ], 42 | test_suite='nose.collector', 43 | scripts=[ 44 | 'bin/run_pyfeat.py' 45 | ] 46 | ) 47 | -------------------------------------------------------------------------------- /pyfeat/estimator/dtram.py: 
-------------------------------------------------------------------------------- 1 | r""" 2 | ======================= 3 | dTRAM estimator wrapper 4 | ======================= 5 | 6 | .. moduleauthor:: Antonia Mey , Christoph Wehmeyer 7 | 8 | """ 9 | 10 | import pytram as pt 11 | 12 | 13 | class DTRAM(object): 14 | r""" 15 | I am the dTRAM wrapper 16 | """ 17 | def __init__(self, C_K_ij, b_K_i): 18 | r""" 19 | Initialize the DTRAM object 20 | 21 | Parameters 22 | ---------- 23 | C_K_ij : numpy.ndarray(shape=(T, M, M), dtype=numpy.intc) 24 | transition counts between the M discrete Markov states for each of 25 | the T thermodynamic ensembles 26 | b_K_i : numpy.ndarray(shape=(T, M), dtype=numpy.float64) 27 | bias energies in the T thermodynamic and M discrete Markov states 28 | """ 29 | self._dtram_obj = pt.DTRAM(C_K_ij, b_K_i) 30 | 31 | def sc_iteration(self, maxiter=100, ftol=1.0e-5, verbose=False): 32 | r""" 33 | sc_iteration function 34 | 35 | Parameters 36 | ---------- 37 | maxiter : int 38 | maximum number of self-consistent-iteration steps 39 | ftol : float (> 0.0) 40 | convergence criterion based on the max relative change in an self-consistent-iteration step 41 | verbose : boolean 42 | Be loud and noisy 43 | """ 44 | self._dtram_obj.sc_iteration(ftol=ftol, maxiter=maxiter, verbose=verbose) 45 | 46 | @property 47 | def pi_i(self): 48 | return self._dtram_obj.pi_i 49 | 50 | @property 51 | def pi_K_i(self): 52 | return self._dtram_obj.pi_K_i 53 | 54 | @property 55 | def f_K(self): 56 | return self._dtram_obj.f_K 57 | 58 | @property 59 | def f_K_i(self): 60 | return self._dtram_obj.f_K_i 61 | 62 | @property 63 | def f_i(self): 64 | return self._dtram_obj.f_i 65 | 66 | @property 67 | def n_therm_states(self): 68 | return self._dtram_obj.n_therm_states 69 | 70 | @property 71 | def n_markov_states(self): 72 | return self._dtram_obj.n_markov_states 73 | 74 | @property 75 | def citation(self): 76 | return self._dtram_obj.citation 77 | 78 | def cite(self, pre=""): 79 | 
self._dtram_obj.cite(pre=pre) 80 | -------------------------------------------------------------------------------- /pyfeat/estimator/tram.py: -------------------------------------------------------------------------------- 1 | r""" 2 | ====================== 3 | TRAM estimator wrapper 4 | ====================== 5 | 6 | .. moduleauthor:: Christoph Wehmeyer 7 | 8 | """ 9 | 10 | import pytram as pt 11 | import numpy as np 12 | 13 | class TRAM( object ): 14 | r""" 15 | I am the TRAM wrapper 16 | """ 17 | def __init__( self, C_K_ij, b_K_x, M_x, N_K_i ): 18 | r""" 19 | Initialize the TRAM object 20 | 21 | Parameters 22 | ---------- 23 | C_K_ij : numpy.ndarray( shape=(T,M,M), dtype=numpy.intc ) 24 | transition counts between the M discrete Markov states for each of the T thermodynamic ensembles 25 | b_K_x : numpy.ndarray( shape=(T,samples), dtype=numpy.float ) 26 | all samples for biases at thermodynamic state K 27 | M_x : numpy.ndarray( shape=(samples), dtype=numpy.intc ) 28 | trajectory of Markov states sampled 29 | N_K_i : numpy.ndarray( shape=(T,M), dtype=numpy.intc ) 30 | total number of counts from simulation at T in M discrete Markov state (bin) 31 | """ 32 | try: 33 | self._tram_obj = pt.TRAM( C_K_ij, b_K_x, M_x, N_K_i ) 34 | except AttributeError, e: 35 | raise NotImplementedError( "The TRAM estimator is not yet implemented in the pytram package" ) 36 | 37 | def sc_iteration( self , maxiter=100, ftol=1.0E-5, verbose=False ): 38 | r""" 39 | sc_iteration function 40 | 41 | Parameters 42 | ---------- 43 | maxiter : int 44 | maximum number of self-consistent-iteration steps 45 | ftol : float (> 0.0) 46 | convergence criterion based on the max relative change in an self-consistent-iteration step 47 | verbose : boolean 48 | Be loud and noisy 49 | """ 50 | self._tram_obj.sc_iteration( maxiter=maxiter, ftol=ftol, verbose=verbose ) 51 | 52 | @property 53 | def pi_i( self ): 54 | return self._tram_obj.pi_i 55 | 56 | @property 57 | def pi_K_i( self ): 58 | return 
self._tram_obj.pi_K_i 59 | 60 | @property 61 | def f_K( self ): 62 | return self._tram_obj.f_K 63 | 64 | @property 65 | def f_K_i( self ): 66 | return -np.log( self.pi_K_i ) 67 | 68 | @property 69 | def f_i( self ): 70 | return -np.log( self.pi_i ) 71 | 72 | @property 73 | def citation( self ): 74 | return self._tram_obj.citation 75 | 76 | def cite( self, pre="" ): 77 | self._tram_obj.cite( pre=pre ) 78 | -------------------------------------------------------------------------------- /pyfeat/estimator/xtram.py: -------------------------------------------------------------------------------- 1 | r""" 2 | 3 | ======================= 4 | XTRAM estimator wrapper 5 | ======================= 6 | 7 | .. moduleauthor:: Antonia Mey 8 | 9 | """ 10 | 11 | import pytram as pt 12 | import numpy as np 13 | from pytram import NotConvergedWarning, ExpressionError 14 | 15 | 16 | class XTRAM( object ): 17 | r""" 18 | I am the xTRAM wrapper 19 | """ 20 | def __init__( self, C_K_ij, b_K_x, T_x, M_x, N_K_i, target=0 ): 21 | 22 | r""" 23 | Initialize the XTRAM object 24 | 25 | Parameters 26 | ---------- 27 | C_K_ij : 3-D numpy array 28 | Countmatrix for each thermodynamic state K 29 | b_K_x : 2-D numpy array 30 | Biasing tensor 31 | T_x : 1-D numpy array 32 | Thermodynamic state trajectory 33 | M_x : 1-D numpy array 34 | Markov state trajectories 35 | N_K_i : 2-D numpy array 36 | Number of markov samples in each thermodynamic state 37 | """ 38 | 39 | self._xtram_obj = pt.XTRAM( C_K_ij, b_K_x, T_x, M_x, N_K_i, target = 0 ) 40 | self.n_therm_states = np.shape(N_K_i)[0] 41 | self.n_markov_states = np.shape(N_K_i)[1] 42 | 43 | 44 | def sc_iteration( self, maxiter=100, ftol=1.0E-5, verbose=False ): 45 | r""" 46 | sc_iteration function 47 | 48 | Parameters 49 | ---------- 50 | maxiter : int 51 | maximum number of self-consistent-iteration steps 52 | ftol : float (> 0.0) 53 | convergence criterion based on the max relative change in an self-consistent-iteration step 54 | verbose : boolean 55 
| Be loud and noisy 56 | """ 57 | self._xtram_obj.sc_iteration( maxiter, ftol, verbose) 58 | 59 | @property 60 | def pi_i( self ): 61 | return self._xtram_obj.pi_i 62 | 63 | @property 64 | def pi_K_i( self ): 65 | return self._xtram_obj.pi_K_i 66 | 67 | @property 68 | def f_K( self ): 69 | return self._xtram_obj.f_K 70 | 71 | @property 72 | def f_K_i( self ): 73 | return -np.log(self.pi_K_i) 74 | 75 | @property 76 | def f_i( self ): 77 | return -np.log(self.pi_i) 78 | 79 | @property 80 | def n_therm_states( self ): 81 | return self._n_therm_states 82 | 83 | @n_therm_states.setter 84 | def n_therm_states( self, m ): 85 | self._n_therm_states = m 86 | 87 | @property 88 | def n_markov_states( self ): 89 | return self._n_markov_states 90 | 91 | @n_markov_states.setter 92 | def n_markov_states( self, n ): 93 | self._n_markov_states = n 94 | 95 | @property 96 | def citation( self ): 97 | return self._xtram_obj.citation 98 | 99 | def cite( self, pre="" ): 100 | self._xtram_obj.cite( pre=pre ) 101 | 102 | -------------------------------------------------------------------------------- /test/test_xtram.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # 3 | # test_xtram.py - testing the pyfeat xtram class 4 | # 5 | # author: Christoph Wehmeyer 6 | # author: Antonia Mey 7 | # 8 | ################################################################################ 9 | 10 | from nose.tools import assert_raises, assert_true 11 | from pyfeat.estimator import XTRAM 12 | from pytram import ExpressionError, NotConvergedWarning 13 | import numpy as np 14 | 15 | #XTRAM testing 16 | def test_expression_error_None(): 17 | assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), None, np.ones( shape =(10), dtype=np.intc), np.ones( shape =(10), dtype=np.intc),np.ones( shape =(2,3), dtype=np.intc) ) 18 | assert_raises( ExpressionError, XTRAM, 
np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64 ),None, np.ones( shape =(10), dtype=np.intc), np.ones( shape =(2,3), dtype=np.intc) ) 19 | assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64), np.ones( shape =(10), dtype=np.intc),None, np.ones( shape =(2,3), dtype=np.intc) ) 20 | assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64), np.ones( shape =(10), dtype=np.intc),np.ones( shape =(10), dtype=np.intc), None ) 21 | 22 | def test_expression_error_dim(): 23 | assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64 ), np.ones( shape =(10), dtype=np.intc ),np.ones( shape =(10), dtype=np.intc), np.ones( shape =(2,3), dtype=np.intc) ) 24 | assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(10), dtype=np.float64 ), np.ones( shape =(10), dtype=np.intc ),np.ones( shape =(10), dtype=np.intc), np.ones( shape =(2,3), dtype=np.intc) ) 25 | assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64 ), np.ones( shape =(10), dtype=np.intc ),np.ones( shape =(10), dtype=np.intc), np.ones( shape =(3), dtype=np.intc) ) 26 | 27 | def test_expression_error_markov(): 28 | assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64), np.ones( shape =(10), dtype=np.intc),np.ones( shape =(10), dtype=np.intc), np.ones( shape =(2,4), dtype=np.intc) ) 29 | def test_expression_error_therm(): 30 | assert_raises( ExpressionError, XTRAM, np.ones( shape =(2,3,3), dtype=np.intc), np.ones( shape =(3,10), dtype=np.float64), np.ones( shape =(10), dtype=np.intc),np.ones( shape =(10), dtype=np.intc), np.ones( shape =(2,3), dtype=np.intc) ) 31 | assert_raises( ExpressionError, XTRAM, np.ones( shape 
=(2,3,3), dtype=np.intc), np.ones( shape =(2,10), dtype=np.float64), np.ones( shape =(10), dtype=np.intc),np.ones( shape =(10), dtype=np.intc), np.ones( shape =(3,4), dtype=np.intc) ) 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /pyfeat/forge/forge.py: -------------------------------------------------------------------------------- 1 | r""" 2 | 3 | ============== 4 | Data container 5 | ============== 6 | 7 | .. moduleauthor:: Antonia Mey , Christoph Wehmeyer 8 | 9 | """ 10 | 11 | import pytram as pt 12 | import numpy as np 13 | 14 | class Forge( pt.TRAMData ): 15 | """I am the data forge 16 | """ 17 | def __init__( self, trajs, b_K_i=None, kT_K=None, kT_target=None, verbose=False ): 18 | super( Forge, self ).__init__( trajs, b_K_i=b_K_i, kT_K=kT_K, kT_target=kT_target, verbose=verbose ) 19 | self._M_K_x = None 20 | self._b_IK_x = None 21 | 22 | @property 23 | def M_K_x( self ): 24 | if self._M_K_x is None: 25 | if self.verbose: 26 | print "# Copying Markov state sequences" 27 | self._M_K_x = np.zeros( shape=(self.n_therm_states,self.N_K.max()), dtype=np.intc ) 28 | t_K = np.zeros( shape=(self.n_therm_states,), dtype=np.intc ) 29 | for traj in self.trajs: 30 | # SPEEDUP POSSIBLE! 31 | for t in xrange( traj['t'].shape[0] ): 32 | K = traj['t'][t] 33 | self._M_K_x[K,t_K[K]] = traj['m'][t] 34 | t_K[K] += 1 35 | if self.verbose: 36 | print "# ... done" 37 | return self._M_K_x 38 | 39 | @property 40 | def b_IK_x( self ): 41 | if self._b_IK_x is None: 42 | if self.verbose: 43 | print "# Copying bias energy sequences" 44 | self._b_IK_x = np.zeros( shape=(self.n_therm_states,self.n_therm_states,self.N_K.max()), dtype=np.intc ) 45 | if not self.kT_K is None: 46 | self.gen_b_IK_x_from_kT_K() 47 | else: 48 | self.gen_b_IK_x() 49 | if self.verbose: 50 | print "# ... 
done" 51 | return self._b_IK_x 52 | 53 | def gen_b_IK_x_from_kT_K( self ): 54 | t_K = np.zeros( shape=(self.n_therm_states,), dtype=np.intc ) 55 | for traj in self.trajs: 56 | # SPEEDUP POSSIBLE! 57 | for t in xrange( traj['t'].shape[0] ): 58 | K = traj['t'][t] 59 | for I in xrange( self.n_therm_states ): 60 | self._b_IK_x[I,K,t_K[K]] = self.kT_K[K] * ( 1.0/self.kT_K[I] - 1.0/self.kT_K[self.kT_target] ) * traj['b'][t] 61 | t_K[K] += 1 62 | 63 | def gen_b_IK_x( self ): 64 | t_K = np.zeros( shape=(self.n_therm_states,), dtype=np.intc ) 65 | for traj in self.trajs: 66 | if traj['u'].shape[1] == 1: 67 | raise pt.ExpressionError( 68 | "b_IK_x", 69 | "Trajectory with single energy columns detected - use kT file and kT target" 70 | ) 71 | if traj['u'].shape[1] != self.n_therm_states: 72 | raise pt.ExpressionError( 73 | "b_IK_x", 74 | "Trajectory with wrong number of energy columns detected (%d!=%d)" \ 75 | % ( traj['u'].shape[1], self.n_therm_states ) 76 | ) 77 | # SPEEDUP POSSIBLE! 78 | for t in xrange( traj['t'].shape[0] ): 79 | K = traj['t'][t] 80 | for I in xrange( self.n_therm_states ): 81 | self._b_IK_x[I,K,t_K[K]] = traj['b'][I,t] 82 | t_K[K] += 1 83 | 84 | 85 | 86 | 87 | 88 | -------------------------------------------------------------------------------- /test/test_dtram.py: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # 3 | # test_dtram.py - testing the pyfeat dtram class 4 | # 5 | # author: Christoph Wehmeyer 6 | # 7 | ################################################################################ 8 | 9 | from nose.tools import assert_raises, assert_true 10 | from pyfeat.estimator import DTRAM 11 | from pytram import ExpressionError, NotConvergedWarning 12 | import numpy as np 13 | 14 | def test_expression_error_None(): 15 | """test DTRAM throws ExpressionError with None""" 16 | assert_raises( 17 | ExpressionError, 18 | DTRAM, 19 | 
def _valid_C_K_ij():
    """Return the valid count tensor shared by all ExpressionError probes."""
    return np.ones(shape=(2, 3, 3), dtype=np.intc)

def test_expression_error_int():
    """test DTRAM throws ExpressionError with number"""
    assert_raises(ExpressionError, DTRAM, _valid_C_K_ij(), 5)

def test_expression_error_list():
    """test DTRAM throws ExpressionError with list"""
    assert_raises(ExpressionError, DTRAM, _valid_C_K_ij(), [1, 2])

def test_expression_error_dim():
    """test DTRAM throws ExpressionError with wrong dimension"""
    bad_b_K_i = np.ones(shape=(2, 2, 2), dtype=np.float64)
    assert_raises(ExpressionError, DTRAM, _valid_C_K_ij(), bad_b_K_i)

def test_expression_error_markov():
    """test DTRAM throws ExpressionError with wrong Markov state count"""
    bad_b_K_i = np.ones(shape=(2, 2), dtype=np.float64)
    assert_raises(ExpressionError, DTRAM, _valid_C_K_ij(), bad_b_K_i)

def test_expression_error_therm():
    """test DTRAM throws ExpressionError with wrong thermodynamic state count"""
    bad_b_K_i = np.ones(shape=(1, 3), dtype=np.float64)
    assert_raises(ExpressionError, DTRAM, _valid_C_K_ij(), bad_b_K_i)

def test_expression_error_int16():
    """test DTRAM throws ExpressionError with wrong dtype (int16)"""
    bad_b_K_i = np.ones(shape=(2, 3), dtype=np.int16)
    assert_raises(ExpressionError, DTRAM, _valid_C_K_ij(), bad_b_K_i)

def test_expression_error_float32():
    """test DTRAM throws ExpressionError with wrong dtype (float32)"""
    bad_b_K_i = np.ones(shape=(2, 3), dtype=np.float32)
    assert_raises(ExpressionError, DTRAM, _valid_C_K_ij(), bad_b_K_i)

def test_toy_model():
    """test DTRAM with toy model"""
    C_K_ij = np.array(
        [[[2358, 29, 0], [29, 0, 32], [0, 32, 197518]],
         [[16818, 16763, 0], [16763, 0, 16510], [0, 16510, 16635]]],
        dtype=np.intc)
    b_K_i = np.array([[0.0, 0.0, 0.0], [4.0, 0.0, 8.0]], dtype=np.float64)
    dtram_obj = DTRAM(C_K_ij, b_K_i)
    # a single iteration with an unreachably small tolerance must warn
    assert_raises(
        NotConvergedWarning, dtram_obj.sc_iteration,
        maxiter=1, ftol=1.0E-80, verbose=False)
    dtram_obj.sc_iteration(maxiter=200000, ftol=1.0E-15, verbose=True)
    pi = np.array([1.82026887e-02, 3.30458960e-04, 9.81466852e-01], dtype=np.float64)
    print(pi)
    print(dtram_obj.pi_i)
    assert_true(np.max(np.abs(dtram_obj.pi_i - pi)) < 1.0E-8)
25 | 26 | 27 | Installation 28 | ============ 29 | With pip from PyPI:: 30 | 31 | # you might have to install these dependencies manually 32 | pip install cython 33 | pip install numpy 34 | pip install pytram 35 | 36 | # install pyfeat 37 | pip install pyfeat 38 | 39 | With using conda:: 40 | 41 | #Conda should automoatically take care of all dependencies 42 | conda install -c https://conda.binstar.org/omnia pytram 43 | 44 | Authors 45 | ======= 46 | 47 | - Antonia Mey\ :superscript:`*` 48 | - Christoph Wehmeyer\ :superscript:`*` 49 | - Fabian Paul 50 | - Hao Wu 51 | - Frank Noé 52 | 53 | ``*``) equal contribution 54 | 55 | References 56 | ========== 57 | 58 | * **dTRAM**: *Statistically optimal analysis of state-discretized trajectory data from multiple thermodynamic states*, Hao Wu, Antonia S.J.S. Mey, Edina Rosta, and Frank Noé, **J. Chem. Phys.** 141, 214106 (2014). 59 | 60 | Download: 61 | 62 | * **xTRAM**: *Estimating Equilibrium Expectations from Time-Correlated Simulation Data at Multiple Thermodynamic States*, Antonia S.J.S. Mey, Hao Wu, and Frank Noé, **Phys. Rev. X** 4, 041018 (2014). 63 | 64 | Download: 65 | 66 | * **WHAM**: *The weighted histogram analysis method for free-energy calculations on biomolecules. I. The method*, Shankar Kumar, John M. Rosenberg, Djamal Bouzida, Robert H. Swendsen and Peter A. Kollman, **J. Comput. Chem.** 13, 1011–1021 (1992) 67 | 68 | Download: 69 | 70 | Copyright notice 71 | ================ 72 | 73 | Copyright (c) 2014, Computational Molecular Biology Group, FU Berlin, 14195 Berlin, Germany. 74 | 75 | Redistribution and use in source and binary forms, with or without 76 | modification, are permitted provided that the following conditions 77 | are met: 78 | 79 | 1. Redistributions of source code must retain the above copyright notice, 80 | this list of conditions and the following disclaimer. 81 | 82 | 2. 
Redistributions in binary form must reproduce the above copyright 83 | notice, this list of conditions and the following disclaimer in the 84 | documentation and/or other materials provided with the distribution. 85 | 86 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 87 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 88 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 89 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 90 | HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 91 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 92 | TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 93 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 94 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 95 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 96 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
################################################################################
#
#   test_three_state_model.py - testing pyfeat with a simple three state model
#
#   author: Christoph Wehmeyer <christoph.wehmeyer@fu-berlin.de>
#
################################################################################

import numpy as np
from numpy.testing import assert_allclose

try:
    from nose.tools import assert_raises, assert_true, assert_equal
    from pyfeat import Forge, dtram, xtram, wham
except ImportError:
    # NOTE(review): only available in the full test environment; the helper
    # functions below depend on numpy alone.
    pass

def tower_sample(distribution):
    """Draw a random integer from the given (unnormalized) distribution.

    Parameters
    ----------
    distribution : array_like
        non-negative weights; need not sum to one

    Returns
    -------
    int : index drawn with probability proportional to its weight
    """
    cdf = np.cumsum(distribution)
    rnd = np.random.rand() * cdf[-1]
    # smallest index whose cumulative weight exceeds the random threshold
    return np.min(np.where(cdf > rnd))

def evolve_chain(x, P, length):
    """Generate a discrete Markov chain of the given length.

    Parameters
    ----------
    x : int
        initial state
    P : numpy.ndarray(shape=(n, n), dtype=numpy.float64)
        row-stochastic transition matrix
    length : int
        number of frames (including the initial state)
    """
    chain = np.zeros(length, dtype=np.intc)
    chain[0] = x
    # range instead of xrange: identical here and works on Python 2 and 3
    for i in range(1, length):
        chain[i] = tower_sample(P[chain[i - 1]])
    return chain

def assign_bias(dtraj, b_K_i):
    """Assign per-frame bias energies to a discrete trajectory.

    Parameters
    ----------
    dtraj : numpy.ndarray(shape=(T,), dtype=numpy.intc)
        discrete Markov state trajectory
    b_K_i : numpy.ndarray(shape=(K, M), dtype=numpy.float64)
        bias energies of the M Markov states in the K thermodynamic states

    Returns
    -------
    numpy.ndarray(shape=(T, K)) : bias energies for every frame
    """
    b = np.zeros(shape=(dtraj.shape[0], b_K_i.shape[0]), dtype=np.float64)
    for i in range(b_K_i.shape[1]):
        b[(dtraj == i), :] = (b_K_i[:, i])[np.newaxis, :]
    return b

def generate_data(P, b_K_i):
    """Generate pyfeat-compatible input data.

    Runs 100 chains of length 100 at each of the two thermodynamic states
    and packs them into the list-of-dicts format ('m', 't', 'b') that the
    Forge expects.
    """
    dtraj_0 = [evolve_chain(1, P[0, :, :], 100) for _ in range(100)]
    dtraj_1 = [evolve_chain(1, P[1, :, :], 100) for _ in range(100)]
    inp = [{
        'm': d,
        't': np.zeros(shape=d.shape, dtype=np.intc),
        'b': assign_bias(d, b_K_i)} for d in dtraj_0]
    inp += [{
        'm': d,
        't': np.ones(shape=d.shape, dtype=np.intc),
        'b': assign_bias(d, b_K_i)} for d in dtraj_1]
    return inp
class TestThreeStateModel(object):
    r"""
    End-to-end API tests on an analytically solvable three-state model.

    The reference quantities (pi_i, f_i, pi_K_i, f_K_i) are computed in
    closed form in ``setup_class`` and compared against the WHAM, dTRAM
    and xTRAM estimates obtained from sampled trajectories.
    """
    @classmethod
    def setup_class(cls):
        # unbiased reduced energies of the three Markov states
        cls.energy = np.array([1.0, 2.0, 0.0], dtype=np.float64)
        # two thermodynamic states: unbiased and linearly biased
        cls.b_K_i = np.array([[0.0, 0.0, 0.0], 2.0 - cls.energy], dtype=np.float64)
        cls.pi_i = np.exp(-cls.energy) / np.exp(-cls.energy).sum()
        cls.f_i = -np.log(cls.pi_i)
        # per-ensemble normalization constants and biased distributions
        cls.F_K = 1.0 / (np.exp(-cls.b_K_i) * cls.pi_i[np.newaxis, :]).sum(axis=1)
        cls.pi_K_i = cls.F_K[:, np.newaxis] * np.exp(-cls.b_K_i) * cls.pi_i[np.newaxis, :]
        cls.f_K_i = -np.log(cls.pi_K_i)
        # Metropolis-Hastings transition matrix for the unbiased ensemble
        metropolis = cls.energy[np.newaxis, :] - cls.energy[:, np.newaxis]
        metropolis[(metropolis < 0.0)] = 0.0
        selection = np.array(
            [[0.5, 0.5, 0.0], [0.5, 0.0, 0.5], [0.0, 0.5, 0.5]], dtype=np.float64)
        metr_hast = selection * np.exp(-metropolis)
        for i in range(metr_hast.shape[0]):
            metr_hast[i, i] = 0.0
            metr_hast[i, i] = 1.0 - metr_hast[i, :].sum()
        cls.tmat = np.array([metr_hast, selection])
        cls.inp = generate_data(cls.tmat, cls.b_K_i)
    @classmethod
    def teardown_class(cls):
        pass
    def setup(self):
        pass
    def teardown(self):
        pass
    def test_wham_api(self):
        """testing the WHAM API"""
        forge = Forge(self.inp, b_K_i=self.b_K_i, verbose=True)
        wham_obj = wham(forge, maxiter=100000, ftol=1.0E-14, verbose=True)
        maxerr = 5.0E-1
        assert_allclose(wham_obj.f_i, self.f_i, atol=maxerr)
        assert_allclose(wham_obj.pi_i, self.pi_i, atol=maxerr)
        assert_allclose(wham_obj.f_K_i, self.f_K_i, atol=maxerr)
        assert_allclose(wham_obj.pi_K_i, self.pi_K_i, atol=maxerr)
    def test_dtram_api(self):
        """testing the dTRAM API"""
        forge = Forge(self.inp, b_K_i=self.b_K_i, verbose=True)
        dtram_obj = dtram(forge, lag=1, maxiter=100000, ftol=1.0E-14, verbose=True)
        maxerr = 1.0E-1
        # pass the tolerance as atol (matching test_wham_api): the third
        # positional argument of assert_allclose is rtol, which is the wrong
        # criterion for free energies that can be close to zero
        assert_allclose(dtram_obj.f_i, self.f_i, atol=maxerr)
        assert_allclose(dtram_obj.pi_i, self.pi_i, atol=maxerr)
        assert_allclose(dtram_obj.f_K_i, self.f_K_i, atol=maxerr)
        assert_allclose(dtram_obj.pi_K_i, self.pi_K_i, atol=maxerr)
    def test_xtram_api(self):
        """testing the xTRAM API"""
        forge = Forge(self.inp, verbose=True)
        xtram_obj = xtram(forge, lag=1, maxiter=10000, ftol=1.0E-13, verbose=True)
        maxerr = 1.0E-1
        assert_allclose(xtram_obj.f_i, self.f_i, atol=maxerr)
        assert_allclose(xtram_obj.pi_i, self.pi_i, atol=maxerr)
        assert_allclose(xtram_obj.f_K_i, self.f_K_i, atol=maxerr)
        assert_allclose(xtram_obj.pi_K_i, self.pi_K_i, atol=maxerr)
129:124105 (2008)" 31 | # ] 32 | 33 | # def sc_iteration( self, maxiter=1000, ftol=1.0e-6, verbose=False ): 34 | # self._mbar_obj._selfConsistentIteration( maximum_iterations=maxiter, relative_tolerance=ftol, verbose=verbose ) 35 | 36 | # def cite( self, pre="" ): 37 | # for line in self.citation: 38 | # print "%s%s" % ( pre, line ) 39 | 40 | # @property 41 | # def n_therm_states( self ): 42 | # if self._n_therm_states is None: 43 | # self._n_therm_states = self.N_K.shape[0] 44 | # return self._n_therm_states 45 | 46 | # @property 47 | # def n_markov_states( self ): 48 | # if self._n_markov_states is None: 49 | # self._n_markov_states = self.M_I_x.max() + 1 50 | # return self._n_markov_states 51 | 52 | # @property 53 | # def f_K( self ): 54 | # return self._mbar_obj.f_k 55 | 56 | # @property 57 | # def f_K_i( self ): 58 | # raise NotImplementedError('Property not available yet') 59 | 60 | # @property 61 | # def pi_i( self ): 62 | # raise NotImplementedError('Property not available yet') 63 | 64 | # @property 65 | # def pi_K_i( self ): 66 | # raise NotImplementedError('Property not available yet') 67 | 68 | 69 | ####################################################################### 70 | # MBAR Legacy class # 71 | ####################################################################### 72 | 73 | # class MBAR_LEGACY( object ): 74 | # r""" 75 | # I am the mbar legacy class 76 | # """ 77 | # def __init__( self, b_IK_x, M_I_x, N_K ): 78 | # raise NotImplementedError('I need to be implemented') 79 | 80 | # def _check_N_K( self, N_K ): 81 | # if None is N_K: 82 | # raise ExpressionError( "N_K", "is None" ) 83 | # if not isinstance( N_K, (np.ndarray,) ): 84 | # raise ExpressionError( "N_K", "invalid type (%s)" % str( type( N_K ) ) ) 85 | # if 1 != N_K.ndim: 86 | # raise ExpressionError( "N_K", "invalid number of dimensions (%d)" % N_K.ndim ) 87 | # if np.intc != N_K.dtype: 88 | # raise ExpressionError( "N_K", "invalid dtype (%s)" % str( N_K.dtype ) ) 89 | # if not np.all( 
N_K > 0 ): 90 | # raise ExpressionError( "N_K", "contains non-positive length entries" ) 91 | # return True 92 | 93 | # @property 94 | # def N_K( self ): 95 | # return self._N_K 96 | 97 | # @N_K.setter 98 | # def N_K( self, N_K ): 99 | # self._N_K = None 100 | # if self._check_N_K( N_K ): 101 | # self._N_K = N_K 102 | 103 | # def _check_M_I_x( self, M_I_x ): 104 | # if None is M_I_x: 105 | # raise ExpressionError( "M_I_x", "is None" ) 106 | # if not isinstance( M_I_x, (np.ndarray,) ): 107 | # raise ExpressionError( "M_I_x", "invalid type (%s)" % str( type( M_I_x ) ) ) 108 | # if 1 != M_I_x.ndim: 109 | # raise ExpressionError( "M_I_x", "invalid number of dimensions (%d)" % M_I_x.ndim ) 110 | # if M_I_x.shape[0] != self.N_K.max(): 111 | # raise ExpressionError( "M_I_x", "unmatching number of samples (%d,%d)" % (M_I_x.shape[0], self.N_K.max()) ) 112 | # if np.intc != M_I_x.dtype: 113 | # raise ExpressionError( "M_I_x", "invalid dtype (%s)" % str( M_I_x.dtype ) ) 114 | # if not np.all( M_I_x >= 0 ): 115 | # raise ExpressionError( "M_I_x", "contains negative state indices" ) 116 | # if not np.all( M_I_x < self.n_markov_states ): 117 | # raise ExpressionError( "M_I_x", "contains too large state indices" ) 118 | # return True 119 | 120 | # @property 121 | # def M_I_x( self ): 122 | # return self._M_I_x 123 | 124 | # @M_I_x.setter 125 | # def M_I_x( self, M_I_x ): 126 | # self._M_I_x = None 127 | # if self._check_M_I_x( M_I_x ): 128 | # self._M_I_x = M_I_x 129 | 130 | # def _check_b_IK_x( self, b_IK_x ): 131 | # if None is b_IK_x: 132 | # raise ExpressionError( "b_IK_x", "is None" ) 133 | # if not isinstance( b_IK_x, (np.ndarray,) ): 134 | # raise ExpressionError( "b_IK_x", "invalid type (%s)" % str( type( b_IK_x ) ) ) 135 | # if 3 != b_IK_x.ndim: 136 | # raise ExpressionError( "b_IK_x", "invalid number of dimensions (%d)" % b_IK_x.ndim ) 137 | # if b_IK_x.shape[0] != self.n_therm_states: 138 | # raise ExpressionError( "b_IK_x", "unmatching number of thermodynamic states 
(%d,%d)" % (b_IK_x.shape[0], self.n_therm_states) ) 139 | # if b_IK_x.shape[1] != self.n_therm_states: 140 | # raise ExpressionError( "b_IK_x", "unmatching number of thermodynamic states (%d,%d)" % (b_IK_x.shape[1], self.n_therm_states) ) 141 | # if b_IK_x.shape[2] != self.M_I_x.shape[1]: 142 | # raise ExpressionError( "b_IK_x_x", "unmatching sample dimension (%d,%d)" % (b_IK_x.shape[2], self.M_I_x.shape[1]) ) 143 | # if np.float64 != b_IK_x.dtype: 144 | # raise ExpressionError( "b_IK_x", "invalid dtype (%s)" % str( b_IK_x.dtype ) ) 145 | # return True 146 | 147 | # @property 148 | # def b_IK_x( self ): 149 | # return self._b_IK_x 150 | 151 | # @u_I_x.setter 152 | # def b_IK_x( self, b_IK_x ): 153 | # self._b_IK_x = None 154 | # if self._check_b_IK_x( b_IK_x ): 155 | # self._b_IK_x = b_IK_x 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | -------------------------------------------------------------------------------- /doc/source/usr.rst: -------------------------------------------------------------------------------- 1 | .. _ref_user: 2 | 3 | ========== 4 | User guide 5 | ========== 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | 10 | 11 | File format 12 | =========== 13 | 14 | The standard file format assumes text files with the following layout. :: 15 | 16 | # This is a comment line, you can several of those. 17 | # The next lines indicates the meaning of the columns, 18 | # [M] denotes Markov state indices (starting from zero), 19 | # [T] denotes thermodynamic state indices (starting from zero), 20 | # and [b_K] denotes the reduced bias energies b_K/kT 21 | # [M] [T] [b_0] [b_1] ... 22 | 0 0 3.1 18.2 23 | 1 0 3.2 18.3 24 | 2 0 4.8 19.9 25 | 3 0 7.4 22.5 26 | . . . . 27 | . . . . 28 | . . . . 29 | 30 | The minimal layout only requires the ``[M]`` and ``[T]`` columns and can only be used for discrete estimators (dTRAM or WHAM). These two columns contain the sequences of the Markov and generating thermodynamic states. 
For example, the entry ``3 0`` denotes that the actual sample corresponds to the Markov state ``3`` and was generated at thermodynamic state ``0``. 31 | 32 | **Important note**: in order to run dTRAM/WHAM successfully, you need an additional ``b_K_i`` file as explained in the discrete estimators section. 33 | 34 | The other TRAM estimators require at least one energy column. For this, we distinguish two different cases: 35 | temperature as the only thermodynamic variable, and all other thermodynamic conditions, i.e., different Hamiltonians, umbrella potentials, ... 36 | 37 | 38 | Temperature as only thermodynamic variable 39 | ------------------------------------------ 40 | 41 | In this case, you need the ``[M]`` and ``[T]`` columns, and one reduced potential energy column ``[u_K]``. In this case the third column only contains reduced potential energies: u_K = u(x)/kT_K. The energy is reduced according to the generating thermodynamic state. For example, the entry ``2 5 20.5`` denotes that the actual sample corresponds to the Markov state ``2``, was generated at temperature ``kT_5``, and the corresponding energy was reduced with ``kT_5``. 42 | 43 | **Important note**: for temperature-dependent simulations, you need an additional single column ``kT`` file wich indicates all generating temperatures times the Boltzmann constant (consistent with your energy units). Note that the order of ``kT`` values must be constistent with the numbering of the thermodynamic states. Additionally the ``kT.dat`` file will need to contain all tempeartures multiplied by the Botzmann constant. (Make sure that you are suing the correct units here when reducing your potential energies.) The ``kT.dat`` file is just a single column file. :: 44 | 45 | #This is a kT file 46 | #[kT] 47 | 1.2 48 | 1.4 49 | 1.7 50 | 1.9 51 | . 52 | . 53 | . 
54 | 55 | Hamiltonian replica exchange, umbrella sampling, etc 56 | ---------------------------------------------------- 57 | 58 | This is the most general application. Here, each sample must be evaluated at all thermodynamic states which means that you need as many energy columns as you have thermodynamic states. For example, the line ``2 1 3.0 2.9 1.0 0.3`` indicates that the actual sample corresponds to the Markov state ``2``, has been generated at thermodynamic state ``1``, the reduced energy is 59 | 60 | * ``3.0 kT`` at thermodynamic state ``0``, 61 | * ``2.9 kT`` at thermodynamic state ``1``, 62 | * ``1.0 kT`` at thermodynamic state ``2``, and 63 | * ``0.3 kT`` at thermodynamic state ``3``. 64 | 65 | This example also requires you to have exactly four thermodynamic states. 66 | 67 | 68 | Discrete Estimators (WHAM, dTRAM) 69 | ================================= 70 | 71 | from files 72 | ---------- 73 | 74 | Assume that we have two files ``file_1.dat`` and ``file_2.dat`` with e.g. umbrella sampling simulation data. In addition to that, the discrete estimator methods require the user to specify the reduced bias energies of all Markov states in each of the thermodynamic states. The corresponding file format is given by :: 75 | 76 | # we store the reduced bias energies b_K(x)/kT 77 | # at the discrete states x_i 78 | # [b_0] [b_1] ... 79 | 0.0 4.0 80 | 0.0 0.0 81 | 0.0 8.0 82 | 83 | In this example, we have three Markov states which are evaluated at two different thermodynamic states. 84 | 85 | Using the API, we can run WHAM via the following code (DTRAM works in the same way, just replace wham with dtram): 86 | 87 | .. 
code-block:: python 88 | 89 | # import the Reader, Forge and the wham API function 90 | from pyfeat import Reader, Forge, wham 91 | 92 | # specify your input data files 93 | files = [ 94 | 'path/to/file_1.dat', 95 | 'path/to/file_2.dat' 96 | ] 97 | b_K_i_file = 'path/to/b_K_i_file.dat' 98 | 99 | # import the files using the Reader 100 | reader = Reader( files, b_K_i_file=b_K_i_file, verbose=True ) 101 | 102 | # convert the input data using TRAMData 103 | data_forge = Forge( reader.trajs, b_K_i=reader.b_K_i ) 104 | 105 | # run WHAM using the API function 106 | wham_obj = wham( data_forge, maxiter=1000, ftol=1.0E-10, verbose=True ) 107 | 108 | # show unbiased stationary distribution 109 | print wham_obj.pi_i 110 | 111 | # show thermodynamic free energies 112 | print wham_obj.f_K 113 | 114 | If the data is generated from a multiple temperature simulation, the user can still follow the above usage of the WHAM or DTRAM estiamtor, but this time give an additional kT file using the appropritate flag in the Reader and the Forge. Additionaly, now the estimator object ``est.pi_i`` returns by default the probability of all states at the lowest temperatures, if this is not the tempearture of interest, the forge can be given an extra arguemnt, ``target_kT``, to estiamte the stationary probabilties at a different tempearture. 
115 | 116 | from file using the runscript 117 | ----------------------------- 118 | 119 | Or we can run WHAM with the runscript in the following way from the commandline (DTRAM would will work in the same way using DTRAM as the commanline parameter for the ``--estimator`` flag): 120 | 121 | ``run_pyfeat file*.dat --estimator WHAM --b_K_i_file b_K_i.dat --maxiter=1000 --ftol 1.0E-5`` 122 | 123 | For all options given in the runscript, use: 124 | 125 | ``run_pyfeat --help`` 126 | 127 | 128 | 129 | Continuous Estimators (xTRAM) 130 | =================================== 131 | 132 | Currently, it is only safe to use multiple temperature ensemble simulations with this estimator. In a future rease the fuctionality of this will be enhanced. 133 | 134 | An example usage from file with a ST simulation can be found in the examples directory in the ipython notebook double-well.ipynb. 135 | 136 | 137 | -------------------------------------------------------------------------------- /pyfeat/estimator/wham.py: -------------------------------------------------------------------------------- 1 | r""" 2 | 3 | ===================== 4 | WHAM estimator module 5 | ===================== 6 | 7 | .. 
import numpy as np

# NotConvergedWarning / ExpressionError normally come from pytram; the
# fallbacks below only exist so the module stays importable without it
# (the except branch is never taken in a full pyfeat installation).
try:
    from pytram import NotConvergedWarning, ExpressionError
except ImportError:
    class NotConvergedWarning(UserWarning):
        pass

    class ExpressionError(Exception):
        pass


####################################################################################################
#                                                                                                  #
#   WHAM ESTIMATOR CLASS                                                                           #
#                                                                                                  #
####################################################################################################
class WHAM(object):
    r"""
    I run the WHAM estimator
    """
    def __init__(self, N_K_i, b_K_i):
        r"""
        Initialize the WHAM object

        Parameters
        ----------
        N_K_i : numpy.ndarray(shape=(T, M), dtype=numpy.intc)
            total number of counts from simulation at T in M discrete Markov state (bin)
        b_K_i : numpy.ndarray(shape=(T, M), dtype=numpy.float64)
            bias energies in the T thermodynamic and M discrete Markov states
        """
        self._n_therm_states = N_K_i.shape[0]
        self._n_markov_states = N_K_i.shape[1]
        # the setter validates b_K_i and stores shifted Boltzmann factors
        self.gamma_K_i = b_K_i
        self._N_K_i = N_K_i
        self._pi_i = np.zeros(shape=(self.n_markov_states,), dtype=np.float64)
        self._f_K = None
        self._pi_K_i = None
        self.citation = [
            "The weighted histogram analysis method for free-energy calculations on biomolecules;",
            "Shankar Kumar, John M. Rosenberg, Djamal Bouzida, Robert H. Swendsen and Peter A. Kollman",
            "J. Comput. Chem. 13, 1011-1021 (1992)"]

    def cite(self, pre=""):
        """Print the citation lines, each prefixed with ``pre``."""
        for line in self.citation:
            print("%s%s" % (pre, line))

    def sc_iteration(self, maxiter=100, ftol=1.0E-5, verbose=False):
        r"""
        sc_iteration function

        Parameters
        ----------
        maxiter : int
            maximum number of self-consistent-iteration steps
        ftol : float (> 0.0)
            convergence criterion based on the max relative change in a
            self-consistent-iteration step
        verbose : boolean
            Be loud and noisy

        Raises
        ------
        NotConvergedWarning
            if the relative increment is still >= ftol after maxiter steps
        """
        # reset internal storage variables
        self._pi_K_i = None
        self._f_K = np.ones(shape=(self.n_therm_states,), dtype=np.float64)
        finc = None
        if verbose:
            print("# %25s %25s" % ("[iteration step]", "[relative increment]"))
        # bootstrap pi_i on the first call (it is all-zero after __init__)
        if np.all(self.pi_i == 0.0):
            self._pi_i[:] = self._p_step()
        for i in range(maxiter):
            f_new = self._f_step()
            # measure the relative change only where f_K is nonzero
            nonzero = self._f_K.nonzero()
            finc = np.max(np.abs((f_new[nonzero] - self._f_K[nonzero]) / self._f_K[nonzero]))
            self._f_K[:] = f_new[:]
            self._pi_i = self._p_step()
            if verbose:
                print("%25d %25.12e" % (i + 1, finc))
            if finc < ftol:
                break
        if finc >= ftol:
            raise NotConvergedWarning("WHAM", finc)
        self._pi_i /= self._pi_i.sum()

    def _f_step(self):
        # update of the thermodynamic normalization factors
        return 1.0 / np.dot(self.gamma_K_i, self.pi_i) + np.log(self.pi_i.sum())

    def _p_step(self):
        # update of the unbiased stationary distribution (not yet normalized)
        return self._N_K_i.sum(axis=0) / np.dot(
            self._N_K_i.sum(axis=1) * self._f_K, self.gamma_K_i)

    @property
    def n_therm_states(self):
        return self._n_therm_states

    @property
    def n_markov_states(self):
        return self._n_markov_states

    @property
    def pi_i(self):
        return self._pi_i

    @property
    def f_i(self):
        return -np.log(self._pi_i)

    @property
    def f_K_i(self):
        return -np.log(self.pi_K_i)

    @property
    def f_K(self):
        if self._f_K is None:
            self._f_K = 1.0 / np.dot(self.gamma_K_i, self.pi_i)
        return np.log(self._f_K)

    @property
    def pi_K_i(self):
        if self._pi_K_i is None:
            self._pi_K_i = np.exp(self.f_K)[:, np.newaxis] * self.pi_i[np.newaxis, :] * self.gamma_K_i
        return self._pi_K_i

    ############################################################################
    #                                                                          #
    #   _b_K_i sanity checks                                                   #
    #                                                                          #
    ############################################################################

    def _check_b_K_i(self, b_K_i):
        """Validate type, shape and dtype of b_K_i; raise ExpressionError otherwise."""
        if b_K_i is None:
            raise ExpressionError("b_K_i", "is None")
        if not isinstance(b_K_i, (np.ndarray,)):
            raise ExpressionError("b_K_i", "invalid type (%s)" % str(type(b_K_i)))
        if 2 != b_K_i.ndim:
            raise ExpressionError("b_K_i", "invalid number of dimensions (%d)" % b_K_i.ndim)
        if b_K_i.shape[0] != self.n_therm_states:
            raise ExpressionError(
                "b_K_i", "unmatching number of thermodynamic states (%d,%d)" \
                % (b_K_i.shape[0], self.n_therm_states))
        if b_K_i.shape[1] != self.n_markov_states:
            raise ExpressionError(
                "b_K_i",
                "unmatching number of markov states (%d,%d)" \
                % (b_K_i.shape[1], self.n_markov_states))
        if np.float64 != b_K_i.dtype:
            raise ExpressionError("b_K_i", "invalid dtype (%s)" % str(b_K_i.dtype))
        return True

    ############################################################################
    #                                                                          #
    #   gamma_K_i getter and setter                                            #
    #                                                                          #
    ############################################################################

    @property
    def gamma_K_i(self):
        return self._gamma_K_i

    @gamma_K_i.setter
    def gamma_K_i(self, b_K_i):
        self._gamma_K_i = None
        if self._check_b_K_i(b_K_i):
            # shifted Boltzmann factors: the b_K_i.min() shift avoids underflow
            self._gamma_K_i = np.exp(b_K_i.min() - b_K_i)
-------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX 
files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | 49 | clean: 50 | rm -rf $(BUILDDIR)/* 51 | 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | dirhtml: 58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 59 | @echo 60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 61 | 62 | singlehtml: 63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 64 | @echo 65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 66 | 67 | pickle: 68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 69 | @echo 70 | @echo "Build finished; now you can process the pickle files." 71 | 72 | json: 73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 74 | @echo 75 | @echo "Build finished; now you can process the JSON files." 76 | 77 | htmlhelp: 78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 79 | @echo 80 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 81 | ".hhp project file in $(BUILDDIR)/htmlhelp." 
82 | 83 | qthelp: 84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 85 | @echo 86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/pyfeat.qhcp" 89 | @echo "To view the help file:" 90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/pyfeat.qhc" 91 | 92 | devhelp: 93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 94 | @echo 95 | @echo "Build finished." 96 | @echo "To view the help file:" 97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/pyfeat" 98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/pyfeat" 99 | @echo "# devhelp" 100 | 101 | epub: 102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 103 | @echo 104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 105 | 106 | latex: 107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 108 | @echo 109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 111 | "(use \`make latexpdf' here to do that automatically)." 112 | 113 | latexpdf: 114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 115 | @echo "Running LaTeX files through pdflatex..." 116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 118 | 119 | latexpdfja: 120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 121 | @echo "Running LaTeX files through platex and dvipdfmx..." 122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 124 | 125 | text: 126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 127 | @echo 128 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 
129 | 130 | man: 131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 132 | @echo 133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 134 | 135 | texinfo: 136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 137 | @echo 138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 139 | @echo "Run \`make' in that directory to run these through makeinfo" \ 140 | "(use \`make info' here to do that automatically)." 141 | 142 | info: 143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 144 | @echo "Running Texinfo files through makeinfo..." 145 | make -C $(BUILDDIR)/texinfo info 146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 147 | 148 | gettext: 149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 150 | @echo 151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 152 | 153 | changes: 154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 155 | @echo 156 | @echo "The overview file is in $(BUILDDIR)/changes." 157 | 158 | linkcheck: 159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 160 | @echo 161 | @echo "Link check complete; look for any errors in the above output " \ 162 | "or in $(BUILDDIR)/linkcheck/output.txt." 163 | 164 | doctest: 165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 166 | @echo "Testing of doctests in the sources finished, look at the " \ 167 | "results in $(BUILDDIR)/doctest/output.txt." 168 | 169 | xml: 170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 171 | @echo 172 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 173 | 174 | pseudoxml: 175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 176 | @echo 177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
# -*- coding: utf-8 -*-
#
# doc/source/conf.py -- Sphinx build configuration for the pyfeat documentation.
#
# Originally generated by sphinx-quickstart; trimmed to the values that are
# actually set (quickstart's commented-out defaults removed).

import sys
import os

# -- General configuration ------------------------------------------------

# Sphinx extension modules: autodoc/doctest/intersphinx/mathjax ship with
# Sphinx itself; numpydoc parses the NumPy-style docstrings used by pyfeat.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'numpydoc'
]

# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'pyfeat'
copyright = u'2015, Antonia Mey, Christoph Wehmeyer, Fabian Paul, Hao Wu, Frank Noé'

# The short X.Y version and the full release tag (including alpha/beta/rc).
version = '0.3'
release = '0.3.0'

# Patterns, relative to the source directory, ignored when looking for sources.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output ----------------------------------------------

# Prefer the Read-the-Docs theme when it is installed; fall back to the
# builtin 'default' theme otherwise (e.g. on a bare build machine).
try:
    import sphinx_rtd_theme
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    html_theme = 'sphinx_rtd_theme'
except ImportError:
    html_theme = 'default'

# Custom static files (such as style sheets); copied after the builtin
# static files, so "default.css" here would override the builtin one.
html_static_path = ['_static']

# Output file base name for the HTML help builder.
htmlhelp_basename = 'pyfeatdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
#  documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'pyfeat.tex', u'pyfeat Documentation',
     u'Antonia Mey, Christoph Wehmeyer, Fabian Paul, Hao Wu, Frank Noé', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pyfeat', u'pyfeat Documentation',
     [u'Antonia Mey, Christoph Wehmeyer, Fabian Paul, Hao Wu, Frank Noé'], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files.
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    ('index', 'pyfeat', u'pyfeat Documentation',
     u'Antonia Mey, Christoph Wehmeyer, Fabian Paul, Hao Wu, Frank Noé', 'pyfeat',
     'One line description of project.', 'Miscellaneous'),
]

# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): this is the legacy unnamed-key form; newer Sphinx releases
# expect named mappings like {'python': ('https://docs.python.org/', None)}
# -- confirm before upgrading Sphinx.
intersphinx_mapping = {'http://docs.python.org/': None}


# bin/run_pyfeat.py ---------------------------------------------------------
#!/usr/bin/env python

####################################################################################################
#                                                                                                  #
#   RUN SCRIPT FOR THE ALL ESTIMATORS IN THE PYFEAT PACKAGE                                        #
#                                                                                                  #
#   author: Antonia Mey                                                                            #
#   author: Christoph Wehmeyer                                                                     #
#                                                                                                  #
####################################################################################################



####################################################################################################
#
#   IMPORTS
#
####################################################################################################

from pyfeat import Reader, Forge
from pyfeat.estimator import WHAM, XTRAM, DTRAM
from pytram import ExpressionError, NotConvergedWarning
from argparse import ArgumentParser, FileType
from sys import exit
import numpy as np



####################################################################################################
#
#   MAIN PART
#
####################################################################################################
# NOTE: rewritten with Python-2/3 compatible syntax (print(), "except ... as",
# range) -- the original used Python-2-only statements that are syntax errors
# under Python 3. Behavior and all user-facing strings are unchanged.

if '__main__' == __name__:

    ############################################################################
    #
    #   capture the command line arguments
    #
    ############################################################################
    parser = ArgumentParser()
    parser.add_argument(
        'files',
        help='pyfeat compatible files for evaluation (trajectory files)',
        nargs='*',
        metavar='FILE'
    )
    parser.add_argument(
        '--estimator',
        help='specify an available estimator choices are: XTRAM, DTRAM, WHAM',
        default='WHAM',
        metavar='STR'
    )
    parser.add_argument(
        "--b_K_i_file",
        help="specify a pytram compatible file containing kT information",
        default=None,
        metavar="FILE"
    )
    parser.add_argument(
        "--kT_file",
        help="specify a pytram compatible file containing kT information",
        default=None,
        metavar="FILE"
    )
    parser.add_argument(
        "--kT_target",
        help="The kT value for which the free energy and probabilities should be calculated",
        type=int,
        default=None,
        metavar='INT'
    )
    parser.add_argument(
        "--lag",
        help="specify a lag time for evaluation",
        type=int,
        default=1,
        metavar='INT'
    )
    parser.add_argument(
        "--maxlength",
        help="limit the number of trajectory frames",
        type=int,
        default=None,
        metavar='INT'
    )
    parser.add_argument(
        "--skiprows",
        help="Number of initial frames skipped",
        type=int,
        default=0,
        metavar='INT'
    )
    parser.add_argument(
        "--maxiter",
        help="limit the number of fixed point iterations",
        type=int,
        default=100,
        metavar='INT'
    )
    parser.add_argument(
        "--ftol",
        help="limit the requested convergence level",
        type=float,
        default=1.0E-5,
        metavar='FLOAT'
    )
    parser.add_argument(
        "--verbose",
        help="show the progress during the self-consistent-iteration",
        action='store_true'
    )
    args = parser.parse_args()



    ############################################################################
    #
    #   check mandatory command line arguments
    #
    ############################################################################
    if 1 > len(args.files):
        print("ERROR: you must give at least one pytram compatible trajectory file!")
        exit(1)
    ############################################################################
    #
    #   check estimator consistency
    #
    ############################################################################
    # WHAM and dTRAM both need the reduced bias energies b_K_i up front.
    if args.estimator == 'DTRAM' or args.estimator == 'WHAM':
        if args.b_K_i_file is None:
            print("Error: you must set the --b_K_i_file option, with %s as your estimator choice!" % args.estimator)
            exit(1)

    ############################################################################
    #
    #   write header
    #
    ############################################################################
    print("\n\n###################################### PYFEAT ######################################")
    print("#\n# Invoking the %s estimator" % args.estimator)
    print("#\n### PARAMETERS\n#")
    print("# %25s %24d" % ("[--lag]", args.lag))
    print("# %25s %24d" % ("[--maxiter]", args.maxiter))
    print("# %25s %24.5e" % ("[--ftol]", args.ftol))



    ############################################################################
    #
    #   import the data and initialize the estimator
    #
    ############################################################################
    print("#\n################################## IMPORTING DATA ##################################\n#")
    reader = None
    forge = None
    estimator = None
    if args.estimator == 'DTRAM' or args.estimator == 'WHAM':
        reader = Reader(
            args.files,
            b_K_i_file=args.b_K_i_file,
            maxlength=args.maxlength,
            skiprows=args.skiprows,
            verbose=True
        )
        forge = Forge(reader.trajs, b_K_i=reader.b_K_i)
        if args.estimator == 'WHAM':
            print("#\n### WARNING\n#")
            print("# You chose an estimator that your input data samples from a global equilibrium and is uncorrelated!")
            print("# Have you subsampled your data appropriately to account for this?")
            print("# Maybe consider using one of the TRAM estimator instead!")
            try:
                estimator = WHAM(forge.N_K_i, forge.b_K_i)
            except ExpressionError as e:
                print("#\n### ERROR\n#")
                print("# Your input was faulty!")
                print("# The < %s > object is malformed: %s" % (e.expression, e.msg))
                print("#\n### ABORTING\n\n")
                exit(1)
        else:
            try:
                estimator = DTRAM(forge.get_C_K_ij(args.lag), forge.b_K_i)
            except ExpressionError as e:
                print("#\n### ERROR\n#")
                print("# Your input was faulty!")
                print("# The < %s > object is malformed: %s" % (e.expression, e.msg))
                print("#\n### ABORTING\n\n")
                exit(1)
    else:
        reader = Reader(
            args.files,
            kT_file=args.kT_file,
            maxlength=args.maxlength,
            skiprows=args.skiprows,
            verbose=True
        )
        forge = Forge(reader.trajs, kT_K=reader.kT_K, kT_target=args.kT_target)
        if args.estimator == 'XTRAM':
            try:
                estimator = XTRAM(forge.get_C_K_ij(args.lag), forge.b_K_x, forge.T_x, forge.M_x, forge.N_K_i, target=forge.kT_target)
            except ExpressionError as e:
                print("#\n### ERROR\n#")
                print("# Your input was faulty!")
                print("# The < %s > object is malformed: %s" % (e.expression, e.msg))
                print("#\n### ABORTING\n\n")
                exit(1)
        elif args.estimator == 'TRAM':
            print("#\n### ERROR\n#")
            print('TRAM cannot be used yet!')
            exit(1)

    print("#\n### SYSTEM INFORMATION\n#")
    print("# %25s %24d" % ("[markov states]", forge.n_markov_states))
    print("# %25s %24d" % ("[thermodynamic states]", forge.n_therm_states))



    ############################################################################
    #
    #   run the self-consistent-iteration
    #
    ############################################################################
    # NOTE(review): banner says WHAM but this branch runs whichever estimator
    # was selected above.
    print("#\n#################################### RUN WHAM #####################################\n#")
    try:
        print("# Run self-consistent-iteration")
        estimator.sc_iteration(maxiter=args.maxiter, ftol=args.ftol, verbose=args.verbose)
        print("# ... converged!")
    except NotConvergedWarning as e:
        print("#\n### WARNING\n#\n# %s is not converged - use these results carefuly!" % args.estimator)
        print("#\n### RECOMMENDATION\n#\n# Run run_pyfeat.py again and increase --maxiter")



    ############################################################################
    #
    #   print out the results
    #
    ############################################################################
    print("#\n##################################### RESULTS ######################################")
    print("#\n### UNBIASED STATIONARY VECTOR\n#")
    print("# %25s %25s" % ("[markov state]", "[stationary probability]"))
    for i in range(estimator.pi_i.shape[0]):
        print(" %25d %25.12e" % (i, estimator.pi_i[i]))
    print("#\n### UNBIASED FREE ENERGY\n#")
    print("# %25s %25s" % ("[markov state]", "[reduced free energy]"))
    for i in range(estimator.f_i.shape[0]):
        print(" %25d %25.12e" % (i, estimator.f_i[i]))
    print("#\n### THERMODYNAMIC FREE ENERGY\n#")
    print("# %25s %25s" % ("[thermodynamic state]", "[reduced free energy]"))
    for i in range(estimator.f_K.shape[0]):
        print(" %25d %25.12e" % (i, estimator.f_K[i]))



    ############################################################################
    #
    #   say good bye
    #
    ############################################################################
    print("#\n###################That's it, now it is time to put the kettle on ##############################\n#")
    print("#\n# Thank you for using %s in the pyfeat package!\n#\n#" % args.estimator)
    print("### CITATION\n#")
    estimator.cite(pre="# ")
    print("#\n################################################################################################\n\n")
#!/usr/bin/env python
# examples/trajectory_factory.py


r"""

====================
Example Data factory
====================

.. moduleauthor:: Antonia Mey

"""


# imports
import numpy as np
from scipy.integrate import quad
import math
import os



# some helper functions/classes
class AssymetricDoubleWellPotential(object):
    r"""Asymmetric double well potential (quartic in x-2).

    Also holds a fixed discretization of the coordinate: 20 inner edges on
    [-0.4, 4.2] give n_bins = 21 bins including the two outer catch-all bins.
    """
    def __init__(self):
        self.Z = []  # partition functions, filled by get_partition_function
        self.inner_edges = np.linspace(-0.4, 4.2, 20)
        self.n_bins = self.inner_edges.shape[0] + 1
        self.bin_centers = np.zeros(self.n_bins)
        self.bin_centers[1:-1] = self.inner_edges[1:] - 0.5 * (self.inner_edges[1] - self.inner_edges[0])
        self.bin_centers[0] = -0.7
        self.bin_centers[-1] = 4.5

    def energy(self, x):
        r"""Potential energy at position x."""
        return 2 * (x - 2) - 6 * (x - 2)**2 + (x - 2)**4

    def gradient(self, x):
        r"""Derivative of the potential at position x."""
        return 4 * x**3 - 24 * x**2 + 36 * x - 6

    def integ(self, x, kT):
        r"""Boltzmann weight at x -- integrand of the partition function."""
        return np.exp(-self.energy(x) / kT)

    def get_partition_function(self, kT):
        r"""Compute the partition function for every temperature in kT."""
        for T in kT:
            self.Z.append(quad(self.integ, -100.0, 100.0, args=(T,))[0])
        self.Z = np.array(self.Z)
        return self.Z


class HarmonicRestraint(object):
    r"""Harmonic restraint used in umbrella sampling (center r0, force constant k)."""

    def __init__(self, r0, k):
        self.r0 = r0
        self.k = k

    def energy(self, r):
        r"""Restraint energy 0.5*k*(r-r0)^2."""
        return 0.5 * self.k * (r - self.r0)**2

    def gradient(self, r):
        r"""Restraint force gradient k*(r-r0)."""
        return self.k * (r - self.r0)


class BrownianIntegrator(object):
    r"""Overdamped (Brownian) dynamics integrator."""

    def __init__(self, potential, dt, beta=1.0, mass=1.0, damping=1.0):
        r"""Constructor function.

        Parameters
        ----------
        potential : potential object
            object providing energy() and gradient() for the particle
        dt : double
            timestep of the integrator
        beta : double
            inverse temperature of the simulation, Default = 1.0
        mass : double
            mass of the particle, Default = 1.0
        damping : double
            damping constant, Default = 1.0
        """
        self.potential = potential
        # store parameters
        self.dt = dt
        self.beta = beta
        self.mass = mass
        self.damping = damping
        # compute coefficients: deterministic drift and stochastic amplitude
        self.coeff_A = dt / (mass * damping)
        self.coeff_B = np.sqrt(2.0 * dt / (beta * mass * damping))
        self.x = None
        self.t_index = None  # thermodynamic index

    def step(self, restraint=None):
        r"""Carry out a single integration step.

        Returns np.array([position, t_index, potential energy]).
        """
        gradient = self.potential.gradient(self.x)
        if restraint is not None:
            gradient += restraint.gradient(self.x)
        self.x = self.x - self.coeff_A * gradient + self.coeff_B * np.random.normal(size=self.n_dim)
        pos = self.x[0]
        if self.n_dim > 1:
            pos = np.linalg.norm(self.x)
        return np.array([pos, self.t_index, self.potential.energy(pos)])

    def set_temperature(self, beta):
        r"""Set a new inverse temperature (updates the noise amplitude).

        Parameters
        ----------
        beta : double
            inverse temperature beta
        """
        self.beta = beta
        self.coeff_B = np.sqrt(2.0 * self.dt / (self.beta * self.mass * self.damping))

    def set_position(self, x):
        r"""Set the current position of the integrator.

        Parameters
        ----------
        x : ndarray
            current position of the particle
        """
        self.x = x
        self.n_dim = x.shape[0]

    def set_t_index(self, t):
        r"""Set the temperature index, should there be more than one.

        Parameters
        ----------
        t : int
            temperature index in the temperature array
        """
        self.t_index = t


class STReplica(object):
    r"""Simulated-tempering replica: runs Brownian dynamics with Metropolis
    temperature swaps."""

    def __init__(self, Z, integrator, kT):
        r"""Constructor function.

        Parameters
        ----------
        Z : double array
            normalisation factors used for weighting different temperatures
        integrator : BrownianIntegrator object
            the actual simulation
        kT : double array
            possible temperatures at which sampling occurs
        """
        self.Z = Z
        self.integrator = integrator
        self.kT = kT
        self.trajectory = []

    def run(self, nsteps=1):
        r"""Advance the simulation by nsteps integrator steps, appending
        each step's output to the trajectory."""
        for i in range(nsteps):
            self.trajectory.append(self.integrator.step())

    def change_temperature(self):
        r"""Metropolis-Hastings step that proposes a switch to a randomly
        chosen temperature."""
        r = np.random.randint(self.kT.shape[0])
        beta_new = 1.0 / self.kT[r]
        beta_old = self.integrator.beta
        # NOTE(review): np.where returns an index tuple; indexing Z with it
        # yields a length-1 array, so the exponent below is a 1-element array.
        beta_int = np.where(self.kT == 1.0 / beta_old)
        deltaG = -np.log(self.integrator.potential.Z[r]) + np.log(self.integrator.potential.Z[beta_int])
        enExp = -self.trajectory[-1][2] * (beta_new - beta_old)
        exponent = enExp + deltaG
        if (exponent >= 0) or (np.random.random() < np.exp(exponent)):
            self.integrator.set_temperature(1.0 / self.kT[r])
            self.integrator.set_t_index(r)


class USReplica(object):
    r"""Umbrella-sampling replica: runs one trajectory per restraint."""

    def __init__(self, integrator, RS):
        self.integrator = integrator
        self.RS = RS  # list of HarmonicRestraint objects, one per window
        self.trajectory = []
        self.n_therm_states = len(RS)
        self.ndim = 1

    def run(self, nsteps):
        r"""Run nsteps integrator steps in every restraint window."""
        count = 0
        for r in self.RS:
            r_traj = []
            # start each window at its restraint center
            self.integrator.set_position(np.ones(self.ndim) * r.r0 / np.sqrt(self.ndim))
            self.integrator.t_index = count
            for i in range(nsteps):
                int_return = self.integrator.step(r)
                bias = self.calculate_bias(self.integrator.x)
                r_traj.append(np.hstack((int_return, bias)))
            count = count + 1
            self.trajectory.append(r_traj)
        print('US simulation completed sucessfully!')

    def calculate_bias(self, pos):
        r"""Bias energies of position pos in all restraint windows."""
        bias = np.zeros(self.n_therm_states)
        for b in range(self.n_therm_states):
            bias[b] = self.RS[b].energy(pos)
        return bias


def discretize(x, inner_edges):
    r"""Map a continuous position x onto a bin index.

    Bin 0 is everything below inner_edges[0], the last bin everything at or
    above inner_edges[-1]; interior bins are half-open [edge_i, edge_i+1).
    NOTE(review): the comparison operators were garbled in the archived
    source; reconstructed as the standard binning scheme described above.
    """
    if x < inner_edges[0]:
        return 0
    if x >= inner_edges[-1]:
        return inner_edges.shape[0]
    for i in range(inner_edges.shape[0] - 1):
        if (inner_edges[i] <= x) and (x < inner_edges[i + 1]):
            return i + 1

# the core run functions


def run_st_simulation():
    r"""Generate simulated-tempering example data in the directory ST/."""
    N_EXCHANGES = 2000
    # checking if directories for writing exist
    directory = "ST/"
    if not os.path.exists(directory):
        os.makedirs(directory)

    kT = np.array([2.0, 4.0, 7.0, 10.0, 15.0])
    n_therm_states = len(kT)
    initial_x = np.array([3.5])
    initial_t = 0

    dwp = AssymetricDoubleWellPotential()
    Z = dwp.get_partition_function(kT)
    integrator = BrownianIntegrator(dwp, 0.005, 1.0 / kT[initial_t], 1.0, 1.0)
    integrator.set_t_index(initial_t)
    integrator.set_position(initial_x)
    replica = STReplica(Z, integrator, kT)
    for i in range(N_EXCHANGES):
        replica.run(100)
        replica.change_temperature()
    traj = np.array(replica.trajectory)

    n_traj_frames = np.shape(replica.trajectory)[0]
    fh = open(directory + "Traj.dat", 'w')
    for t in range(n_traj_frames):
        # the thermodynamic index is stored as a float in the trajectory
        # array; cast before using it as an array index
        k = int(traj[t, 1])
        fh.write("%6d %6d %+.6e" % (discretize(traj[t, 0], dwp.inner_edges), k, traj[t, 2] / kT[k]))
        fh.write("\n")
    fh.close()

    np.savetxt(directory + "kT.dat", kT)

    # constructing wham file
    #  -> themodynamic state
    #  |
    #  v markov state
    # we need a target temperature \beta_0
    target = 0
    wham_f = os.path.join(directory, "b_K_i.dat")
    fh = open(wham_f, 'w')
    for c in range(dwp.bin_centers.shape[0]):
        for i in range(n_therm_states):
            fh.write(" %+.6e" % (dwp.energy(dwp.bin_centers[c]) * (1.0 / kT[i] - 1.0 / kT[target])))
        fh.write("\n")
    fh.close()

    # exact probability distribution for comparison
    fh = open(directory + "exact.dat", 'w')
    for c in range(dwp.bin_centers.shape[0]):
        p = np.exp(-dwp.energy(dwp.bin_centers[c]) * 1.0 / kT[target])
        fh.write("%4f %+.6e" % (dwp.bin_centers[c], p))
        fh.write("\n")
    fh.close()


def run_us_simulation():
    r"""Generate umbrella-sampling example data in the directory US/."""
    # checking if directories for writing exist
    directory = "US/"
    if not os.path.exists(directory):
        os.makedirs(directory)

    # setting the simulation temperature
    kT = np.array([1.0])
    nsteps = 1000

    dwp = AssymetricDoubleWellPotential()
    Z = dwp.get_partition_function(kT)
    # exact probability distribution for comparison
    e_file = os.path.join(directory, "exact.dat")
    fh = open(e_file, 'w')
    for c in range(dwp.bin_centers.shape[0]):
        p = np.exp(-dwp.energy(dwp.bin_centers[c]) * 1.0 / kT[0])
        fh.write("%4f %+.6e" % (dwp.bin_centers[c], p))
        fh.write("\n")
    fh.close()

    integrator = BrownianIntegrator(dwp, 0.005, 1.0 / kT[0], 1.0, 1.0)
    restraints_pos = np.linspace(-0.9, 4.7, 30)
    restraint_k = np.ones(30) * 90
    n_therm_states = restraints_pos.shape[0]

    restraints = []
    for i in range(restraints_pos.shape[0]):
        restraints.append(HarmonicRestraint(restraints_pos[i], restraint_k[i]))
    replica = USReplica(integrator, restraints)
restraints) 308 | replica.run(nsteps) 309 | for r in xrange(restraints_pos.shape[0]): 310 | traj = np.array(replica.trajectory[r]) 311 | n_traj_frames = traj.shape[0] 312 | r_file = os.path.join(directory,"Traj"+str(r)+".dat") 313 | fh =open(r_file, 'w') 314 | for t in xrange( n_traj_frames ): 315 | fh.write( "%6d %6d " % (discretize( traj[t,0], dwp.inner_edges ) , traj[t,1] ) ) 316 | for j in xrange(restraints_pos.shape[0]): 317 | fh.write("%+.6e " % (traj[t,3+j]/kT[0])) 318 | fh.write("\n") 319 | fh.close() 320 | 321 | wham_f = os.path.join(directory,"b_K_i.dat") 322 | fh = open(wham_f, 'w') 323 | for c in xrange( dwp.bin_centers.shape[0] ): 324 | for i in xrange( n_therm_states ): 325 | fh.write( " %+.8e" % ( restraints[i].energy( dwp.bin_centers[c] ) / kT[0] ) ) 326 | fh.write( "\n" ) 327 | fh.close() 328 | 329 | -------------------------------------------------------------------------------- /examples/double_well_example.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "metadata": { 3 | "name": "", 4 | "signature": "sha256:b59b1f160df2f903935ed6fe2007cd29136228151a97884a9bfbe920f1bd5bc8" 5 | }, 6 | "nbformat": 3, 7 | "nbformat_minor": 0, 8 | "worksheets": [ 9 | { 10 | "cells": [ 11 | { 12 | "cell_type": "heading", 13 | "level": 1, 14 | "metadata": {}, 15 | "source": [ 16 | "Pyfeat Tutorial" 17 | ] 18 | }, 19 | { 20 | "cell_type": "heading", 21 | "level": 3, 22 | "metadata": {}, 23 | "source": [ 24 | "1. Toy system: Data Generation" 25 | ] 26 | }, 27 | { 28 | "cell_type": "markdown", 29 | "metadata": {}, 30 | "source": [ 31 | "We will walk you through using the API of pyfeat for different simulation types and the different available estimators. As a simple initial example we will consider an asymmetric double well as the potential landscape and a single particle diffusing in this potential according to Brownian dynamics. 
For this purpose we have prepared a set of scripts that will run short simulations and write the trajectories to file in the correct pyfeat format. If you want to learn how to prepare data in a pyfeat format, it might be worthwhile to have a closer look at the data generating files (trajectory_factory.py). " 32 | ] 33 | }, 34 | { 35 | "cell_type": "code", 36 | "collapsed": false, 37 | "input": [ 38 | "#imports\n", 39 | "#allow for the embedding of plots into the ipython notebook. \n", 40 | "%pylab inline \n", 41 | "import trajectory_factory as tf #this is the package that allows the quick generation of input data for pyfeat" 42 | ], 43 | "language": "python", 44 | "metadata": {}, 45 | "outputs": [] 46 | }, 47 | { 48 | "cell_type": "markdown", 49 | "metadata": {}, 50 | "source": [ 51 | "Let us run a simulated tempering simulation. CAREFUL the data generation may take a little while." 52 | ] 53 | }, 54 | { 55 | "cell_type": "code", 56 | "collapsed": false, 57 | "input": [ 58 | "tf.run_st_simulation() #generates simulated tempering data for pyfeat in the directory ST/" 59 | ], 60 | "language": "python", 61 | "metadata": {}, 62 | "outputs": [] 63 | }, 64 | { 65 | "cell_type": "markdown", 66 | "metadata": {}, 67 | "source": [ 68 | "Have a look at the *examples/ST/Traj.dat* file. This will give you a good idea as to how a trajectory should be written out for your own data.\n", 69 | "All additional files written out will be needed for the analysis. The file *kT.dat* contains the reduced temperatures, *b_k_i.dat* is needed for WHAM and \n", 70 | "dTRAM and *exact.dat* contains the true stationary probabilities to which we will compare later." 71 | ] 72 | }, 73 | { 74 | "cell_type": "heading", 75 | "level": 3, 76 | "metadata": {}, 77 | "source": [ 78 | "2. 
Toy system: Simulated tempering data analysis" 79 | ] 80 | }, 81 | { 82 | "cell_type": "markdown", 83 | "metadata": {}, 84 | "source": [ 85 | "Now we have generated the data we want for the pyfeat analysis, it is time to import all the necessary packages from pyfeat" 86 | ] 87 | }, 88 | { 89 | "cell_type": "code", 90 | "collapsed": false, 91 | "input": [ 92 | "from pyfeat import xtram, wham, dtram #api function for pyfeat\n", 93 | "from pyfeat import Reader #allows you to read the data from file in the correct format\n", 94 | "from pyfeat import Forge #contains all the preformatted data that will then be passed to the estimators" 95 | ], 96 | "language": "python", 97 | "metadata": {}, 98 | "outputs": [] 99 | }, 100 | { 101 | "cell_type": "markdown", 102 | "metadata": {}, 103 | "source": [ 104 | "The usual workflow for pyfeat is: Read the data with the reader, which will take a list of files as an argument, plus any helper files such as a *kT file* or a *b_K_i file*. The reader object is then passed to the data converter (Forge)." 
105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "collapsed": false, 110 | "input": [ 111 | "trajlist = ['ST/Traj.dat']\n", 112 | "reader = Reader( trajlist, b_K_i_file = 'ST/b_K_i.dat', kT_file='ST/kT.dat' ) #read trajectory and 'helper files'\n", 113 | "forge = Forge( reader.trajs, kT_K = reader.kT_K, b_K_i = reader.b_K_i, kT_target = 0 ) #pass read data to the data forge" 114 | ], 115 | "language": "python", 116 | "metadata": {}, 117 | "outputs": [] 118 | }, 119 | { 120 | "cell_type": "code", 121 | "collapsed": false, 122 | "input": [ 123 | "#load all the exact results\n", 124 | "exact = np.loadtxt('ST/exact.dat')\n", 125 | "exact[:,1] = exact[:,1]/np.sum(exact[:,1])" 126 | ], 127 | "language": "python", 128 | "metadata": {}, 129 | "outputs": [] 130 | }, 131 | { 132 | "cell_type": "markdown", 133 | "metadata": {}, 134 | "source": [ 135 | "Now we have read all the data and can run our different estimators" 136 | ] 137 | }, 138 | { 139 | "cell_type": "heading", 140 | "level": 4, 141 | "metadata": {}, 142 | "source": [ 143 | "2.1 DTRAM" 144 | ] 145 | }, 146 | { 147 | "cell_type": "code", 148 | "collapsed": false, 149 | "input": [ 150 | "dtram_est = dtram( forge, lag=1 , maxiter=1000, ftol=1.0e-4, verbose=False )\n", 151 | "print \"#===============Thank you for using DTRAM=============================\"\n", 152 | "dtram_est.cite(pre=\"# \")\n", 153 | "print \"#=====================================================================\"" 154 | ], 155 | "language": "python", 156 | "metadata": {}, 157 | "outputs": [] 158 | }, 159 | { 160 | "cell_type": "code", 161 | "collapsed": false, 162 | "input": [ 163 | "fig = plt.figure(1, figsize=(10,5))\n", 164 | "fig.add_subplot(121)\n", 165 | "plt.plot( exact[:,0],dtram_est.f_i , color = 'r', linewidth = 2, linestyle='--', label='DTRAM' )\n", 166 | "plt.plot( exact[:,0], -np.log(exact[:,1]), color='k', label='exact' )\n", 167 | "plt.xlabel( 'x in [a.u.]', fontsize = 20 )\n", 168 | "plt.ylabel( 'F(x) in [kT]', fontsize = 
20 )\n", 169 | "plt.legend(loc=4)\n", 170 | "fig.add_subplot(122)\n", 171 | "plt.plot( exact[:,0],dtram_est.pi_i , color = 'r', linewidth = 2, linestyle='--', label='DTRAM' )\n", 172 | "plt.plot( exact[:,0], exact[:,1], color='k', label='exact' )\n", 173 | "plt.xlabel( 'x in [a.u.]', fontsize = 20 )\n", 174 | "plt.ylabel( 'P(x)', fontsize = 20 )\n", 175 | "plt.semilogy()\n", 176 | "plt.legend(loc=1)\n", 177 | "plt.tight_layout()" 178 | ], 179 | "language": "python", 180 | "metadata": {}, 181 | "outputs": [] 182 | }, 183 | { 184 | "cell_type": "heading", 185 | "level": 4, 186 | "metadata": {}, 187 | "source": [ 188 | "2.2 WHAM" 189 | ] 190 | }, 191 | { 192 | "cell_type": "code", 193 | "collapsed": false, 194 | "input": [ 195 | "wham_est = wham( forge, maxiter=1000, ftol=1.0e-7, verbose=False ) # and we are done, now we can analyse the results\n", 196 | "print \"#===============Thank you for using WHAM==================================================\"\n", 197 | "wham_est.cite(pre=\"# \")\n", 198 | "print \"#=========================================================================================\"" 199 | ], 200 | "language": "python", 201 | "metadata": {}, 202 | "outputs": [] 203 | }, 204 | { 205 | "cell_type": "code", 206 | "collapsed": false, 207 | "input": [ 208 | "fig = plt.figure(1, figsize=(10,5))\n", 209 | "fig.add_subplot(121)\n", 210 | "plt.plot( exact[:,0],wham_est.f_i , color = 'r', linewidth = 2, linestyle='--', label='WHAM' )\n", 211 | "plt.plot( exact[:,0], -np.log(exact[:,1]), color='k', label='exact' )\n", 212 | "plt.xlabel( 'x in [a.u.]', fontsize = 20 )\n", 213 | "plt.ylabel( 'F(x) in [kT]', fontsize = 20 )\n", 214 | "plt.legend(loc=4)\n", 215 | "fig.add_subplot(122)\n", 216 | "plt.plot( exact[:,0],wham_est.pi_i , color = 'r', linewidth = 2, linestyle='--', label='WHAM' )\n", 217 | "plt.plot( exact[:,0], exact[:,1], color='k', label='exact' )\n", 218 | "plt.xlabel( 'x in [a.u.]', fontsize = 20 )\n", 219 | "plt.ylabel( 'P(x)', fontsize = 20 )\n", 
220 | "plt.semilogy()\n", 221 | "plt.legend(loc=1)\n", 222 | "plt.tight_layout()" 223 | ], 224 | "language": "python", 225 | "metadata": {}, 226 | "outputs": [] 227 | }, 228 | { 229 | "cell_type": "heading", 230 | "level": 4, 231 | "metadata": {}, 232 | "source": [ 233 | "2.3 XTRAM" 234 | ] 235 | }, 236 | { 237 | "cell_type": "code", 238 | "collapsed": false, 239 | "input": [ 240 | "xtram_est = xtram( forge, lag=1 , maxiter=1000, ftol=1.0e-15, verbose=True )\n", 241 | "print \"#===============Thank you for using XTRAM====================================\"\n", 242 | "xtram_est.cite(pre=\"# \")\n", 243 | "print \"#============================================================================\"" 244 | ], 245 | "language": "python", 246 | "metadata": {}, 247 | "outputs": [] 248 | }, 249 | { 250 | "cell_type": "code", 251 | "collapsed": false, 252 | "input": [ 253 | "fig = plt.figure(1, figsize=(10,5))\n", 254 | "fig.add_subplot(121)\n", 255 | "plt.plot( exact[:,0],xtram_est.f_i , color = 'r', linewidth = 2, linestyle='--', label='XTRAM' )\n", 256 | "plt.plot( exact[:,0], -np.log(exact[:,1]), color='k', label='exact' )\n", 257 | "plt.xlabel( 'x in [a.u.]', fontsize = 20 )\n", 258 | "plt.ylabel( 'F(x) in [kT]', fontsize = 20 )\n", 259 | "plt.legend(loc=4)\n", 260 | "fig.add_subplot(122)\n", 261 | "plt.plot( exact[:,0],xtram_est.pi_i , color = 'r', linewidth = 2, linestyle='--', label='XTRAM' )\n", 262 | "plt.plot( exact[:,0], exact[:,1], color='k', label='exact' )\n", 263 | "plt.xlabel( 'x in [a.u.]', fontsize = 20 )\n", 264 | "plt.ylabel( 'P(x)', fontsize = 20 )\n", 265 | "plt.semilogy()\n", 266 | "plt.legend(loc=1)\n", 267 | "plt.tight_layout()" 268 | ], 269 | "language": "python", 270 | "metadata": {}, 271 | "outputs": [] 272 | }, 273 | { 274 | "cell_type": "heading", 275 | "level": 3, 276 | "metadata": {}, 277 | "source": [ 278 | "3. 
Toy system: Umbrella sampling data analysis" 279 | ] 280 | }, 281 | { 282 | "cell_type": "code", 283 | "collapsed": false, 284 | "input": [ 285 | "tf.run_us_simulation() #generates umbrella sampling data for pyfeat in the directory US/" 286 | ], 287 | "language": "python", 288 | "metadata": {}, 289 | "outputs": [] 290 | }, 291 | { 292 | "cell_type": "code", 293 | "collapsed": false, 294 | "input": [ 295 | "trajlist = ['US/Traj0.dat', 'US/Traj1.dat', 'US/Traj2.dat', 'US/Traj3.dat', 'US/Traj4.dat', 'US/Traj5.dat', 'US/Traj6.dat',\n", 296 | " 'US/Traj7.dat', 'US/Traj8.dat', 'US/Traj9.dat', 'US/Traj10.dat', 'US/Traj11.dat', 'US/Traj12.dat', 'US/Traj13.dat',\n", 297 | " 'US/Traj14.dat', 'US/Traj15.dat', 'US/Traj16.dat', 'US/Traj17.dat', 'US/Traj18.dat', 'US/Traj19.dat', 'US/Traj20.dat',\n", 298 | " 'US/Traj21.dat', 'US/Traj22.dat', 'US/Traj23.dat', 'US/Traj24.dat', 'US/Traj25.dat', 'US/Traj26.dat', 'US/Traj27.dat',\n", 299 | " 'US/Traj28.dat', 'US/Traj29.dat']\n", 300 | "reader = Reader( trajlist, b_K_i_file = 'US/b_K_i.dat' ) #read trajectory and 'helper files'\n", 301 | "forge = Forge( reader.trajs, b_K_i = reader.b_K_i ) #pass read data to the data forge" 302 | ], 303 | "language": "python", 304 | "metadata": {}, 305 | "outputs": [] 306 | }, 307 | { 308 | "cell_type": "code", 309 | "collapsed": false, 310 | "input": [ 311 | "#load all the exact results\n", 312 | "exact = np.loadtxt('US/exact.dat')\n", 313 | "exact[:,1] = exact[:,1]/np.sum(exact[:,1])" 314 | ], 315 | "language": "python", 316 | "metadata": {}, 317 | "outputs": [] 318 | }, 319 | { 320 | "cell_type": "heading", 321 | "level": 4, 322 | "metadata": {}, 323 | "source": [ 324 | "3.1 DTRAM" 325 | ] 326 | }, 327 | { 328 | "cell_type": "code", 329 | "collapsed": false, 330 | "input": [ 331 | "dtram_est = dtram( forge, lag=1 , maxiter=10000, ftol=1.0e-6, verbose=False )\n", 332 | "print \"#===============Thank you for using DTRAM=============================\"\n", 333 | "dtram_est.cite(pre=\"# \")\n", 334 | 
"print \"#=====================================================================\"" 335 | ], 336 | "language": "python", 337 | "metadata": {}, 338 | "outputs": [] 339 | }, 340 | { 341 | "cell_type": "code", 342 | "collapsed": false, 343 | "input": [ 344 | "fig = plt.figure(1, figsize=(10,5))\n", 345 | "fig.add_subplot(121)\n", 346 | "plt.plot( exact[:,0],dtram_est.f_i , color = 'r', linewidth = 2, linestyle='--', label='DTRAM' )\n", 347 | "plt.plot( exact[:,0], -np.log(exact[:,1]), color='k', label='exact' )\n", 348 | "plt.xlabel( 'x in [a.u.]', fontsize = 20 )\n", 349 | "plt.ylabel( 'F(x) in [kT]', fontsize = 20 )\n", 350 | "plt.legend(loc=4)\n", 351 | "fig.add_subplot(122)\n", 352 | "plt.plot( exact[:,0],dtram_est.pi_i , color = 'r', linewidth = 2, linestyle='--', label='DTRAM' )\n", 353 | "plt.plot( exact[:,0], exact[:,1], color='k', label='exact' )\n", 354 | "plt.xlabel( 'x in [a.u.]', fontsize = 20 )\n", 355 | "plt.ylabel( 'P(x)', fontsize = 20 )\n", 356 | "plt.semilogy()\n", 357 | "plt.legend(loc=1)\n", 358 | "plt.tight_layout()" 359 | ], 360 | "language": "python", 361 | "metadata": {}, 362 | "outputs": [] 363 | }, 364 | { 365 | "cell_type": "heading", 366 | "level": 4, 367 | "metadata": {}, 368 | "source": [ 369 | "3.2 WHAM" 370 | ] 371 | }, 372 | { 373 | "cell_type": "code", 374 | "collapsed": false, 375 | "input": [ 376 | "wham_est = wham( forge, maxiter=20000, ftol=1.0e-7, verbose=False ) # and we are done, now we can analyse the results\n", 377 | "print \"#===============Thank you for using WHAM==================================================\"\n", 378 | "wham_est.cite(pre=\"# \")\n", 379 | "print \"#=========================================================================================\"" 380 | ], 381 | "language": "python", 382 | "metadata": {}, 383 | "outputs": [] 384 | }, 385 | { 386 | "cell_type": "code", 387 | "collapsed": false, 388 | "input": [ 389 | "fig = plt.figure(1, figsize=(10,5))\n", 390 | "fig.add_subplot(121)\n", 391 | "plt.plot( 
exact[:,0],wham_est.f_i , color='r', linewidth=2, linestyle='--', label='WHAM' )\n", 392 | "plt.plot( exact[:,0], -np.log(exact[:,1]), color='k', label='exact' )\n", 393 | "plt.xlabel( 'x in [a.u.]', fontsize = 20 )\n", 394 | "plt.ylabel( 'F(x) in [kT]', fontsize = 20 )\n", 395 | "plt.legend(loc=4)\n", 396 | "fig.add_subplot(122)\n", 397 | "plt.plot( exact[:,0],wham_est.pi_i , color='r', linewidth=2, linestyle='--', label='WHAM' )\n", 398 | "plt.plot( exact[:,0], exact[:,1], color='k', label='exact' )\n", 399 | "plt.xlabel( 'x in [a.u.]', fontsize = 20 )\n", 400 | "plt.ylabel( 'P(x)', fontsize = 20 )\n", 401 | "plt.semilogy()\n", 402 | "plt.legend(loc=1)\n", 403 | "plt.tight_layout()" 404 | ], 405 | "language": "python", 406 | "metadata": {}, 407 | "outputs": [] 408 | }, 409 | { 410 | "cell_type": "markdown", 411 | "metadata": {}, 412 | "source": [ 413 | "Thank you for running the pyfeat asymetric double-well example. If you have any further questions, or notice any bugs, please notify us on the mailinglist **pyfeat@lists.fu-berlin.de**." 414 | ] 415 | }, 416 | { 417 | "cell_type": "code", 418 | "collapsed": false, 419 | "input": [], 420 | "language": "python", 421 | "metadata": {}, 422 | "outputs": [] 423 | } 424 | ], 425 | "metadata": {} 426 | } 427 | ] 428 | } -------------------------------------------------------------------------------- /pyfeat/_version.py: -------------------------------------------------------------------------------- 1 | 2 | # This file helps to compute a version number in source trees obtained from 3 | # git-archive tarball (such as those provided by githubs download-from-tag 4 | # feature). Distribution tarballs (built by setup.py sdist) and build 5 | # directories (produced by setup.py build) will contain a much shorter file 6 | # that just contains the computed version number. 7 | 8 | # This file is released into the public domain. 
Generated by 9 | # versioneer-0.14+dev (https://github.com/warner/python-versioneer) 10 | 11 | import errno 12 | import os 13 | import re 14 | import subprocess 15 | import sys 16 | 17 | 18 | def get_keywords(): 19 | # these strings will be replaced by git during git-archive. 20 | # setup.py/versioneer.py will grep for the variable names, so they must 21 | # each be defined on a line of their own. _version.py will just call 22 | # get_keywords(). 23 | git_refnames = " (HEAD -> master, tag: 0.3.2)" 24 | git_full = "ee5cb6bd0f2c1900f7fc2665e83d4f152b167465" 25 | keywords = {"refnames": git_refnames, "full": git_full} 26 | return keywords 27 | 28 | 29 | class VersioneerConfig: 30 | pass 31 | 32 | 33 | def get_config(): 34 | # these strings are filled in when 'setup.py versioneer' creates 35 | # _version.py 36 | cfg = VersioneerConfig() 37 | cfg.VCS = "git" 38 | cfg.style = "pep440" 39 | cfg.tag_prefix = "" 40 | cfg.parentdir_prefix = "pyfeat-" 41 | cfg.versionfile_source = "pyfeat/_version.py" 42 | cfg.verbose = False 43 | return cfg 44 | 45 | 46 | class NotThisMethod(Exception): 47 | pass 48 | 49 | 50 | def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): 51 | assert isinstance(commands, list) 52 | p = None 53 | for c in commands: 54 | try: 55 | # remember shell=False, so use git.cmd on windows, not just git 56 | p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, 57 | stderr=(subprocess.PIPE if hide_stderr 58 | else None)) 59 | break 60 | except EnvironmentError: 61 | e = sys.exc_info()[1] 62 | if e.errno == errno.ENOENT: 63 | continue 64 | if verbose: 65 | print("unable to run %s" % args[0]) 66 | print(e) 67 | return None 68 | else: 69 | if verbose: 70 | print("unable to find command, tried %s" % (commands,)) 71 | return None 72 | stdout = p.communicate()[0].strip() 73 | if sys.version_info[0] >= 3: 74 | stdout = stdout.decode() 75 | if p.returncode != 0: 76 | if verbose: 77 | print("unable to run %s (error)" % args[0]) 78 | 
return None 79 | return stdout 80 | 81 | 82 | def versions_from_parentdir(parentdir_prefix, root, verbose): 83 | # Source tarballs conventionally unpack into a directory that includes 84 | # both the project name and a version string. 85 | dirname = os.path.basename(root) 86 | if not dirname.startswith(parentdir_prefix): 87 | if verbose: 88 | print("guessing rootdir is '%s', but '%s' doesn't start with " 89 | "prefix '%s'" % (root, dirname, parentdir_prefix)) 90 | raise NotThisMethod("rootdir doesn't start with parentdir_prefix") 91 | return {"version": dirname[len(parentdir_prefix):], 92 | "full-revisionid": None, 93 | "dirty": False, "error": None} 94 | 95 | 96 | def git_get_keywords(versionfile_abs): 97 | # the code embedded in _version.py can just fetch the value of these 98 | # keywords. When used from setup.py, we don't want to import _version.py, 99 | # so we do it with a regexp instead. This function is not used from 100 | # _version.py. 101 | keywords = {} 102 | try: 103 | f = open(versionfile_abs, "r") 104 | for line in f.readlines(): 105 | if line.strip().startswith("git_refnames ="): 106 | mo = re.search(r'=\s*"(.*)"', line) 107 | if mo: 108 | keywords["refnames"] = mo.group(1) 109 | if line.strip().startswith("git_full ="): 110 | mo = re.search(r'=\s*"(.*)"', line) 111 | if mo: 112 | keywords["full"] = mo.group(1) 113 | f.close() 114 | except EnvironmentError: 115 | pass 116 | return keywords 117 | 118 | 119 | def git_versions_from_keywords(keywords, tag_prefix, verbose): 120 | if not keywords: 121 | raise NotThisMethod("no keywords at all, weird") 122 | refnames = keywords["refnames"].strip() 123 | if refnames.startswith("$Format"): 124 | if verbose: 125 | print("keywords are unexpanded, not using") 126 | raise NotThisMethod("unexpanded keywords, not a git-archive tarball") 127 | refs = set([r.strip() for r in refnames.strip("()").split(",")]) 128 | # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of 129 | # just "foo-1.0". 
If we see a "tag: " prefix, prefer those. 130 | TAG = "tag: " 131 | tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) 132 | if not tags: 133 | # Either we're using git < 1.8.3, or there really are no tags. We use 134 | # a heuristic: assume all version tags have a digit. The old git %d 135 | # expansion behaves like git log --decorate=short and strips out the 136 | # refs/heads/ and refs/tags/ prefixes that would let us distinguish 137 | # between branches and tags. By ignoring refnames without digits, we 138 | # filter out many common branch names like "release" and 139 | # "stabilization", as well as "HEAD" and "master". 140 | tags = set([r for r in refs if re.search(r'\d', r)]) 141 | if verbose: 142 | print("discarding '%s', no digits" % ",".join(refs-tags)) 143 | if verbose: 144 | print("likely tags: %s" % ",".join(sorted(tags))) 145 | for ref in sorted(tags): 146 | # sorting will prefer e.g. "2.0" over "2.0rc1" 147 | if ref.startswith(tag_prefix): 148 | r = ref[len(tag_prefix):] 149 | if verbose: 150 | print("picking %s" % r) 151 | return {"version": r, 152 | "full-revisionid": keywords["full"].strip(), 153 | "dirty": False, "error": None 154 | } 155 | # no suitable tags, so version is "0+unknown", but full hex is still there 156 | if verbose: 157 | print("no suitable tags, using unknown + full revision id") 158 | return {"version": "0+unknown", 159 | "full-revisionid": keywords["full"].strip(), 160 | "dirty": False, "error": "no suitable tags"} 161 | 162 | 163 | def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): 164 | # this runs 'git' from the root of the source tree. This only gets called 165 | # if the git-archive 'subst' keywords were *not* expanded, and 166 | # _version.py hasn't already been rewritten with a short version string, 167 | # meaning we're inside a checked out source tree. 
168 | 169 | if not os.path.exists(os.path.join(root, ".git")): 170 | if verbose: 171 | print("no .git in %s" % root) 172 | raise NotThisMethod("no .git directory") 173 | 174 | GITS = ["git"] 175 | if sys.platform == "win32": 176 | GITS = ["git.cmd", "git.exe"] 177 | # if there is a tag, this yields TAG-NUM-gHEX[-dirty] 178 | # if there are no tags, this yields HEX[-dirty] (no NUM) 179 | describe_out = run_command(GITS, ["describe", "--tags", "--dirty", 180 | "--always", "--long"], 181 | cwd=root) 182 | # --long was added in git-1.5.5 183 | if describe_out is None: 184 | raise NotThisMethod("'git describe' failed") 185 | describe_out = describe_out.strip() 186 | full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) 187 | if full_out is None: 188 | raise NotThisMethod("'git rev-parse' failed") 189 | full_out = full_out.strip() 190 | 191 | pieces = {} 192 | pieces["long"] = full_out 193 | pieces["short"] = full_out[:7] # maybe improved later 194 | pieces["error"] = None 195 | 196 | # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] 197 | # TAG might have hyphens. 198 | git_describe = describe_out 199 | 200 | # look for -dirty suffix 201 | dirty = git_describe.endswith("-dirty") 202 | pieces["dirty"] = dirty 203 | if dirty: 204 | git_describe = git_describe[:git_describe.rindex("-dirty")] 205 | 206 | # now we have TAG-NUM-gHEX or HEX 207 | 208 | if "-" in git_describe: 209 | # TAG-NUM-gHEX 210 | mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) 211 | if not mo: 212 | # unparseable. Maybe git-describe is misbehaving? 
213 | pieces["error"] = ("unable to parse git-describe output: '%s'" 214 | % describe_out) 215 | return pieces 216 | 217 | # tag 218 | full_tag = mo.group(1) 219 | if not full_tag.startswith(tag_prefix): 220 | if verbose: 221 | fmt = "tag '%s' doesn't start with prefix '%s'" 222 | print(fmt % (full_tag, tag_prefix)) 223 | pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" 224 | % (full_tag, tag_prefix)) 225 | return pieces 226 | pieces["closest-tag"] = full_tag[len(tag_prefix):] 227 | 228 | # distance: number of commits since tag 229 | pieces["distance"] = int(mo.group(2)) 230 | 231 | # commit: short hex revision ID 232 | pieces["short"] = mo.group(3) 233 | 234 | else: 235 | # HEX: no tags 236 | pieces["closest-tag"] = None 237 | count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], 238 | cwd=root) 239 | pieces["distance"] = int(count_out) # total number of commits 240 | 241 | return pieces 242 | 243 | 244 | def plus_or_dot(pieces): 245 | if "+" in pieces.get("closest-tag", ""): 246 | return "." 247 | return "+" 248 | 249 | 250 | def render_pep440(pieces): 251 | # now build up version string, with post-release "local version 252 | # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you 253 | # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty 254 | 255 | # exceptions: 256 | # 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] 257 | 258 | if pieces["closest-tag"]: 259 | rendered = pieces["closest-tag"] 260 | if pieces["distance"] or pieces["dirty"]: 261 | rendered += plus_or_dot(pieces) 262 | rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) 263 | if pieces["dirty"]: 264 | rendered += ".dirty" 265 | else: 266 | # exception #1 267 | rendered = "0+untagged.%d.g%s" % (pieces["distance"], 268 | pieces["short"]) 269 | if pieces["dirty"]: 270 | rendered += ".dirty" 271 | return rendered 272 | 273 | 274 | def render_pep440_pre(pieces): 275 | # TAG[.post.devDISTANCE] . 
No -dirty 276 | 277 | # exceptions: 278 | # 1: no tags. 0.post.devDISTANCE 279 | 280 | if pieces["closest-tag"]: 281 | rendered = pieces["closest-tag"] 282 | if pieces["distance"]: 283 | rendered += ".post.dev%d" % pieces["distance"] 284 | else: 285 | # exception #1 286 | rendered = "0.post.dev%d" % pieces["distance"] 287 | return rendered 288 | 289 | 290 | def render_pep440_post(pieces): 291 | # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that 292 | # .dev0 sorts backwards (a dirty tree will appear "older" than the 293 | # corresponding clean one), but you shouldn't be releasing software with 294 | # -dirty anyways. 295 | 296 | # exceptions: 297 | # 1: no tags. 0.postDISTANCE[.dev0] 298 | 299 | if pieces["closest-tag"]: 300 | rendered = pieces["closest-tag"] 301 | if pieces["distance"] or pieces["dirty"]: 302 | rendered += ".post%d" % pieces["distance"] 303 | if pieces["dirty"]: 304 | rendered += ".dev0" 305 | rendered += plus_or_dot(pieces) 306 | rendered += "g%s" % pieces["short"] 307 | else: 308 | # exception #1 309 | rendered = "0.post%d" % pieces["distance"] 310 | if pieces["dirty"]: 311 | rendered += ".dev0" 312 | rendered += "+g%s" % pieces["short"] 313 | return rendered 314 | 315 | 316 | def render_pep440_old(pieces): 317 | # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. 318 | 319 | # exceptions: 320 | # 1: no tags. 0.postDISTANCE[.dev0] 321 | 322 | if pieces["closest-tag"]: 323 | rendered = pieces["closest-tag"] 324 | if pieces["distance"] or pieces["dirty"]: 325 | rendered += ".post%d" % pieces["distance"] 326 | if pieces["dirty"]: 327 | rendered += ".dev0" 328 | else: 329 | # exception #1 330 | rendered = "0.post%d" % pieces["distance"] 331 | if pieces["dirty"]: 332 | rendered += ".dev0" 333 | return rendered 334 | 335 | 336 | def render_git_describe(pieces): 337 | # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty 338 | # --always' 339 | 340 | # exceptions: 341 | # 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) 342 | 343 | if pieces["closest-tag"]: 344 | rendered = pieces["closest-tag"] 345 | if pieces["distance"]: 346 | rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) 347 | else: 348 | # exception #1 349 | rendered = pieces["short"] 350 | if pieces["dirty"]: 351 | rendered += "-dirty" 352 | return rendered 353 | 354 | 355 | def render_git_describe_long(pieces): 356 | # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty 357 | # --always -long'. The distance/hash is unconditional. 358 | 359 | # exceptions: 360 | # 1: no tags. HEX[-dirty] (note: no 'g' prefix) 361 | 362 | if pieces["closest-tag"]: 363 | rendered = pieces["closest-tag"] 364 | rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) 365 | else: 366 | # exception #1 367 | rendered = pieces["short"] 368 | if pieces["dirty"]: 369 | rendered += "-dirty" 370 | return rendered 371 | 372 | 373 | def render(pieces, style): 374 | if pieces["error"]: 375 | return {"version": "unknown", 376 | "full-revisionid": pieces.get("long"), 377 | "dirty": None, 378 | "error": pieces["error"]} 379 | 380 | if not style or style == "default": 381 | style = "pep440" # the default 382 | 383 | if style == "pep440": 384 | rendered = render_pep440(pieces) 385 | elif style == "pep440-pre": 386 | rendered = render_pep440_pre(pieces) 387 | elif style == "pep440-post": 388 | rendered = render_pep440_post(pieces) 389 | elif style == "pep440-old": 390 | rendered = render_pep440_old(pieces) 391 | elif style == "git-describe": 392 | rendered = render_git_describe(pieces) 393 | elif style == "git-describe-long": 394 | rendered = render_git_describe_long(pieces) 395 | else: 396 | raise ValueError("unknown style '%s'" % style) 397 | 398 | return {"version": rendered, "full-revisionid": pieces["long"], 399 | "dirty": pieces["dirty"], "error": None} 400 | 401 | 402 | def get_versions(): 403 | # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. 
If we have 404 | # __file__, we can work backwards from there to the root. Some 405 | # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which 406 | # case we can only use expanded keywords. 407 | 408 | cfg = get_config() 409 | verbose = cfg.verbose 410 | 411 | try: 412 | return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, 413 | verbose) 414 | except NotThisMethod: 415 | pass 416 | 417 | try: 418 | root = os.path.realpath(__file__) 419 | # versionfile_source is the relative path from the top of the source 420 | # tree (where the .git directory might live) to this file. Invert 421 | # this to find the root from __file__. 422 | for i in cfg.versionfile_source.split('/'): 423 | root = os.path.dirname(root) 424 | except NameError: 425 | return {"version": "0+unknown", "full-revisionid": None, 426 | "dirty": None, 427 | "error": "unable to find root of source tree"} 428 | 429 | try: 430 | pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) 431 | return render(pieces, cfg.style) 432 | except NotThisMethod: 433 | pass 434 | 435 | try: 436 | return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) 437 | except NotThisMethod: 438 | pass 439 | 440 | return {"version": "0+unknown", "full-revisionid": None, 441 | "dirty": None, 442 | "error": "unable to compute version"} 443 | -------------------------------------------------------------------------------- /pyfeat/api/api.py: -------------------------------------------------------------------------------- 1 | r""" 2 | 3 | ========================== 4 | API for the pyfeat package 5 | ========================== 6 | 7 | .. moduleauthor:: Antonia Mey 8 | .. 
r"""

==========================
API for the pyfeat package
==========================

.. moduleauthor:: Antonia Mey
.. moduleauthor:: Christoph Wehmeyer

"""

from ..estimator import WHAM, XTRAM, DTRAM, TRAM
from pytram import NotConvergedWarning, ExpressionError
from ..reader import Reader
from ..forge import Forge



########################################################################
#                                                                      #
#   WHAM API function using the mathematical expressions at input      #
#                                                                      #
########################################################################

def wham_from_matrix(N_K_i, b_K_i, maxiter=100, ftol=1.0E-5, verbose=False):
    r"""
    The WHAM API function

    Parameters
    ----------
    N_K_i : numpy.ndarray(shape=(T, M), dtype=numpy.float64)
        total number of counts from simulation at T in M discrete Markov state (bin)
    b_K_i : numpy.ndarray(shape=(T, M), dtype=numpy.float64)
        reduced bias energies at the T thermodynamic and M discrete Markov states
    maxiter : int
        maximum number of self-consistent iteration steps during the optimisation
        of the stationary probabilities
    ftol : float (> 0.0)
        convergence criterion based on the max change in a self-consistent-iteration step
    verbose : boolean
        writes convergence information to stdout during the self-consistent-iteration cycle

    Returns
    -------
    wham_obj : object
        WHAM estimator object with optimised unbiased stationary probabilities

    Raises
    ------
    ExpressionError
        if N_K_i or b_K_i is malformed (re-raised after printing a diagnostic)
    """
    # try to create the WHAM object; malformed input is fatal
    try:
        wham_obj = WHAM(N_K_i, b_K_i)
    except ExpressionError as e:
        # BUGFIX: `except ExpressionError, e` / `print "..."` are Python-2-only
        # syntax and do not even parse under Python 3
        print("# ERROR ############################################################################")
        print("# Your input was faulty!")
        print("# The < %s > object is malformed: %s" % (e.expression, e.msg))
        print("# ABORTING #########################################################################")
        raise
    # try to converge the stationary probabilities; non-convergence is only a warning
    try:
        wham_obj.sc_iteration(maxiter=maxiter, ftol=ftol, verbose=verbose)
    except NotConvergedWarning as e:
        print("# WARNING ##########################################################################")
        print("# WHAM did not converge within %d steps!" % maxiter)
        print("# The last increment was %.6e." % e.increment)
        print("# You should run the < sc_iteration > method again.")
        print("# USE RESULTS WITH CARE ############################################################")
    # BUGFIX: the original returned from a `finally:` block, which silently
    # swallowed *any* exception raised by sc_iteration(); a plain return keeps
    # the intended behaviour (return even when not converged) without masking
    # unexpected errors.  This also matches dtram_from_matrix() below.
    return wham_obj


########################################################################
#                                                                      #
#   WHAM API function                                                  #
#                                                                      #
########################################################################

def wham(forge, maxiter=100, ftol=1.0E-5, verbose=False):
    r"""
    The WHAM API function

    Parameters
    ----------
    forge : object
        data forge or container for pyfeat input data
    maxiter : int
        maximum number of SC iteration steps during the optimisation of the stationary probabilities
    ftol : float (> 0.0)
        convergence criterion based on the max change in a self-consistent-iteration step
    verbose : boolean
        writes convergence information to stdout during the self-consistent-iteration cycle

    Returns
    -------
    wham_obj : object
        WHAM estimator object with optimised unbiased stationary probabilities
    """
    return wham_from_matrix(forge.N_K_i, forge.b_K_i, maxiter=maxiter, ftol=ftol, verbose=verbose)

########################################################################
#                                                                      #
#   dTRAM API function using the mathematical expressions at input     #
#                                                                      #
########################################################################

def dtram_from_matrix(C_K_ij, b_K_i, maxiter=100, ftol=1.0e-5, verbose=False):
    r"""
    The dTRAM API function

    Parameters
    ----------
    C_K_ij : numpy.ndarray(shape=(T, M, M), dtype=numpy.intc)
        transition counts between the M discrete Markov states for each of
        the T thermodynamic ensembles
    b_K_i : numpy.ndarray(shape=(T, M), dtype=numpy.float64)
        bias energies in the T thermodynamic and M discrete Markov states
    maxiter : int
        maximum number of SC iteration steps during the optimisation of the stationary probabilities
    ftol : float (> 0.0)
        convergence criterion based on the max change in a self-consistent-iteration step
    verbose : boolean
        writes convergence information to stdout during the self-consistent-iteration cycle

    Returns
    -------
    dtram_obj : object
        dTRAM estimator object with optimised stationary properties

    Raises
    ------
    ExpressionError
        if C_K_ij or b_K_i is malformed (re-raised after printing a diagnostic)
    """
    # try to create the DTRAM object; malformed input is fatal
    try:
        dtram_obj = DTRAM(C_K_ij, b_K_i)
    except ExpressionError as e:
        print("# ERROR ############################################################################")
        print("# Your input was faulty!")
        print("# The < %s > object is malformed: %s" % (e.expression, e.msg))
        print("# ABORTING #########################################################################")
        raise
    # try to converge the stationary probabilities; non-convergence is only a warning
    try:
        dtram_obj.sc_iteration(maxiter=maxiter, ftol=ftol, verbose=verbose)
    except NotConvergedWarning as e:
        print("# WARNING ##########################################################################")
        print("# dTRAM did not converge within %d steps!" % maxiter)
        print("# The last increment was %.6e." % e.increment)
        print("# You should run the < sc_iteration > method again.")
        print("# USE RESULTS WITH CARE ############################################################")
    return dtram_obj


########################################################################
#                                                                      #
#   dTRAM API function                                                 #
#                                                                      #
########################################################################

def dtram(forge, lag=1, maxiter=100, ftol=1.0e-5, verbose=False):
    r"""
    The dTRAM API function

    Parameters
    ----------
    forge : object
        data forge or container for pyfeat input data
    lag : int
        lagtime at which the countmatrix is estimated
    maxiter : int
        maximum number of SC iteration steps during the optimisation of the stationary probabilities
    ftol : float (> 0.0)
        convergence criterion based on the max change in a self-consistent-iteration step
    verbose : boolean
        writes convergence information to stdout during the self-consistent-iteration cycle

    Returns
    -------
    dtram_obj : object
        dTRAM estimator object with optimised stationary properties
    """
    return dtram_from_matrix(
        forge.get_C_K_ij(lag), forge.b_K_i, maxiter=maxiter, ftol=ftol, verbose=verbose)

########################################################################
#                                                                      #
#   MBAR API function using the mathematical expressions at input      #
#                                                                      #
########################################################################
# NOTE: the MBAR estimator is not exported yet (its import is also commented
# out in pyfeat/estimator/__init__.py); the wrapper is kept below, disabled,
# until the estimator lands.
#def mbar_me( b_IK_x, M_I_x, N_K, maxiter=1000, ftol=1.0e-10, verbose=False ):
#    r"""
#    Parameters
#    ----------
#    b_IK_x : numpy.ndarray( shape=(T,M,M), dtype=numpy.intc )
#        transition counts between the M discrete Markov states for each of the T thermodynamic ensembles
#    M_I_x : numpy.ndarray( shape=(T,M), dtype=numpy.float64 )
#        bias energies in the T thermodynamic and M discrete Markov states
#    N_K : numpy.ndarray( shape=(T), dtype=numpy.float64 )
#        number of samples at each thermodynamic state T
#    maxiter : int
#        maximum number of SC iteration steps during the optimisation of the stationary probabilities
#    ftol : float (> 0.0)
#        convergence criterion based on the max relative change in an self-consistent-iteration step
#    verbose : boolean
#        writes convergence information to stdout during the self-consistent-iteration cycle
#    Returns:
#    -------
#    mbar_obj : obj
#        mbar estimator object with optimised stationary properties
#    """
#    # try to create the MBAR object
#    try:
#        mbar_obj = MBAR( b_IK_x, M_I_x, N_K )
#    except ExpressionError, e:
#        print "# ERROR ############################################################################"
#        print "# Your input was faulty!"
#        print "# The < %s > object is malformed: %s" % ( e.expression, e.msg )
#        print "# ABORTING #########################################################################"
#        raise
#    # try to converge the stationary probabilities
#    try:
#        mbar_obj.sc_iteration( maxiter=maxiter, ftol=ftol, verbose=verbose )
#    except NotConvergedWarning, e:
#        print "# WARNING ##########################################################################"
#        print "# WHAM did not converge within %d steps!" % maxiter
#        print "# The last relative increment was %.6e." % e.relative_increment
#        print "# You should run the < sc_iteration > method again."
#        print "# USE RESULTS WITH CARE ############################################################"
#    finally:
#        return mbar_obj


########################################################################
#                                                                      #
#   MBAR API function                                                  #
#                                                                      #
########################################################################
#def mbar( forge, maxiter = 1000, ftol = 1.0e-10, verbose = False ):
#    r"""
#    Parameters
#    ----------
#    forge : object
#        data forge or container for pyfeat input data
#    maxiter : int
#        maximum number of SC iteration steps during the optimisation of the stationary probabilities
#    ftol : float (> 0.0)
#        convergence criterion based on the max relative change in an self-consistent-iteration step
#    verbose : boolean
#        writes convergence information to stdout during the self-consistent-iteration cycle
#    Returns
#    -------
#    mbar_obj : object
#        mbar estimator object with optimised stationary properties
#    """
#    return mbar_me( forge.b_IK_x, forge.M_K_x, forge.N_K, maxiter=maxiter, ftol=ftol, verbose=verbose )


########################################################################
#                                                                      #
#   xTRAM API function using the mathematical expressions as input     #
#                                                                      #
########################################################################

def xtram_from_matrix(C_K_ij, b_K_x, T_x, M_x, N_K_i, maxiter=100, ftol=1.0e-5, verbose=False):
    r"""
    The xTRAM API function

    Parameters
    ----------
    C_K_ij : numpy.ndarray(shape=(T, M, M), dtype=numpy.intc)
        transition counts between the M discrete Markov states for each of
        the T thermodynamic ensembles
    b_K_x : numpy.ndarray(shape=(T, N), dtype=numpy.float64)
        biasing tensor
    T_x : numpy.ndarray(shape=(N,), dtype=numpy.intc)
        thermodynamic state trajectory
    M_x : numpy.ndarray(shape=(N,), dtype=numpy.intc)
        Markov state trajectories
    N_K_i : numpy.ndarray(shape=(T, M), dtype=numpy.intc)
        number of markov samples in each thermodynamic state
    maxiter : int
        maximum number of SC iteration steps during the optimisation of the stationary probabilities
    ftol : float (> 0.0)
        convergence criterion based on the max change in a self-consistent-iteration step
    verbose : boolean
        writes convergence information to stdout during the self-consistent-iteration cycle

    Returns
    -------
    xtram_obj : object
        xTRAM estimator object with optimised stationary properties

    Raises
    ------
    ExpressionError
        if any input array is malformed (re-raised after printing a diagnostic)
    """
    # try to create the XTRAM object; malformed input is fatal
    try:
        xtram_obj = XTRAM(C_K_ij, b_K_x, T_x, M_x, N_K_i)
    except ExpressionError as e:
        # BUGFIX: `except ExpressionError, e` / `print "..."` are Python-2-only
        # syntax and do not even parse under Python 3
        print("# ERROR ############################################################################")
        print("# Your input was faulty!")
        print("# The < %s > object is malformed: %s" % (e.expression, e.msg))
        print("# ABORTING #########################################################################")
        raise
    # try to converge the stationary probabilities; non-convergence is only a warning
    try:
        xtram_obj.sc_iteration(maxiter=maxiter, ftol=ftol, verbose=verbose)
    except NotConvergedWarning as e:
        print("# WARNING ##########################################################################")
        print("# xTRAM did not converge within %d steps!" % maxiter)
        print("# The last increment was %.6e." % e.increment)
        print("# You should run the < sc_iteration > method again.")
        print("# USE RESULTS WITH CARE ############################################################")
    # BUGFIX: the original returned from a `finally:` block, which silently
    # swallowed *any* exception raised by sc_iteration(); a plain return keeps
    # the intended behaviour without masking unexpected errors.
    return xtram_obj


########################################################################
#                                                                      #
#   xTRAM API function                                                 #
#                                                                      #
########################################################################

def xtram(forge, lag=1, maxiter=100, ftol=1.0e-5, verbose=False):
    r"""
    The xTRAM API function

    Parameters
    ----------
    forge : object
        data forge or container for pyfeat input data
    lag : int
        lagtime at which the countmatrix is estimated
    maxiter : int
        maximum number of SC iteration steps during the optimisation of the stationary probabilities
    ftol : float (> 0.0)
        convergence criterion based on the max change in a self-consistent-iteration step
    verbose : boolean
        writes convergence information to stdout during the self-consistent-iteration cycle

    Returns
    -------
    xtram_obj : object
        xTRAM estimator object with optimised stationary properties
    """
    return xtram_from_matrix(
        forge.get_C_K_ij(lag),
        forge.b_K_x,
        forge.T_x,
        forge.M_x,
        forge.N_K_i,
        maxiter=maxiter,
        ftol=ftol,
        verbose=verbose)


########################################################################
#                                                                      #
#   TRAM API function using the mathematical expressions at input      #
#                                                                      #
########################################################################
def tram_me():
    r"""Placeholder for the expression-level TRAM API (not implemented yet)."""
    raise NotImplementedError('tram_me function has not been implemented yet')

########################################################################
#                                                                      #
#   TRAM API function                                                  #
#                                                                      #
########################################################################
def tram():
    r"""Placeholder for the TRAM API (not implemented yet)."""
    raise NotImplementedError('TRAM API function has not been implemented yet')


########################################################################
#                                                                      #
#   Forge API function                                                 #
#                                                                      #
########################################################################

def convert_data(trajs, b_K_i=None, kT_K=None, kT_target=None):
    r"""
    Build the data forge consumed by the pyfeat estimators.

    Parameters
    ----------
    trajs : list of dictionaries
        simulation trajectories
    b_K_i : numpy.ndarray(shape=(T, M), dtype=numpy.float64)
        reduced bias energies at the T thermodynamic and M discrete Markov states
    kT_K : numpy.nparray(shape=(T,), dtype=numpy.float64)
        list of kT values of each thermodynamic state
    kT_target : int
        target thermodynamic state for which pi_i and f_i will be computed

    Returns
    -------
    forge : pyfeat.Forge object
        container holding all data
    """
    # thin wrapper: all bookkeeping happens inside the Forge constructor
    return Forge(trajs, b_K_i, kT_K, kT_target)

########################################################################
#                                                                      #
#   Reader API function                                                #
#                                                                      #
########################################################################

def read_files(files, b_K_i_file=None, kT_file=None, skiprows=0, maxlength=None, verbose=False):
    r"""
    Parse trajectory (and optional bias/kT) files into a Reader object.

    Parameters
    ----------
    files : list of strings
        file names to read
    b_K_i_file : string (optional)
        file name for discrete estimator data
    kT_file : string (optional)
        file name for kT value listing
    skiprows : int (optional)
        specify the number of skipped lines when reading the trajectory files
    maxlength : int (optional)
        maximum number of data points read from file
    verbose : boolean (optional)
        verbose output during the reading/building process

    Returns
    -------
    reader : pyfeat.Reader object
        augmented trajectory information to be passed to the forge
    """
    # thin wrapper: all parsing happens inside the Reader constructor
    return Reader(files, b_K_i_file, kT_file, skiprows, maxlength, verbose)


# --------------------------------------------------------------------------
# /versioneer.py  (file header of the next file in this dump, kept below;
# its module docstring continues further down)
# --------------------------------------------------------------------------
#
# # Version: 0.14+dev
#
# """
# The Versioneer
# ==============
#
# * like a rocketeer, but for versions!
# * https://github.com/warner/python-versioneer
# * Brian Warner
# * License: Public Domain
# * Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
# * [![Latest Version]
# (https://pypip.in/version/versioneer/badge.svg?style=flat)
# ](https://pypi.python.org/pypi/versioneer/)
# * [![Build Status]
# (https://travis-ci.org/warner/python-versioneer.png?branch=master)
# ](https://travis-ci.org/warner/python-versioneer)
#
# This is a tool for managing a recorded version number in distutils-based
# python projects. The goal is to remove the tedious and error-prone "update
# the embedded version string" step from your release process. Making a new
# release should be as easy as recording a new tag in your version-control
# system, and maybe making new tarballs.
25 | 26 | 27 | ## Quick Install 28 | 29 | * `pip install versioneer` to somewhere to your $PATH 30 | * add a `[versioneer]` section to your setup.cfg (see below) 31 | * run `versioneer install` in your source tree, commit the results 32 | 33 | ## Version Identifiers 34 | 35 | Source trees come from a variety of places: 36 | 37 | * a version-control system checkout (mostly used by developers) 38 | * a nightly tarball, produced by build automation 39 | * a snapshot tarball, produced by a web-based VCS browser, like github's 40 | "tarball from tag" feature 41 | * a release tarball, produced by "setup.py sdist", distributed through PyPI 42 | 43 | Within each source tree, the version identifier (either a string or a number, 44 | this tool is format-agnostic) can come from a variety of places: 45 | 46 | * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows 47 | about recent "tags" and an absolute revision-id 48 | * the name of the directory into which the tarball was unpacked 49 | * an expanded VCS keyword ($Id$, etc) 50 | * a `_version.py` created by some earlier build step 51 | 52 | For released software, the version identifier is closely related to a VCS 53 | tag. Some projects use tag names that include more than just the version 54 | string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool 55 | needs to strip the tag prefix to extract the version identifier. For 56 | unreleased software (between tags), the version identifier should provide 57 | enough information to help developers recreate the same tree, while also 58 | giving them an idea of roughly how old the tree is (after version 1.2, before 59 | version 1.3). 
Many VCS systems can report a description that captures this, 60 | for example 'git describe --tags --dirty --always' reports things like 61 | "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 62 | 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has 63 | uncommitted changes. 64 | 65 | The version identifier is used for multiple purposes: 66 | 67 | * to allow the module to self-identify its version: `myproject.__version__` 68 | * to choose a name and prefix for a 'setup.py sdist' tarball 69 | 70 | ## Theory of Operation 71 | 72 | Versioneer works by adding a special `_version.py` file into your source 73 | tree, where your `__init__.py` can import it. This `_version.py` knows how to 74 | dynamically ask the VCS tool for version information at import time. However, 75 | when you use "setup.py build" or "setup.py sdist", `_version.py` in the new 76 | copy is replaced by a small static file that contains just the generated 77 | version data. 78 | 79 | `_version.py` also contains `$Revision$` markers, and the installation 80 | process marks `_version.py` to have this marker rewritten with a tag name 81 | during the "git archive" command. As a result, generated tarballs will 82 | contain enough information to get the proper version. 83 | 84 | 85 | ## Installation 86 | 87 | First, decide on values for the following configuration variables: 88 | 89 | * `VCS`: the version control system you use. Currently accepts "git". 90 | 91 | * `style`: the style of version string to be produced. See "Styles" below for 92 | details. Defaults to "pep440", which looks like 93 | `TAG[+DISTANCE.gSHORTHASH[.dirty]]`. 94 | 95 | * `versionfile_source`: 96 | 97 | A project-relative pathname into which the generated version strings should 98 | be written. This is usually a `_version.py` next to your project's main 99 | `__init__.py` file, so it can be imported at runtime. 
If your project uses 100 | `src/myproject/__init__.py`, this should be `src/myproject/_version.py`. 101 | This file should be checked in to your VCS as usual: the copy created below 102 | by `setup.py setup_versioneer` will include code that parses expanded VCS 103 | keywords in generated tarballs. The 'build' and 'sdist' commands will 104 | replace it with a copy that has just the calculated version string. 105 | 106 | This must be set even if your project does not have any modules (and will 107 | therefore never import `_version.py`), since "setup.py sdist" -based trees 108 | still need somewhere to record the pre-calculated version strings. Anywhere 109 | in the source tree should do. If there is a `__init__.py` next to your 110 | `_version.py`, the `setup.py setup_versioneer` command (described below) 111 | will append some `__version__`-setting assignments, if they aren't already 112 | present. 113 | 114 | * `versionfile_build`: 115 | 116 | Like `versionfile_source`, but relative to the build directory instead of 117 | the source directory. These will differ when your setup.py uses 118 | 'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`, 119 | then you will probably have `versionfile_build='myproject/_version.py'` and 120 | `versionfile_source='src/myproject/_version.py'`. 121 | 122 | If this is set to None, then `setup.py build` will not attempt to rewrite 123 | any `_version.py` in the built tree. If your project does not have any 124 | libraries (e.g. if it only builds a script), then you should use 125 | `versionfile_build = None` and override `distutils.command.build_scripts` 126 | to explicitly insert a copy of `versioneer.get_version()` into your 127 | generated script. 128 | 129 | * `tag_prefix`: 130 | 131 | a string, like 'PROJECTNAME-', which appears at the start of all VCS tags. 132 | If your tags look like 'myproject-1.2.0', then you should use 133 | tag_prefix='myproject-'. 
If you use unprefixed tags like '1.2.0', this 134 | should be an empty string. 135 | 136 | * `parentdir_prefix`: 137 | 138 | a string, frequently the same as tag_prefix, which appears at the start of 139 | all unpacked tarball filenames. If your tarball unpacks into 140 | 'myproject-1.2.0', this should be 'myproject-'. 141 | 142 | This tool provides one script, named `versioneer`. That script has one mode, 143 | "install", which writes a copy of `versioneer.py` into the current directory 144 | and runs `versioneer.py setup` to finish the installation. 145 | 146 | To versioneer-enable your project: 147 | 148 | * 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and 149 | populating it with the configuration values you decided earlier: 150 | 151 | ```` 152 | [versioneer] 153 | VCS = git 154 | style = pep440 155 | versionfile_source = src/myproject/_version.py 156 | versionfile_build = myproject/_version.py 157 | tag_prefix = "" 158 | parentdir_prefix = myproject- 159 | ```` 160 | 161 | * 2: Run `versioneer install`. This will do the following: 162 | 163 | * copy `versioneer.py` into the top of your source tree 164 | * create `_version.py` in the right place (`versionfile_source`) 165 | * modify your `__init__.py` (if one exists next to `_version.py`) to define 166 | `__version__` (by calling a function from `_version.py`) 167 | * modify your `MANIFEST.in` to include both `versioneer.py` and the 168 | generated `_version.py` in sdist tarballs 169 | 170 | * 3: add a `import versioneer` to your setup.py, and add the following 171 | arguments to the setup() call: 172 | 173 | version=versioneer.get_version(), 174 | cmdclass=versioneer.get_cmdclass(), 175 | 176 | * 4: commit these changes to your VCS. To make sure you won't forget, 177 | `versioneer install` will mark everything it touched for addition using 178 | `git add`. Don't forget to add `setup.py` and `setup.cfg` too. 
179 | 180 | ## Post-Installation Usage 181 | 182 | Once established, all uses of your tree from a VCS checkout should get the 183 | current version string. All generated tarballs should include an embedded 184 | version string (so users who unpack them will not need a VCS tool installed). 185 | 186 | If you distribute your project through PyPI, then the release process should 187 | boil down to two steps: 188 | 189 | * 1: git tag 1.0 190 | * 2: python setup.py register sdist upload 191 | 192 | If you distribute it through github (i.e. users use github to generate 193 | tarballs with `git archive`), the process is: 194 | 195 | * 1: git tag 1.0 196 | * 2: git push; git push --tags 197 | 198 | Currently, all version strings must be based upon a tag. Versioneer will 199 | report "unknown" until your tree has at least one tag in its history. This 200 | restriction will be fixed eventually (see issue #12). 201 | 202 | ## Version-String Flavors 203 | 204 | Code which uses Versioneer can learn about its version string at runtime by 205 | importing `_version` from your main `__init__.py` file and running the 206 | `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can 207 | import the top-level `versioneer.py` and run `get_versions()`. 208 | 209 | Both functions return a dictionary with different flavors of version 210 | information: 211 | 212 | * `['version']`: A condensed version string, rendered using the selected 213 | style. This is the most commonly used value for the project's version 214 | string. The default "pep440" style yields strings like `0.11`, 215 | `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section 216 | below for alternative styles. 217 | 218 | * `['full-revisionid']`: detailed revision identifier. For Git, this is the 219 | full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". 220 | 221 | * `['dirty']`: a boolean, True if the tree has uncommitted changes. 
Note that 222 | this is only accurate if run in a VCS checkout, otherwise it is likely to 223 | be False or None 224 | 225 | * `['error']`: if the version string could not be computed, this will be set 226 | to a string describing the problem, otherwise it will be None. It may be 227 | useful to throw an exception in setup.py if this is set, to avoid e.g. 228 | creating tarballs with a version string of "unknown". 229 | 230 | Some variants are more useful than others. Including `full-revisionid` in a 231 | bug report should allow developers to reconstruct the exact code being tested 232 | (or indicate the presence of local changes that should be shared with the 233 | developers). `version` is suitable for display in an "about" box or a CLI 234 | `--version` output: it can be easily compared against release notes and lists 235 | of bugs fixed in various releases. 236 | 237 | The installer adds the following text to your `__init__.py` to place a basic 238 | version in `YOURPROJECT.__version__`: 239 | 240 | from ._version import get_versions 241 | __version__ = get_versions()['version'] 242 | del get_versions 243 | 244 | ## Styles 245 | 246 | The setup.cfg `style=` configuration controls how the VCS information is 247 | rendered into a version string. 248 | 249 | The default style, "pep440", produces a PEP440-compliant string, equal to the 250 | un-prefixed tag name for actual releases, and containing an additional "local 251 | version" section with more detail for in-between builds. For Git, this is 252 | TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags 253 | --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the 254 | tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and 255 | that this commit is two revisions ("+2") beyond the "0.11" tag. For released 256 | software (exactly equal to a known tag), the identifier will only contain the 257 | stripped tag, e.g. "0.11". 
258 | 259 | Other styles are available. See details.md in the Versioneer source tree for 260 | descriptions. 261 | 262 | 263 | ## Updating Versioneer 264 | 265 | To upgrade your project to a new release of Versioneer, do the following: 266 | 267 | * install the new Versioneer (`pip install -U versioneer` or equivalent) 268 | * edit `setup.cfg`, if necessary, to include any new configuration settings 269 | indicated by the release notes 270 | * re-run `versioneer install` in your source tree, to replace 271 | `SRC/_version.py` 272 | * commit any changed files 273 | 274 | ### Upgrading from 0.10 to 0.11 275 | 276 | You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running 277 | `setup.py setup_versioneer`. This will enable the use of additional 278 | version-control systems (SVN, etc) in the future. 279 | 280 | ### Upgrading from 0.11 to 0.12 281 | 282 | Nothing special. 283 | 284 | ## Upgrading to 0.14 285 | 286 | 0.14 changes the format of the version string. 0.13 and earlier used 287 | hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a 288 | plus-separated "local version" section strings, with dot-separated 289 | components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old 290 | format, but should be ok with the new one. 291 | 292 | ## Upgrading to XXX 293 | 294 | Starting with this version, Versioneer is configured with a `[versioneer]` 295 | section in your `setup.cfg` file. Earlier versions required the `setup.py` to 296 | set attributes on the `versioneer` module immediately after import. The new 297 | version will refuse to run (exception during import) until you have provided 298 | the necessary `setup.cfg` section. 299 | 300 | ## Future Directions 301 | 302 | This tool is designed to make it easily extended to other version-control 303 | systems: all VCS-specific components are in separate directories like 304 | src/git/ . 
# (tail of the versioneer module docstring, continued from above:)
# The top-level `versioneer.py` script is assembled from these
# components by running make-versioneer.py . In the future, make-versioneer.py
# will take a VCS name as an argument, and will construct a version of
# `versioneer.py` that is specific to the given VCS. It might also take the
# configuration arguments that are currently provided manually during
# installation by editing setup.py . Alternatively, it might go the other
# direction and include code from all supported VCS systems, reducing the
# number of intermediate scripts.
#
#
# ## License
#
# To make Versioneer easier to embed, all its code is hereby released into the
# public domain. The `_version.py` that it creates is also in the public
# domain.

from __future__ import print_function
try:
    import configparser
except ImportError:
    import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
try:
    from distutils.command.build import build as _build
    from distutils.command.sdist import sdist as _sdist
    from distutils.core import Command
except ImportError:
    # ROBUSTNESS: distutils was removed from the standard library in
    # Python 3.12.  Only the cmdclass machinery further down needs it; the
    # pure version-computation helpers do not, so degrade gracefully instead
    # of failing at import time.
    _build = _sdist = Command = None


class VersioneerConfig:
    # Plain attribute container for the values read from the [versioneer]
    # section of setup.cfg (VCS, style, versionfile_source, ...).
    pass


def find_setup_cfg():
    """Return the path of the setup.cfg that configures Versioneer.

    The file is looked up next to this script; when ``__file__`` is not
    defined (py2exe/bbfreeze-style frozen interpreters), fall back to the
    current working directory.
    """
    try:
        setup_cfg = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 "setup.cfg")
    except NameError:
        setup_cfg = "setup.cfg"
    return setup_cfg


def get_config():
    """Read setup.cfg and return a populated VersioneerConfig.

    This might raise EnvironmentError (if setup.cfg is missing), or
    configparser.NoSectionError (if it lacks a [versioneer] section), or
    configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    the top of versioneer.py for instructions on writing your setup.cfg .
    """
    # BUGFIX: SafeConfigParser and readfp() were long deprecated and removed
    # in Python 3.12.  ConfigParser exists on both Python 2 and 3; prefer
    # read_file() where available and fall back to readfp() on Python 2.
    parser = configparser.ConfigParser()
    setup_cfg = find_setup_cfg()
    with open(setup_cfg, "r") as f:
        read = getattr(parser, "read_file", None)
        if read is None:  # Python 2: only readfp() exists
            read = parser.readfp
        read(f)
    VCS = parser.get("versioneer", "VCS")  # mandatory

    def get(parser, name):
        # optional keys simply yield None when absent
        if parser.has_option("versioneer", name):
            return parser.get("versioneer", name)
        return None
    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = get(parser, "style") or ""
    cfg.versionfile_source = get(parser, "versionfile_source")
    cfg.versionfile_build = get(parser, "versionfile_build")
    cfg.tag_prefix = get(parser, "tag_prefix")
    cfg.parentdir_prefix = get(parser, "parentdir_prefix")
    cfg.verbose = get(parser, "verbose")
    return cfg


class NotThisMethod(Exception):
    # Raised by one version-discovery strategy to signal "try the next one".
    pass

# these dictionaries contain VCS-specific tools
LONG_VERSION_PY = {}


def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Run the first available of `commands` with `args`; return its stdout.

    `commands` is a list of alternative executable names (e.g. ["git",
    "git.cmd"] on Windows).  Returns None when no command could be started
    or the command exited non-zero.
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # this candidate does not exist; try the next one
                continue
            if verbose:
                print("unable to run %s" % args[0])
                print(e)
            return None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % args[0])
        return None
    return stdout

# NOTE(review): the original file continues here with the embedded
# _version.py template; the assignment is truncated at the edge of this
# view, so its opening lines are preserved below as comments (the rest of
# the template text follows in the dump):
# LONG_VERSION_PY['git'] = '''
# # This file helps to compute a version number in source trees obtained from
# # git-archive tarball (such as
those provided by githubs download-from-tag 417 | # feature). Distribution tarballs (built by setup.py sdist) and build 418 | # directories (produced by setup.py build) will contain a much shorter file 419 | # that just contains the computed version number. 420 | 421 | # This file is released into the public domain. Generated by 422 | # versioneer-0.14+dev (https://github.com/warner/python-versioneer) 423 | 424 | import errno 425 | import os 426 | import re 427 | import subprocess 428 | import sys 429 | 430 | 431 | def get_keywords(): 432 | # these strings will be replaced by git during git-archive. 433 | # setup.py/versioneer.py will grep for the variable names, so they must 434 | # each be defined on a line of their own. _version.py will just call 435 | # get_keywords(). 436 | git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" 437 | git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" 438 | keywords = {"refnames": git_refnames, "full": git_full} 439 | return keywords 440 | 441 | 442 | class VersioneerConfig: 443 | pass 444 | 445 | 446 | def get_config(): 447 | # these strings are filled in when 'setup.py versioneer' creates 448 | # _version.py 449 | cfg = VersioneerConfig() 450 | cfg.VCS = "git" 451 | cfg.style = "%(STYLE)s" 452 | cfg.tag_prefix = "%(TAG_PREFIX)s" 453 | cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" 454 | cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" 455 | cfg.verbose = False 456 | return cfg 457 | 458 | 459 | class NotThisMethod(Exception): 460 | pass 461 | 462 | 463 | def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False): 464 | assert isinstance(commands, list) 465 | p = None 466 | for c in commands: 467 | try: 468 | # remember shell=False, so use git.cmd on windows, not just git 469 | p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE, 470 | stderr=(subprocess.PIPE if hide_stderr 471 | else None)) 472 | break 473 | except EnvironmentError: 474 | e = sys.exc_info()[1] 475 | if e.errno == errno.ENOENT: 476 | continue 
477 | if verbose: 478 | print("unable to run %%s" %% args[0]) 479 | print(e) 480 | return None 481 | else: 482 | if verbose: 483 | print("unable to find command, tried %%s" %% (commands,)) 484 | return None 485 | stdout = p.communicate()[0].strip() 486 | if sys.version_info[0] >= 3: 487 | stdout = stdout.decode() 488 | if p.returncode != 0: 489 | if verbose: 490 | print("unable to run %%s (error)" %% args[0]) 491 | return None 492 | return stdout 493 | 494 | 495 | def versions_from_parentdir(parentdir_prefix, root, verbose): 496 | # Source tarballs conventionally unpack into a directory that includes 497 | # both the project name and a version string. 498 | dirname = os.path.basename(root) 499 | if not dirname.startswith(parentdir_prefix): 500 | if verbose: 501 | print("guessing rootdir is '%%s', but '%%s' doesn't start with " 502 | "prefix '%%s'" %% (root, dirname, parentdir_prefix)) 503 | raise NotThisMethod("rootdir doesn't start with parentdir_prefix") 504 | return {"version": dirname[len(parentdir_prefix):], 505 | "full-revisionid": None, 506 | "dirty": False, "error": None} 507 | 508 | 509 | def git_get_keywords(versionfile_abs): 510 | # the code embedded in _version.py can just fetch the value of these 511 | # keywords. When used from setup.py, we don't want to import _version.py, 512 | # so we do it with a regexp instead. This function is not used from 513 | # _version.py. 
514 | keywords = {} 515 | try: 516 | f = open(versionfile_abs, "r") 517 | for line in f.readlines(): 518 | if line.strip().startswith("git_refnames ="): 519 | mo = re.search(r'=\s*"(.*)"', line) 520 | if mo: 521 | keywords["refnames"] = mo.group(1) 522 | if line.strip().startswith("git_full ="): 523 | mo = re.search(r'=\s*"(.*)"', line) 524 | if mo: 525 | keywords["full"] = mo.group(1) 526 | f.close() 527 | except EnvironmentError: 528 | pass 529 | return keywords 530 | 531 | 532 | def git_versions_from_keywords(keywords, tag_prefix, verbose): 533 | if not keywords: 534 | raise NotThisMethod("no keywords at all, weird") 535 | refnames = keywords["refnames"].strip() 536 | if refnames.startswith("$Format"): 537 | if verbose: 538 | print("keywords are unexpanded, not using") 539 | raise NotThisMethod("unexpanded keywords, not a git-archive tarball") 540 | refs = set([r.strip() for r in refnames.strip("()").split(",")]) 541 | # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of 542 | # just "foo-1.0". If we see a "tag: " prefix, prefer those. 543 | TAG = "tag: " 544 | tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) 545 | if not tags: 546 | # Either we're using git < 1.8.3, or there really are no tags. We use 547 | # a heuristic: assume all version tags have a digit. The old git %%d 548 | # expansion behaves like git log --decorate=short and strips out the 549 | # refs/heads/ and refs/tags/ prefixes that would let us distinguish 550 | # between branches and tags. By ignoring refnames without digits, we 551 | # filter out many common branch names like "release" and 552 | # "stabilization", as well as "HEAD" and "master". 553 | tags = set([r for r in refs if re.search(r'\d', r)]) 554 | if verbose: 555 | print("discarding '%%s', no digits" %% ",".join(refs-tags)) 556 | if verbose: 557 | print("likely tags: %%s" %% ",".join(sorted(tags))) 558 | for ref in sorted(tags): 559 | # sorting will prefer e.g. 
"2.0" over "2.0rc1" 560 | if ref.startswith(tag_prefix): 561 | r = ref[len(tag_prefix):] 562 | if verbose: 563 | print("picking %%s" %% r) 564 | return {"version": r, 565 | "full-revisionid": keywords["full"].strip(), 566 | "dirty": False, "error": None 567 | } 568 | # no suitable tags, so version is "0+unknown", but full hex is still there 569 | if verbose: 570 | print("no suitable tags, using unknown + full revision id") 571 | return {"version": "0+unknown", 572 | "full-revisionid": keywords["full"].strip(), 573 | "dirty": False, "error": "no suitable tags"} 574 | 575 | 576 | def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command): 577 | # this runs 'git' from the root of the source tree. This only gets called 578 | # if the git-archive 'subst' keywords were *not* expanded, and 579 | # _version.py hasn't already been rewritten with a short version string, 580 | # meaning we're inside a checked out source tree. 581 | 582 | if not os.path.exists(os.path.join(root, ".git")): 583 | if verbose: 584 | print("no .git in %%s" %% root) 585 | raise NotThisMethod("no .git directory") 586 | 587 | GITS = ["git"] 588 | if sys.platform == "win32": 589 | GITS = ["git.cmd", "git.exe"] 590 | # if there is a tag, this yields TAG-NUM-gHEX[-dirty] 591 | # if there are no tags, this yields HEX[-dirty] (no NUM) 592 | describe_out = run_command(GITS, ["describe", "--tags", "--dirty", 593 | "--always", "--long"], 594 | cwd=root) 595 | # --long was added in git-1.5.5 596 | if describe_out is None: 597 | raise NotThisMethod("'git describe' failed") 598 | describe_out = describe_out.strip() 599 | full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) 600 | if full_out is None: 601 | raise NotThisMethod("'git rev-parse' failed") 602 | full_out = full_out.strip() 603 | 604 | pieces = {} 605 | pieces["long"] = full_out 606 | pieces["short"] = full_out[:7] # maybe improved later 607 | pieces["error"] = None 608 | 609 | # parse describe_out. 
It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] 610 | # TAG might have hyphens. 611 | git_describe = describe_out 612 | 613 | # look for -dirty suffix 614 | dirty = git_describe.endswith("-dirty") 615 | pieces["dirty"] = dirty 616 | if dirty: 617 | git_describe = git_describe[:git_describe.rindex("-dirty")] 618 | 619 | # now we have TAG-NUM-gHEX or HEX 620 | 621 | if "-" in git_describe: 622 | # TAG-NUM-gHEX 623 | mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) 624 | if not mo: 625 | # unparseable. Maybe git-describe is misbehaving? 626 | pieces["error"] = ("unable to parse git-describe output: '%%s'" 627 | %% describe_out) 628 | return pieces 629 | 630 | # tag 631 | full_tag = mo.group(1) 632 | if not full_tag.startswith(tag_prefix): 633 | if verbose: 634 | fmt = "tag '%%s' doesn't start with prefix '%%s'" 635 | print(fmt %% (full_tag, tag_prefix)) 636 | pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" 637 | %% (full_tag, tag_prefix)) 638 | return pieces 639 | pieces["closest-tag"] = full_tag[len(tag_prefix):] 640 | 641 | # distance: number of commits since tag 642 | pieces["distance"] = int(mo.group(2)) 643 | 644 | # commit: short hex revision ID 645 | pieces["short"] = mo.group(3) 646 | 647 | else: 648 | # HEX: no tags 649 | pieces["closest-tag"] = None 650 | count_out = run_command(GITS, ["rev-list", "HEAD", "--count"], 651 | cwd=root) 652 | pieces["distance"] = int(count_out) # total number of commits 653 | 654 | return pieces 655 | 656 | 657 | def plus_or_dot(pieces): 658 | if "+" in pieces.get("closest-tag", ""): 659 | return "." 660 | return "+" 661 | 662 | 663 | def render_pep440(pieces): 664 | # now build up version string, with post-release "local version 665 | # identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you 666 | # get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty 667 | 668 | # exceptions: 669 | # 1: no tags. git_describe was just HEX. 
0+untagged.DISTANCE.gHEX[.dirty] 670 | 671 | if pieces["closest-tag"]: 672 | rendered = pieces["closest-tag"] 673 | if pieces["distance"] or pieces["dirty"]: 674 | rendered += plus_or_dot(pieces) 675 | rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) 676 | if pieces["dirty"]: 677 | rendered += ".dirty" 678 | else: 679 | # exception #1 680 | rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], 681 | pieces["short"]) 682 | if pieces["dirty"]: 683 | rendered += ".dirty" 684 | return rendered 685 | 686 | 687 | def render_pep440_pre(pieces): 688 | # TAG[.post.devDISTANCE] . No -dirty 689 | 690 | # exceptions: 691 | # 1: no tags. 0.post.devDISTANCE 692 | 693 | if pieces["closest-tag"]: 694 | rendered = pieces["closest-tag"] 695 | if pieces["distance"]: 696 | rendered += ".post.dev%%d" %% pieces["distance"] 697 | else: 698 | # exception #1 699 | rendered = "0.post.dev%%d" %% pieces["distance"] 700 | return rendered 701 | 702 | 703 | def render_pep440_post(pieces): 704 | # TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that 705 | # .dev0 sorts backwards (a dirty tree will appear "older" than the 706 | # corresponding clean one), but you shouldn't be releasing software with 707 | # -dirty anyways. 708 | 709 | # exceptions: 710 | # 1: no tags. 0.postDISTANCE[.dev0] 711 | 712 | if pieces["closest-tag"]: 713 | rendered = pieces["closest-tag"] 714 | if pieces["distance"] or pieces["dirty"]: 715 | rendered += ".post%%d" %% pieces["distance"] 716 | if pieces["dirty"]: 717 | rendered += ".dev0" 718 | rendered += plus_or_dot(pieces) 719 | rendered += "g%%s" %% pieces["short"] 720 | else: 721 | # exception #1 722 | rendered = "0.post%%d" %% pieces["distance"] 723 | if pieces["dirty"]: 724 | rendered += ".dev0" 725 | rendered += "+g%%s" %% pieces["short"] 726 | return rendered 727 | 728 | 729 | def render_pep440_old(pieces): 730 | # TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. 731 | 732 | # exceptions: 733 | # 1: no tags. 
0.postDISTANCE[.dev0] 734 | 735 | if pieces["closest-tag"]: 736 | rendered = pieces["closest-tag"] 737 | if pieces["distance"] or pieces["dirty"]: 738 | rendered += ".post%%d" %% pieces["distance"] 739 | if pieces["dirty"]: 740 | rendered += ".dev0" 741 | else: 742 | # exception #1 743 | rendered = "0.post%%d" %% pieces["distance"] 744 | if pieces["dirty"]: 745 | rendered += ".dev0" 746 | return rendered 747 | 748 | 749 | def render_git_describe(pieces): 750 | # TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty 751 | # --always' 752 | 753 | # exceptions: 754 | # 1: no tags. HEX[-dirty] (note: no 'g' prefix) 755 | 756 | if pieces["closest-tag"]: 757 | rendered = pieces["closest-tag"] 758 | if pieces["distance"]: 759 | rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) 760 | else: 761 | # exception #1 762 | rendered = pieces["short"] 763 | if pieces["dirty"]: 764 | rendered += "-dirty" 765 | return rendered 766 | 767 | 768 | def render_git_describe_long(pieces): 769 | # TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty 770 | # --always -long'. The distance/hash is unconditional. 771 | 772 | # exceptions: 773 | # 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) 774 | 775 | if pieces["closest-tag"]: 776 | rendered = pieces["closest-tag"] 777 | rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) 778 | else: 779 | # exception #1 780 | rendered = pieces["short"] 781 | if pieces["dirty"]: 782 | rendered += "-dirty" 783 | return rendered 784 | 785 | 786 | def render(pieces, style): 787 | if pieces["error"]: 788 | return {"version": "unknown", 789 | "full-revisionid": pieces.get("long"), 790 | "dirty": None, 791 | "error": pieces["error"]} 792 | 793 | if not style or style == "default": 794 | style = "pep440" # the default 795 | 796 | if style == "pep440": 797 | rendered = render_pep440(pieces) 798 | elif style == "pep440-pre": 799 | rendered = render_pep440_pre(pieces) 800 | elif style == "pep440-post": 801 | rendered = render_pep440_post(pieces) 802 | elif style == "pep440-old": 803 | rendered = render_pep440_old(pieces) 804 | elif style == "git-describe": 805 | rendered = render_git_describe(pieces) 806 | elif style == "git-describe-long": 807 | rendered = render_git_describe_long(pieces) 808 | else: 809 | raise ValueError("unknown style '%%s'" %% style) 810 | 811 | return {"version": rendered, "full-revisionid": pieces["long"], 812 | "dirty": pieces["dirty"], "error": None} 813 | 814 | 815 | def get_versions(): 816 | # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have 817 | # __file__, we can work backwards from there to the root. Some 818 | # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which 819 | # case we can only use expanded keywords. 820 | 821 | cfg = get_config() 822 | verbose = cfg.verbose 823 | 824 | try: 825 | return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, 826 | verbose) 827 | except NotThisMethod: 828 | pass 829 | 830 | try: 831 | root = os.path.realpath(__file__) 832 | # versionfile_source is the relative path from the top of the source 833 | # tree (where the .git directory might live) to this file. 
Invert 834 | # this to find the root from __file__. 835 | for i in cfg.versionfile_source.split('/'): 836 | root = os.path.dirname(root) 837 | except NameError: 838 | return {"version": "0+unknown", "full-revisionid": None, 839 | "dirty": None, 840 | "error": "unable to find root of source tree"} 841 | 842 | try: 843 | pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) 844 | return render(pieces, cfg.style) 845 | except NotThisMethod: 846 | pass 847 | 848 | try: 849 | return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) 850 | except NotThisMethod: 851 | pass 852 | 853 | return {"version": "0+unknown", "full-revisionid": None, 854 | "dirty": None, 855 | "error": "unable to compute version"} 856 | ''' 857 | 858 | 859 | def git_get_keywords(versionfile_abs): 860 | # the code embedded in _version.py can just fetch the value of these 861 | # keywords. When used from setup.py, we don't want to import _version.py, 862 | # so we do it with a regexp instead. This function is not used from 863 | # _version.py. 
def git_get_keywords(versionfile_abs):
    """Extract the raw git-archive keyword strings from _version.py.

    The code embedded in _version.py can just fetch the value of these
    keywords. When used from setup.py, we don't want to import _version.py,
    so we grep for the assignments with a regexp instead. Returns a
    possibly-empty dict with "refnames" and/or "full" keys.
    """
    keywords = {}
    try:
        # 'with' guarantees the handle is closed even if readlines() or the
        # parsing raises (the original leaked the handle on such errors)
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        pass  # missing/unreadable file just yields an empty dict
    return keywords


def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Derive a version dict from expanded git-archive substitution keywords.

    Raises NotThisMethod when the keywords are absent or unexpanded (i.e. we
    are not looking at a git-archive tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs-tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None
                    }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags"}
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Interrogate a git checkout under 'root' for version information.

    Only called when the git-archive keywords were *not* expanded and
    _version.py has not been rewritten with a short version string, i.e.
    we are inside a checked-out source tree.  Returns a "pieces" dict.
    """
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        raise NotThisMethod("no .git directory")

    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]

    # With a reachable tag this yields TAG-NUM-gHEX[-dirty]; with no tags
    # at all it yields bare HEX[-dirty].  (--long was added in git-1.5.5.)
    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
                                      "--always", "--long"],
                               cwd=root)
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()

    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces = {"long": full_out,
              "short": full_out[:7],  # may be refined from describe below
              "error": None}

    # Strip a trailing -dirty marker before parsing; TAG itself may
    # contain hyphens, so we only peel from the right.
    git_describe = describe_out
    is_dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = is_dirty
    if is_dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]

    if "-" not in git_describe:
        # bare HEX: the repository has no tags at all
        pieces["closest-tag"] = None
        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
        return pieces

    # TAG-NUM-gHEX
    mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
    if not mo:
        # unparseable. Maybe git-describe is misbehaving?
        pieces["error"] = ("unable to parse git-describe output: '%s'"
                          % describe_out)
        return pieces

    full_tag = mo.group(1)
    if not full_tag.startswith(tag_prefix):
        if verbose:
            fmt = "tag '%s' doesn't start with prefix '%s'"
            print(fmt % (full_tag, tag_prefix))
        pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                          % (full_tag, tag_prefix))
        return pieces

    pieces["closest-tag"] = full_tag[len(tag_prefix):]
    pieces["distance"] = int(mo.group(2))  # commits since that tag
    pieces["short"] = mo.group(3)          # short hex revision ID
    return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """git-add the versioneer support files and ensure .gitattributes
    marks the version file with export-subst (so git-archive expands it)."""
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        # 'with' closes the handle even when a read raises
        # (the original left it open on such errors)
        with open(".gitattributes", "r") as f:
            for line in f.readlines():
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
    except EnvironmentError:
        pass  # no .gitattributes yet: we'll create it below
    if not present:
        with open(".gitattributes", "a+") as f:
            f.write("%s export-subst\n" % versionfile_source)
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)


def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Derive the version from the name of the unpacked source directory.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.  Raises NotThisMethod when
    the directory name doesn't start with parentdir_prefix.
    """
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start with "
                  "prefix '%s'" % (root, dirname, parentdir_prefix))
        raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
    return {"version": dirname[len(parentdir_prefix):],
            "full-revisionid": None,
            "dirty": False, "error": None}


# Template for the short _version.py written into sdists/builds; the
# trailing "END VERSION_JSON" marker is what versions_from_file greps for.
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.14+dev) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.

import json
import sys

version_json = '''
%s
''' # END VERSION_JSON


def get_versions():
    return json.loads(version_json)
"""


def versions_from_file(filename):
    """Load the versions dict embedded in a short _version.py.

    Raises NotThisMethod when the file is unreadable or doesn't contain
    the version_json block.
    """
    try:
        with open(filename) as f:
            contents = f.read()
    except EnvironmentError:
        raise NotThisMethod("unable to read _version.py")
    mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
                   contents, re.M | re.S)
    if not mo:
        raise NotThisMethod("no version_json in _version.py")
    return json.loads(mo.group(1))


def write_to_version_file(filename, versions):
    """Overwrite 'filename' with a short _version.py embedding 'versions'.

    NOTE(review): the unconditional unlink assumes the file already exists,
    which holds for all in-file callers.
    """
    os.unlink(filename)
    contents = json.dumps(versions, sort_keys=True,
                          indent=1, separators=(",", ": "))
    with open(filename, "w") as f:
        f.write(SHORT_VERSION_PY % contents)

    print("set %s to '%s'" % (filename, versions["version"]))


def plus_or_dot(pieces):
    """Return the separator for a PEP 440 local-version segment: '.' when
    the closest tag already carries a '+', else '+'."""
    if "+" in pieces.get("closest-tag", ""):
        return "."
    return "+"


def render_pep440(pieces):
    """Build TAG[+DISTANCE.gHEX[.dirty]] — a PEP 440 version with a
    post-release "local version identifier".

    Note that if you get a tagged build and then dirty it, you'll get
    TAG+0.gHEX.dirty.  Exception: with no tags at all, render
    0+untagged.DISTANCE.gHEX[.dirty].
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += plus_or_dot(pieces)
            rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
            if pieces["dirty"]:
                rendered += ".dirty"
    else:
        # exception: no tags
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered


def render_pep440_pre(pieces):
    """Build TAG[.post.devDISTANCE] — no -dirty marker.

    Exception: with no tags, render 0.post.devDISTANCE.
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += ".post.dev%d" % pieces["distance"]
    else:
        # exception: no tags
        rendered = "0.post.dev%d" % pieces["distance"]
    return rendered


def render_pep440_post(pieces):
    """Build TAG[.postDISTANCE[.dev0]+gHEX].

    The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree
    will appear "older" than the corresponding clean one), but you shouldn't
    be releasing software with -dirty anyways.  Exception: with no tags,
    render 0.postDISTANCE[.dev0].
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
            rendered += plus_or_dot(pieces)
            rendered += "g%s" % pieces["short"]
    else:
        # exception: no tags
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        rendered += "+g%s" % pieces["short"]
    return rendered


def render_pep440_old(pieces):
    """Build TAG[.postDISTANCE[.dev0]] — the ".dev0" means dirty.

    Exception: with no tags, render 0.postDISTANCE[.dev0].
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception: no tags
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """Mimic 'git describe --tags --dirty --always':
    TAG[-DISTANCE-gHEX][-dirty], or HEX[-dirty] when there are no tags
    (note: no 'g' prefix in that case)."""
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            out += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        out = pieces["short"]  # untagged repo: bare short hex
    if pieces["dirty"]:
        out += "-dirty"
    return out


def render_git_describe_long(pieces):
    """Mimic 'git describe --tags --dirty --always --long':
    TAG-DISTANCE-gHEX[-dirty] with the distance/hash always present, or
    HEX[-dirty] when there are no tags (note: no 'g' prefix)."""
    tag = pieces["closest-tag"]
    if tag:
        out = tag + "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        out = pieces["short"]  # untagged repo: bare short hex
    if pieces["dirty"]:
        out += "-dirty"
    return out


def render(pieces, style):
    """Turn a 'pieces' dict into the final versions dict using 'style'."""
    if pieces["error"]:
        # propagate the failure instead of rendering a bogus version
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}

    if not style or style == "default":
        style = "pep440"  # the default

    if style == "pep440":
        version_string = render_pep440(pieces)
    elif style == "pep440-pre":
        version_string = render_pep440_pre(pieces)
    elif style == "pep440-post":
        version_string = render_pep440_post(pieces)
    elif style == "pep440-old":
        version_string = render_pep440_old(pieces)
    elif style == "git-describe":
        version_string = render_git_describe(pieces)
    elif style == "git-describe-long":
        version_string = render_git_describe_long(pieces)
    else:
        raise ValueError("unknown style '%s'" % style)

    return {"version": version_string, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None}


def get_root():
    """Return the directory containing this file (fallback: the directory
    of sys.argv[0] when __file__ is unavailable)."""
    try:
        return os.path.dirname(os.path.abspath(__file__))
    except NameError:
        return os.path.dirname(os.path.abspath(sys.argv[0]))


def vcs_function(vcs, suffix):
    """Look up the module-level helper named '<vcs>_<suffix>', or None."""
    return getattr(sys.modules[__name__], '%s_%s' % (vcs, suffix), None)
def get_versions():
    """Return a dict with 'version', 'full-revisionid', 'dirty', 'error'.

    Version discovery order: expanded git-archive keywords in the source
    version file, then a previously-written short _version.py, then a
    direct VCS query, then the parent-directory naming convention.  Each
    strategy signals "not applicable" via NotThisMethod.
    """
    cfg = get_config()
    verbose = cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
    assert cfg.parentdir_prefix is not None, \
        "please set versioneer.parentdir_prefix"
    assert cfg.VCS is not None, "please set versioneer.VCS"

    # I am in versioneer.py, which must live at the top of the source tree,
    # which we use to compute the root directory. py2exe/bbfreeze/non-CPython
    # don't have __file__, in which case we fall back to sys.argv[0] (which
    # ought to be the setup.py script). We prefer __file__ since that's more
    # robust in cases where setup.py was invoked in some weird way (e.g. pip)
    root = get_root()
    versionfile_abs = os.path.join(root, cfg.versionfile_source)

    # resolve the VCS-specific helpers by name (e.g. git_get_keywords)
    keywords_f = vcs_function(cfg.VCS, "get_keywords")
    from_keywords_f = vcs_function(cfg.VCS, "versions_from_keywords")
    from_vcs_f = vcs_function(cfg.VCS, "pieces_from_vcs")

    # Strategy 1: expanded keywords (git-archive / github tarballs)
    if keywords_f and from_keywords_f:
        try:
            result = from_keywords_f(keywords_f(versionfile_abs),
                                     cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % result)
            return result
        except NotThisMethod:
            pass

    # Strategy 2: a short _version.py written by a previous sdist/build
    try:
        result = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, result))
        return result
    except NotThisMethod:
        pass

    # Strategy 3: ask the VCS directly (developer checkout)
    if from_vcs_f:
        try:
            result = render(from_vcs_f(cfg.tag_prefix, root, verbose),
                            cfg.style)
            if verbose:
                print("got version from VCS %s" % result)
            return result
        except NotThisMethod:
            pass

    # Strategy 4: parent-directory name (unpacked tarball)
    try:
        result = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
        if verbose:
            print("got version from parentdir %s" % result)
        return result
    except NotThisMethod:
        pass

    if verbose:
        print("unable to compute version")
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version"}


def get_version():
    """Convenience wrapper: just the version string."""
    return get_versions()["version"]


class cmd_version(Command):
    """'setup.py version': print the computed version string."""
    description = "report generated version string"
    user_options = []
    boolean_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        print("Version is currently: %s" % get_version())


class cmd_build(_build):
    """'setup.py build' that refreshes _version.py in the build tree."""

    def run(self):
        cfg = get_config()
        versions = get_versions()
        _build.run(self)
        # now locate _version.py in the new build/ directory and replace it
        # with an updated value
        if cfg.versionfile_build:
            target_versionfile = os.path.join(self.build_lib,
                                              cfg.versionfile_build)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile, versions)
cfg.versionfile_build: 1365 | target_versionfile = os.path.join(self.build_lib, 1366 | cfg.versionfile_build) 1367 | print("UPDATING %s" % target_versionfile) 1368 | write_to_version_file(target_versionfile, versions) 1369 | 1370 | if 'cx_Freeze' in sys.modules: # cx_freeze enabled? 1371 | from cx_Freeze.dist import build_exe as _build_exe 1372 | 1373 | class cmd_build_exe(_build_exe): 1374 | def run(self): 1375 | cfg = get_config() 1376 | versions = get_versions() 1377 | target_versionfile = cfg.versionfile_source 1378 | print("UPDATING %s" % target_versionfile) 1379 | write_to_version_file(target_versionfile, versions) 1380 | 1381 | _build_exe.run(self) 1382 | os.unlink(target_versionfile) 1383 | with open(cfg.versionfile_source, "w") as f: 1384 | assert cfg.VCS is not None, "please set versioneer.VCS" 1385 | LONG = LONG_VERSION_PY[cfg.VCS] 1386 | f.write(LONG % {"DOLLAR": "$", 1387 | "STYLE": cfg.style, 1388 | "TAG_PREFIX": cfg.tag_prefix, 1389 | "PARENTDIR_PREFIX": cfg.parentdir_prefix, 1390 | "VERSIONFILE_SOURCE": cfg.versionfile_source, 1391 | }) 1392 | 1393 | 1394 | class cmd_sdist(_sdist): 1395 | def run(self): 1396 | versions = get_versions() 1397 | self._versioneer_generated_versions = versions 1398 | # unless we update this, the command will keep using the old version 1399 | self.distribution.metadata.version = versions["version"] 1400 | return _sdist.run(self) 1401 | 1402 | def make_release_tree(self, base_dir, files): 1403 | cfg = get_config() 1404 | _sdist.make_release_tree(self, base_dir, files) 1405 | # now locate _version.py in the new base_dir directory (remembering 1406 | # that it may be a hardlink) and replace it with an updated value 1407 | target_versionfile = os.path.join(base_dir, cfg.versionfile_source) 1408 | print("UPDATING %s" % target_versionfile) 1409 | write_to_version_file(target_versionfile, 1410 | self._versioneer_generated_versions) 1411 | 1412 | 1413 | def get_cmdclass(): 1414 | cmds = {'version': cmd_version, 1415 | 'build': 
cmd_build, 1416 | 'sdist': cmd_sdist, 1417 | } 1418 | if 'cx_Freeze' in sys.modules: # cx_freeze enabled? 1419 | cmds['build_exe'] = cmd_build_exe 1420 | del cmds['build'] 1421 | 1422 | return cmds 1423 | 1424 | 1425 | CONFIG_ERROR = """ 1426 | setup.cfg is missing the necessary Versioneer configuration. You need 1427 | a section like: 1428 | 1429 | [versioneer] 1430 | VCS = git 1431 | style = pep440 1432 | versionfile_source = src/myproject/_version.py 1433 | versionfile_build = myproject/_version.py 1434 | tag_prefix = "" 1435 | parentdir_prefix = myproject- 1436 | 1437 | You will also need to edit your setup.py to use the results: 1438 | 1439 | import versioneer 1440 | setup(version=versioneer.get_version(), 1441 | cmdclass=versioneer.get_cmdclass(), ...) 1442 | 1443 | Please read the docstring in ./versioneer.py for configuration instructions, 1444 | edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. 1445 | """ 1446 | 1447 | SAMPLE_CONFIG = """ 1448 | # See the docstring in versioneer.py for instructions. Note that you must 1449 | # re-run 'versioneer.py setup' after changing this section, and commit the 1450 | # resulting files. 
1451 | 1452 | [versioneer] 1453 | #VCS = git 1454 | #style = pep440 1455 | #versionfile_source = 1456 | #versionfile_build = 1457 | #tag_prefix = 1458 | #parentdir_prefix = 1459 | 1460 | """ 1461 | 1462 | INIT_PY_SNIPPET = """ 1463 | from ._version import get_versions 1464 | __version__ = get_versions()['version'] 1465 | del get_versions 1466 | """ 1467 | 1468 | 1469 | def do_setup(): 1470 | try: 1471 | cfg = get_config() 1472 | except (EnvironmentError, configparser.NoSectionError, 1473 | configparser.NoOptionError) as e: 1474 | if isinstance(e, (EnvironmentError, configparser.NoSectionError)): 1475 | print("Adding sample versioneer config to setup.cfg", 1476 | file=sys.stderr) 1477 | with open(find_setup_cfg(), "a") as f: 1478 | f.write(SAMPLE_CONFIG) 1479 | print(CONFIG_ERROR, file=sys.stderr) 1480 | return 1 1481 | 1482 | print(" creating %s" % cfg.versionfile_source) 1483 | with open(cfg.versionfile_source, "w") as f: 1484 | assert cfg.VCS is not None, "please set versioneer.VCS" 1485 | LONG = LONG_VERSION_PY[cfg.VCS] 1486 | f.write(LONG % {"DOLLAR": "$", 1487 | "STYLE": cfg.style, 1488 | "TAG_PREFIX": cfg.tag_prefix, 1489 | "PARENTDIR_PREFIX": cfg.parentdir_prefix, 1490 | "VERSIONFILE_SOURCE": cfg.versionfile_source, 1491 | }) 1492 | 1493 | ipy = os.path.join(os.path.dirname(cfg.versionfile_source), 1494 | "__init__.py") 1495 | if os.path.exists(ipy): 1496 | try: 1497 | with open(ipy, "r") as f: 1498 | old = f.read() 1499 | except EnvironmentError: 1500 | old = "" 1501 | if INIT_PY_SNIPPET not in old: 1502 | print(" appending to %s" % ipy) 1503 | with open(ipy, "a") as f: 1504 | f.write(INIT_PY_SNIPPET) 1505 | else: 1506 | print(" %s unmodified" % ipy) 1507 | else: 1508 | print(" %s doesn't exist, ok" % ipy) 1509 | ipy = None 1510 | 1511 | # Make sure both the top-level "versioneer.py" and versionfile_source 1512 | # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so 1513 | # they'll be copied into source distributions. 
Pip won't be able to 1514 | # install the package without this. 1515 | manifest_in = os.path.join(get_root(), "MANIFEST.in") 1516 | simple_includes = set() 1517 | try: 1518 | with open(manifest_in, "r") as f: 1519 | for line in f: 1520 | if line.startswith("include "): 1521 | for include in line.split()[1:]: 1522 | simple_includes.add(include) 1523 | except EnvironmentError: 1524 | pass 1525 | # That doesn't cover everything MANIFEST.in can do 1526 | # (http://docs.python.org/2/distutils/sourcedist.html#commands), so 1527 | # it might give some false negatives. Appending redundant 'include' 1528 | # lines is safe, though. 1529 | if "versioneer.py" not in simple_includes: 1530 | print(" appending 'versioneer.py' to MANIFEST.in") 1531 | with open(manifest_in, "a") as f: 1532 | f.write("include versioneer.py\n") 1533 | else: 1534 | print(" 'versioneer.py' already in MANIFEST.in") 1535 | if cfg.versionfile_source not in simple_includes: 1536 | print(" appending versionfile_source ('%s') to MANIFEST.in" % 1537 | cfg.versionfile_source) 1538 | with open(manifest_in, "a") as f: 1539 | f.write("include %s\n" % cfg.versionfile_source) 1540 | else: 1541 | print(" versionfile_source already in MANIFEST.in") 1542 | 1543 | # Make VCS-specific changes. For git, this means creating/changing 1544 | # .gitattributes to mark _version.py for export-time keyword 1545 | # substitution. 
1546 | do_vcs_install(manifest_in, cfg.versionfile_source, ipy) 1547 | return 0 1548 | 1549 | 1550 | def scan_setup_py(): 1551 | found = set() 1552 | setters = False 1553 | errors = 0 1554 | with open("setup.py", "r") as f: 1555 | for line in f.readlines(): 1556 | if "import versioneer" in line: 1557 | found.add("import") 1558 | if "versioneer.get_cmdclass()" in line: 1559 | found.add("cmdclass") 1560 | if "versioneer.get_version()" in line: 1561 | found.add("get_version") 1562 | if "versioneer.VCS" in line: 1563 | setters = True 1564 | if "versioneer.versionfile_source" in line: 1565 | setters = True 1566 | if len(found) != 3: 1567 | print("") 1568 | print("Your setup.py appears to be missing some important items") 1569 | print("(but I might be wrong). Please make sure it has something") 1570 | print("roughly like the following:") 1571 | print("") 1572 | print(" import versioneer") 1573 | print(" setup( version=versioneer.get_version(),") 1574 | print(" cmdclass=versioneer.get_cmdclass(), ...)") 1575 | print("") 1576 | errors += 1 1577 | if setters: 1578 | print("You should remove lines like 'versioneer.vcs = ' and") 1579 | print("'versioneer.versionfile_source = ' . This configuration") 1580 | print("now lives in setup.cfg, and should be removed from setup.py") 1581 | print("") 1582 | errors += 1 1583 | return errors 1584 | 1585 | if __name__ == "__main__": 1586 | cmd = sys.argv[1] 1587 | if cmd == "setup": 1588 | errors = do_setup() 1589 | errors += scan_setup_py() 1590 | if errors: 1591 | sys.exit(1) 1592 | --------------------------------------------------------------------------------