├── .gitignore
├── LICENSE
├── README.md
├── graphtime
├── __init__.py
├── ising_utils.py
├── markov_random_fields.py
├── test
│ ├── __init__.py
│ └── dmrf_test.py
└── utils.py
├── notebooks
├── DGM_BBA.ipynb
├── DGM_Villin.ipynb
├── IsingExample.ipynb
└── WLALL_peptide.ipynb
└── setup.py
/.gitignore:
--------------------------------------------------------------------------------
1 | build/
2 | .vscode/
3 | *__pycache__*
4 | *.egg-info
5 | dist/
6 | *.pyc
7 | files.txt
8 | #*.pdf
9 | #*.png
10 | *\#*#
11 | *[#]*
12 | *[~]*
13 | *-checkpoint.ipynb
14 | *.DS_store
15 | paper/Coverletter.*
16 | .eggs/*
17 | node_modules/
18 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU LESSER GENERAL PUBLIC LICENSE
2 | Version 3, 29 June 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.
5 | Everyone is permitted to copy and distribute verbatim copies
6 | of this license document, but changing it is not allowed.
7 |
8 |
9 | This version of the GNU Lesser General Public License incorporates
10 | the terms and conditions of version 3 of the GNU General Public
11 | License, supplemented by the additional permissions listed below.
12 |
13 | 0. Additional Definitions.
14 |
15 | As used herein, "this License" refers to version 3 of the GNU Lesser
16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU
17 | General Public License.
18 |
19 | "The Library" refers to a covered work governed by this License,
20 | other than an Application or a Combined Work as defined below.
21 |
22 | An "Application" is any work that makes use of an interface provided
23 | by the Library, but which is not otherwise based on the Library.
24 | Defining a subclass of a class defined by the Library is deemed a mode
25 | of using an interface provided by the Library.
26 |
27 | A "Combined Work" is a work produced by combining or linking an
28 | Application with the Library. The particular version of the Library
29 | with which the Combined Work was made is also called the "Linked
30 | Version".
31 |
32 | The "Minimal Corresponding Source" for a Combined Work means the
33 | Corresponding Source for the Combined Work, excluding any source code
34 | for portions of the Combined Work that, considered in isolation, are
35 | based on the Application, and not on the Linked Version.
36 |
37 | The "Corresponding Application Code" for a Combined Work means the
38 | object code and/or source code for the Application, including any data
39 | and utility programs needed for reproducing the Combined Work from the
40 | Application, but excluding the System Libraries of the Combined Work.
41 |
42 | 1. Exception to Section 3 of the GNU GPL.
43 |
44 | You may convey a covered work under sections 3 and 4 of this License
45 | without being bound by section 3 of the GNU GPL.
46 |
47 | 2. Conveying Modified Versions.
48 |
49 | If you modify a copy of the Library, and, in your modifications, a
50 | facility refers to a function or data to be supplied by an Application
51 | that uses the facility (other than as an argument passed when the
52 | facility is invoked), then you may convey a copy of the modified
53 | version:
54 |
55 | a) under this License, provided that you make a good faith effort to
56 | ensure that, in the event an Application does not supply the
57 | function or data, the facility still operates, and performs
58 | whatever part of its purpose remains meaningful, or
59 |
60 | b) under the GNU GPL, with none of the additional permissions of
61 | this License applicable to that copy.
62 |
63 | 3. Object Code Incorporating Material from Library Header Files.
64 |
65 | The object code form of an Application may incorporate material from
66 | a header file that is part of the Library. You may convey such object
67 | code under terms of your choice, provided that, if the incorporated
68 | material is not limited to numerical parameters, data structure
69 | layouts and accessors, or small macros, inline functions and templates
70 | (ten or fewer lines in length), you do both of the following:
71 |
72 | a) Give prominent notice with each copy of the object code that the
73 | Library is used in it and that the Library and its use are
74 | covered by this License.
75 |
76 | b) Accompany the object code with a copy of the GNU GPL and this license
77 | document.
78 |
79 | 4. Combined Works.
80 |
81 | You may convey a Combined Work under terms of your choice that,
82 | taken together, effectively do not restrict modification of the
83 | portions of the Library contained in the Combined Work and reverse
84 | engineering for debugging such modifications, if you also do each of
85 | the following:
86 |
87 | a) Give prominent notice with each copy of the Combined Work that
88 | the Library is used in it and that the Library and its use are
89 | covered by this License.
90 |
91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license
92 | document.
93 |
94 | c) For a Combined Work that displays copyright notices during
95 | execution, include the copyright notice for the Library among
96 | these notices, as well as a reference directing the user to the
97 | copies of the GNU GPL and this license document.
98 |
99 | d) Do one of the following:
100 |
101 | 0) Convey the Minimal Corresponding Source under the terms of this
102 | License, and the Corresponding Application Code in a form
103 | suitable for, and under terms that permit, the user to
104 | recombine or relink the Application with a modified version of
105 | the Linked Version to produce a modified Combined Work, in the
106 | manner specified by section 6 of the GNU GPL for conveying
107 | Corresponding Source.
108 |
109 | 1) Use a suitable shared library mechanism for linking with the
110 | Library. A suitable mechanism is one that (a) uses at run time
111 | a copy of the Library already present on the user's computer
112 | system, and (b) will operate properly with a modified version
113 | of the Library that is interface-compatible with the Linked
114 | Version.
115 |
116 | e) Provide Installation Information, but only if you would otherwise
117 | be required to provide such information under section 6 of the
118 | GNU GPL, and only to the extent that such information is
119 | necessary to install and execute a modified version of the
120 | Combined Work produced by recombining or relinking the
121 | Application with a modified version of the Linked Version. (If
122 | you use option 4d0, the Installation Information must accompany
123 | the Minimal Corresponding Source and Corresponding Application
124 | Code. If you use option 4d1, you must provide the Installation
125 | Information in the manner specified by section 6 of the GNU GPL
126 | for conveying Corresponding Source.)
127 |
128 | 5. Combined Libraries.
129 |
130 | You may place library facilities that are a work based on the
131 | Library side by side in a single library together with other library
132 | facilities that are not Applications and are not covered by this
133 | License, and convey such a combined library under terms of your
134 | choice, if you do both of the following:
135 |
136 | a) Accompany the combined library with a copy of the same work based
137 | on the Library, uncombined with any other library facilities,
138 | conveyed under the terms of this License.
139 |
140 | b) Give prominent notice with the combined library that part of it
141 | is a work based on the Library, and explaining where to find the
142 | accompanying uncombined form of the same work.
143 |
144 | 6. Revised Versions of the GNU Lesser General Public License.
145 |
146 | The Free Software Foundation may publish revised and/or new versions
147 | of the GNU Lesser General Public License from time to time. Such new
148 | versions will be similar in spirit to the present version, but may
149 | differ in detail to address new problems or concerns.
150 |
151 | Each version is given a distinguishing version number. If the
152 | Library as you received it specifies that a certain numbered version
153 | of the GNU Lesser General Public License "or any later version"
154 | applies to it, you have the option of following the terms and
155 | conditions either of that published version or of any later version
156 | published by the Free Software Foundation. If the Library as you
157 | received it does not specify a version number of the GNU Lesser
158 | General Public License, you may choose any version of the GNU Lesser
159 | General Public License ever published by the Free Software Foundation.
160 |
161 | If the Library as you received it specifies that a proxy can decide
162 | whether future versions of the GNU Lesser General Public License shall
163 | apply, that proxy's public statement of acceptance of any version is
164 | permanent authorization for you to choose that version for the
165 | Library.
166 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # graphtime
2 | A python module for estimation and analysis of dynamic graphical models to encode transition densities.
3 |
4 | In particular, `graphtime` implements _dynamic Markov random fields_ (dMRF) or "dynamic Ising models", as a particular case of dynamic graphical models (DGM). DGMs represent molecular configurations using multiple features (_sub-systems_), e.g. torsion angles or contacts. This is in contrast to the single global state used in, for example, Markov state models. The advantage of this kind of model is that the number of parameters needed to be estimated is only quadratic in the number of sub-system states, rather than being exponential in the number of meta-stable states.
5 |
6 | The dMRF models the interactions between the different sub-systems, or more specifically, how a current configuration of the sub-systems encodes the distribution of sub-system states at a time $t+\tau$ in the future. Like Markov state models, dMRFs are fully probabilistic.
7 |
8 | Although this library was developed with application to molecular systems in mind, there is currently no functionality to analyse and featurize molecular simulation data within `graphtime` and this is not planned. However, there are several packages that do this, including `MDTraj`, `pyEMMA` and `mdanalysis`. `graphtime` depends on a `straj` as input for estimation, which is a list of `numpy` arrays with the dimensions $k\times N$, with $k$ being the number of sub-systems and $N$ being the number of frames of the molecular simulation.
9 |
10 | Further details can be found in the manuscript:
11 |
12 | S. Olsson and F. Noé "Dynamic Graphical Models of Molecular Kinetics" _in review._ [pre-print](https://www.biorxiv.org/content/early/2018/11/09/467050)
13 |
14 |
15 | ### Dependencies
16 | The `graphtime` library is minimalistic and makes extensive use of `numpy` and `sklearn`.
17 |
18 | - python >= 3.6.1
19 | - numpy >= 1.3
20 | - scikit-learn >= 0.19.0
21 | - scipy >= 1.1.0
22 | - msmtools >= 1.2.1
23 | - pyemma >= 2.5.2
24 |
25 | ### Installation
26 |
27 | Clone this repository and test
28 | `python setup.py test`
29 |
30 | If successful, install using
31 | `python setup.py install`
32 |
33 | ### Issues and bugs
34 | If you are having problems using this library or discover any bugs, please get in touch through the issues section of the `graphtime` GitHub repository. For bug reports please provide a reproducible example.
35 |
--------------------------------------------------------------------------------
/graphtime/__init__.py:
--------------------------------------------------------------------------------
1 | __version__ = "0.0a"
--------------------------------------------------------------------------------
/graphtime/ising_utils.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | import numpy as _np
3 | from scipy.linalg import expm as _expm
4 | import itertools as _itrt
5 |
def Ising_tmatrix(nspins, alpha=0.1, gamma=0.95, ratematrix=False):
    """
    Build the transition matrix of Glauber's master-equation variant of the
    1D Ising model with periodic boundary conditions.

    (J Math Phys 4 294 (1963); doi: 10.1063/1.1703954)

    Parameters
    ----------
    nspins : int
        Number of spins; the returned matrices are 2**nspins x 2**nspins.
    alpha : float, default 0.1
        Basal spin flip-rate, sets the overall time-scale.
    gamma : float, default 0.95
        Equals tanh(2*beta*J), where J is the spin-spin coupling constant of
        the corresponding Ising model and beta the inverse temperature.
    ratematrix : bool, default False
        If True, also return the rate matrix W.

    Returns
    -------
    T : ndarray
        Transition matrix expm(W).
    W : ndarray
        Rate matrix; only returned when ratematrix is True.
    """
    states = [_np.array(s) for s in _itrt.product([-1, 1], repeat=nspins)]
    nstates = len(states)
    W = _np.zeros((nstates, nstates))
    for i, s in enumerate(states):
        for j, c in enumerate(states):
            if i == j:
                # diagonal is filled from the row sums below
                continue
            flipped = _np.flatnonzero(s != c)
            # only single spin-flips carry a non-zero Glauber rate
            if flipped.size == 1:
                f = flipped[0]
                # s[f-1] wraps via negative indexing; (f+1)%nspins wraps the
                # other end -> periodic boundary conditions
                neighbours = s[f - 1] + s[(f + 1) % nspins]
                W[i, j] = 0.5 * alpha * (1.0 - 0.5 * gamma * s[f] * neighbours)
    # rows of a rate matrix sum to zero
    W[_np.diag_indices(nstates)] = -W.sum(axis=1)
    T = _expm(W)
    return (T, W) if ratematrix else T
39 |
def Ising_to_discrete_state(X):
    """
    Map trajectories of spin states to trajectories of unique (global) states.

    Useful when estimating models that use a global discretization, e.g. MSMs.

    Parameters
    ----------
    X : list of ndarrays
        Trajectories of spin states of shape (T, M), where T is the number of
        time-steps and M the number of binary (-1/1) spins.

    Returns
    -------
    dts : list of lists
        Discrete state trajectories; each spin configuration is read as a
        binary number (spin -1 -> bit 0, spin +1 -> bit 1).
    """
    dts = []
    for x in X:
        # work on a copy: the original implementation overwrote -1 entries in
        # the caller's arrays in place
        x = _np.array(x, copy=True)
        x[x == -1] = 0
        dts.append([int(''.join(map(str, f)), 2) for f in x])
    return dts
55 |
56 | all_Ising_states = lambda nspins:_np.array(list(_itrt.product([-1, 1], repeat = nspins)))
57 |
58 |
59 |
--------------------------------------------------------------------------------
/graphtime/markov_random_fields.py:
--------------------------------------------------------------------------------
1 | import itertools as _itrt
2 | import numpy as _np
3 | from sklearn.preprocessing import OneHotEncoder as _OneHotEncoder
4 | from sklearn.linear_model import LogisticRegression as _LogisticRegression
5 |
class dMRF(object):
    """
    Implements a dynamic Markov random field model as described in Olsson and Noe 2018.

    Holds one fitted (logistic or softmax) regression per active sub-system;
    together these encode the conditional distribution of every sub-system's
    state at time t+lag given the full configuration at time t.
    """

    def __init__(self, lrs, active_subsystems, lag = 1, enc = None, estimated = False):
        """
        Constructor for dMRF class.

        Arguments:
        --------------------
        lrs : list of LogisticRegression instances (sklearn)
        active_subsystems : list of active sub-systems
        lag : int, lag-time of the model (in units of the input time-step)
        enc : fitted categorical encoder used to encode global configurations
        estimated : bool, indicator whether dMRF is estimated
        """
        self.nsubsys_ = len(lrs)
        self.lrs = lrs
        self.encoder = enc
        self.active_subsystems_ = active_subsystems
        self.estimated_ = estimated
        self.lag = lag

    def simulate(self, nsteps, start = None):
        """
        Simulate a trajectory of all active sub-systems described by dMRF.

        Arguments:
        --------------------
        nsteps (int) number of steps (in lag-time of model)
        start (ndarray) initial configuration of trajectory. If not given, initial state is randomized.

        returns:
            ndarray of shape (nsteps, nsubsystems) with sub-system states.
        """
        # if there is no initial condition generate one uniformly at random
        if not isinstance(start, _np.ndarray):
            _s = _np.array([lr.classes_[_np.random.randint(0, len(lr.classes_))] for lr in self.lrs])
        else:
            _s = start.copy()

        _states = _np.zeros((self.nsubsys_, nsteps))
        _states[:, 0] = _s.copy()

        idx_ = [lr.classes_.tolist() for lr in self.lrs]

        # define implicit functions for sub-system sampling.
        # this avoids using np.random.multinomial for binary sub-systems
        # which can lead to a significant speedup.
        # For binary sub-systems, p(class 0) < rand is True (index 1) with
        # probability p(class 1), i.e. a Bernoulli draw over the two classes.
        state_samplers = [(lambda es, lr: lr.predict_proba(es).ravel()[0] < _np.random.rand())
                          if len(i) == 2 else (lambda es, lr: _np.random.multinomial(1,
                              pvals = lr.predict_proba(es).ravel()).argmax())
                          for i in idx_]

        for n in range(nsteps - 1):
            # for every sub-system sample a new configuration given the
            # current global configuration
            encoded_state = self.encoder.transform(_states[:, n].reshape(1, -1))
            for j, (sampler, lr) in enumerate(zip(state_samplers, self.lrs)):
                _states[j, n + 1] = idx_[j][sampler(encoded_state, lr)]
        return _states.T

    def generate_transition_matrix(self, safemode = True, maxdim = 10000):
        """
        Compute full transition probability matrix of dMRF.

        Arguments:
        --------------------
        safemode (bool=True) enable safemode, checks whether output dimension is below
            maxdim times maxdim prior to allocating memory.
        maxdim (int=10000) maximum dimension of transition matrix (used if safemode=True)

        Raises:
            MemoryError if safemode is enabled and the matrix would exceed maxdim.
        """
        ndims = _np.prod([len(lr.classes_) for lr in self.lrs])
        if ndims > maxdim and safemode:
            # '{:d}' is the integer format code; the original '{:i}' is not a
            # valid format spec and made this raise ValueError instead of
            # MemoryError
            raise MemoryError(
                'Maximum safe-mode transition matrix dimension ({:d}x{:d}) exceeded.'.format(maxdim, maxdim))
        idx_ = [lr.classes_.tolist() for lr in self.lrs]

        T = _np.zeros((ndims, ndims))

        for i, s in enumerate(_itrt.product(*[lr.classes_ for lr in self.lrs])):
            _se = self.encoder.transform(_np.array([s]))
            # compute transition probabilities for each sub-system state at time t+\tau
            tprobs = [lr.predict_proba(_se) for lr in self.lrs]
            for j, z_ in enumerate(_itrt.product(*[lr.classes_ for lr in self.lrs])):
                # joint transition probability factorizes over sub-systems
                T[i, j] = _np.prod([tprob[:, idx.index(z)] for idx, tprob, z in zip(idx_, tprobs, list(z_))])
        return T

    def get_subsystem_couplings(self):
        """
        Returns estimated sub-system couplings (J), ndarray
        """
        return _np.vstack([lr.coef_ for lr in self.lrs])

    def get_subsystem_biases(self):
        """
        Returns estimated sub-system biases (h), ndarray
        """
        return _np.concatenate([lr.intercept_ for lr in self.lrs])

    def get_active_subsystems(self):
        """
        Return indices of sub-systems active in dMRF
        """
        return self.active_subsystems_

    def get_subsystem_count(self):
        """
        Returns number of active sub-systems
        """
        return self.nsubsys_
114 |
def estimate_dMRF(strajs, lag = 1, stride = 1, Encoder = None,
    logistic_regression_kwargs = None):
    """
    Estimate dMRF using logistic (binary sub-systems) or softmax (multinomial sub-systems) regression.

    Arguments:
    --------------------
    strajs (list of ndarrays): state of each subsystem as a function of time.
    lag (int=1): lag-time used in auto-regression
    stride (int=1): data stride prior to model estimation. lag has to be divisible by this quantity.
    Encoder (sklearn compatible categorical pre-processor): Encoder for spin-states;
        a fresh OneHotEncoder(sparse = False) is created by default.
    logistic_regression_kwargs (dict): dictionary of keyword arguments forwarded to
        sklearn LogisticRegression; defaults to
        {'fit_intercept': False, 'penalty': 'l1', 'C': 1., 'tol': 1e-4, 'solver': 'saga'}.
        The multi_class kwarg is forced to 'ovr' for binary cases and 'multinomial' for multinomial cases.

    returns:
        dMRF instance -- estimated dMRF.
    """
    if stride > lag:
        raise ValueError("Stride exceeds lag. Lag has to be larger or equal to stride.")
    if lag % stride != 0:
        # lag//stride below silently truncates otherwise, yielding a model at
        # the wrong effective lag-time
        raise ValueError("Lag has to be divisible by stride.")

    # build fresh defaults on every call: mutable default arguments would leak
    # state between calls (the encoder's fit and the forced multi_class flag),
    # and mutating a caller-supplied dict would be a surprising side effect.
    # NOTE(review): 'sparse' was renamed 'sparse_output' in newer sklearn —
    # confirm against the pinned sklearn version.
    if Encoder is None:
        Encoder = _OneHotEncoder(sparse = False)
    if logistic_regression_kwargs is None:
        logistic_regression_kwargs = {'fit_intercept': False,
            'penalty': 'l1', 'C': 1., 'tol': 1e-4, 'solver': 'saga'}
    else:
        logistic_regression_kwargs = dict(logistic_regression_kwargs)

    nlag = lag // stride
    strided_strajs = [t[::stride] for t in strajs]
    # pairs of configurations nlag strided steps apart
    P0 = _np.vstack([t[:-nlag] for t in strided_strajs])
    Pt = _np.vstack([t[nlag:] for t in strided_strajs])
    nsubsys = P0.shape[1]

    # find active sub-systems: those that visit more than one state both at
    # time t and at time t+lag (constant columns carry no information)
    active_subsystems_0 = _np.where([len(_np.unique(P0[:, i])) > 1 for i in range(nsubsys)])[0]
    active_subsystems_t = _np.where([len(_np.unique(Pt[:, i])) > 1 for i in range(nsubsys)])[0]
    active_subsystems = list(set(active_subsystems_0).intersection(active_subsystems_t))

    # remove constant sub-systems
    P0 = P0[:, active_subsystems]
    Pt = Pt[:, active_subsystems]

    P0 = Encoder.fit_transform(P0)

    lrs = []
    for i in range(Pt.shape[1]):
        # binary sub-systems use one-versus-rest estimation; more than two
        # states switch to multinomial (softmax) regression
        if len(_np.unique(Pt[:, i])) > 2:
            logistic_regression_kwargs['multi_class'] = 'multinomial'
        else:
            logistic_regression_kwargs['multi_class'] = 'ovr'
        logr = _LogisticRegression(**logistic_regression_kwargs).fit(P0, Pt[:, i])
        lrs.append(logr)

    return dMRF(lrs, active_subsystems, lag = lag, enc = Encoder, estimated = True)
--------------------------------------------------------------------------------
/graphtime/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/markovmodel/graphtime/6a7a4980807f34b4aaeff932060db9b58c0f2dca/graphtime/test/__init__.py
--------------------------------------------------------------------------------
/graphtime/test/dmrf_test.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | import unittest
3 |
4 | import numpy as np
5 | import warnings
6 |
7 | from graphtime import markov_random_fields
8 | from graphtime import ising_utils
9 | from graphtime import utils as _ut
10 |
11 | from sklearn.preprocessing import OneHotEncoder as OneHotEncoder
12 | from sklearn.preprocessing import LabelBinarizer
13 |
class TestdMRFSimple(unittest.TestCase):
    """Estimation and self-consistency tests for dMRF on small, hand-crafted
    sub-system trajectories (binary, multinomial and mixed)."""

    def setUp(self):
        """Store state of the rng"""
        self.state = np.random.mtrand.get_state()

        """Reseed the rng to enforce 'deterministic' behavior"""
        np.random.mtrand.seed(0xDEADBEEF)
        self.lag = 1
        self.stride = 1
        # two binary sub-systems (states {0, 1})
        self.straj_binary = [np.array([[0,0],[0,1],[1,1],[1,1],[1,0],[1,0],[1,1],[0,1],[0,0],[0,0]])]
        # two three-state sub-systems (states {0, 1, 2})
        self.straj_multinomial = [np.array([[0,0],[0,1],[1,2],[1,2],[1,0],[2,0],[1,1],[0,1],[2,0],[0,2]])]
        # one binary ({0, 1}) and one three-state ({0, 1, 2}) sub-system
        self.straj_bin_mult = [np.array([[0,0], [0,2], [1,1], [1,2], [1,2], [1,2], [1,1], [0,2], [0,1], [1,1], [0,1], [0,0], [0,2]])]

    def tearDown(self):
        """Revert the state of the rng"""
        np.random.mtrand.set_state(self.state)

    def test_dmrf_binary(self):
        """Self-consistency, estimation and convenience functions (binary case)."""
        dmrf_bin = markov_random_fields.estimate_dMRF(self.straj_binary,
                                                      lag = self.lag,
                                                      stride = self.stride,
                                                      Encoder = OneHotEncoder(sparse = False))
        self.assertEqual(self.lag, dmrf_bin.lag)
        self.assertTrue(dmrf_bin.estimated_)
        self.assertEqual(len(dmrf_bin.active_subsystems_), 2)
        # 2 binary sub-systems -> 2*2 = 4 joint states
        tmat = dmrf_bin.generate_transition_matrix()
        self.assertEqual(tmat.shape, (4, 4))

    def test_dmrf_multinomial(self):
        """Self-consistency, estimation and convenience functions (multinomial case)."""
        dmrf_multinom = markov_random_fields.estimate_dMRF(self.straj_multinomial,
                                                           lag = self.lag,
                                                           stride = self.stride,
                                                           Encoder = OneHotEncoder(sparse = False))
        self.assertEqual(self.lag, dmrf_multinom.lag)
        self.assertTrue(dmrf_multinom.estimated_)
        self.assertEqual(len(dmrf_multinom.active_subsystems_), 2)
        # 2 three-state sub-systems -> 3*3 = 9 joint states
        tmat = dmrf_multinom.generate_transition_matrix()
        self.assertEqual(tmat.shape, (9, 9))

    def test_dmrf_multinomial_binary(self):
        """Self-consistency, estimation and convenience functions, multiple trajectories."""
        dmrf_multinom_bin = markov_random_fields.estimate_dMRF(self.straj_multinomial+self.straj_binary,
                                                               lag = self.lag,
                                                               stride = self.stride,
                                                               Encoder = OneHotEncoder(sparse = False))
        self.assertEqual(self.lag, dmrf_multinom_bin.lag)
        self.assertTrue(dmrf_multinom_bin.estimated_)
        self.assertEqual(len(dmrf_multinom_bin.active_subsystems_), 2)
        # pooled data covers states {0, 1, 2} for both sub-systems -> 9 joint states
        tmat = dmrf_multinom_bin.generate_transition_matrix()
        self.assertEqual(tmat.shape, (9, 9))

    def test_dmrf_one_multinomial_one_binary(self):
        """Self-consistency, estimation and convenience functions, single trajectory."""
        dmrf_mbin = markov_random_fields.estimate_dMRF(self.straj_bin_mult,
                                                       lag = self.lag,
                                                       stride = self.stride,
                                                       Encoder = OneHotEncoder(sparse = False))
        self.assertEqual(self.lag, dmrf_mbin.lag)
        self.assertTrue(dmrf_mbin.estimated_)
        self.assertEqual(len(dmrf_mbin.active_subsystems_), 2)
        # one binary and one three-state sub-system -> 2*3 = 6 joint states
        tmat = dmrf_mbin.generate_transition_matrix()
        self.assertEqual(tmat.shape, (6, 6))
78 |
class TestdMRFIsing(unittest.TestCase):
    """Estimation tests against an analytically tractable reference: three
    uncoupled (gamma=0) spins of the Glauber/Ising master-equation model."""

    def setUp(self):
        """Store state of the rng"""
        self.state = np.random.mtrand.get_state()

        """Reseed the rng to enforce 'deterministic' behavior"""
        np.random.mtrand.seed(0xDEADBEEF)
        self.lag = 1
        self.stride = 1
        self.nspins = 3
        self.alpha = 0.10
        # gamma=0 -> zero spin-spin coupling, each spin flips independently
        self.IsingTmat = ising_utils.Ising_tmatrix(self.nspins, alpha = self.alpha, gamma = 0)
        self.ising_states = ising_utils.all_Ising_states(self.nspins)
        # long global-state trajectory, then mapped back to spin configurations
        self.Isingdata_state = _ut.simulate_MSM(self.IsingTmat, 500000, s0 = 0)
        self.Isingdata = [np.array(self.ising_states[self.Isingdata_state])]


    def tearDown(self):
        """Revert the state of the rng"""
        np.random.mtrand.set_state(self.state)

    def test_dmrf_Ising_one_spin(self):
        """Estimation with 3 binary uncoupled spins; tests a custom encoder."""
        IsingDMRF = markov_random_fields.estimate_dMRF(self.Isingdata,
                                                       lag = self.lag,
                                                       stride = self.stride,
                                                       Encoder = LabelBinarizer(neg_label = -1, pos_label = 1))
        # self-couplings J_ii sit on the diagonal of the stacked coefficient matrix
        self_couplings = np.diag(np.vstack([lr.coef_ for lr in IsingDMRF.lrs]))

        # compare estimated sub-system flip rates to analytical values for zero-coupling Ising model
        self.assertTrue(np.allclose(np.ones(self.nspins)*self.alpha*self.lag, -np.log(np.tanh(self_couplings/2.)), rtol = 1e-3, atol = 1e-3))

        # check whether estimated fields are almost 0
        self.assertTrue(np.allclose(np.zeros(self.nspins), np.concatenate([lr.intercept_ for lr in IsingDMRF.lrs])))

    def test_dmrf_Ising_simulate_test(self):
        """Estimation with 3 binary uncoupled spins; test simulation with and
        without an initial condition specified."""
        IsingDMRF2 = markov_random_fields.estimate_dMRF(self.Isingdata,
                                                        lag = self.lag,
                                                        stride = self.stride,
                                                        Encoder = LabelBinarizer(neg_label = -1, pos_label = 1))

        # simulation with specified initial condition
        synth_traj = IsingDMRF2.simulate(nsteps = 2, start = np.ones(self.nspins))

        # simulation without specified initial condition
        synth_traj2 = IsingDMRF2.simulate(nsteps = 2)

        # NOTE(review): the exact values below depend on the fixed RNG seed
        # and on the order in which random numbers are consumed — fragile to
        # any implementation change in simulate()
        self.assertTrue(np.allclose(synth_traj, np.array([[ 1,  1,  1],
                                                          [ 1,  1,  1]])))

        self.assertTrue(np.allclose(synth_traj2, np.array([[ 1, -1,  1],
                                                           [ 1, -1,  1]])))

    def test_dmrf_Ising_strided_estimate(self):
        """Estimation with 3 binary uncoupled spins; compares strided and
        unstrided self-coupling estimates."""

        # stride and lag scaled together, so both models see the same effective lag ratio
        fact = 2

        IsingDMRF_strided = markov_random_fields.estimate_dMRF(self.Isingdata,
                                                               lag = self.lag*fact,
                                                               stride = self.stride*fact,
                                                               Encoder = LabelBinarizer(neg_label = -1, pos_label = 1))

        IsingDMRF_unstrided = markov_random_fields.estimate_dMRF(self.Isingdata,
                                                                 lag = self.lag,
                                                                 stride = self.stride,
                                                                 Encoder = LabelBinarizer(neg_label = -1, pos_label = 1))

        sc_strided = np.diag(np.vstack([lr.coef_ for lr in IsingDMRF_strided.lrs]))
        sc_unstrided = np.diag(np.vstack([lr.coef_ for lr in IsingDMRF_unstrided.lrs]))

        # compare strided and unstrided rate estimates
        self.assertTrue(np.allclose(sc_unstrided/2., np.arctanh(np.exp(np.log(np.tanh(sc_strided/2.))/fact)), rtol = 1e-3, atol = 1e-3 ))
152 |
--------------------------------------------------------------------------------
/graphtime/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as _np
2 |
def simulate_MSM(T, N_steps, s0 = 0):
    """
    Fast trajectory generator for Markov state models (from Fabian Paul).

    T : (n, n) row-stochastic transition matrix
    N_steps : number of steps to generate
    s0 : initial state index (default 0)

    Returns an ndarray of N_steps state indices (dtype uint16).
    """
    # precompute per-row CDFs once; each step is then a single binary search
    cdf = T.cumsum(axis=1)
    traj = _np.zeros(N_steps, dtype = _np.uint16)
    state = s0
    for step in range(N_steps):
        traj[step] = state
        # invert the CDF of the current row with a uniform draw
        state = _np.searchsorted(cdf[state, :], _np.random.rand())
    return traj
17 |
--------------------------------------------------------------------------------
/notebooks/DGM_BBA.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## Dynamic graphical model of fast folder BBA\n",
8 | "Simon Olsson 2018\n",
9 | "\n",
10 | "This notebook makes use previously published simulation data ([Lindroff-Larsen et al. 2011](http://science.sciencemag.org/content/334/6055/517)). We do not have the rights to distribute this data but they can be requested directly from DE Shaw Research.\n",
11 | "\n",
12 | "Note that the random seed is fixed. The notebook makes extensive use of nested point-estimates (i.e. statistical estimates subject to fluctuations which condition other such values), and can therefore be numerically unstable for some random seeds.\n"
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": null,
18 | "metadata": {},
19 | "outputs": [],
20 | "source": [
21 | "import mdtraj as md\n",
22 | "%matplotlib inline\n",
23 | "import matplotlib.pyplot as plt\n",
24 | "\n",
25 | "import matplotlib as mpl\n",
26 | "import numpy as np\n",
27 | "import pyemma as pe\n",
28 | "from onedeeIsing import Ising_tmatrix\n",
29 | "from scipy.spatial import distance_matrix\n",
30 | "from sklearn.preprocessing import LabelBinarizer\n",
31 | "from graphtime.markov_random_fields import estimate_dMRF\n",
32 | "from graphtime.utils import simulate_MSM as generate\n",
33 | "\n",
34 | "double_column_width = 6.968\n",
35 | "single_column_width= 3.307\n",
36 | "font = {'sans-serif': \"Arial\",\n",
37 | " 'family': \"sans-serif\",\n",
38 | " 'size' : 8}\n",
39 | "\n",
40 | "mpl.rc('font', **font)\n",
41 | "mpl.rcParams['mathtext.fontset'] = 'custom'\n",
42 | "np.random.seed(313808)"
43 | ]
44 | },
45 | {
46 | "cell_type": "code",
47 | "execution_count": null,
48 | "metadata": {},
49 | "outputs": [],
50 | "source": [
51 | "# Note: \n",
52 | "# we do not have the rights to distribute these data. \n",
53 | "# Please inquiry about these directly from DE Shaw Research.\n",
54 | "# Adjust paths as necessary\n",
55 | "feat = pe.coordinates.featurizer('../DESRES-Trajectory_1FME-0-protein/1FME-0-protein.pdb')\n",
56 | "feat.add_backbone_torsions(deg=True)\n",
57 | "source = pe.coordinates.source([['../DESRES-Trajectory_1FME-0-protein/1FME-0-protein-{:03d}.dcd'.format(i) for i in range(112)],\n",
58 | " ['../DESRES-Trajectory_1FME-1-protein/1FME-1-protein-{:03d}.dcd'.format(i) for i in range(52)]], features=feat)\n",
59 | "dihe = source.get_output()"
60 | ]
61 | },
62 | {
63 | "cell_type": "code",
64 | "execution_count": null,
65 | "metadata": {},
66 | "outputs": [],
67 | "source": [
68 | "def discr_feats(ftrajs, feat_describe):\n",
69 | " discr_trajs = []\n",
70 | " for ft in ftrajs:\n",
71 | " dftraj = np.zeros(ft.shape, dtype = int)\n",
72 | " for i, fstr in enumerate(feat_describe):\n",
73 | " ls = fstr.split()\n",
74 | " if fstr[:3] == \"PHI\": # split into two states\n",
75 | " dftraj[:, i] = (ft[:, i]<0).astype(int)\n",
76 | " elif fstr[:3] == \"PSI\": # split in to two states if not n-terminal\n",
77 | " if int(ls[-1]) == 1:\n",
78 | " dftraj[:, i] = -1\n",
79 | " else:\n",
80 | " dftraj[:, i] = (ft[:, i]<80).astype(int)\n",
81 | " elif fstr[:3] == \"CHI\": #split into 3 rotamers\n",
82 | " tv = (ft[:, i]+180+60)%360\n",
83 | " dftraj[:, i] = (tv>125).astype(int) + (tv>250).astype(int) \n",
84 | " non_n_psi = np.where(dftraj[0, :]>-1)[0] \n",
85 | " discr_trajs.append(dftraj.copy()[:,non_n_psi])\n",
86 | " return discr_trajs, [f for i,f in enumerate(feat_describe) if i in non_n_psi]"
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": null,
92 | "metadata": {},
93 | "outputs": [],
94 | "source": [
95 | "dfeats, nlbls = discr_feats(dihe, feat.describe())"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "metadata": {},
102 | "outputs": [],
103 | "source": [
104 | "# remap features {0, 1} -> {-1, 1}\n",
105 | "dfeats_fixed = []\n",
106 | "for df in dfeats:\n",
107 | " _t = df.copy()\n",
108 | " _t[np.where(_t==0)] = -1 \n",
109 | " dfeats_fixed.append(_t)"
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": null,
115 | "metadata": {},
116 | "outputs": [],
117 | "source": [
118 | "C = 1000./len(dfeats_fixed[0])**0.5\n",
119 | "logistic_regression_kwargs={'fit_intercept': True, 'penalty': 'l1', 'C': C, \n",
120 | " 'tol': 0.0001, 'solver': 'saga'}\n",
121 | "dmrf_all_data = estimate_dMRF(dfeats_fixed, \n",
122 | " lag=300, stride=10, \n",
123 | " logistic_regression_kwargs=logistic_regression_kwargs,\n",
124 | " Encoder = LabelBinarizer(neg_label = -1, pos_label = 1))"
125 | ]
126 | },
127 | {
128 | "cell_type": "markdown",
129 | "metadata": {},
130 | "source": [
131 | "### Build MSM\n",
132 | "TICA dimensionality reduction, clustering and lag-time optimization"
133 | ]
134 | },
135 | {
136 | "cell_type": "code",
137 | "execution_count": null,
138 | "metadata": {},
139 | "outputs": [],
140 | "source": [
141 | "import pyemma as pe"
142 | ]
143 | },
144 | {
145 | "cell_type": "code",
146 | "execution_count": null,
147 | "metadata": {},
148 | "outputs": [],
149 | "source": [
150 | "tica_objs = [pe.coordinates.tica(dfeats_fixed, lag = lag) for lag in [5,10,50,100,200,300,500,900,1500,2000,2500,3000]]"
151 | ]
152 | },
153 | {
154 | "cell_type": "code",
155 | "execution_count": null,
156 | "metadata": {},
157 | "outputs": [],
158 | "source": [
159 | "fig, ax = plt.subplots(ncols = 2, figsize=(8, 3))\n",
160 | "ax[0].semilogy([to.lag for to in tica_objs], [to.timescales[:10] for to in tica_objs])\n",
161 | "ax[0].set_xlabel('lag time / steps')\n",
162 | "ax[0].set_ylabel('implied timescale / steps')\n",
163 | "ax[1].plot([to.lag for to in tica_objs], [to.ndim for to in tica_objs])\n",
164 | "ax[1].set_xlabel('lag time / steps')\n",
165 | "ax[1].set_ylabel('number of dimensions for 95% kinetic variance')\n",
166 | "fig.tight_layout()\n"
167 | ]
168 | },
169 | {
170 | "cell_type": "code",
171 | "execution_count": null,
172 | "metadata": {},
173 | "outputs": [],
174 | "source": [
175 | "_ = np.argmin([to.ndim for to in tica_objs])"
176 | ]
177 | },
178 | {
179 | "cell_type": "code",
180 | "execution_count": null,
181 | "metadata": {},
182 | "outputs": [],
183 | "source": [
184 | "Y = tica_objs[_].get_output()"
185 | ]
186 | },
187 | {
188 | "cell_type": "code",
189 | "execution_count": null,
190 | "metadata": {},
191 | "outputs": [],
192 | "source": [
193 | "Ys = np.vstack(Y)"
194 | ]
195 | },
196 | {
197 | "cell_type": "code",
198 | "execution_count": null,
199 | "metadata": {},
200 | "outputs": [],
201 | "source": [
202 | "a=plt.hist2d(Ys[:, 0], Ys[:, 1], norm=mpl.colors.LogNorm(), bins=256)#, interpolation='gaussian')"
203 | ]
204 | },
205 | {
206 | "cell_type": "code",
207 | "execution_count": null,
208 | "metadata": {},
209 | "outputs": [],
210 | "source": [
211 | "cluster_obj = pe.coordinates.cluster_kmeans(data = Y, k=384, stride=2)"
212 | ]
213 | },
214 | {
215 | "cell_type": "code",
216 | "execution_count": null,
217 | "metadata": {},
218 | "outputs": [],
219 | "source": [
220 | "its = pe.msm.its([dt[::10] for i,dt in enumerate(cluster_obj.dtrajs) ], lags = [ 5, 10, 20, 30, 50, 70, 90, 100, 120,150,200,250], nits=6, errors='bayes')"
221 | ]
222 | },
223 | {
224 | "cell_type": "code",
225 | "execution_count": null,
226 | "metadata": {},
227 | "outputs": [],
228 | "source": [
229 | "pe.plots.plot_implied_timescales(its, ylog=True)"
230 | ]
231 | },
232 | {
233 | "cell_type": "code",
234 | "execution_count": null,
235 | "metadata": {},
236 | "outputs": [],
237 | "source": [
238 | "msm = pe.msm.bayesian_markov_model([dt[::1] for i,dt in enumerate(cluster_obj.dtrajs) ], lag = 1500)"
239 | ]
240 | },
241 | {
242 | "cell_type": "code",
243 | "execution_count": null,
244 | "metadata": {},
245 | "outputs": [],
246 | "source": [
247 | "ckt = msm.cktest(4)"
248 | ]
249 | },
250 | {
251 | "cell_type": "code",
252 | "execution_count": null,
253 | "metadata": {},
254 | "outputs": [],
255 | "source": [
256 | "pe.plots.plot_cktest(ckt, diag=True)"
257 | ]
258 | },
259 | {
260 | "cell_type": "code",
261 | "execution_count": null,
262 | "metadata": {},
263 | "outputs": [],
264 | "source": [
265 | "HMM = msm.coarse_grain(4)"
266 | ]
267 | },
268 | {
269 | "cell_type": "code",
270 | "execution_count": null,
271 | "metadata": {},
272 | "outputs": [],
273 | "source": [
274 | "\n",
275 | "inmeta = [[np.where(HMM.metastable_assignments[dt.reshape(-1)]==i)[0] for i in range(HMM.nstates)] for dt in msm.discrete_trajectories_active]\n",
276 | "\n",
277 | "Meta_filtered = [[np.where(np.isin(HMM.metastable_assignments[dt.reshape(-1)],[i], invert=True))[0] for i in range(HMM.nstates)] for dt in msm.discrete_trajectories_active]\n",
278 | "\n",
279 | "not_in_meta_data = [[[df[t] for t in np.split(mf[i], np.where(np.diff(im[i])>1)[0]) if len(t)>300] for i in range(HMM.nstates)] for df,mf,im in zip(dfeats_fixed, Meta_filtered, inmeta) ]\n",
280 | "\n",
281 | "not_in_meta_stacked = [a+b for a,b in zip(*not_in_meta_data)]"
282 | ]
283 | },
284 | {
285 | "cell_type": "code",
286 | "execution_count": null,
287 | "metadata": {},
288 | "outputs": [],
289 | "source": [
290 | "regl_=[2000/np.vstack([2*t-1 for t in not_in_meta_stacked[i]]).shape[0]**0.5 for i in range(4)]\n",
291 | "regl_"
292 | ]
293 | },
294 | {
295 | "cell_type": "code",
296 | "execution_count": null,
297 | "metadata": {},
298 | "outputs": [],
299 | "source": [
300 | "dMRFs = []\n",
301 | "\n",
302 | "for M in range(4):\n",
303 | " logistic_regression_kwargs={'fit_intercept': True, 'penalty': 'l1', 'C': regl_[M], \n",
304 | " 'tol': 0.0001, 'solver': 'saga'}\n",
305 | " dMRFs.append(estimate_dMRF(not_in_meta_stacked[M], \n",
306 | " lag=400, stride=10, Encoder = LabelBinarizer(neg_label=-1, pos_label=1),\n",
307 | " logistic_regression_kwargs=logistic_regression_kwargs\n",
308 | " ))\n"
309 | ]
310 | },
311 | {
312 | "cell_type": "code",
313 | "execution_count": null,
314 | "metadata": {},
315 | "outputs": [],
316 | "source": [
317 | "[len(d.get_active_subsystems()) for d in dMRFs]"
318 | ]
319 | },
320 | {
321 | "cell_type": "markdown",
322 | "metadata": {},
323 | "source": [
324 | "Visual comparison of $J(\\tau)$ for different sub-sampled data-sets. Correlation to estimate on full data-set."
325 | ]
326 | },
327 | {
328 | "cell_type": "code",
329 | "execution_count": null,
330 | "metadata": {},
331 | "outputs": [],
332 | "source": [
333 | "fig,ax=plt.subplots(ncols=4, figsize=(12,10))\n",
334 | "for M,d in enumerate(dMRFs):\n",
335 | " ax[M].imshow(np.hstack([d.get_subsystem_couplings(), d.get_subsystem_biases().reshape(-1,1)] ))\n",
336 | "fig.tight_layout()\n",
337 | "#ax[1].imshow(np.hstack([np.vstack(villin_nf_coupl), villin_nf_bias] ))"
338 | ]
339 | },
340 | {
341 | "cell_type": "code",
342 | "execution_count": null,
343 | "metadata": {},
344 | "outputs": [],
345 | "source": [
346 | "fig,ax = plt.subplots(ncols=4, figsize=(10,2))\n",
347 | "[ax[M].scatter(dMRFs[M].get_subsystem_couplings().ravel(), dmrf_all_data.get_subsystem_couplings().ravel()) for M in range(4)]\n",
348 | "[ax[M].set_title(\"MS {}. Ndp {}\".format(M+1, len(np.vstack(not_in_meta_stacked[M])) )) for M in range(4)]\n",
349 | "plt.tight_layout()"
350 | ]
351 | },
352 | {
353 | "cell_type": "markdown",
354 | "metadata": {},
355 | "source": [
356 | "Generate synthetic trajectories"
357 | ]
358 | },
359 | {
360 | "cell_type": "code",
361 | "execution_count": null,
362 | "metadata": {},
363 | "outputs": [],
364 | "source": [
365 | "synthts = [d.simulate(nsteps=100000, start= (np.array(not_in_meta_stacked[M][0][0])) ) for M,d in enumerate(dMRFs)] "
366 | ]
367 | },
368 | {
369 | "cell_type": "code",
370 | "execution_count": null,
371 | "metadata": {},
372 | "outputs": [],
373 | "source": [
374 | "syntht_all_nb = dmrf_all_data.simulate(nsteps=100000, start=dfeats_fixed[0][0])"
375 | ]
376 | },
377 | {
378 | "cell_type": "code",
379 | "execution_count": null,
380 | "metadata": {},
381 | "outputs": [],
382 | "source": [
383 | "syntht_all_nb[syntht_all_nb==-3]=-1"
384 | ]
385 | },
386 | {
387 | "cell_type": "code",
388 | "execution_count": null,
389 | "metadata": {},
390 | "outputs": [],
391 | "source": [
392 | "Y_synth_all_data = tica_objs[_].transform(syntht_all_nb)"
393 | ]
394 | },
395 | {
396 | "cell_type": "code",
397 | "execution_count": null,
398 | "metadata": {},
399 | "outputs": [],
400 | "source": [
401 | "\n",
402 | "Y_synths = []\n",
403 | "\n",
404 | "for s in synthts:\n",
405 | " Y_synths.append(np.vstack(tica_objs[9].transform(s)))\n"
406 | ]
407 | },
408 | {
409 | "cell_type": "code",
410 | "execution_count": null,
411 | "metadata": {},
412 | "outputs": [],
413 | "source": [
414 | "fig, axs = plt.subplots(nrows=2, ncols = 3,figsize=(single_column_width,.75*single_column_width), sharex=True, sharey=True)\n",
415 | "ax = axs.ravel()\n",
416 | "ax[0].hist2d(Ys[:, 0], Ys[:, 1], bins=128,norm=mpl.colors.LogNorm(), label=\"all data\")\n",
417 | "ax[0].set_title('All MD data')\n",
418 | "ax[0].set_ylabel('TIC2')\n",
419 | "\n",
420 | "#ax[-1].hist2d(Y_synth_all_data[:, 0], Y_synth_all_data[:, 1], bins=128,norm=mpl.colors.LogNorm(), label=\"all data\")\n",
421 | "#ax[-1].set_title('All MD data')\n",
422 | "ax[-1].axis('off')#set_ylabel('TIC2')\n",
423 | "\n",
424 | "\n",
425 | "for I,ys in enumerate(Y_synths):\n",
426 | " ax[I+1].hist2d(ys[:,0],ys[:,1], bins=128, norm=mpl.colors.LogNorm(), label=\"missing meta {}\".format(I))\n",
427 | " #ax[I+1].scatter(cluster_obj.cluster_centers_[HMM.metastable_assignments==I, 0], cluster_obj.cluster_centers_[HMM.metastable_assignments==I, 1],marker='^', alpha=0.1, color='r')\n",
428 | " ax[I+1].set_title('Without {}'.format(I+1))\n",
429 | " if I==2:\n",
430 | " ax[I+1].set_ylabel('TIC2')\n",
431 | " if I>1:\n",
432 | " ax[I+1].set_xlabel('TIC1')\n",
433 | "fig.tight_layout()\n",
434 | "plt.savefig('_tica_leave_one_out_bba.pdf', dpi=300)"
435 | ]
436 | },
437 | {
438 | "cell_type": "code",
439 | "execution_count": null,
440 | "metadata": {},
441 | "outputs": [],
442 | "source": [
443 | "#pe.coordinates.save_trajs(source, HMM.sample_by_observation_probabilities(10), \"BBA_HMM_\", fmt=\"pdb\")"
444 | ]
445 | },
446 | {
447 | "cell_type": "code",
448 | "execution_count": null,
449 | "metadata": {},
450 | "outputs": [],
451 | "source": [
452 | "dtraj_Y_synth_all = cluster_obj.transform(Y_synth_all_data)\n"
453 | ]
454 | },
455 | {
456 | "cell_type": "code",
457 | "execution_count": null,
458 | "metadata": {},
459 | "outputs": [],
460 | "source": [
461 | "mrfmsm = pe.msm.estimate_markov_model(dtrajs=dtraj_Y_synth_all.reshape(-1), lag=1)"
462 | ]
463 | },
464 | {
465 | "cell_type": "code",
466 | "execution_count": null,
467 | "metadata": {},
468 | "outputs": [],
469 | "source": [
470 | "dtraj_synths = cluster_obj.transform(Y_synths)\n",
471 | "dtraj_synths_all = cluster_obj.transform(Y_synth_all_data)"
472 | ]
473 | },
474 | {
475 | "cell_type": "code",
476 | "execution_count": null,
477 | "metadata": {},
478 | "outputs": [],
479 | "source": [
480 | "from mpl_toolkits.axes_grid1 import make_axes_locatable\n",
481 | "def _colorbar(mappable):\n",
482 | " ax = mappable.axes\n",
483 | " fig = ax.figure\n",
484 | " divider = make_axes_locatable(ax)\n",
485 | " cax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\n",
486 | " return fig.colorbar(mappable, cax=cax)"
487 | ]
488 | },
489 | {
490 | "cell_type": "code",
491 | "execution_count": null,
492 | "metadata": {},
493 | "outputs": [],
494 | "source": [
495 | "for i in range(5):\n",
496 | " print(\"set_color color{}=\".format(i) + str(list(mpl.colors.to_rgb(cmap(i)))))"
497 | ]
498 | },
499 | {
500 | "cell_type": "code",
501 | "execution_count": null,
502 | "metadata": {},
503 | "outputs": [],
504 | "source": [
505 | "yy_=[(syntht[:,np.argsort(syntht.var(axis=0))[:]][np.where(HMM.metastable_assignments[ye[::1]]==3)[0],:].mean(axis=0)+1)/2 for syntht,ye in zip(synthts, dtraj_synths)]\n",
506 | "xx_=dfeats_fixed[0][np.where(HMM.metastable_assignments[cluster_obj.dtrajs[0][:]]==3)[0],:].mean(axis=0)[:]"
507 | ]
508 | },
509 | {
510 | "cell_type": "code",
511 | "execution_count": null,
512 | "metadata": {},
513 | "outputs": [],
514 | "source": [
515 | "import matplotlib.gridspec as gridspec\n",
516 | "from scipy.stats import spearmanr\n",
517 | "from itertools import product\n",
518 | "\n",
519 | "fig = plt.figure(figsize=(double_column_width/1.3,double_column_width*1.1))\n",
520 | "\n",
521 | "gs = gridspec.GridSpec(143, 100, left=0.097,bottom=0.02,top=0.98,right=0.999)#, wspace=None, hspace=None)\n",
522 | "ax = plt.subplot(gs[5:23, 2:22])\n",
523 | "ax2 = plt.subplot(gs[5:23, 34:55])\n",
524 | "ax3 = plt.subplot(gs[5:23, 70:78])\n",
525 | "ls = []\n",
526 | "for i in range(HMM.nstates): \n",
527 | " ax2.scatter(np.vstack(synthts[i]).var(axis=0), (np.vstack([df[::300] for df in dfeats_fixed])).var(axis=0), s=5, label=\"Without state {:d}\".format(i+1))\n",
528 | " l = ax.scatter(np.vstack(synthts[i]).mean(axis=0), (np.vstack([df[::300] for df in dfeats_fixed])).mean(axis=0), s=5, label=\"Without state {:d}\".format(i+1))\n",
529 | " ls.append(l)\n",
530 | "ax.set_xlabel('DGM Mean feature')\n",
531 | "ax2.set_xlabel('DGM Variance of feature ')\n",
532 | "\n",
533 | "ax.set_ylabel('MD Mean feature')\n",
534 | "ax2.set_ylabel('MD Variance of feature')\n",
535 | "\n",
536 | "ax3.axis('off')\n",
537 | "ax3.legend(ls, [\"Without state {:d}\".format(i+1) for i in range(5)], fontsize=9, loc=(-0.20,0.0))\n",
538 | "#plt.tight_layout()\n",
539 | "\n",
540 | "#fig, ax = plt.subplots(nrows=5, ncols=5, sharey=True, sharex=True, figsize=(10,10))\n",
541 | "ax_ = np.array([[plt.subplot(gs[40+23*(j)+2*j:40+23*(j+1)+j*2, 23*(i)+2*i:23*(i+1)+2*i]) for i in range(HMM.nstates)] for j in range(HMM.nstates)])\n",
542 | "\n",
543 | "axf = ax_.flatten()\n",
544 | "\n",
545 | "ax.text(-.35, 1.30, \"A\", transform=ax.transAxes,\n",
546 | " fontsize=12, va='top')\n",
547 | "\n",
548 | "ax2.text(-.35, 1.30, \"B\", transform=ax2.transAxes,\n",
549 | " fontsize=12, va='top')\n",
550 | "i=0\n",
551 | "axf[i].text(-.35, 1.30, \"C\", transform=axf[i].transAxes,\n",
552 | " fontsize=12, va='top')\n",
553 | "for j in range(HMM.nstates):\n",
554 | " for syntht,ye in zip(synthts, dtraj_synths):\n",
555 | " yy_=syntht[np.where(HMM.metastable_assignments[ye[::1]]==j)[0],:].mean(axis=0)\n",
556 | " xx_=np.vstack(dfeats_fixed)[np.where(HMM.metastable_assignments[np.concatenate(cluster_obj.dtrajs)[:]]==j)[0],:].mean(axis=0)\n",
557 | " axf[i].scatter(xx_, yy_,s=5, color=\"C{:d}\".format(j))\n",
558 | " \n",
559 | " axf[i].text(0.05, 0.95, r\"$\\rho={:0.2f}$\".format(np.corrcoef(xx_,yy_)[0,1]), transform=axf[i].transAxes,\n",
560 | " fontsize=9, va='top')\n",
561 | "\n",
562 | " axf[i].set_xlim((-1,1))\n",
563 | " axf[i].set_ylim((-1,1))\n",
564 | " \n",
565 | " #axf[i].text(xx_.min(), yy_.max()-0.1,)\n",
566 | " if i<4:\n",
567 | " axf[i].set_title('State {}'.format(i+1), fontsize = 10)\n",
568 | " \n",
569 | " if i%4==0: \n",
570 | " #axf[i].set_ylabel('Empirical mean feature')\n",
571 | " if i==10:\n",
572 | " axf[i].set_ylabel('MD Mean feature')\n",
573 | " else:\n",
574 | " axf[i].set_yticklabels([])\n",
575 | " axf[i].set_yticks([])\n",
576 | " \n",
577 | " if i>11:\n",
578 | " #axf[i].set_xlabel('MRF mean feature')\n",
579 | " if i==22:\n",
580 | " axf[i].set_xlabel('DGM Mean feature')\n",
581 | " else:\n",
582 | " axf[i].set_xticklabels([])\n",
583 | " axf[i].set_xticks([])\n",
584 | "\n",
585 | " \n",
586 | " if i%HMM.nstates==j:\n",
587 | " axf[i].set_facecolor((0.85,0.85,0.85))\n",
588 | " i=i+1\n",
589 | "fig.text(0,0.42 ,'MD Mean feature' , rotation=90)\n",
590 | "fig.text(0.47,0.01 ,'DGM Mean feature' , rotation=0)\n",
591 | "\n",
592 | "#axf[-1].axis('off')\n",
593 | "#gs.tight_layout(fig, pad=-0.5)\n",
594 | "plt.savefig('feature_scatter_BBA.pdf')"
595 | ]
596 | },
597 | {
598 | "cell_type": "code",
599 | "execution_count": null,
600 | "metadata": {},
601 | "outputs": [],
602 | "source": [
603 | "from scipy import spatial"
604 | ]
605 | },
606 | {
607 | "cell_type": "code",
608 | "execution_count": null,
609 | "metadata": {},
610 | "outputs": [],
611 | "source": [
612 | "from scipy import spatial\n",
613 | "def mrftraj_to_dtraj(mrftraj, ftrajs, transformer = lambda x:0.5*(x+1)):\n",
614 | " dtraj_out = []\n",
615 | " errors = []\n",
616 | " ftrajl = np.cumsum([0]+[len(d) for d in ftrajs])\n",
617 | " ftraj = np.vstack(ftrajs)\n",
618 | " for m in mrftraj:\n",
619 | " pair_contacts = spatial.distance.cdist(transformer(m).reshape(1,-1), ftraj, metric='hamming').reshape(-1)\n",
620 | " idx = np.argmin(pair_contacts)\n",
621 | " tidx = max((ftrajl3:\n",
786 | " ax.axis('off')\n",
787 | " if i==4:\n",
788 | " ax.legend([l, l2, l3], [\"dMRF\", \"HMM (full MD)\", \"State left out during estimation\"], fontsize=8, loc=(0.27,0.2))\n",
789 | " ax.set_yticks([])\n",
790 | " ax.set_xticks([])\n",
791 | " ax.axis('off')\n",
792 | " \n",
793 | " else:\n",
794 | " ising_msm = pe.msm.estimate_markov_model(HMM.metastable_assignments[dtraj_synths[i].reshape(-1)], lag=1)\n",
795 | " l = ax.bar(range(1,5), ising_msm.stationary_distribution,hatch=\"//\", fill=False, label=\"Ising\")\n",
796 | " np.savetxt('bba_hist_dmrf{:}.txt'.format(i), ising_msm.stationary_distribution)\n",
797 | " ls.append(l)\n",
798 | " l2 = ax.bar(range(1,5), HMM.stationary_distribution,fill=True, alpha=0.2, log=True, label=\"HMM (full MD)\")\n",
799 | " ls.append(l2)\n",
800 | " l3 = ax.scatter([i+1], 0.5, marker=\"*\", s=50, color='purple')\n",
801 | "\n",
802 | " if i>0:\n",
803 | " ax.set_xticks([1,2,3,4])\n",
804 | " if i>2:\n",
805 | " ax.set_xlabel(r'Meta-stable state / $x$')\n",
806 | "\n",
807 | " else:\n",
808 | " ax.set_xticks([])\n",
809 | "\n",
810 | " if i in [0, 3]:\n",
811 | " continue\n",
812 | " #ax.set_ylabel('State prob (log-scale)')\n",
813 | " else:\n",
814 | " ax.set_yticks([])\n",
815 | "np.savetxt('bba_hist_hmm.txt', HMM.stationary_distribution)\n",
816 | "\n",
817 | "fig.text(0.58,0.65, r\"Meta-stable state / $x$\", rotation=0)\n",
818 | "\n",
819 | " \n",
820 | "axs2 = [plt.subplot(gs[105:145, 35*i+20*i:35*(i+1)+20*i]) for i in range(2) ]\n",
821 | "\n",
822 | "bins = np.unique(np.concatenate(errs))\n",
823 | "for i, err in enumerate(errs):\n",
824 | " axs2[0].hist(err, bins=bins, label = \"Without state {:d}\".format(i+1), histtype='step', lw=1, normed=True, log=False)\n",
825 | "axs2[0].set_xlabel(r'$\\epsilon $')\n",
826 | "axs2[0].set_ylabel(r'$p(\\epsilon)$')\n",
827 | "\n",
828 | "for i in range(HMM.nstates):\n",
829 | " a_ = np.correlate(meanfree_synthtrajs[i][:2094],meanfree_synthtrajs[i][:2094], mode='full')[2094:]\n",
830 | " axs2[1].plot(np.arange(1,2094)*60./1000.,a_/a_[0], lw=1, label=\"Without state {:d}\".format(i+1) )\n",
831 | "#axs2[-1].axis('off')\n",
832 | "a_ = np.mean([np.correlate(mf,mf,mode='full')[l:][:min(mfl)-1] for mf,l in zip(meanfree_MD,mfl)], axis=0)\n",
833 | "axs2[1].plot(np.arange(1,1277)*60./1000.,a_/a_[0],lw=1, label=\"MD\", color='k')\n",
834 | "axs2[1].set_xlabel(r'$\\tau$ / $\\mu$s')\n",
835 | "\n",
836 | "axs2[1].set_ylabel(r'$C(\\tau)$ / Glu 17 $\\phi$ rotamer')\n",
837 | "axs2[1].semilogx()\n",
838 | "axs2[1].set_xlim((axs2[1].get_xlim()[0],100))\n",
839 | "ax3 = plt.subplot(gs[165:, :])\n",
840 | "ax3.legend([child for child in axs2[1].get_children() if isinstance(child, mpl.lines.Line2D)],\n",
841 | " [child.get_label() for child in axs2[1].get_children() if isinstance(child, mpl.lines.Line2D)]\n",
842 | " ,loc=(0.0,-0.4), ncol=2)#axs2[0].legend()\n",
843 | "ax3.axis('off')\n",
844 | "axs[0].text(-.35, 1.30, \"A\", transform=axs[0].transAxes,\n",
845 | " fontsize=12, va='top')\n",
846 | "axs2[0].text(-.35, 1.30, \"B\", transform=axs2[0].transAxes,\n",
847 | " fontsize=12, va='top')\n",
848 | "axs2[1].text(-.35, 1.30, \"C\", transform=axs2[1].transAxes,\n",
849 | " fontsize=12, va='top')\n",
850 | "#gs.tight_layout(fig,pad=-1.5)\n",
851 | "plt.savefig('statdist_err_acf_bba.pdf')"
852 | ]
853 | },
854 | {
855 | "cell_type": "markdown",
856 | "metadata": {},
857 | "source": [
858 | "Data exports for plotting"
859 | ]
860 | },
861 | {
862 | "cell_type": "code",
863 | "execution_count": null,
864 | "metadata": {},
865 | "outputs": [],
866 | "source": [
867 | "np.savetxt('bba_errs.txt', errs)\n",
868 | "\n",
869 | "np.savetxt('bba_synthtrajs.txt', meanfree_synthtrajs)\n",
870 | "\n",
871 | "np.savetxt('bba_syntht_all.txt', dtraj_synths_all.reshape(-1))\n",
872 | "\n",
873 | "HMM.save('bba_hmm.pyemma', overwrite=True)\n",
874 | "\n",
875 | "a_ = np.mean([np.correlate(mf,mf,mode='full')[l:][:min(mfl)-1] for mf,l in zip(meanfree_MD,mfl)], axis=0)\n",
876 | "\n",
877 | "np.savetxt('bba_md_acf.txt', a_)"
878 | ]
879 | },
880 | {
881 | "cell_type": "code",
882 | "execution_count": null,
883 | "metadata": {},
884 | "outputs": [],
885 | "source": [
886 | "\n",
887 | "tica_dmrfs = [pe.coordinates.tica(data=[(syntht_all_nb+1)/2], lag=lag) for lag in [1,2,3,4,5]]\n",
888 | "\n",
889 | "fig, ax = plt.subplots(ncols = 2, figsize=(8, 3))\n",
890 | "ax[0].semilogy([to.lag for to in tica_dmrfs], [to.timescales[:10] for to in tica_dmrfs])\n",
891 | "ax[0].set_xlabel('lag time / steps')\n",
892 | "ax[0].set_ylabel('implied timescale / steps')\n",
893 | "ax[1].plot([to.lag for to in tica_dmrfs], [to.ndim for to in tica_dmrfs])\n",
894 | "ax[1].set_xlabel('lag time / steps')\n",
895 | "ax[1].set_ylabel('number of dimensions for 95% kinetic variance')\n",
896 | "fig.tight_layout()\n",
897 | "\n",
898 | "\n",
899 | "hva_saa=tica_dmrfs[1].get_output()\n",
900 | "\n",
901 | "a=plt.hist2d(hva_saa[0][:,0],hva_saa[0][:,1], norm=mpl.colors.LogNorm(), bins=256)#, interpolation='gaussian')\n",
902 | "\n",
903 | "\n",
904 | "cluster_dmrf = pe.coordinates.cluster_kmeans(hva_saa, 1024, stride=10)\n",
905 | "\n",
906 | "dmrf_msm = pe.msm.estimate_markov_model(cluster_dmrf.dtrajs, lag=1)\n",
907 | "\n"
908 | ]
909 | },
910 | {
911 | "cell_type": "code",
912 | "execution_count": null,
913 | "metadata": {},
914 | "outputs": [],
915 | "source": []
916 | },
917 | {
918 | "cell_type": "code",
919 | "execution_count": null,
920 | "metadata": {},
921 | "outputs": [],
922 | "source": [
923 | "plt.semilogy(dmrf_msm.timescales()[:15],'o')"
924 | ]
925 | },
926 | {
927 | "cell_type": "code",
928 | "execution_count": null,
929 | "metadata": {},
930 | "outputs": [],
931 | "source": [
932 | "dmrf_hmm_ts = pe.msm.timescales_hmsm(cluster_dmrf.dtrajs, nstates=4, lags=[1,2,3,4],errors='bayes')\n",
933 | "dmrf_hmm_ = dmrf_hmm_ts.models[0]"
934 | ]
935 | },
936 | {
937 | "cell_type": "code",
938 | "execution_count": null,
939 | "metadata": {},
940 | "outputs": [],
941 | "source": [
942 | "HMM_blinded_dmrfs=[]\n",
943 | "msm_blinded_msms=[]\n",
944 | "__nstates=[4,3,3,3]\n",
945 | "for k in range(4):\n",
946 | " tica_dmrfs = [pe.coordinates.tica(data=[(synthts[k]+1)/2], lag=lag) for lag in [1,2,3,4,5]]\n",
947 | "\n",
948 | " fig, ax = plt.subplots(ncols = 2, figsize=(8, 3))\n",
949 | " ax[0].semilogy([to.lag for to in tica_dmrfs], [to.timescales[:10] for to in tica_dmrfs])\n",
950 | " ax[0].set_xlabel('lag time / steps')\n",
951 | " ax[0].set_ylabel('implied timescale / steps')\n",
952 | " ax[1].plot([to.lag for to in tica_dmrfs], [to.ndim for to in tica_dmrfs])\n",
953 | " ax[1].set_xlabel('TIC1')\n",
954 | " ax[1].set_ylabel('TIC2')\n",
955 | " fig.tight_layout()\n",
956 | "\n",
957 | "\n",
958 | " hva_saa=tica_dmrfs[1].get_output()\n",
959 | "\n",
960 | " a=plt.hist2d(hva_saa[0][:,0],hva_saa[0][:,1], norm=mpl.colors.LogNorm(), bins=256)#, interpolation='gaussian')\n",
961 | "\n",
962 | "\n",
963 | " cluster_dmrf = pe.coordinates.cluster_kmeans(hva_saa, 384, stride=10)\n",
964 | " #msm_blinded_msms.append(pe.msm.estimate_markov_model([dt.reshape(-1) for dt in cluster_dmrf.get_output()], lag=1))\n",
965 | " HMM_blinded_dmrfs.append(pe.msm.bayesian_hidden_markov_model([dt.reshape(-1) for dt in cluster_dmrf.get_output()], __nstates[k], lag=1))\n",
966 | " "
967 | ]
968 | },
969 | {
970 | "cell_type": "code",
971 | "execution_count": null,
972 | "metadata": {},
973 | "outputs": [],
974 | "source": [
975 | "from itertools import product\n",
976 | "from matplotlib import gridspec"
977 | ]
978 | },
979 | {
980 | "cell_type": "code",
981 | "execution_count": null,
982 | "metadata": {},
983 | "outputs": [],
984 | "source": [
985 | "mfpt_mats=[np.zeros((4,4))]+[np.zeros((a,a)) for i,a in enumerate(__nstates)]\n",
986 | "#mfpt_mats.append()\n",
987 | "for k in range(4):\n",
988 | " for i,j in product(range(__nstates[k]), repeat=2):\n",
989 | " #try:\n",
990 | " mfpt_mats[k+1][i,j] = 0.2*0.4*HMM_blinded_dmrfs[k].mfpt(i,j)\n",
991 | " #except:\n",
992 | " # print(k,i,j)\n",
993 | "for i,j in product(range(4), repeat=2):\n",
994 | " mfpt_mats[0][i,j] = 0.2*dmrf_hmm_.mfpt(i,j)*1e-3"
995 | ]
996 | },
997 | {
998 | "cell_type": "code",
999 | "execution_count": null,
1000 | "metadata": {},
1001 | "outputs": [],
1002 | "source": [
1003 | "dmrf_lifetimes=[]\n",
1004 | "dmrf_lifetimes.append([1.5*0.2*dmrf_hmm_.mfpt([j],[i for i in range(dmrf_hmm_.nstates) if i!=j]) for j in range(dmrf_hmm_.nstates)])\n",
1005 | "for k in range(4):\n",
1006 | " dmrf_lifetimes.append([1.5*0.2*HMM_blinded_dmrfs[k].mfpt([j],[i for i in range(HMM_blinded_dmrfs[k].nstates) if i!=j]) for j in range(HMM_blinded_dmrfs[k].nstates)])\n",
1007 | " #for i,j in product(range(__nstates[k]), repeat=2):\n",
1008 | " # mfpt_mats[k+1][i,j] = 0.3*HMM_blinded_dmrfs[k].mfpt(i,j)*0.2\n",
1009 | " #for i,j in product(range(5), repeat=2):\n",
1010 | " #mfpt_mats[0][i,j] = 0.3*dmrf_hmm_.mfpt(i,j)*0.2"
1011 | ]
1012 | },
1013 | {
1014 | "cell_type": "code",
1015 | "execution_count": null,
1016 | "metadata": {},
1017 | "outputs": [],
1018 | "source": [
1019 | "lifetimes_hmm = [1.5*1e-3*0.2*HMM.mfpt([j],[i for i in range(HMM.nstates) if i!=j]) for j in range(HMM.nstates)]"
1020 | ]
1021 | },
1022 | {
1023 | "cell_type": "code",
1024 | "execution_count": null,
1025 | "metadata": {},
1026 | "outputs": [],
1027 | "source": [
1028 | "hmm_mfpts = np.zeros((HMM.nstates,HMM.nstates))\n",
1029 | "for i,j in product(range(HMM.nstates), repeat=2):\n",
1030 | " hmm_mfpts[i,j] = 0.2*HMM.mfpt(i,j)*1.5*1e-3"
1031 | ]
1032 | },
1033 | {
1034 | "cell_type": "code",
1035 | "execution_count": null,
1036 | "metadata": {},
1037 | "outputs": [],
1038 | "source": [
1039 | "from scipy.optimize import linear_sum_assignment"
1040 | ]
1041 | },
1042 | {
1043 | "cell_type": "code",
1044 | "execution_count": null,
1045 | "metadata": {},
1046 | "outputs": [],
1047 | "source": [
1048 | "fig = plt.figure(figsize=(single_column_width*1.3, 1.3*double_column_width*2./3))\n",
1049 | "from matplotlib import patheffects\n",
1050 | "gs = gridspec.GridSpec(160, 120, left=0.15,bottom=0.08,top=0.95,right=0.90)#, wspace=None, hspace=None)\n",
1051 | "ax_feats = [plt.subplot(gs[35*(i):35*(i+1), 40*(j):40*(j+1)]) for i,j in product(range(2), range(3))]\n",
1052 | "ax_lifetimes = [plt.subplot(gs[35*(i)+90:35*(i+1)+90, 40*(j):40*(j+1)]) for i,j in product(range(2), range(3))]\n",
1053 | "\n",
1054 | "\n",
1055 | "for k,_dmrf_hmm in enumerate(HMM_blinded_dmrfs):\n",
1056 | " HMM_MD_configurations = np.array([dfeats_fixed[0][np.where(HMM.metastable_assignments[cluster_obj.dtrajs[0][:]]==j)[0],:].mean(axis=0) for j in range(HMM.nstates)]).T\n",
1057 | "\n",
1058 | " HMM_dMRF_configurations= np.array([synthts[k][_dmrf_hmm.hidden_state_trajectories[0]==i].mean(axis=0) for i in range(_dmrf_hmm.nstates)]).T\n",
1059 | " correlates_ = np.zeros((HMM.nstates,_dmrf_hmm.nstates))\n",
1060 | " for i,j in product(range(HMM.nstates), range(_dmrf_hmm.nstates)):\n",
1061 | " correlates_[i, j] = np.std(HMM_MD_configurations[:,i]-HMM_dMRF_configurations[:,j])/np.std(HMM_MD_configurations[:,i])\n",
1062 | " \n",
1063 | " cax = np.array(ax_feats).ravel()[k]\n",
1064 | " cax2 = np.array(ax_lifetimes).ravel()[k]\n",
1065 | "\n",
1066 | " if k<1:\n",
1067 | " cax.set_xticks([])\n",
1068 | " cax2.set_xticks([])\n",
1069 | " else:\n",
1070 | " cax2.set_xlabel('Metastable state')\n",
1071 | " cax2.set_xticks(range(1,6))\n",
1072 | " cax.set_xlabel('Avg. MD feature')\n",
1073 | " if k in [1,2,4]:\n",
1074 | " cax.set_yticks([])\n",
1075 | " cax2.set_yticks([])\n",
1076 | " else:\n",
1077 | " cax.set_ylabel('Avg. DGM feature')\n",
1078 | " cax2.set_ylabel(r'lifetime / $\\mu s$')\n",
1079 | "\n",
1080 | " \n",
1081 | " for i,j in zip(*linear_sum_assignment(correlates_)):\n",
1082 | " cax.scatter(HMM_MD_configurations[:,i], HMM_dMRF_configurations[:,j], s=1, color=f'C{i}')\n",
1083 | " t=cax.text(-1,0.8-0.3*i, r'$\\rho=%.2f$'%(np.corrcoef(HMM_MD_configurations[:,i], HMM_dMRF_configurations[:,j])[0,1]), color=f'C{i}')\n",
1084 | " t.set_path_effects([patheffects.Stroke(linewidth=0.5, foreground='black'),\n",
1085 | " patheffects.Normal()])\n",
1086 | " cax2.bar(i+1, dmrf_lifetimes[k+1][j], log=False, color=f'C{i}')\n",
1087 | " #cax2.set_ylim([0,0.9])\n",
1088 | " cax.set_xlim([-1.1,1.1])\n",
1089 | " cax.set_ylim([-1.1,1.1])\n",
1090 | " cax.text(0,-1, f\"Without {k+1}\", va='center',ha='center')\n",
1091 | " cax2.text(2.5,10.5, f\"Without {k+1}\", va='top',ha='center')\n",
1092 | " cax2.scatter(range(1,5),lifetimes_hmm,s=15,color='C7', zorder=10, marker=\"*\",lw=0.1,edgecolors='k')\n",
1093 | " cax2.set_xlim(0.5,4.5)\n",
1094 | " cax2.set_ylim(0,11)\n",
1095 | " if k in [1,2,4]:\n",
1096 | " cax2.set_yticks([])\n",
1097 | " cax2.set_yticklabels([]) \n",
1098 | " \n",
1099 | " \n",
1100 | "np.array(ax_feats)[-1].axis('off')\n",
1101 | "np.array(ax_lifetimes)[-1].axis('off')\n",
1102 | "np.array(ax_feats)[-2].axis('off')\n",
1103 | "np.array(ax_lifetimes)[-2].axis('off')\n",
1104 | "np.array(ax_lifetimes)[-3].set_xticks([1,2,3,4])\n",
1105 | "np.array(ax_lifetimes)[-3].set_xlim(np.array(ax_lifetimes)[0].get_xlim())\n",
1106 | "\n",
1107 | "ax_feats[0].text(-.35, 1.15, \"C\", transform=ax_feats[0].transAxes,\n",
1108 | " fontsize=12, va='top')\n",
1109 | "\n",
1110 | "ax_lifetimes[0].text(-.35, 1.15, \"D\", transform=ax_lifetimes[0].transAxes,\n",
1111 | " fontsize=12, va='top')\n",
1112 | "\n",
1113 | "#gs.tight_layout(fig)\n",
1114 | "fig.savefig('BBA_DGM_METASTABLE.pdf')\n"
1115 | ]
1116 | }
1117 | ],
1118 | "metadata": {
1119 | "kernelspec": {
1120 | "display_name": "Python 3",
1121 | "language": "python",
1122 | "name": "python3"
1123 | },
1124 | "language_info": {
1125 | "codemirror_mode": {
1126 | "name": "ipython",
1127 | "version": 3
1128 | },
1129 | "file_extension": ".py",
1130 | "mimetype": "text/x-python",
1131 | "name": "python",
1132 | "nbconvert_exporter": "python",
1133 | "pygments_lexer": "ipython3",
1134 | "version": "3.6.5"
1135 | }
1136 | },
1137 | "nbformat": 4,
1138 | "nbformat_minor": 2
1139 | }
1140 |
--------------------------------------------------------------------------------
/notebooks/DGM_Villin.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## Dynamic graphical model of fast folder Villin\n",
8 | "Simon Olsson 2018\n",
9 | "\n",
10 | "This notebook makes use of previously published simulation data ([Lindorff-Larsen et al. 2011](http://science.sciencemag.org/content/334/6055/517)). We do not have the rights to distribute these data, but they can be requested directly from DE Shaw Research.\n",
11 | "\n",
12 | "Note that the random seed is fixed. The notebook makes extensive use of nested point-estimates (i.e. statistical estimates subject to fluctuations which condition other such values), and can therefore be numerically unstable for some random seeds.\n"
13 | ]
14 | },
15 | {
16 | "cell_type": "code",
17 | "execution_count": null,
18 | "metadata": {},
19 | "outputs": [],
20 | "source": [
21 | "import mdtraj as md\n",
22 | "%matplotlib inline\n",
23 | "import matplotlib.pyplot as plt\n",
24 | "\n",
25 | "import matplotlib as mpl\n",
26 | "import numpy as np\n",
27 | "import pyemma as pe\n",
28 | "from onedeeIsing import Ising_tmatrix\n",
29 | "\n",
30 | "from sklearn.preprocessing import LabelBinarizer\n",
31 | "from graphtime.markov_random_fields import estimate_dMRF\n",
32 | "from graphtime.utils import simulate_MSM as generate\n",
33 | "\n",
34 | "double_column_width = 6.968\n",
35 | "single_column_width= 3.307\n",
36 | "font = {'sans-serif': \"Arial\",\n",
37 | " 'family': \"sans-serif\",\n",
38 | " 'size' : 8}\n",
39 | "\n",
40 | "mpl.rc('font', **font)\n",
41 | "mpl.rcParams['mathtext.fontset'] = 'custom'\n",
42 | "np.random.seed(101023)"
43 | ]
44 | },
45 | {
46 | "cell_type": "code",
47 | "execution_count": null,
48 | "metadata": {},
49 | "outputs": [],
50 | "source": [
51 | "feat = pe.coordinates.featurizer('villin/2F4K-0-protein/2F4K-0-protein.pdb')\n",
52 | "\n",
53 | "feat.add_backbone_torsions(deg=True)\n",
54 | "#feat.add_sidechain_torsions(which=['chi1'], deg=True)\n",
55 | "\n",
56 | "source = pe.coordinates.source([['villin/2F4K-0-protein/2F4K-0-protein-{:03d}.dcd'.format(i) for i in range(63)]], features=feat)\n",
57 | "\n",
58 | "dihe = source.get_output()"
59 | ]
60 | },
61 | {
62 | "cell_type": "code",
63 | "execution_count": null,
64 | "metadata": {},
65 | "outputs": [],
66 | "source": [
67 | "def discr_feats(ftrajs, feat_describe):\n",
68 | " discr_trajs = []\n",
69 | " for ft in ftrajs:\n",
70 | " dftraj = np.zeros(ft.shape, dtype = int)\n",
71 | " for i, fstr in enumerate(feat_describe):\n",
72 | " ls = fstr.split()\n",
73 | " if fstr[:3] == \"PHI\": # split into two states\n",
74 | " dftraj[:, i] = (ft[:, i]<0).astype(int)\n",
75 | "        elif fstr[:3] == \"PSI\": # split into two states if not N-terminal\n",
76 | " if int(ls[-1]) == 1:\n",
77 | " dftraj[:, i] = -1\n",
78 | " else:\n",
79 | " dftraj[:, i] = (ft[:, i]<80).astype(int)\n",
80 | " elif fstr[:3] == \"CHI\": #split into 3 rotamers\n",
81 | " tv = (ft[:, i]+180+60)%360\n",
82 | " dftraj[:, i] = (tv>125).astype(int) + (tv>250).astype(int) \n",
83 | " non_n_psi = np.where(dftraj[0, :]>-1)[0] \n",
84 | " discr_trajs.append(dftraj.copy()[:,non_n_psi])\n",
85 | " return discr_trajs, [f for i,f in enumerate(feat_describe) if i in non_n_psi]"
86 | ]
87 | },
88 | {
89 | "cell_type": "code",
90 | "execution_count": null,
91 | "metadata": {},
92 | "outputs": [],
93 | "source": [
94 | "dfeats, nlbls = discr_feats(dihe, feat.describe())"
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": null,
100 | "metadata": {},
101 | "outputs": [],
102 | "source": [
103 | "dihe_fixed = [dihe[0][:,[i for i in range(68) if i not in [38,66 ]]].copy()]\n"
104 | ]
105 | },
106 | {
107 | "cell_type": "code",
108 | "execution_count": null,
109 | "metadata": {},
110 | "outputs": [],
111 | "source": [
112 | "dfeats_fixed = [dfeats[0][:,[i for i in range(68) if i not in [38,66 ]]].copy()]\n",
113 | "#dfeats_fixed[0][:,38] = 1 "
114 | ]
115 | },
116 | {
117 | "cell_type": "code",
118 | "execution_count": null,
119 | "metadata": {
120 | "scrolled": true
121 | },
122 | "outputs": [],
123 | "source": [
124 | "C = 1000./len(dfeats_fixed[0])**0.5\n",
125 | "logistic_regression_kwargs={'fit_intercept': True, 'penalty': 'l1', 'C': C, \n",
126 | " 'tol': 0.0001, 'solver': 'saga'}\n",
127 | "dmrf_all_data = estimate_dMRF([2*dfeats_fixed[0]-1], \n",
128 | " lag=300, stride=10, \n",
129 | " logistic_regression_kwargs=logistic_regression_kwargs,\n",
130 | " Encoder = LabelBinarizer(neg_label = -1, pos_label = 1))\n",
131 | "\n",
132 | "#penta_params_r, penta_biases_r, penta_Encoder_r = estimate_potts([2*dfeats_fixed[0]-1], lag=10, C=10, fit_bias=False,tol=1e-6)"
133 | ]
134 | },
135 | {
136 | "cell_type": "code",
137 | "execution_count": null,
138 | "metadata": {},
139 | "outputs": [],
140 | "source": [
141 | "fig,ax=plt.subplots(ncols=1,figsize=(16,8))\n",
142 | "ax.imshow(np.hstack([dmrf_all_data.get_subsystem_couplings(), dmrf_all_data.get_subsystem_biases().reshape(-1,1)] ))\n",
143 | "#ax[1].imshow(np.hstack([np.vstack(penta_params_r), penta_biases_r] ))"
144 | ]
145 | },
146 | {
147 | "cell_type": "markdown",
148 | "metadata": {},
149 | "source": [
150 | "### Build MSM"
151 | ]
152 | },
153 | {
154 | "cell_type": "code",
155 | "execution_count": null,
156 | "metadata": {},
157 | "outputs": [],
158 | "source": [
159 | "import pyemma as pe\n"
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": null,
165 | "metadata": {},
166 | "outputs": [],
167 | "source": [
168 | "tica_objs = [pe.coordinates.tica(dfeats_fixed, lag = lag) for lag in [5,10,50,100,200,300,500,900]]"
169 | ]
170 | },
171 | {
172 | "cell_type": "code",
173 | "execution_count": null,
174 | "metadata": {},
175 | "outputs": [],
176 | "source": [
177 | "fig, ax = plt.subplots(ncols = 2, figsize=(8, 3))\n",
178 | "ax[0].semilogy([to.lag for to in tica_objs], [to.timescales[:10] for to in tica_objs])\n",
179 | "ax[0].set_xlabel('lag time / steps')\n",
180 | "ax[0].set_ylabel('implied timescale / steps')\n",
181 | "ax[1].plot([to.lag for to in tica_objs], [to.ndim for to in tica_objs])\n",
182 | "ax[1].set_xlabel('lag time / steps')\n",
183 | "ax[1].set_ylabel('number of dimensions for 95% kinetic variance')\n",
184 | "fig.tight_layout()\n"
185 | ]
186 | },
187 | {
188 | "cell_type": "code",
189 | "execution_count": null,
190 | "metadata": {},
191 | "outputs": [],
192 | "source": [
193 | "Y = tica_objs[5].get_output()"
194 | ]
195 | },
196 | {
197 | "cell_type": "code",
198 | "execution_count": null,
199 | "metadata": {},
200 | "outputs": [],
201 | "source": [
202 | "Ys = np.vstack(Y)"
203 | ]
204 | },
205 | {
206 | "cell_type": "code",
207 | "execution_count": null,
208 | "metadata": {},
209 | "outputs": [],
210 | "source": [
211 | "a=plt.hist2d(Ys[:, 0], Ys[:, 1], norm=mpl.colors.LogNorm(), bins=256)#, interpolation='gaussian')"
212 | ]
213 | },
214 | {
215 | "cell_type": "code",
216 | "execution_count": null,
217 | "metadata": {},
218 | "outputs": [],
219 | "source": [
220 | "cluster_obj = pe.coordinates.cluster_kmeans(data = Y, k=1024, stride=10)"
221 | ]
222 | },
223 | {
224 | "cell_type": "code",
225 | "execution_count": null,
226 | "metadata": {},
227 | "outputs": [],
228 | "source": [
229 | "its = pe.msm.its(cluster_obj.dtrajs, lags = [10, 50, 100, 200, 300, 500, 700, 900, 1000, 1200], nits=4)#, errors='bayes')"
230 | ]
231 | },
232 | {
233 | "cell_type": "code",
234 | "execution_count": null,
235 | "metadata": {},
236 | "outputs": [],
237 | "source": [
238 | "pe.plots.plot_implied_timescales(its, ylog=False)"
239 | ]
240 | },
241 | {
242 | "cell_type": "code",
243 | "execution_count": null,
244 | "metadata": {},
245 | "outputs": [],
246 | "source": [
247 | "msm = pe.msm.bayesian_markov_model(cluster_obj.dtrajs, lag = 300)"
248 | ]
249 | },
250 | {
251 | "cell_type": "code",
252 | "execution_count": null,
253 | "metadata": {},
254 | "outputs": [],
255 | "source": [
256 | "ckt = msm.cktest(5)"
257 | ]
258 | },
259 | {
260 | "cell_type": "code",
261 | "execution_count": null,
262 | "metadata": {},
263 | "outputs": [],
264 | "source": [
265 | "pe.plots.plot_cktest(ckt, diag=True)"
266 | ]
267 | },
268 | {
269 | "cell_type": "code",
270 | "execution_count": null,
271 | "metadata": {},
272 | "outputs": [],
273 | "source": [
274 | "HMM = msm.coarse_grain(5)"
275 | ]
276 | },
277 | {
278 | "cell_type": "code",
279 | "execution_count": null,
280 | "metadata": {},
281 | "outputs": [],
282 | "source": [
283 | "pe.plots.scatter_contour(cluster_obj.cluster_centers_[:, 0], cluster_obj.cluster_centers_[:, 1], -np.log(msm.stationary_distribution))"
284 | ]
285 | },
286 | {
287 | "cell_type": "code",
288 | "execution_count": null,
289 | "metadata": {},
290 | "outputs": [],
291 | "source": [
292 | "pe.plots.scatter_contour(cluster_obj.cluster_centers_[:, 2], cluster_obj.cluster_centers_[:, 3], HMM.observation_probabilities[0])"
293 | ]
294 | },
295 | {
296 | "cell_type": "code",
297 | "execution_count": null,
298 | "metadata": {},
299 | "outputs": [],
300 | "source": [
301 | "\n",
302 | "\n",
303 | "inmeta = [np.where(HMM.metastable_assignments[cluster_obj.dtrajs[0].reshape(-1)]==i)[0] for i in range(HMM.nstates)]\n",
304 | "\n",
305 | "Meta_filtered = [np.where(np.isin(HMM.metastable_assignments[cluster_obj.dtrajs[0].reshape(-1)],[i], invert=True))[0] for i in range(HMM.nstates)]\n",
306 | "\n",
307 | "not_in_meta_data = [[dfeats_fixed[0][t] for t in np.split(Meta_filtered[i], np.where(np.diff(inmeta[i])>1)[0]) if len(t)>300] for i in range(HMM.nstates)]\n",
308 | "\n",
309 | "only_in_meta_data = [[dfeats_fixed[0][t] for t in np.split(inmeta[i], np.where(np.diff(inmeta[i])>1)[0]) if len(t)>2] for i in range(HMM.nstates)]"
310 | ]
311 | },
312 | {
313 | "cell_type": "code",
314 | "execution_count": null,
315 | "metadata": {},
316 | "outputs": [],
317 | "source": [
318 | "regl_=[1000./np.vstack([2*t-1 for t in not_in_meta_data[i]]).shape[0]**0.5 for i in range(5)]\n",
319 | "regl_"
320 | ]
321 | },
322 | {
323 | "cell_type": "code",
324 | "execution_count": null,
325 | "metadata": {},
326 | "outputs": [],
327 | "source": [
328 | "dMRFs = [] \n",
329 | "for M in range(5):\n",
330 | " logistic_regression_kwargs={'fit_intercept': True, 'penalty': 'l1', 'C': regl_[M], \n",
331 | " 'tol': 0.0001, 'solver': 'saga'}\n",
332 | " dMRFs.append(estimate_dMRF([2*t-1 for t in not_in_meta_data[M]], \n",
333 | " lag=300, stride=10, \n",
334 | " logistic_regression_kwargs=logistic_regression_kwargs,\n",
335 | " Encoder = LabelBinarizer(neg_label = -1, pos_label = 1)))\n"
336 | ]
337 | },
338 | {
339 | "cell_type": "code",
340 | "execution_count": null,
341 | "metadata": {},
342 | "outputs": [],
343 | "source": [
344 | "[len(dm.get_active_subsystems()) for dm in dMRFs]"
345 | ]
346 | },
347 | {
348 | "cell_type": "code",
349 | "execution_count": null,
350 | "metadata": {},
351 | "outputs": [],
352 | "source": [
353 | "fig,ax=plt.subplots(ncols=5, figsize=(12,10))\n",
354 | "for M,d in enumerate(dMRFs):\n",
355 | " ax[M].imshow(np.hstack([d.get_subsystem_couplings(), d.get_subsystem_biases().reshape(-1,1)]))\n",
356 | "fig.tight_layout()\n",
357 | "#ax[1].imshow(np.hstack([np.vstack(villin_nf_coupl), villin_nf_bias] ))"
358 | ]
359 | },
360 | {
361 | "cell_type": "code",
362 | "execution_count": null,
363 | "metadata": {},
364 | "outputs": [],
365 | "source": [
366 | "plt.close('all')"
367 | ]
368 | },
369 | {
370 | "cell_type": "code",
371 | "execution_count": null,
372 | "metadata": {},
373 | "outputs": [],
374 | "source": []
375 | },
376 | {
377 | "cell_type": "code",
378 | "execution_count": null,
379 | "metadata": {},
380 | "outputs": [],
381 | "source": [
382 | "synthts = [d.simulate(nsteps=100000, start= ((2*np.array(not_in_meta_data[M][0][0]))-1)[d.get_active_subsystems()] ) for M,d in enumerate(dMRFs)] "
383 | ]
384 | },
385 | {
386 | "cell_type": "code",
387 | "execution_count": null,
388 | "metadata": {},
389 | "outputs": [],
390 | "source": [
391 | "syntht_alldata = dmrf_all_data.simulate(nsteps=100000, start = (2*dfeats_fixed[0][0])-1)"
392 | ]
393 | },
394 | {
395 | "cell_type": "code",
396 | "execution_count": null,
397 | "metadata": {},
398 | "outputs": [],
399 | "source": [
400 | "\n",
401 | "Y_synths = []\n",
402 | "\n",
403 | "for s in synthts:\n",
404 | " Y_synths.append(np.vstack(tica_objs[5].transform([(sf+1)/2 for sf in s] )))\n"
405 | ]
406 | },
407 | {
408 | "cell_type": "code",
409 | "execution_count": null,
410 | "metadata": {},
411 | "outputs": [],
412 | "source": [
413 | "fig, axs = plt.subplots(nrows=2, ncols = 3,figsize=(single_column_width,.75*single_column_width), sharex=True, sharey=True)\n",
414 | "ax = axs.ravel()\n",
415 | "ax[0].hist2d(Y[0][:,0],Y[0][:,1], bins=128,norm=mpl.colors.LogNorm(), label=\"all data\")\n",
416 | "ax[0].set_title('All MD data')\n",
417 | "ax[0].set_ylabel('TIC2')\n",
418 | "\n",
419 | "for I,ys in enumerate(Y_synths):\n",
420 | " ax[I+1].hist2d(ys[:,0],ys[:,1], bins=128, norm=mpl.colors.LogNorm(), label=\"missing meta {}\".format(I))\n",
421 | " #ax[I+1].scatter(cluster_obj.cluster_centers_[HMM.metastable_assignments==I, 0], cluster_obj.cluster_centers_[HMM.metastable_assignments==I, 1],marker='^', alpha=0.1, color='r')\n",
422 | " ax[I+1].set_title('Without {}'.format(I+1))\n",
423 | " if I==2:\n",
424 | " ax[I+1].set_ylabel('TIC2')\n",
425 | " if I>1:\n",
426 | " ax[I+1].set_xlabel('TIC1')\n",
427 | "fig.tight_layout()\n",
428 | "plt.savefig('tica_leave_one_out.pdf', dpi=300)"
429 | ]
430 | },
431 | {
432 | "cell_type": "code",
433 | "execution_count": null,
434 | "metadata": {},
435 | "outputs": [],
436 | "source": [
437 | "Y_synth_all = tica_objs[5].transform((syntht_alldata+1)/2)"
438 | ]
439 | },
440 | {
441 | "cell_type": "code",
442 | "execution_count": null,
443 | "metadata": {},
444 | "outputs": [],
445 | "source": [
446 | "dtraj_synths = cluster_obj.transform(Y_synths)\n",
447 | "dtraj_synths_all = cluster_obj.transform(Y_synth_all)"
448 | ]
449 | },
450 | {
451 | "cell_type": "code",
452 | "execution_count": null,
453 | "metadata": {},
454 | "outputs": [],
455 | "source": [
456 | "import matplotlib.gridspec as gridspec"
457 | ]
458 | },
459 | {
460 | "cell_type": "code",
461 | "execution_count": null,
462 | "metadata": {},
463 | "outputs": [],
464 | "source": [
465 | "yy_=[(syntht[:,np.argsort(syntht.var(axis=0))[:]][np.where(HMM.metastable_assignments[ye[::1]]==3)[0],:].mean(axis=0)+1)/2 for syntht,ye in zip(synthts, dtraj_synths)]\n",
466 | "xx_=dfeats_fixed[0][np.where(HMM.metastable_assignments[cluster_obj.dtrajs[0][:]]==3)[0],:].mean(axis=0)[:]"
467 | ]
468 | },
469 | {
470 | "cell_type": "code",
471 | "execution_count": null,
472 | "metadata": {},
473 | "outputs": [],
474 | "source": [
475 | "from scipy.stats import spearmanr\n",
476 | "\n",
477 | "\n",
478 | "fig = plt.figure(figsize=(double_column_width/1.3,double_column_width*1.1))\n",
479 | "\n",
480 | "gs = gridspec.GridSpec(143, 100, left=0.097,bottom=0.02,top=0.98,right=0.999)#, wspace=None, hspace=None)\n",
481 | "ax = plt.subplot(gs[5:23, 2:22])\n",
482 | "ax2 = plt.subplot(gs[5:23, 34:55])\n",
483 | "ax3 = plt.subplot(gs[5:23, 70:78])\n",
484 | "ls = []\n",
485 | "for i in range(5): \n",
486 | " ax2.scatter(np.vstack(synthts[i]).var(axis=0), (2*dfeats_fixed[0][::300, :]-1).var(axis=0), s=5, label=\"Without state {:d}\".format(i+1))\n",
487 | " l = ax.scatter(np.vstack(synthts[i]).mean(axis=0), (2*dfeats_fixed[0][::300, :]-1).mean(axis=0), s=5, label=\"Without state {:d}\".format(i+1))\n",
488 | " ls.append(l)\n",
489 | "ax.set_xlabel('DGM Mean feature')\n",
490 | "ax2.set_xlabel('DGM Variance of feature ')\n",
491 | "\n",
492 | "ax.set_ylabel('MD Mean feature')\n",
493 | "ax2.set_ylabel('MD Variance of feature')\n",
494 | "\n",
495 | "ax3.axis('off')\n",
496 | "ax3.legend(ls, [\"Without state {:d}\".format(i+1) for i in range(5)], fontsize=9, loc=(-0.20,0.1))\n",
497 | "#plt.tight_layout()\n",
498 | "\n",
499 | "#fig, ax = plt.subplots(nrows=5, ncols=5, sharey=True, sharex=True, figsize=(10,10))\n",
500 | "ax_ = np.array([[plt.subplot(gs[40+18*(j)+2*j:40+18*(j+1)+j*2, 18*(i)+2*i:18*(i+1)+2*i]) for i in range(5)] for j in range(5)])\n",
501 | "\n",
502 | "axf = ax_.flatten()\n",
503 | "\n",
504 | "ax.text(-.35, 1.30, \"A\", transform=ax.transAxes,\n",
505 | " fontsize=12, va='top')\n",
506 | "\n",
507 | "ax2.text(-.35, 1.30, \"B\", transform=ax2.transAxes,\n",
508 | " fontsize=12, va='top')\n",
509 | "i=0\n",
510 | "axf[i].text(-.35, 1.30, \"C\", transform=axf[i].transAxes,\n",
511 | " fontsize=12, va='top')\n",
512 | "for j in range(5):\n",
513 | " for syntht,ye in zip(synthts, dtraj_synths):\n",
514 | " yy_=syntht[np.where(HMM.metastable_assignments[ye[::1]]==j)[0],:].mean(axis=0)\n",
515 | " xx_=2*dfeats_fixed[0][np.where(HMM.metastable_assignments[cluster_obj.dtrajs[0][:]]==j)[0],:].mean(axis=0)-1\n",
516 | " axf[i].scatter(xx_, yy_,s=5, color=\"C{:d}\".format(j))\n",
517 | " \n",
518 | " axf[i].text(0.05, 0.95, r\"$\\rho={:0.2f}$\".format(np.corrcoef(xx_,yy_)[0,1]), transform=axf[i].transAxes,\n",
519 | " fontsize=9, va='top')\n",
520 | "\n",
521 | " axf[i].set_xlim((-1,1))\n",
522 | " axf[i].set_ylim((-1,1))\n",
523 | " \n",
524 | " #axf[i].text(xx_.min(), yy_.max()-0.1,)\n",
525 | " if i<5:\n",
526 | " axf[i].set_title('State {}'.format(i+1), fontsize = 10)\n",
527 | " \n",
528 | " if i%5==0: \n",
529 | " #axf[i].set_ylabel('Empirical mean feature')\n",
530 | " if i==10:\n",
531 | " axf[i].set_ylabel('MD mean feature')\n",
532 | " else:\n",
533 | " axf[i].set_yticklabels([])\n",
534 | " axf[i].set_yticks([])\n",
535 | " \n",
536 | " if i>19:\n",
537 | " #axf[i].set_xlabel('MRF mean feature')\n",
538 | " if i==22:\n",
539 | " axf[i].set_xlabel('DGM mean feature')\n",
540 | " else:\n",
541 | " axf[i].set_xticklabels([])\n",
542 | " axf[i].set_xticks([])\n",
543 | "\n",
544 | " \n",
545 | " if i%5==j:\n",
546 | " axf[i].set_facecolor((0.85,0.85,0.85)) #2*dfeats_fixed[0][np.where(HMM.metastable_assignments[cluster_obj.dtrajs[0][:]]==j)[0],:].mean(axis=0)-1\n",
547 | " i=i+1\n",
548 | "#axf[-1].axis('off')\n",
549 | "#gs.tight_layout(fig, pad=-0.5)\n",
550 | "plt.savefig('feature_scatter.pdf')"
551 | ]
552 | },
553 | {
554 | "cell_type": "code",
555 | "execution_count": null,
556 | "metadata": {},
557 | "outputs": [],
558 | "source": [
559 | "from scipy.spatial import distance_matrix"
560 | ]
561 | },
562 | {
563 | "cell_type": "code",
564 | "execution_count": null,
565 | "metadata": {},
566 | "outputs": [],
567 | "source": [
568 | "not_folded = np.hstack([np.where(np.isin(HMM.metastable_assignments[cluster_obj.dtrajs],[0,1,2,4]))[0].reshape(-1,1), dfeats_fixed[0][np.where(np.isin(HMM.metastable_assignments[cluster_obj.dtrajs],[0,1,2,4]))[0], :]])\n",
569 | "\n",
570 | "not_inmeta = [np.hstack([np.where(np.isin(HMM.metastable_assignments[cluster_obj.dtrajs],[xx for xx in range(5) if xx != i]))[0].reshape(-1,1), dfeats_fixed[0][np.where(np.isin(HMM.metastable_assignments[cluster_obj.dtrajs],[xx for xx in range(5) if xx != i]))[0], :]]) for i in range(5)]\n",
571 | "\n",
572 | "only_inmeta = [np.hstack([np.where(np.isin(HMM.metastable_assignments[cluster_obj.dtrajs],[i]))[0].reshape(-1,1), \n",
573 | " dfeats_fixed[0][np.where(np.isin(HMM.metastable_assignments[cluster_obj.dtrajs],[i]))[0], :]]) for i in range(5)]\n",
574 | "\n",
575 | "not_folded_split = [t[:,1:] for t in np.split(not_folded, np.where(np.diff(not_folded[:,0])>1)[0], axis=0) if len(t)>300]\n",
576 | "\n",
577 | "Meta_filtered = [[t[:,1:] for t in np.split(not_inmeta[i], np.where(np.diff(not_inmeta[i][:,0])>1)[0], axis=0) if len(t)>300] for i in range(5) ]\n",
578 | "\n",
579 | "Just_one_Meta = [[t[:,1:] for t in np.split(only_inmeta[i], np.where(np.diff(only_inmeta[i][:,0])>1)[0], axis=0) if len(t)>300] for i in range(5) ]"
580 | ]
581 | },
582 | {
583 | "cell_type": "code",
584 | "execution_count": null,
585 | "metadata": {},
586 | "outputs": [],
587 | "source": [
588 | "from scipy import spatial"
589 | ]
590 | },
591 | {
592 | "cell_type": "code",
593 | "execution_count": null,
594 | "metadata": {},
595 | "outputs": [],
596 | "source": [
597 | "from scipy import spatial\n",
598 | "def mrftraj_to_dtraj(mrftraj, ftraj, transformer = lambda x:0.5*(x+1)):\n",
599 | " dtraj_out = []\n",
600 | " errors = []\n",
601 | " for m in mrftraj:\n",
602 | " pair_contacts = spatial.distance.cdist(transformer(m).reshape(1,-1), ftraj, metric='hamming').reshape(-1)\n",
603 | " dtraj_out.append(np.argmin(pair_contacts))\n",
604 | " errors.append(pair_contacts[dtraj_out[-1]])\n",
605 | " \n",
606 | " return dtraj_out, errors\n",
607 | "\n"
608 | ]
609 | },
610 | {
611 | "cell_type": "code",
612 | "execution_count": null,
613 | "metadata": {},
614 | "outputs": [],
615 | "source": [
616 | "resampledtraj,errors = mrftraj_to_dtraj(syntht_alldata[:1000], dfeats_fixed[0])"
617 | ]
618 | },
619 | {
620 | "cell_type": "code",
621 | "execution_count": null,
622 | "metadata": {},
623 | "outputs": [],
624 | "source": [
625 | "rtrajs = []\n",
626 | "errs = []\n",
627 | "for i in range(5):\n",
628 | " _resampledtraj,_errors = mrftraj_to_dtraj(synthts[i][:1000], dfeats_fixed[0])\n",
629 | " rtrajs.append(_resampledtraj)\n",
630 | " errs.append(_errors)\n",
631 | " pe.coordinates.save_traj(source, np.vstack((np.zeros((1,1000)) ,_resampledtraj)).T.astype(int), \"resampled_wo_{}.pdb\".format(i))"
632 | ]
633 | },
634 | {
635 | "cell_type": "code",
636 | "execution_count": null,
637 | "metadata": {},
638 | "outputs": [],
639 | "source": [
640 | "pe.coordinates.save_traj(source, np.vstack((np.zeros((1,1000)) ,resampledtraj)).T.astype(int), \"subsampled.pdb\")"
641 | ]
642 | },
643 | {
644 | "cell_type": "code",
645 | "execution_count": null,
646 | "metadata": {},
647 | "outputs": [],
648 | "source": [
649 | "for i, sampl in enumerate(HMM.sample_by_observation_probabilities(5)):\n",
650 | " pe.coordinates.save_traj(source, sampl, \"HMM_state_{}.pdb\".format(i))"
651 | ]
652 | },
653 | {
654 | "cell_type": "markdown",
655 | "metadata": {},
656 | "source": [
657 | "Meta-data for synthetic trajectory used for generating animated GIF"
658 | ]
659 | },
660 | {
661 | "cell_type": "code",
662 | "execution_count": null,
663 | "metadata": {},
664 | "outputs": [],
665 | "source": [
666 | "np.savez(\"resmtraj3.npz\", **{'data': dfeats_fixed[0][rtrajs[3], :]})\n",
667 | "np.savez(\"state_assign_resmtraj_3.npz\", **{'data': HMM.metastable_assignments[cluster_obj.transform(tica_objs[5].transform(dfeats_fixed[0][rtrajs[3], :])).reshape(-1)]} )\n",
668 | "np.savez(\"recerr_resmtraj_3.npz\", **{'data': errs[3]} )"
669 | ]
670 | },
671 | {
672 | "cell_type": "code",
673 | "execution_count": null,
674 | "metadata": {},
675 | "outputs": [],
676 | "source": [
677 | "from itertools import product"
678 | ]
679 | },
680 | {
681 | "cell_type": "code",
682 | "execution_count": null,
683 | "metadata": {},
684 | "outputs": [],
685 | "source": [
686 | "meanfree_synthtrajs = [0.5*(synthts[i][:10000,56]+1)-0.5*(synthts[i][:10000,56]+1).mean(axis=0) for i in range(5)]\n",
687 | "meanfree_MD = dfeats_fixed[0][::300,56]-dfeats_fixed[0][::300,56].mean(axis=0)"
688 | ]
689 | },
690 | {
691 | "cell_type": "code",
692 | "execution_count": null,
693 | "metadata": {},
694 | "outputs": [],
695 | "source": [
696 | "def func(x, a, b, c):\n",
697 | " return a * np.exp(-b * x) + c"
698 | ]
699 | },
700 | {
701 | "cell_type": "code",
702 | "execution_count": null,
703 | "metadata": {},
704 | "outputs": [],
705 | "source": [
706 | "from scipy.optimize import curve_fit"
707 | ]
708 | },
709 | {
710 | "cell_type": "code",
711 | "execution_count": null,
712 | "metadata": {},
713 | "outputs": [],
714 | "source": [
715 | "for i in range(5):\n",
716 | " popt, pcov = curve_fit(func, np.arange(2093)*60/1000., np.correlate(meanfree_synthtrajs[i][:2094],meanfree_synthtrajs[i][:2094], mode='full')[2094:])\n",
717 | " print(\"relaxation rate {:0.3f} for without state {:d}\".format(popt[1],i+1))\n",
718 | "\n",
719 | "popt, pcov = curve_fit(func, np.arange(2093)*60/1000., np.correlate(meanfree_MD,meanfree_MD,mode='full')[2094:])\n",
720 | "print(\"relaxation rate MD\", popt[1]) \n"
721 | ]
722 | },
723 | {
724 | "cell_type": "code",
725 | "execution_count": null,
726 | "metadata": {},
727 | "outputs": [],
728 | "source": [
729 | "from itertools import product"
730 | ]
731 | },
732 | {
733 | "cell_type": "code",
734 | "execution_count": null,
735 | "metadata": {},
736 | "outputs": [],
737 | "source": [
738 | "fig = plt.figure(figsize=(single_column_width,1.8*1.75*single_column_width/2))\n",
739 | "\n",
740 | "gs = gridspec.GridSpec(180, 90,left=0.16,bottom=0.05,top=0.95,right=0.95, wspace=0.0, hspace=0.)#, width_ratios=1, height_ratios=1)\n",
741 | "axs = [plt.subplot(gs[30*(i)+15:30*(i+1)+15, 3+29*(j):3+29*(j+1)]) for i,j in product(range(2), range(3))]\n",
742 | "axs.append(plt.subplot(gs[:15, :]))\n",
743 | "#gs.update(hspace=0, wspace=0)\n",
744 | "\n",
745 | "ax_ylbl = plt.subplot(gs[30:,:3])\n",
746 | "ax_ylbl.set_ylabel(r'$p(x)$ (log-scale)')\n",
747 | "ylbl=ax_ylbl.axes.yaxis.get_label()\n",
748 | "fig.text(ylbl.get_position()[0]+0.05,1.5*ylbl.get_position()[1]+0.10*2/3 , ylbl.get_text(), rotation=90)\n",
749 | "\n",
750 | "ax_ylbl.axis('off')\n",
751 | "\n",
752 | "\n",
753 | "ls = []\n",
754 | "for i, ax in enumerate(axs):\n",
755 | " ax.set_ylim((2e-4,2.0))\n",
756 | " if i==5: \n",
757 | " ising_msm = pe.msm.estimate_markov_model(HMM.metastable_assignments[dtraj_synths_all.reshape(-1)], lag=1)\n",
758 | " l = ax.bar(range(1,6), ising_msm.stationary_distribution,hatch=\"//\", fill=False, label=\"Ising\")\n",
759 | " ls.append(l)\n",
760 | " l2 = ax.bar(range(1,6), HMM.stationary_distribution,fill=True, alpha=0.2, log=True, label=\"HMM (full MD)\")\n",
761 | " #ax.legend([l, l2, l3], [\"MRF\", \"HMM (full MD)\", \"Missing state\"], fontsize=12, loc=3)\n",
762 | " #ax.axis('off')\n",
763 | " ax.set_yticks([])\n",
764 | " ax.set_xticks([1,2,3,4,5])\n",
765 | " #ax.set_xlabel('Meta-stable state')\n",
766 | " elif i==6:\n",
767 | " ax.legend([l, l2, l3], [\"dMRF\", \"HMM (full MD)\", \"State left out during estimation\"], fontsize=8, loc=(0.27,0.2))\n",
768 | " ax.set_yticks([])\n",
769 | " ax.set_xticks([])\n",
770 | " ax.axis('off')\n",
771 | " \n",
772 | " else:\n",
773 | " ising_msm = pe.msm.estimate_markov_model(HMM.metastable_assignments[dtraj_synths[i].reshape(-1)], lag=1)\n",
774 | " l = ax.bar(range(1,6), ising_msm.stationary_distribution,hatch=\"//\", fill=False, label=\"Ising\")\n",
775 | " ls.append(l)\n",
776 | " l2 = ax.bar(range(1,6), HMM.stationary_distribution,fill=True, alpha=0.2, log=True, label=\"HMM (full MD)\")\n",
777 | " ls.append(l2)\n",
778 | " l3 = ax.scatter([i+1], 0.5, marker=\"*\", s=50, color='purple')\n",
779 | "\n",
780 | " if i>2:\n",
781 | " ax.set_xticks([1,2,3,4,5])\n",
782 | " if i>3:\n",
783 | " ax.set_xlabel(r'Meta-stable state / $x$')\n",
784 | "\n",
785 | " else:\n",
786 | " ax.set_xticks([])\n",
787 | "\n",
788 | " if i in [0, 3]:\n",
789 | " continue\n",
790 | " #ax.set_ylabel('State prob (log-scale)')\n",
791 | " else:\n",
792 | " ax.set_yticks([])\n",
793 | "\n",
794 | "axs2 = [plt.subplot(gs[105:145, 35*i+20*i:35*(i+1)+20*i]) for i in range(2) ]\n",
795 | "\n",
796 | "bins = np.unique(np.concatenate(errs))\n",
797 | "for i, err in enumerate(errs):\n",
798 | " axs2[0].hist(err, bins=bins, label = \"Without state {:d}\".format(i+1), histtype='step', lw=1, normed=True, log=False)\n",
799 | "axs2[0].set_xlabel(r'$\\epsilon $')\n",
800 | "axs2[0].set_ylabel(r'$p(\\epsilon)$')\n",
801 | "\n",
802 | "\n",
803 | "for i in range(5):\n",
804 | " _a = np.correlate(meanfree_synthtrajs[i][:10000],meanfree_synthtrajs[i][:10000], mode='full')[10000:]\n",
805 | " axs2[1].plot(np.arange(1,10000)*60./1000.,_a/_a[0], label=\"Without state {:d}\".format(i+1) )\n",
806 | "_a = np.correlate(meanfree_MD,meanfree_MD,mode='full')[2094:]\n",
807 | "axs2[1].plot(np.arange(1,2094)*60./1000.,_a/_a[0], label=\"MD\", color='k')\n",
808 | "\n",
809 | "axs2[1].set_xlabel(r'$\\tau$ / $\\mu$s')\n",
810 | "\n",
811 | "axs2[1].set_ylabel(r'$C(\\tau)$ / Lys 71 $\\phi$ rotamer')\n",
812 | "axs2[1].semilogx()\n",
813 | "axs2[1].set_xlim((axs2[1].get_xlim()[0],100))\n",
814 | "ax3 = plt.subplot(gs[165:, :])\n",
815 | "ax3.legend([child for child in axs2[1].get_children() if isinstance(child, mpl.lines.Line2D)],\n",
816 | " [child.get_label() for child in axs2[1].get_children() if isinstance(child, mpl.lines.Line2D)]\n",
817 | " ,loc=(0.0,-0.4), ncol=2)#axs2[0].legend()\n",
818 | "ax3.axis('off')\n",
819 | "axs[0].text(-.35, 1.30, \"A\", transform=axs[0].transAxes,\n",
820 | " fontsize=12, va='top')\n",
821 | "axs2[0].text(-.35, 1.30, \"B\", transform=axs2[0].transAxes,\n",
822 | " fontsize=12, va='top')\n",
823 | "axs2[1].text(-.35, 1.30, \"C\", transform=axs2[1].transAxes,\n",
824 | " fontsize=12, va='top')\n",
825 | "#gs.tight_layout(fig)#, pad=-1.5)\n",
826 | "plt.savefig('Villin_statdist_corrfunc.pdf')"
827 | ]
828 | },
829 | {
830 | "cell_type": "code",
831 | "execution_count": null,
832 | "metadata": {},
833 | "outputs": [],
834 | "source": [
835 | "\n",
836 | "from matplotlib.colors import hex2color"
837 | ]
838 | },
839 | {
840 | "cell_type": "code",
841 | "execution_count": null,
842 | "metadata": {},
843 | "outputs": [],
844 | "source": [
845 | "new_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728',\n",
846 | " '#9467bd', '#8c564b', '#e377c2', '#7f7f7f',\n",
847 | " '#bcbd22', '#17becf']\n",
848 | "for i,nc in enumerate(new_colors):\n",
849 | " print(\"set_color C{:}, [{:}, {:}, {:}]\".format(i+1, *hex2color(nc)))"
850 | ]
851 | },
852 | {
853 | "cell_type": "code",
854 | "execution_count": null,
855 | "metadata": {},
856 | "outputs": [],
857 | "source": [
858 | "bba_errs = np.loadtxt('bba_errs.txt')\n",
859 | "bba_synthts = np.loadtxt('bba_synthtrajs.txt')\n",
860 | "bba_dmrf_mus = [np.loadtxt('bba_hist_dmrf{}.txt'.format(i)) for i in range(4)]\n",
861 | "bba_hist_hmm = np.loadtxt('bba_hist_hmm.txt')\n",
862 | "bba_syntht_all = np.loadtxt('bba_syntht_all.txt')\n",
863 | "bba_hmm = pe.load('bba_hmm.pyemma')\n",
864 | "bba_md_acf = np.loadtxt('bba_md_acf.txt')"
865 | ]
866 | },
867 | {
868 | "cell_type": "code",
869 | "execution_count": null,
870 | "metadata": {},
871 | "outputs": [],
872 | "source": [
873 | "import msmtools as mt"
874 | ]
875 | },
876 | {
877 | "cell_type": "code",
878 | "execution_count": null,
879 | "metadata": {},
880 | "outputs": [],
881 | "source": [
882 | "fig = plt.figure(figsize=(double_column_width,0.90*1.8*1.75*single_column_width/2+2./3*single_column_width ))\n",
883 | "\n",
884 | "gs = gridspec.GridSpec(180+60, 180,left=0.08,bottom=0.025,top=0.95,right=0.95, wspace=0.0, hspace=0.)#, width_ratios=1, height_ratios=1)\n",
885 | "axs = [plt.subplot(gs[30*(i)+62+15:30*(i+1)+62+15, 3+27*(j):3+27*(j+1)]) for i,j in product(range(2), range(3))]\n",
886 | "axs.append(plt.subplot(gs[62:62+15, :]))\n",
887 | "axs3 = [plt.subplot(gs[30*(i)+62+15:30*(i+1)+62+15, 10+27*(j)+90:10+27*(j+1)+90]) for i,j in product(range(2), range(3))]\n",
888 | "\n",
889 | "#gs.update(hspace=0, wspace=0)\n",
890 | "\n",
891 | "ax_ylbl = plt.subplot(gs[30+62:,:3])\n",
892 | "ax_ylbl.set_ylabel(r'$p(x)$ (log-scale)')\n",
893 | "ylbl=ax_ylbl.axes.yaxis.get_label()\n",
894 | "fig.text(ylbl.get_position()[0]+0.03,0.5*ylbl.get_position()[1]+1.0*1/3 , ylbl.get_text(), rotation=90)\n",
895 | "\n",
896 | "ax_ylbl.axis('off')\n",
897 | "\n",
898 | "\n",
899 | "ls = []\n",
900 | "for i, ax in enumerate(axs):\n",
901 | " ax.set_ylim((2e-4,5.0))\n",
902 | " if i==5: \n",
903 | " ising_msm = pe.msm.estimate_markov_model(HMM.metastable_assignments[dtraj_synths_all.reshape(-1)], lag=1)\n",
904 | " l = ax.bar(range(1,6), ising_msm.stationary_distribution,hatch=\"//\", fill=False, label=\"Ising\")\n",
905 | " ls.append(l)\n",
906 | " l2 = ax.bar(range(1,6), HMM.stationary_distribution,fill=True, alpha=0.2, log=True, label=\"HMM (full MD)\")\n",
907 | " #ax.legend([l, l2, l3], [\"MRF\", \"HMM (full MD)\", \"Missing state\"], fontsize=12, loc=3)\n",
908 | " #ax.axis('off')\n",
909 | " ax.text(3.,3.0, \"all data\", va='top',ha='center')\n",
910 | " ax.set_yticks([])\n",
911 | " ax.set_xticks([1,2,3,4,5])\n",
912 | " #ax.set_xlabel('Meta-stable state')\n",
913 | " elif i==6:\n",
914 | " #ax.legend([l, l2, l3], [\"DGM\", \"HMM (full MD)\", \"State left out during estimation\"])#, fontsize=6, loc=(1.5,-1.0))\n",
915 | " ax.set_yticks([])\n",
916 | " ax.set_xticks([])\n",
917 | " ax.axis('off')\n",
918 | " \n",
919 | " else:\n",
920 | " ising_msm = pe.msm.estimate_markov_model(HMM.metastable_assignments[dtraj_synths[i].reshape(-1)], lag=1)\n",
921 | " l = ax.bar(range(1,6), ising_msm.stationary_distribution,hatch=\"//\", fill=False, label=\"Ising\")\n",
922 | " ls.append(l)\n",
923 | " l2 = ax.bar(range(1,6), HMM.stationary_distribution,fill=True, alpha=0.2, log=True, label=\"HMM (full MD)\")\n",
924 | " ls.append(l2)\n",
925 | " l3 = ax.scatter([i+1], 0.5, marker=\"*\", s=50, color='purple')\n",
926 | "\n",
927 | " if i>2:\n",
928 | " ax.set_xticks([1,2,3,4,5])\n",
929 | " if i>3:\n",
930 | " ax.set_xlabel(r'Meta-stable state / $x$')\n",
931 | "\n",
932 | " else:\n",
933 | " ax.set_xticks([])\n",
934 | "\n",
935 | " if i in [0, 3]:\n",
936 | " continue\n",
937 | " #ax.set_ylabel('State prob (log-scale)')\n",
938 | " else:\n",
939 | " ax.set_yticks([])\n",
940 | "\n",
941 | "for i, ax in enumerate(axs3):\n",
942 | " ax.set_ylim((2e-4,5.0))\n",
943 | " if i==4: \n",
944 | " ising_msm = pe.msm.estimate_markov_model(bba_hmm.metastable_assignments[bba_syntht_all.reshape(-1).astype(int)], lag=1)\n",
945 | " l = ax.bar(range(1,5), ising_msm.stationary_distribution,hatch=\"//\", fill=False, label=\"DGM\")\n",
946 | " #ls.append(l)\n",
947 | " l2 = ax.bar(range(1,5), bba_hist_hmm,fill=True, alpha=0.2, log=True, label=\"HMM (full MD)\")\n",
948 | " #ax.legend([l, l2, l3], [\"MRF\", \"HMM (full MD)\", \"Missing state\"], fontsize=12, loc=3)\n",
949 | " #ax.axis('off')\n",
950 | " ax.set_yticks([])\n",
951 | " ax.set_xticks([1,2,3,4])\n",
952 | " ax.set_xlabel(r'Meta-stable state / $x$')\n",
953 | " ax.text(2.5,3.0, \"all data\", va='top',ha='center')\n",
954 | " #ax.set_xlabel('Meta-stable state')\n",
955 | " elif i==5:\n",
956 | " #ax.legend([l, l2, l3], [\"dMRF\", \"HMM (full MD)\", \"State left out during estimation\"], fontsize=8, loc=(0.27/2,0.2))\n",
957 | " ax.set_yticks([])\n",
958 | " ax.set_xticks([])\n",
959 | " ax.axis('off')\n",
960 | " \n",
961 | " else:\n",
962 | " ising_msm = pe.msm.estimate_markov_model(HMM.metastable_assignments[dtraj_synths[i].reshape(-1)], lag=1)\n",
963 | " l = ax.bar(range(1,5), bba_dmrf_mus[i],hatch=\"//\", fill=False, label=\"Ising\")\n",
964 | " #ls.append(l)\n",
965 | " l2 = ax.bar(range(1,5), bba_hist_hmm,fill=True, alpha=0.2, log=True, label=\"HMM (full MD)\")\n",
966 | " ls.append(l2)\n",
967 | " l3 = ax.scatter([i+1], 0.5, marker=\"*\", s=50, color='purple')\n",
968 | "\n",
969 | " if i>1:\n",
970 | " ax.set_xticks([1,2,3,4])\n",
971 | " #if i>3:\n",
972 | " # ax.set_xlabel(r'Meta-stable state / $x$')\n",
973 | "\n",
974 | " else:\n",
975 | " ax.set_xticks([])\n",
976 | "\n",
977 | " if i in [0, 3]:\n",
978 | " continue\n",
979 | " #ax.set_ylabel('State prob (log-scale)')\n",
980 | " else:\n",
981 | " ax.set_yticks([])\n",
982 | "\n",
983 | " \n",
984 | "axs2 = [plt.subplot(gs[62+105:62+145, 30*i+20*i:30*(i+1)+20*i]) for i in range(2) ]\n",
985 | "\n",
986 | "axs4 = [plt.subplot(gs[62+105:62+145, 30*i+20*i+100:30*(i+1)+20*i+100]) for i in range(2) ]\n",
987 | "\n",
988 | "\n",
989 | "ym = np.array(errs).mean(axis=1)\n",
990 | "yconf = mt.util.statistics.confidence_interval(np.array(errs).T, conf=.68)\n",
991 | "\n",
992 | "#for i, err in enumerate(bba_errs):\n",
993 | "bl=axs2[0].bar(np.arange(1, ym.shape[0]+1), ym, yerr=(ym-yconf[0],yconf[1]+ym))\n",
994 | "for i,b in enumerate(bl):\n",
995 | " b.set_color(f'C{i}')\n",
996 | "\n",
997 | "axs2[0].set_ylabel(r'$\\epsilon $')\n",
998 | "axs2[0].set_xlabel(r'Without state')\n",
999 | "axs2[0].set_xticks([1,2,3,4,5])\n",
1000 | "\n",
1001 | "\n",
1002 | "#for i, err in enumerate(errs):\n",
1003 | "# axs2[0].hist(err, bins=bins, label = \"Without state {:d}\".format(i+1), histtype='step', lw=1, normed=True, log=False)\n",
1004 | "#axs2[0].set_xlabel(r'$\\epsilon $')\n",
1005 | "#axs2[0].set_ylabel(r'$p(\\epsilon)$')\n",
1006 | "\n",
1007 | "ym = bba_errs.mean(axis=1)\n",
1008 | "yconf = mt.util.statistics.confidence_interval(bba_errs.T, conf=.68)\n",
1009 | "\n",
1010 | "#for i, err in enumerate(bba_errs):\n",
1011 | "bl=axs4[0].bar(np.arange(1, ym.shape[0]+1), ym, yerr=(ym-yconf[0],yconf[1]+ym))\n",
1012 | "for i,b in enumerate(bl):\n",
1013 | " b.set_color(f'C{i}')\n",
1014 | "axs4[0].set_ylabel(r'$\\epsilon $')\n",
1015 | "axs4[0].set_xlabel(r'Without state')\n",
1016 | "axs4[0].set_xticks([1,2,3,4])\n",
1017 | "\n",
1018 | "for i in range(5):\n",
1019 | " _a = np.correlate(meanfree_synthtrajs[i][:10000],meanfree_synthtrajs[i][:10000], mode='full')[10000:]\n",
1020 | " axs2[1].plot(np.arange(1,10000)*60./1000.,_a/_a[0], label=\"Without state {:d}\".format(i+1) )\n",
1021 | "_a = np.correlate(meanfree_MD,meanfree_MD,mode='full')[2094:]\n",
1022 | "axs2[1].plot(np.arange(1,2094)*60./1000.,_a/_a[0], label=\"MD\", color='k')\n",
1023 | "\n",
1024 | "axs2[1].set_xlabel(r'$\\tau$ / $\\mu$s')\n",
1025 | "\n",
1026 | "axs2[1].set_ylabel(r'$C(\\tau)$ / Lys 71 $\\phi$ rotamer')\n",
1027 | "axs2[1].semilogx()\n",
1028 | "axs2[1].set_xlim((axs2[1].get_xlim()[0],50))\n",
1029 | "\n",
1030 | "\n",
1031 | "for i in range(4):\n",
1032 | " _a = np.correlate(bba_synthts[i][:],bba_synthts[i][:], mode='full')[2094:]\n",
1033 | " axs4[1].plot(np.arange(1,2094)*60./1000.,_a/_a[0], label=\"Without state {:d}\".format(i+1) )\n",
1034 | " \n",
1035 | "_a = np.loadtxt('bba_md_acf.txt')\n",
1036 | "axs4[1].plot(np.arange(1,1277)*60./1000.,_a/_a[0], label=\"MD\", color='k')\n",
1037 | "\n",
1038 | "axs4[1].set_xlabel(r'$\\tau$ / $\\mu$s')\n",
1039 | "\n",
1040 | "axs4[1].set_ylabel(r'$C(\\tau)$ / Glu 17 $\\phi$ rotamer')\n",
1041 | "axs4[1].semilogx()\n",
1042 | "axs4[1].set_xlim((axs2[1].get_xlim()[0],50))\n",
1043 | "\n",
1044 | "\n",
1045 | "# [\"DGM\", \"HMM (full MD)\", \"State left out during estimation\"]\n",
1046 | "ax3 = plt.subplot(gs[60+165:, :])\n",
1047 | "ax3.legend([child for child in axs2[1].get_children() if isinstance(child, mpl.lines.Line2D)]+[l, l2, l3],\n",
1048 | " [child.get_label() for child in axs2[1].get_children() if isinstance(child, mpl.lines.Line2D)]+[\"DGM\", \"HMM (full MD)\", \"State left out\"]\n",
1049 | " ,loc=(-0.025,-0.28), ncol=5)#axs2[0].legend()\n",
1050 | "ax3.axis('off')\n",
1051 | "axs[0].text(-.35, 1.30, \"B\", transform=axs[0].transAxes,\n",
1052 | " fontsize=12, va='top')\n",
1053 | "axs2[0].text(-.35, 1.30, \"C\", transform=axs2[0].transAxes,\n",
1054 | " fontsize=12, va='top')\n",
1055 | "axs2[1].text(-.35, 1.30, \"D\", transform=axs2[1].transAxes,\n",
1056 | " fontsize=12, va='top')\n",
1057 | "\n",
1058 | "ax3.axis('off')\n",
1059 | "axs3[0].text(-.35, 1.30, \"F\", transform=axs3[0].transAxes,\n",
1060 | " fontsize=12, va='top')\n",
1061 | "axs4[0].text(-.35, 1.30, \"G\", transform=axs4[0].transAxes,\n",
1062 | " fontsize=12, va='top')\n",
1063 | "axs4[1].text(-.35, 1.30, \"H\", transform=axs4[1].transAxes,\n",
1064 | " fontsize=12, va='top')\n",
1065 | "\n",
1066 | "gs_renders= [plt.subplot(gs[33*(i)+2:33*(i+1)+2, 29*(j):29*(j+1)]) for i,j in product(range(2), range(3))]#[plt.subplot(gs[2+j*34:2+(j+1)*34, 34*(i%5):34*((i+1)%5)]) for j in range(2) for i in range(3)]\n",
1067 | "gs_renders_= [plt.subplot(gs[33*(i)+2:33*(i+1)+2, 10+29*(j)+90:10+29*(j+1)+90]) for i,j in product(range(2), range(3))]#[plt.subplot(gs[2+j*34:2+(j+1)*34, 34*(i%5):34*((i+1)%5)]) for j in range(2) for i in range(3)]\n",
1068 | "\n",
1069 | "\n",
1070 | "for i in range(5):\n",
1071 | " gs_renders[i].imshow(plt.imread('villin_hmm{:}.png'.format(i)))\n",
1072 | " gs_renders[i].axis('off')\n",
1073 | " if i == 0:\n",
1074 | " gs_renders[i].text(0.1, 0.95, 'A', transform=gs_renders[i].transAxes,\n",
1075 | " fontsize=12, va='top')\n",
1076 | "\n",
1077 | "gs_renders[-1].axis('off')\n",
1078 | "\n",
1079 | "for i in range(4):\n",
1080 | " gs_renders_[i].imshow(plt.imread('bba_hmm{:}.png'.format(i+1)))\n",
1081 | " gs_renders_[i].axis('off')\n",
1082 | " if i == 0:\n",
1083 | " gs_renders_[i].text(0.1, 0.95, \"E\", transform=gs_renders_[i].transAxes,\n",
1084 | " fontsize=12, va='top')\n",
1085 | "gs_renders_[-1].axis('off')\n",
1086 | "gs_renders_[-2].axis('off')\n",
1087 | "\n",
1088 | "\n",
1089 | "axs3[1].text(.5, 1.30, \"BBA\", transform=gs_renders_[1].transAxes,\n",
1090 | " fontsize=15, va='top', horizontalalignment='center', verticalalignment='center')\n",
1091 | "axs[1].text(0.5, 1.30, \"Villin\", transform=gs_renders[1].transAxes,\n",
1092 | " fontsize=15, va='top', horizontalalignment='center', verticalalignment='center')\n",
1093 | "#gs.tight_layout(fig)#, pad=-1.5)\n",
1094 | "plt.savefig('Villin_statdist_corrfunc.pdf', dpi=300)"
1095 | ]
1096 | },
1097 | {
1098 | "cell_type": "code",
1099 | "execution_count": null,
1100 | "metadata": {},
1101 | "outputs": [],
1102 | "source": [
1103 | "from scipy.optimize import linear_sum_assignment"
1104 | ]
1105 | },
1106 | {
1107 | "cell_type": "code",
1108 | "execution_count": null,
1109 | "metadata": {},
1110 | "outputs": [],
1111 | "source": [
1112 | "\n",
1113 | "tica_dmrfs = [pe.coordinates.tica(data=[(syntht_alldata+1)/2], lag=lag) for lag in [1,2,3,4,5]]\n",
1114 | "\n",
1115 | "fig, ax = plt.subplots(ncols = 2, figsize=(8, 3))\n",
1116 | "ax[0].semilogy([to.lag for to in tica_dmrfs], [to.timescales[:10] for to in tica_dmrfs])\n",
1117 | "ax[0].set_xlabel('lag time / steps')\n",
1118 | "ax[0].set_ylabel('implied timescale / steps')\n",
1119 | "ax[1].plot([to.lag for to in tica_dmrfs], [to.ndim for to in tica_dmrfs])\n",
1120 | "ax[1].set_xlabel('lag time / steps')\n",
1121 | "ax[1].set_ylabel('number of dimensions for 95% kinetic variance')\n",
1122 | "fig.tight_layout()\n",
1123 | "\n",
1124 | "\n",
1125 | "hva_saa=tica_dmrfs[1].get_output()\n",
1126 | "\n",
1127 | "a=plt.hist2d(hva_saa[0][:,0],hva_saa[0][:,1], norm=mpl.colors.LogNorm(), bins=256)#, interpolation='gaussian')\n",
1128 | "\n",
1129 | "\n",
1130 | "cluster_dmrf = pe.coordinates.cluster_kmeans(hva_saa, 1024, stride=10)\n",
1131 | "\n",
1132 | "\n"
1133 | ]
1134 | },
1135 | {
1136 | "cell_type": "code",
1137 | "execution_count": null,
1138 | "metadata": {},
1139 | "outputs": [],
1140 | "source": [
1141 | "dmrf_msm = pe.msm.estimate_markov_model(cluster_dmrf.dtrajs, lag=1)"
1142 | ]
1143 | },
1144 | {
1145 | "cell_type": "code",
1146 | "execution_count": null,
1147 | "metadata": {},
1148 | "outputs": [],
1149 | "source": [
1150 | "plt.semilogy(dmrf_msm.timescales()[:20]*300,'o')"
1151 | ]
1152 | },
1153 | {
1154 | "cell_type": "code",
1155 | "execution_count": null,
1156 | "metadata": {},
1157 | "outputs": [],
1158 | "source": [
1159 | "dmrf_hmm_ts = pe.msm.timescales_hmsm(cluster_dmrf.dtrajs, nstates=6, lags=[1,2,3,4],errors='bayes')\n"
1160 | ]
1161 | },
1162 | {
1163 | "cell_type": "code",
1164 | "execution_count": null,
1165 | "metadata": {},
1166 | "outputs": [],
1167 | "source": [
1168 | "pe.plots.plot_implied_timescales(dmrf_hmm_ts, ylog=False, dt=.3,units='us')"
1169 | ]
1170 | },
1171 | {
1172 | "cell_type": "code",
1173 | "execution_count": null,
1174 | "metadata": {},
1175 | "outputs": [],
1176 | "source": []
1177 | },
1178 | {
1179 | "cell_type": "code",
1180 | "execution_count": null,
1181 | "metadata": {},
1182 | "outputs": [],
1183 | "source": [
1184 | "dmrf_hmm_ = dmrf_hmm_ts.models[0]"
1185 | ]
1186 | },
1187 | {
1188 | "cell_type": "code",
1189 | "execution_count": null,
1190 | "metadata": {},
1191 | "outputs": [],
1192 | "source": [
1193 | "HMM_MD_configurations = np.array([2*dfeats_fixed[0][np.where(HMM.metastable_assignments[cluster_obj.dtrajs[0][:]]==j)[0],:].mean(axis=0)-1 for j in range(HMM.nstates)]).T\n",
1194 | "\n",
1195 | "HMM_dMRF_configurations= np.array([syntht_alldata[dmrf_hmm_.hidden_state_trajectories[0]==i].mean(axis=0) for i in range(dmrf_hmm_.nstates)]).T"
1196 | ]
1197 | },
1198 | {
1199 | "cell_type": "code",
1200 | "execution_count": null,
1201 | "metadata": {},
1202 | "outputs": [],
1203 | "source": [
1204 | "correlates_ = np.zeros((HMM.nstates,dmrf_hmm_.nstates))\n",
1205 | "for i,j in product(range(HMM.nstates), range(dmrf_hmm_.nstates)):\n",
1206 | " correlates_[i, j] = np.std(HMM_MD_configurations[:,i]-HMM_dMRF_configurations[:,j])/np.std(HMM_MD_configurations[:,i])"
1207 | ]
1208 | },
1209 | {
1210 | "cell_type": "code",
1211 | "execution_count": null,
1212 | "metadata": {},
1213 | "outputs": [],
1214 | "source": [
1215 | "HMM_blinded_dmrfs=[]\n",
1216 | "msm_blinded_msms=[]\n",
1217 | "__nstates=[6,5,5,4,5]\n",
1218 | "for k in range(5):\n",
1219 | " tica_dmrfs = [pe.coordinates.tica(data=[(synthts[k]+1)/2], lag=lag) for lag in [1,2,3,4,5]]\n",
1220 | "\n",
1221 | " fig, ax = plt.subplots(ncols = 2, figsize=(8, 3))\n",
1222 | " ax[0].semilogy([to.lag for to in tica_dmrfs], [to.timescales[:10] for to in tica_dmrfs])\n",
1223 | " ax[0].set_xlabel('lag time / steps')\n",
1224 | " ax[0].set_ylabel('implied timescale / steps')\n",
1225 | " ax[1].plot([to.lag for to in tica_dmrfs], [to.ndim for to in tica_dmrfs])\n",
1226 | " ax[1].set_xlabel('TIC1')\n",
1227 | " ax[1].set_ylabel('TIC2')\n",
1228 | " fig.tight_layout()\n",
1229 | "\n",
1230 | "\n",
1231 | " hva_saa=tica_dmrfs[1].get_output()\n",
1232 | "\n",
1233 | " a=plt.hist2d(hva_saa[0][:,0],hva_saa[0][:,1], norm=mpl.colors.LogNorm(), bins=256)#, interpolation='gaussian')\n",
1234 | "\n",
1235 | "\n",
1236 | " cluster_dmrf = pe.coordinates.cluster_kmeans(hva_saa, 1024, stride=10)\n",
1237 | " #msm_blinded_msms.append(pe.msm.estimate_markov_model([dt.reshape(-1) for dt in cluster_dmrf.get_output()], lag=1))\n",
1238 | " HMM_blinded_dmrfs.append(pe.msm.bayesian_hidden_markov_model([dt.reshape(-1) for dt in cluster_dmrf.get_output()], __nstates[k], lag=1))\n",
1239 | " "
1240 | ]
1241 | },
1242 | {
1243 | "cell_type": "code",
1244 | "execution_count": null,
1245 | "metadata": {},
1246 | "outputs": [],
1247 | "source": [
1248 | "[_.nstates for _ in HMM_blinded_dmrfs]"
1249 | ]
1250 | },
1251 | {
1252 | "cell_type": "code",
1253 | "execution_count": null,
1254 | "metadata": {},
1255 | "outputs": [],
1256 | "source": [
1257 | "dmrf_lifetimes=[]\n",
1258 | "dmrf_lifetimes.append([0.3*0.2*dmrf_hmm_.mfpt([j],[i for i in range(dmrf_hmm_.nstates) if i!=j]) for j in range(dmrf_hmm_.nstates)])\n",
1259 | "for k in range(5):\n",
1260 | " dmrf_lifetimes.append([0.3*0.2*HMM_blinded_dmrfs[k].mfpt([j],[i for i in range(HMM_blinded_dmrfs[k].nstates) if i!=j]) for j in range(HMM_blinded_dmrfs[k].nstates)])\n",
1261 | " #for i,j in product(range(__nstates[k]), repeat=2):\n",
1262 | " # mfpt_mats[k+1][i,j] = 0.3*HMM_blinded_dmrfs[k].mfpt(i,j)*0.2\n",
1263 | " #for i,j in product(range(5), repeat=2):\n",
1264 | " #mfpt_mats[0][i,j] = 0.3*dmrf_hmm_.mfpt(i,j)*0.2"
1265 | ]
1266 | },
1267 | {
1268 | "cell_type": "code",
1269 | "execution_count": null,
1270 | "metadata": {},
1271 | "outputs": [],
1272 | "source": [
1273 | "hmm_mfpts = np.zeros((HMM.nstates,HMM.nstates))\n",
1274 | "for i,j in product(range(HMM.nstates), repeat=2):\n",
1275 | " hmm_mfpts[i,j] = 0.2*1e-3*HMM.mfpt(i,j)"
1276 | ]
1277 | },
1278 | {
1279 | "cell_type": "code",
1280 | "execution_count": null,
1281 | "metadata": {},
1282 | "outputs": [],
1283 | "source": [
1284 | "lifetimes_hmm = [0.2*1e-3*HMM.mfpt([j],[i for i in range(HMM.nstates) if i!=j]) for j in range(HMM.nstates)]"
1285 | ]
1286 | },
1287 | {
1288 | "cell_type": "code",
1289 | "execution_count": null,
1290 | "metadata": {},
1291 | "outputs": [],
1292 | "source": [
1293 | "fig = plt.figure(figsize=(single_column_width*1.3, 1.3*double_column_width*2./3))\n",
1294 | "from matplotlib import patheffects\n",
1295 | "gs = gridspec.GridSpec(160, 120, left=0.15,bottom=0.08,top=0.95,right=0.90)#, wspace=None, hspace=None)\n",
1296 | "ax_feats = [plt.subplot(gs[35*(i):35*(i+1), 40*(j):40*(j+1)]) for i,j in product(range(2), range(3))]\n",
1297 | "ax_lifetimes = [plt.subplot(gs[35*(i)+90:35*(i+1)+90, 40*(j):40*(j+1)]) for i,j in product(range(2), range(3))]\n",
1298 | "\n",
1299 | "\n",
1300 | "for k,_dmrf_hmm in enumerate(HMM_blinded_dmrfs):\n",
1301 | " HMM_MD_configurations = np.array([2*dfeats_fixed[0][np.where(HMM.metastable_assignments[cluster_obj.dtrajs[0][:]]==j)[0],:].mean(axis=0)-1 for j in range(HMM.nstates)]).T\n",
1302 | "\n",
1303 | " HMM_dMRF_configurations= np.array([synthts[k][_dmrf_hmm.hidden_state_trajectories[0]==i].mean(axis=0) for i in range(_dmrf_hmm.nstates)]).T\n",
1304 | " correlates_ = np.zeros((HMM.nstates,_dmrf_hmm.nstates))\n",
1305 | " for i,j in product(range(HMM.nstates), range(_dmrf_hmm.nstates)):\n",
1306 | " correlates_[i, j] = np.std(HMM_MD_configurations[:,i]-HMM_dMRF_configurations[:,j])/np.std(HMM_MD_configurations[:,i])\n",
1307 | " \n",
1308 | " cax = np.array(ax_feats).ravel()[k]\n",
1309 | " cax2 = np.array(ax_lifetimes).ravel()[k]\n",
1310 | "\n",
1311 | " if k<2:\n",
1312 | " cax.set_xticks([])\n",
1313 | " cax2.set_xticks([])\n",
1314 | " else:\n",
1315 | " cax2.set_xlabel('Metastable state')\n",
1316 | " cax2.set_xticks(range(1,6))\n",
1317 | " cax.set_xlabel('Avg. MD feature')\n",
1318 | " if k in [1,2,4]:\n",
1319 | " cax.set_yticks([])\n",
1320 | " cax2.set_yticks([])\n",
1321 | " else:\n",
1322 | " cax.set_ylabel('Avg. DGM feature')\n",
1323 | " cax2.set_ylabel(r'lifetime / $\\mu s$')\n",
1324 | "\n",
1325 | " \n",
1326 | " for i,j in zip(*linear_sum_assignment(correlates_)):\n",
1327 | " cax.scatter(HMM_MD_configurations[:,i], HMM_dMRF_configurations[:,j], s=1, color=f'C{i}')\n",
1328 | " t=cax.text(-1,0.8-0.3*i, r'$\\rho=%.2f$'%(np.corrcoef(HMM_MD_configurations[:,i], HMM_dMRF_configurations[:,j])[0,1]), color=f'C{i}')\n",
1329 | " t.set_path_effects([patheffects.Stroke(linewidth=0.5, foreground='black'),\n",
1330 | " patheffects.Normal()])\n",
1331 | " cax2.bar(i+1, dmrf_lifetimes[k+1][j], log=False, color=f'C{i}')\n",
1332 | " cax2.set_ylim([0,0.9])\n",
1333 | " cax.text(0,-1, f\"Without {k+1}\", va='center',ha='center')\n",
1334 | " cax2.text(3,0.85, f\"Without {k+1}\", va='top',ha='center')\n",
1335 | " cax2.scatter(range(1,6),lifetimes_hmm,s=15,color='C7', zorder=10, marker=\"*\",lw=0.1,edgecolors='k')\n",
1336 | " if k in [1,2,4]:\n",
1337 | " cax2.set_yticks([])\n",
1338 | " cax2.set_yticklabels([]) \n",
1339 | " \n",
1340 | " \n",
1341 | "np.array(ax_feats)[-1].axis('off')\n",
1342 | "np.array(ax_lifetimes)[-1].axis('off')\n",
1343 | "\n",
1344 | "ax_feats[0].text(-.35, 1.15, \"A\", transform=ax_feats[0].transAxes,\n",
1345 | " fontsize=12, va='top')\n",
1346 | "\n",
1347 | "ax_lifetimes[0].text(-.35, 1.15, \"B\", transform=ax_lifetimes[0].transAxes,\n",
1348 | " fontsize=12, va='top')\n",
1349 | "\n",
1350 | "#gs.tight_layout(fig)\n",
1351 | "fig.savefig('VILLIN_DGM_METASTABLE.pdf')"
1352 | ]
1353 | }
1354 | ],
1355 | "metadata": {
1356 | "kernelspec": {
1357 | "display_name": "Python 3",
1358 | "language": "python",
1359 | "name": "python3"
1360 | },
1361 | "language_info": {
1362 | "codemirror_mode": {
1363 | "name": "ipython",
1364 | "version": 3
1365 | },
1366 | "file_extension": ".py",
1367 | "mimetype": "text/x-python",
1368 | "name": "python",
1369 | "nbconvert_exporter": "python",
1370 | "pygments_lexer": "ipython3",
1371 | "version": "3.6.5"
1372 | }
1373 | },
1374 | "nbformat": 4,
1375 | "nbformat_minor": 2
1376 | }
1377 |
--------------------------------------------------------------------------------
/notebooks/IsingExample.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Estimating a dynamic random field using the graphtime library\n",
8 | "\n",
9 | "Simon Olsson 2018, simon.olsson / at / fu-berlin.de or [@smnlssn](http://www.twitter.com/smnlssn)\n",
10 | "\n",
11 | "In this notebook we illustrate the use of the graphtime library to estimate dMRFs. The notebook will reproduce Fig. 2 from our manuscript.\n",
12 | "\n",
13 | "We cover:\n",
14 | " - the generation of simulation data, here with the Ising model\n",
15 | " - the estimation of dMRFs \n",
16 | " - rudimentary visualization of results\n",
17 | " \n",
18 | "Imports and some function definitions"
19 | ]
20 | },
21 | {
22 | "cell_type": "code",
23 | "execution_count": null,
24 | "metadata": {},
25 | "outputs": [],
26 | "source": [
27 | "%matplotlib inline\n",
28 | "from matplotlib import pyplot as plt\n",
29 | "import matplotlib as mpl\n",
30 | "import numpy as np\n",
31 | "import matplotlib.gridspec as gridspec\n",
32 | "from matplotlib.patches import Circle, Arrow\n",
33 | "from matplotlib.collections import PatchCollection\n",
34 | "\n",
35 | "\n",
36 | "from sklearn.preprocessing import LabelBinarizer\n",
37 | "\n",
38 | "font = {'sans-serif': \"Arial\",\n",
39 | " 'family': \"sans-serif\",\n",
40 | " 'size' : 8, \n",
41 | " }\n",
42 | "\n",
43 | "\n",
44 | "mpl.rc('font', **font)\n",
45 | "mpl.rc('font',family='sans-serif')\n",
46 | "mpl.rc('text.latex', preamble=r'\\usepackage{sfmath}')\n",
47 | "\n",
48 | "from graphtime import markov_random_fields\n",
49 | "from graphtime import utils as _ut\n",
50 | "from graphtime import ising_utils as ising\n",
51 | "\n",
52 | "import msmtools as mt\n",
53 | "import pyemma as pe"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": null,
59 | "metadata": {},
60 | "outputs": [],
61 | "source": [
62 | "is_positive_state = lambda x:x.mean(axis=1)>-0.01\n",
63 | "\n",
64 | "def generate_biased_data(Tmat, ntrajs, subsys_configurations, \n",
65 | " maxlen=1000, minlen=10, truncation_condition = is_positive_state):\n",
66 | " \"\"\"\n",
67 | " Generate biased data-set where trajectories are truncated if `truncation_condition` of future state \n",
68 | " evaluates to True.\n",
69 | " \n",
70 | " Arguments:\n",
71 | " ---------------\n",
72 | " Tmat (ndarray) : N times N matrix of transition probabilities (a Markov state model)\n",
73 | " ntrajs (int) : number of trajectories to generate\n",
74 | " subsys_configurations (ndarray) : N times K matrix mapping each Markov state in `Tmat` to sub-system \n",
75 | " configurations\n",
76 | " maxlen (int=1000) : maximum length (steps) of a trajectory\n",
77 | " minlen (int=10) : minimum length (step) of a trajectory\n",
78 | " truncation_condition (function) : a callable which takes an K dimensional `ndarray` and returns a boolean.\n",
79 | " \n",
80 | " \"\"\"\n",
81 | " otrajs = []\n",
82 | " gtrajs = 0\n",
83 | "    while gtrajs<ntrajs:\n",
84 | "        tmptraj = _ut.simulate_MSM(Tmat, maxlen)\n",
85 | "        truct_spots = np.where(truncation_condition(subsys_configurations[tmptraj]))[0]\n",
86 | "        if len(truct_spots)>0:\n",
87 | " trunc_pos = truct_spots[0]-1\n",
88 | " if trunc_pos>minlen:\n",
89 | " otrajs.append(subsys_configurations[tmptraj[:trunc_pos]])\n",
90 | " gtrajs=gtrajs+1\n",
91 | " else:\n",
92 | " otrajs.append(subsys_configurations[tmptraj[:]])\n",
93 | " gtrajs=gtrajs+1\n",
94 | " \n",
95 | " return otrajs\n",
96 | "\n",
97 | "def featurize(X):\n",
98 | " dts=[]\n",
99 | " for x in X.copy():\n",
100 | " x[np.where(x==-1)] = 0\n",
101 | " dts.append ([int(''.join(map(str, f)),2) for f in x])\n",
102 | " return dts"
103 | ]
104 | },
105 | {
106 | "cell_type": "markdown",
107 | "metadata": {},
108 | "source": [
109 | "Initialize the reference Ising model"
110 | ]
111 | },
112 | {
113 | "cell_type": "code",
114 | "execution_count": null,
115 | "metadata": {},
116 | "outputs": [],
117 | "source": [
118 | "# number of spins (sub-systems) of Ising model to simulate\n",
119 | "nspin = 9\n",
120 | "\n",
121 | "# generate reference transition matrix (discretized Glauber rate model)\n",
122 | "ising_tmatrix = ising.Ising_tmatrix(nspin, alpha = 0.05)\n",
123 | "\n",
124 | "# generate map from Markov state to Ising system configuration, global to local encoding [-1,1]\n",
125 | "ising_configurations = np.array(ising.all_Ising_states(nspin))\n"
126 | ]
127 | },
128 | {
129 | "cell_type": "markdown",
130 | "metadata": {},
131 | "source": [
132 | "## Generate data\n",
133 | "Generates 16 data-sets of equilibrium and non-equilibrium data. The non-equilibrium data only see global states with a net-negative Ising magnetization. The Ising magnetization is simply the average of the local state encoding. Conversely, for the equilibrium data all states are in principle allowed. All simulations are initialized in an all negative configuration. "
134 | ]
135 | },
136 | {
137 | "cell_type": "code",
138 | "execution_count": null,
139 | "metadata": {},
140 | "outputs": [],
141 | "source": [
142 | "noneq_data = [generate_biased_data(ising_tmatrix, 500, ising_configurations, maxlen=3000) for i in range(16)]\n",
143 | "\n",
144 | "nframes_neqd = [np.vstack(bd).shape[0]-2*(len(noneq_data)-1) for bd in noneq_data]\n",
145 | "\n",
146 | "eq_data = [[ising_configurations[_ut.simulate_MSM(ising_tmatrix, nf)]] for nf in nframes_neqd]"
147 | ]
148 | },
149 | {
150 | "cell_type": "markdown",
151 | "metadata": {},
152 | "source": [
153 | "## Estimate models\n",
154 | "\n",
155 | "Estimate dMRFs for equilibrium data-sets"
156 | ]
157 | },
158 | {
159 | "cell_type": "code",
160 | "execution_count": null,
161 | "metadata": {},
162 | "outputs": [],
163 | "source": [
164 | "eq_data_dmrfs_no_intercepts = []\n",
165 | "eq_data_dmrfs = []\n",
166 | "\n",
167 | "logreg_kwargs_no_intercepts = {'fit_intercept': False, \n",
168 | " 'penalty': 'l1', 'C': 50., 'tol': 1e-4, 'solver': 'saga'}\n",
169 | "\n",
170 | "logreg_kwargs_intercepts = {'fit_intercept': True, \n",
171 | " 'penalty': 'l1', 'C': 50., 'tol': 1e-4, 'solver': 'saga'}\n",
172 | "\n",
173 | "for _gen_data in eq_data:\n",
174 | " #estimate model forcing fields (bias) to zero\n",
175 | " eq_data_dmrfs_no_intercepts.append(markov_random_fields.estimate_dMRF(_gen_data, lag = 1, \n",
176 | " logistic_regression_kwargs = logreg_kwargs_no_intercepts,\n",
177 | " Encoder = LabelBinarizer(neg_label = -1,\n",
178 | " pos_label = 1)))\n",
179 | " \n",
180 | " #estimate model with fields (bias)\n",
181 | " eq_data_dmrfs.append(markov_random_fields.estimate_dMRF(_gen_data, lag = 1, \n",
182 | " logistic_regression_kwargs = logreg_kwargs_intercepts,\n",
183 | " Encoder = LabelBinarizer(neg_label = -1,\n",
184 | " pos_label = 1)))\n"
185 | ]
186 | },
187 | {
188 | "cell_type": "markdown",
189 | "metadata": {},
190 | "source": [
191 | "Estimate dMRFs for non-equilibrium data-sets"
192 | ]
193 | },
194 | {
195 | "cell_type": "code",
196 | "execution_count": null,
197 | "metadata": {},
198 | "outputs": [],
199 | "source": [
200 | "neq_data_dmrfs_no_intercepts = []\n",
201 | "neq_data_dmrfs = []\n",
202 | "\n",
203 | "for bd in noneq_data: \n",
204 | " #estimate model forcing fields (bias) to zero\n",
205 | " neq_data_dmrfs_no_intercepts.append(markov_random_fields.estimate_dMRF(bd, lag = 1, \n",
206 | " logistic_regression_kwargs = logreg_kwargs_no_intercepts,\n",
207 | " Encoder = LabelBinarizer(neg_label = -1,\n",
208 | " pos_label = 1)))\n",
209 | " #estimate model with fields (bias)\n",
210 | " neq_data_dmrfs.append(markov_random_fields.estimate_dMRF(bd, lag = 1, \n",
211 | " logistic_regression_kwargs = logreg_kwargs_intercepts,\n",
212 | " Encoder = LabelBinarizer(neg_label = -1,\n",
213 | " pos_label = 1)))"
214 | ]
215 | },
216 | {
217 | "cell_type": "markdown",
218 | "metadata": {},
219 | "source": [
220 | "## Reconstruct transition matrices from estimated dMRFs"
221 | ]
222 | },
223 | {
224 | "cell_type": "code",
225 | "execution_count": null,
226 | "metadata": {},
227 | "outputs": [],
228 | "source": [
229 | "tmats_eq_no_intercepts = [_dmrf.generate_transition_matrix() for _dmrf in eq_data_dmrfs_no_intercepts]\n",
230 | "tmats_eq = [_dmrf.generate_transition_matrix() for _dmrf in eq_data_dmrfs]\n",
231 | "\n",
232 | "tmats_neq_no_intercepts = [_dmrf.generate_transition_matrix() for _dmrf in neq_data_dmrfs_no_intercepts]\n",
233 | "tmats_neq = [_dmrf.generate_transition_matrix() for _dmrf in neq_data_dmrfs]\n"
234 | ]
235 | },
236 | {
237 | "cell_type": "code",
238 | "execution_count": null,
239 | "metadata": {},
240 | "outputs": [],
241 | "source": [
242 | "ts_eq = [mt.analysis.timescales(t)[1:4] for t in tmats_eq_no_intercepts]\n",
243 | "ts_neq = [mt.analysis.timescales(t)[1:4] for t in tmats_neq_no_intercepts] "
244 | ]
245 | },
246 | {
247 | "cell_type": "code",
248 | "execution_count": null,
249 | "metadata": {},
250 | "outputs": [],
251 | "source": [
252 | "MSM = [pe.msm.estimate_markov_model(featurize(bd.copy()), lag = 1, reversible = False) for bd in noneq_data]"
253 | ]
254 | },
255 | {
256 | "cell_type": "markdown",
257 | "metadata": {},
258 | "source": [
259 | "## Build Figure 2"
260 | ]
261 | },
262 | {
263 | "cell_type": "code",
264 | "execution_count": null,
265 | "metadata": {},
266 | "outputs": [],
267 | "source": [
268 | "double_column_width = 6.968\n",
269 | "single_column_width= 3.307\n",
270 | "\n",
271 | "fig = plt.figure(figsize=(single_column_width, 1.6*single_column_width))\n",
272 | "\n",
273 | "gs = gridspec.GridSpec(170, 100)\n",
274 | "ax = np.array([[plt.subplot(gs[70+i*50:70+(i+1)*50, j*50:(j+1)*50]) for i in range(2)] for j in range(2)]).T\n",
275 | "\n",
276 | "ax[0,0].grid('on')\n",
277 | "ax[0,0].plot([-0.5,5], [-0.5,5], color='k', ls = '--', lw=0.5)\n",
278 | "ax[0,0].errorbar(np.mean([_dmrf.get_subsystem_couplings().ravel() for _dmrf in neq_data_dmrfs_no_intercepts], axis=0), \n",
279 | " np.mean([_dmrf.get_subsystem_couplings().ravel() for _dmrf in eq_data_dmrfs_no_intercepts], axis=0),\n",
280 | " xerr=np.std([_dmrf.get_subsystem_couplings().ravel() for _dmrf in neq_data_dmrfs_no_intercepts], axis=0),\n",
281 | " yerr=np.std([_dmrf.get_subsystem_couplings().ravel() for _dmrf in eq_data_dmrfs_no_intercepts], axis=0), fmt=\".\")\n",
282 | "ax[0,0].set_xlim([-0.5,5])\n",
283 | "ax[0,0].set_ylim([-0.5,5])\n",
284 | "ax[0,0].annotate(\"Self coupl.\", \n",
285 | " xy=(np.array(neq_data_dmrfs_no_intercepts[0].get_subsystem_couplings()).max(), \n",
286 | " np.array(eq_data_dmrfs_no_intercepts[0].get_subsystem_couplings()).max()), \n",
287 | " xytext=(0, 0.8*np.array(neq_data_dmrfs_no_intercepts[0].get_subsystem_couplings()).max()),\n",
288 | " arrowprops=dict(arrowstyle=\"->\"))\n",
289 | "ax[0,0].set_xlabel(r'NED $J_{ij}$', fontsize=8)\n",
290 | "ax[0,0].set_ylabel(r'ED $J_{ij}$', fontsize=8)\n",
291 | "\n",
292 | "\n",
293 | "ax[0,1].hist(ising_configurations.mean(axis=1), \n",
294 | " weights = mt.analysis.statdist(np.mean(tmats_eq_no_intercepts, axis=0)), \n",
295 | " normed=True, histtype='step', bins=10, log = False, label='dMRF ED')\n",
296 | "\n",
297 | "ax[0,1].hist(ising_configurations.mean(axis=1), \n",
298 | " weights = mt.analysis.statdist(np.mean(tmats_neq_no_intercepts, axis=0)), \n",
299 | " normed=True, histtype='step', bins=10, log = False, label='dMRF NED')\n",
300 | "\n",
301 | "ax[0,1].hist(np.concatenate([ising_configurations.mean(axis=1)[m.active_set] for m in MSM]), \n",
302 | " weights = np.concatenate([m.stationary_distribution for m in MSM]), \n",
303 | " normed=True, histtype='step', bins=5, log = False, label='MSM NED')\n",
304 | "\n",
305 | "ax[0,1].hist(ising_configurations.mean(axis=1), \n",
306 | " weights = mt.analysis.statdist(ising_tmatrix), \n",
307 | " normed=True, histtype='step', bins=10, log = False, label='True', color='k')\n",
308 | "\n",
309 | "ax[0,1].hist(2*np.vstack([np.vstack(bd) for bd in bd]).mean(axis=1)-1, \n",
310 | " normed=True, histtype='step', bins=5, log = False, label='NED', ls='--')\n",
311 | "\n",
312 | "ax[0,1].hist(np.vstack(_gen_data).mean(axis=1), \n",
313 | " normed=True, histtype='step', bins=10, log = True, label='ED', ls='--')\n",
314 | "\n",
315 | "ax[0,1].set_ylim([2e-1, 5])\n",
316 | "ax[0,1].set_xlabel(r'$\\langle M \\rangle$', fontsize=8)\n",
317 | "ax[0,1].set_ylabel(r'$p(\\langle M \\rangle$)', fontsize=8)\n",
318 | "\n",
319 | "avgp = np.mean(tmats_eq_no_intercepts, axis=0).ravel()\n",
320 | "lo,up = mt.util.statistics.confidence_interval([d.ravel() for d in tmats_eq_no_intercepts])\n",
321 | "\n",
322 | "ax[1,0].errorbar(ising_tmatrix.ravel(), \n",
323 | " avgp,\n",
324 | " yerr=(avgp - lo, up - avgp),\n",
325 | " fmt='.')\n",
326 | "\n",
327 | "ax[1,0].plot([-0,1],[-0,1], lw=0.5, ls='--', color='k')\n",
328 | "ax[1,0].set_xlim([-0,1])\n",
329 | "ax[1,0].set_ylim([-0,1])\n",
330 | "ax[1,0].set_xlabel(r'True $T_{ij}$', fontsize=8 )\n",
331 | "ax[1,0].set_ylabel(r'NED dMRF $T_{ij}$', fontsize=8 )\n",
332 | "\n",
333 | "\n",
334 | "\n",
335 | "w=0.27\n",
336 | "avgp = np.mean(ts_eq, axis=0)\n",
337 | "lo,up = mt.util.statistics.confidence_interval([d for d in ts_eq])\n",
338 | "ax[1, 1].bar(np.arange(1, 4) - w, avgp, label = 'ED dMRF', yerr = (avgp - lo, up - avgp),\n",
339 | " width = w, fill = False, edgecolor = \"C0\", ecolor = \"C0\")\n",
340 | "\n",
341 | "avgp = np.mean(ts_neq, axis=0)\n",
342 | "lo,up = mt.util.statistics.confidence_interval([d for d in ts_neq])\n",
343 | "ax[1, 1].bar(np.arange(1, 4), avgp, label = 'NED dMRF', yerr = (avgp - lo, up - avgp), \n",
344 | " width = w, fill = False, edgecolor = \"C1\", ecolor = \"C1\" )\n",
345 | "\n",
346 | "avgp = np.mean([m.timescales(k = 3) for m in MSM], axis = 0)\n",
347 | "lo,up = mt.util.statistics.confidence_interval([d for d in np.mean([m.timescales(k = 3) for m in MSM], axis = 0)])\n",
348 | "ax[1, 1].bar(np.arange(1, 4) + w,avgp, yerr = (avgp - lo, up - avgp), label = 'NED MSM' , \n",
349 | " width = w, fill = False, edgecolor = \"C2\", ecolor = \"C2\", log=True)\n",
350 | "\n",
351 | "\n",
352 | "for _i,_ts in enumerate(mt.analysis.timescales(ising_tmatrix)[1:4]):\n",
353 | " ax[1,1].hlines(_ts, (_i+1)-w, (_i+1)+w , label='True', color='k')\n",
354 | " \n",
355 | "ax[1, 1].set_xlabel('Process', fontsize=8)\n",
356 | "ax[1, 1].set_ylabel('Time-scale / step', fontsize=8)\n",
357 | "ax[1, 1].set_xticks(range(1,9))\n",
358 | "ax[1, 1].set_xlim([1-w*2.2,3+w*2.2])\n",
359 | "\n",
360 | "#ax[1,1].xaxis.grid('on')\n",
361 | "\n",
362 | "#ax[1,1].legend()\n",
363 | "\n",
364 | "for a,lbl in zip(ax.ravel(), ('B', 'C', 'D', 'E')):\n",
365 | " a.text(-0.45, 1.15, lbl, transform=a.transAxes,\n",
366 | " fontsize=12, va='top')\n",
367 | "legend_ax = plt.subplot(gs[60:70,:])\n",
368 | "legend_ax.legend([child for child in ax[0,1].get_children() if isinstance(child, mpl.patches.Polygon)],\n",
369 | " [child.get_label() for child in ax[0,1].get_children() if isinstance(child, mpl.patches.Polygon)]\n",
370 | " ,loc=(0.12,1.1), ncol=2)\n",
371 | "\n",
372 | "legend_ax.axis('off')\n",
373 | "\n",
374 | "# Ising model illustration\n",
375 | "ax2 = [plt.subplot(gs[:50, 20:70])] \n",
376 | "patches = []\n",
377 | "coupl = []\n",
378 | "\n",
379 | "for i, x1, y1, r in zip(range(9), np.linspace(0,2*np.pi,10)[:9], np.linspace(0,2*np.pi,10)[:9], np.ones(9)*0.25):\n",
380 | " circle = Circle((np.cos(x1), np.sin(y1)), r, fill=False, facecolor='white')\n",
381 | " ax2[0].text(np.cos(x1), np.sin(y1), r\"$s_{:}$\".format(i+1),\n",
382 | " horizontalalignment='center', verticalalignment='center')\n",
383 | " patches.append(circle)\n",
384 | "\n",
385 | "coupl.append(Circle((0,0),radius=1))\n",
386 | " \n",
387 | "p = PatchCollection(patches)\n",
388 | "cp_=PatchCollection(coupl)\n",
389 | "\n",
390 | "p.set_facecolor('w')\n",
391 | "p.set_edgecolor('k')\n",
392 | "p.set_linewidth(1)\n",
393 | "cp_.set_facecolor('w')\n",
394 | "cp_.set_edgecolor('r')\n",
395 | "cp_.set_linewidth(1.5)\n",
396 | "ax2[0].add_collection(cp_)\n",
397 | "ax2[0].add_collection(p)\n",
398 | "\n",
399 | "ax2[0].set_xlim((-1.26,1.26))\n",
400 | "ax2[0].set_ylim((-1.26,1.26))\n",
401 | "ax2[0].set_xticks([])\n",
402 | "ax2[0].set_yticks([])\n",
403 | "ax2[0].get_xaxis().set_visible(False) \n",
404 | "ax2[0].get_yaxis().set_visible(False) \n",
405 | "ax2[0].axis('off')\n",
406 | "ax2[0].text(-0.4, 1.05, \"A\", transform=ax2[0].transAxes,\n",
407 | " fontsize=12, va='top')\n",
408 | "\n",
409 | "fig.tight_layout(pad=0.5)\n",
410 | "#fig.savefig('Fig1.pdf')"
411 | ]
412 | },
413 | {
414 | "cell_type": "code",
415 | "execution_count": null,
416 | "metadata": {},
417 | "outputs": [],
418 | "source": [
419 | "fig.savefig('Fig2.png', dpi=300)\n",
420 | "fig.savefig('Fig2.pdf')"
421 | ]
422 | },
423 | {
424 | "cell_type": "code",
425 | "execution_count": null,
426 | "metadata": {},
427 | "outputs": [],
428 | "source": []
429 | }
430 | ],
431 | "metadata": {
432 | "kernelspec": {
433 | "display_name": "Python 3",
434 | "language": "python",
435 | "name": "python3"
436 | },
437 | "language_info": {
438 | "codemirror_mode": {
439 | "name": "ipython",
440 | "version": 3
441 | },
442 | "file_extension": ".py",
443 | "mimetype": "text/x-python",
444 | "name": "python",
445 | "nbconvert_exporter": "python",
446 | "pygments_lexer": "ipython3",
447 | "version": "3.6.5"
448 | }
449 | },
450 | "nbformat": 4,
451 | "nbformat_minor": 2
452 | }
453 |
--------------------------------------------------------------------------------
/notebooks/WLALL_peptide.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "## Generate WLALL figure from manuscript\n",
8 | "\n",
9 | "Simon Olsson 2018\n",
10 | "\n",
11 | "Featurizes and analyses 24 trajectories of the WLALL peptide (25 trajectories in the original data-set). One trajectory (15) is left out as a rare event is happening in the first tens of nanoseconds in this trajectory, which is not reversibly sampled. \n",
12 | "\n",
13 | "Generates manuscript figure for WLALL peptide.\n",
14 | "\n",
15 | "Please note, since this notebook makes use of random sampling for error-estimation, exact reproduction cannot be expected. The notebook requires internet access, as primary data is downloaded. Complete execution of the notebook may take between tens of minutes and hours depending on available hardware, speed of internet connectivity and server load."
16 | ]
17 | },
18 | {
19 | "cell_type": "code",
20 | "execution_count": null,
21 | "metadata": {},
22 | "outputs": [],
23 | "source": [
24 | "%matplotlib inline\n",
25 | "import mdtraj as md\n",
26 | "import matplotlib.pyplot as plt\n",
27 | "import matplotlib.gridspec as gridspec\n",
28 | "import mdshare\n",
29 | "import matplotlib as mpl\n",
30 | "import numpy as np\n",
31 | "import pyemma as pe\n",
32 | "import msmtools\n",
33 | "from sklearn.preprocessing import LabelBinarizer\n",
34 | "\n",
35 | "from graphtime import markov_random_fields\n",
36 | "from graphtime import utils as _ut\n",
37 | "\n",
38 | "double_column_width = 6.968\n",
39 | "single_column_width= 3.307\n",
40 | "font = {'sans-serif': \"Arial\",\n",
41 | " 'family': \"sans-serif\",\n",
42 | " 'size' : 8}\n",
43 | "\n",
44 | "\n",
45 | "mpl.rc('font', **font)\n",
46 | "mpl.rcParams['mathtext.fontset'] = 'custom'"
47 | ]
48 | },
49 | {
50 | "cell_type": "markdown",
51 | "metadata": {},
52 | "source": [
53 | "Load and prepare data for model estimation"
54 | ]
55 | },
56 | {
57 | "cell_type": "code",
58 | "execution_count": null,
59 | "metadata": {},
60 | "outputs": [],
61 | "source": [
62 | "def to_red_tmat(Tfull, dtrajs_):\n",
63 | " \"\"\" \n",
64 | " Slices out a sub-matrix of Tfull consistent with states observed in dtrajs_ \n",
65 | " and renormalizes the sub-matrix to yield a transition matrix on the subset.\n",
66 | " \"\"\"\n",
67 | " tft=Tfull[np.array(list(set(np.ravel(dtrajs_)))), :][:, np.array(list(set(np.ravel(dtrajs_))))].copy()\n",
68 | " return tft/tft.sum(axis=1)[:, None]\n",
69 | "\n",
70 | "def discr_feats(ftrajs, feat_describe):\n",
71 | " discr_trajs = []\n",
72 | " for ft in ftrajs:\n",
73 | " dftraj = np.zeros(ft.shape, dtype = int)\n",
74 | " for i, fstr in enumerate(feat_describe):\n",
75 | " ls = fstr.split()\n",
76 | " if fstr[:3] == \"PHI\": # split into two states\n",
77 | " dftraj[:, i] = (ft[:, i]<0).astype(int)\n",
78 | "        elif fstr[:3] == \"PSI\": # split into two states if not n-terminal\n",
79 | " if int(ls[-1]) == 1:\n",
80 | " dftraj[:, i] = -1\n",
81 | " else:\n",
82 | " dftraj[:, i] = (ft[:, i]<80).astype(int)\n",
83 | " elif fstr[:3] == \"CHI\": #split into 3 rotamers\n",
84 | " tv = (ft[:, i]+180+60)%360\n",
85 | " dftraj[:, i] = (tv>125).astype(int) + (tv>250).astype(int) \n",
86 | " non_n_psi = np.where(dftraj[0, :]>-1)[0] \n",
87 | " discr_trajs.append(dftraj.copy()[:,non_n_psi])\n",
88 | " return discr_trajs, [f for i,f in enumerate(feat_describe) if i in non_n_psi]\n",
89 | "\n",
90 | "\n",
91 | "def featurize(X):\n",
92 | " dts=[]\n",
93 | " for x in X:\n",
94 | " dts.append ([int(''.join(map(str, f)), 2) for f in x])\n",
95 | " return dts\n"
96 | ]
97 | },
98 | {
99 | "cell_type": "code",
100 | "execution_count": null,
101 | "metadata": {},
102 | "outputs": [],
103 | "source": [
104 | "pdb = mdshare.fetch('pentapeptide-impl-solv.pdb', working_directory='pentapeptide_data')\n",
105 | "files = mdshare.fetch('pentapeptide-*-500ns-impl-solv.xtc', working_directory='pentapeptide_data')"
106 | ]
107 | },
108 | {
109 | "cell_type": "code",
110 | "execution_count": null,
111 | "metadata": {},
112 | "outputs": [],
113 | "source": [
114 | "feat = pe.coordinates.featurizer('pentapeptide_data/pentapeptide-impl-solv.pdb')\n",
115 | "\n",
116 | "feat.add_backbone_torsions(deg=True)\n",
117 | "\n",
118 | "source = pe.coordinates.source([f'pentapeptide_data/pentapeptide-{i:02}-500ns-impl-solv.xtc' for i in range(25)], features=feat)\n",
119 | "\n",
120 | "dihe = source.get_output()"
121 | ]
122 | },
123 | {
124 | "cell_type": "code",
125 | "execution_count": null,
126 | "metadata": {},
127 | "outputs": [],
128 | "source": [
129 | "bindihe = [(d<0).astype(int) for d in dihe]"
130 | ]
131 | },
132 | {
133 | "cell_type": "code",
134 | "execution_count": null,
135 | "metadata": {},
136 | "outputs": [],
137 | "source": [
138 | "dfeats, nlbls = discr_feats(dihe, feat.describe())"
139 | ]
140 | },
141 | {
142 | "cell_type": "markdown",
143 | "metadata": {},
144 | "source": [
145 | "Estimate Markov state models and Dynamic graphical models for multiple lag-times."
146 | ]
147 | },
148 | {
149 | "cell_type": "code",
150 | "execution_count": null,
151 | "metadata": {},
152 | "outputs": [],
153 | "source": [
154 | "from functools import reduce\n",
155 | "MRF_MSM = []\n",
156 | "MSM_MSM = []\n",
157 | "X = dfeats.copy()\n",
158 | "\n",
159 | "idx=np.array([i for i in np.arange(25) if i not in [15]])\n",
160 | "np.random.shuffle(idx)\n",
161 | "_x=np.array_split(idx, 5)\n",
162 | "_I=[np.concatenate([_x[j] for j in range(5) if j!=i]) for i in range(5)]\n",
163 | "dtrajs_ = featurize(X)\n",
164 | "\n",
165 | "lr_kwargs = {'fit_intercept': True, 'penalty': 'l1', 'C': 1.0, 'tol': 0.0001, 'solver': 'saga'} \n",
166 | "for lag in [2,5,10,15,20,30,50]:\n",
167 | " _dmrfs = [markov_random_fields.estimate_dMRF([2*X[i]-1 for i in I], \n",
168 | " lag = lag, \n",
169 | " Encoder = LabelBinarizer(neg_label = -1, pos_label = 1),\n",
170 | " logistic_regression_kwargs = lr_kwargs\n",
171 | " ) \n",
172 | " for I in _I]\n",
173 | " Ts = [_D.generate_transition_matrix() for _D in _dmrfs ]\n",
174 | " bmsm = [pe.msm.estimate_markov_model(dtrajs=[dtrajs_[i] for i in I], lag = lag) for I in _I]\n",
175 | " ms_interesection=reduce(np.intersect1d, [m.active_set for m in bmsm ])\n",
176 | " bm_as=[np.where(m.active_set==ms_interesection.reshape(-1,1))[1] for m in bmsm ]\n",
177 | " \n",
178 | " Ts_red = [to_red_tmat(T, [ms_interesection]) for T in Ts]\n",
179 | " MRF_MSM.append(Ts_red)\n",
180 | " MSM_MSM.append([bmsm, bm_as])"
181 | ]
182 | },
183 | {
184 | "cell_type": "code",
185 | "execution_count": null,
186 | "metadata": {},
187 | "outputs": [],
188 | "source": [
189 | "lag=20\n",
190 | "_dmrfs = [markov_random_fields.estimate_dMRF([2*X[i]-1 for i in I], \n",
191 | " lag = lag, \n",
192 | " Encoder = LabelBinarizer(neg_label = -1, pos_label = 1),\n",
193 | " logistic_regression_kwargs = lr_kwargs\n",
194 | " ) \n",
195 | " for I in _I]\n",
196 | "Ts = [_D.generate_transition_matrix() for _D in _dmrfs ]\n",
197 | "bmsm = [pe.msm.estimate_markov_model(dtrajs=[dtrajs_[i] for i in I], lag = lag) for I in _I]\n",
198 | "ms_interesection_selected_lag=reduce(np.intersect1d, [m.active_set for m in bmsm ])\n"
199 | ]
200 | },
201 | {
202 | "cell_type": "markdown",
203 | "metadata": {},
204 | "source": [
205 | "Compute statistics and prepare input for plotting"
206 | ]
207 | },
208 | {
209 | "cell_type": "code",
210 | "execution_count": null,
211 | "metadata": {},
212 | "outputs": [],
213 | "source": [
214 | "import msmtools\n",
215 | "its_mrf = np.array([msmtools.util.statistics.confidence_interval([msmtools.analysis.timescales(t)[1:7]*l for t in M]) for l,M in zip([2,5,10,15,20,30,50],MRF_MSM)])\n",
216 | "its_msm = np.array([msmtools.util.statistics.confidence_interval([t.timescales(k=6) for t in M[0]]) for l,M in zip([2,5,10,15,20,30,50],MSM_MSM)])"
217 | ]
218 | },
219 | {
220 | "cell_type": "code",
221 | "execution_count": null,
222 | "metadata": {},
223 | "outputs": [],
224 | "source": [
225 | "bayes_msm = pe.msm.bayesian_markov_model([dt for i, dt in enumerate(dtrajs_) if i!=15], lag = 20)"
226 | ]
227 | },
228 | {
229 | "cell_type": "code",
230 | "execution_count": null,
231 | "metadata": {},
232 | "outputs": [],
233 | "source": [
234 | "bsmts=bayes_msm.sample_mean('timescales')\n",
235 | "bscts=bayes_msm.sample_conf('timescales')"
236 | ]
237 | },
238 | {
239 | "cell_type": "code",
240 | "execution_count": null,
241 | "metadata": {},
242 | "outputs": [],
243 | "source": [
244 | "\n",
245 | "statdist_mrf_conf = np.array([msmtools.util.statistics.confidence_interval([msmtools.analysis.statdist(t) for t in M]) for l,M in zip([2,5,10,15,20,30,50],MRF_MSM)])\n",
246 | "statdist_msm_conf = np.array([msmtools.util.statistics.confidence_interval([t.stationary_distribution[a_s] for t,a_s in zip(*M) ]) for l,M in zip([2,5,10,15,20,30,50],MSM_MSM)])\n",
247 | "\n",
248 | "statdist_mrf_avg = np.array([np.mean([msmtools.analysis.statdist(t) for t in M], axis=0) for l,M in zip([2,5,10,15,20,30,50],MRF_MSM)])\n",
249 | "statdist_msm_avg = np.array([np.mean([t.stationary_distribution[a_s] for t,a_s in zip(*M) ], axis=0) for l,M in zip([2,5,10,15,20,30,50],MSM_MSM)])\n",
250 | "\n",
251 | "\n",
252 | "xerr = np.vstack([statdist_mrf_avg[4]-statdist_mrf_conf[4][0], statdist_mrf_conf[4][1]-statdist_mrf_avg[4]])\n",
253 | "yerr = np.vstack([statdist_msm_avg[4]-statdist_msm_conf[4][0], statdist_msm_conf[4][1]-statdist_msm_avg[4]])\n",
254 | " \n",
255 | "\n",
256 | "idx = [i for i in np.arange(25) if i not in [15]]\n",
257 | "np.random.shuffle(idx)\n",
258 | "\n",
259 | "bmsm = [pe.msm.estimate_markov_model(dtrajs=[dtrajs_[i][:] for i in idx[:I]], lag = 20) for I in range(1,25)]\n",
260 | "ms_interesection=reduce(np.intersect1d, [m.active_set for m in bmsm ])\n",
261 | "bm_as=[np.where(m.active_set==ms_interesection.reshape(-1,1))[1] for m in bmsm ]\n",
262 | "\n",
263 | "\n"
264 | ]
265 | },
266 | {
267 | "cell_type": "code",
268 | "execution_count": null,
269 | "metadata": {},
270 | "outputs": [],
271 | "source": [
272 | "fig = plt.figure(figsize=(single_column_width, 2*single_column_width))\n",
273 | "dt = 0.1\n",
274 | "gs = gridspec.GridSpec(200, 100,left=0.1,bottom=-0.05,top=1.0,right=1.0, wspace=0.05, hspace=.05)\n",
275 | "axts = np.array([[plt.subplot(gs[100+i*18+i*12:100+(i+1)*18+i*12, 8+j*35+j*15:8+(j+1)*35+j*15]) for i in range(3)] for j in range(2)]).T\n",
276 | "axsd = np.array([[plt.subplot(gs[50:80, 8+j*33+j*18:8+(j+1)*33+j*18]) for i in range(1)] for j in range(2)]).T[::-1,:]\n",
277 | "\n",
278 | "for i,_ax in enumerate(axts.flatten()[:]):\n",
279 | " # confidence intervals of MSM/DGM ITS\n",
280 | " _ax.fill_between(np.array([2,5,10,15,20,30,50])*dt,its_msm[:,0,i]*dt, its_msm[:,1,i]*dt,alpha=0.35, label=\"MSM\")\n",
281 | " _ax.fill_between(np.array([2,5,10,15,20,30,50])*dt,its_mrf[:,0,i]*dt, its_mrf[:,1,i]*dt,alpha=0.35, label=\"DGM\")\n",
282 | " \n",
283 | " # Bayesian MSM error-bars\n",
284 | " _ax.hlines(bsmts[i]*dt, 19.5*dt,20.5*dt, lw=2, linestyle=\":\")\n",
285 | " _ax.fill_between(np.array([19.5,20.5])*dt,bscts[0][i]*np.ones(2)*dt, bscts[1][i]*np.ones(2)*dt,lw=0,alpha=0.35,color='k',label=\"Bayesian MSM\")\n",
286 | "\n",
287 | " # MSM/DGM ITS\n",
288 | " _ax.plot(np.array([2,5,10,15,20,30,50])*dt,np.array([2,5,10,15,20,30,50])*dt, color=\"k\")\n",
289 | " _ax.fill_between(np.array([2,5,10,15,20,30,50])*dt,np.zeros(7), np.array([2,5,10,15,20,30,50])*dt, color='k',alpha=0.25,)\n",
290 | " _ax.set_ylim([0.5,8])\n",
291 | " if i==2:\n",
292 | " _ax.set_ylabel(r'implied timescale / $\\mathrm{ns}$')\n",
293 | " if i>2:\n",
294 | " _ax.set_xlabel(r'lag time $\\tau$ / $\\mathrm{ns}$')\n",
295 | " if i==5:\n",
296 | " _ax.legend()\n",
297 | " _ax.set_ylim(10,20)\n",
298 | " _ax.axis('off')\n",
299 | " else:\n",
300 | " _ax.set_title('implied timescale {:d}'.format(i+1))\n",
301 | "\n",
302 | " \n",
303 | "colors_ = [plt.cm.viridis(c) for c in np.linspace(0,1,bm_as[0].shape[0])]\n",
304 | "# Correlate stationary distributions on common sub-sets\n",
305 | "axsd[0,0].errorbar(statdist_mrf_avg[4],statdist_msm_avg[4], xerr=xerr, yerr=yerr,fmt='.',ms=0,zorder=1,ecolor='k')\n",
306 | "axsd[0,0].scatter(statdist_mrf_avg[4],statdist_msm_avg[4], s=10, c='m',zorder=10)\n",
307 | "\n",
308 | "axsd[0,0].set_xlim(1e-6,2);\n",
309 | "axsd[0,0].set_ylim(1e-6,2)\n",
310 | "axsd[0,0].plot([1e-6,2],[1e-6,2], ls=':', color='k')\n",
311 | "axsd[0,0].set_xlabel(r'dMRF, $\\pi_i$')\n",
312 | "axsd[0,0].set_ylabel(r'MSM, $\\pi_i$')\n",
313 | "axsd[0,0].loglog()\n",
314 | "\n",
315 | "# illustration of sub-system encoding\n",
316 | "axsd[0,1].hist2d(np.vstack(dihe)[:,0]*np.pi/180, np.vstack(dihe)[:,3]*np.pi/180,bins=128, norm=mpl.colors.LogNorm(), alpha=0.4)\n",
317 | "axsd[0,1].vlines(0,-1.5*np.pi,1.5*np.pi,linestyles=':', color='k' )\n",
318 | "axsd[0,1].hlines(80.*np.pi/180,-1.5*np.pi,1.5*np.pi,linestyles=':', color='k' )\n",
319 | "axsd[0,1].set_xticks([-np.pi,-np.pi/2,0,np.pi/2,np.pi])\n",
320 | "axsd[0,1].set_yticks([-np.pi,-np.pi/2,0,np.pi/2,np.pi])\n",
321 | "axsd[0,1].set_xticklabels([r'$-\\pi$',r'$-\\frac{\\pi}{2}$',\"0\",r'$\\frac{\\pi}{2}$',r'$\\pi$'])\n",
322 | "axsd[0,1].set_yticklabels([r'$-\\pi$',r'$-\\frac{\\pi}{2}$',\"0\",r'$\\frac{\\pi}{2}$',r'$\\pi$'])\n",
323 | "axsd[0,1].set_ylabel(r'$\\psi$')\n",
324 | "axsd[0,1].set_xlabel(r'$\\phi$')\n",
325 | "axsd[0,1].text(-np.pi/2., -np.pi/2., r\"$1 / 1$\", va='center', ha='center')\n",
326 | "axsd[0,1].text(np.pi/2., -np.pi/2., r\"$-1 / 1$\", va='center', ha='center')\n",
327 | "axsd[0,1].text(np.pi/2., np.pi/1.5, r\"$-1 / -1$\", va='center', ha='center')\n",
328 | "axsd[0,1].text(-np.pi/2., np.pi/1.5, r\"$1 / -1$\", va='center', ha='center')\n",
329 | "axsd[0,1].set_xlim((-np.pi, np.pi))\n",
330 | "axsd[0,1].set_ylim((-np.pi, np.pi))\n",
331 | "\n",
332 | "\n",
333 | "\n",
334 | "#embedding of structure render\n",
335 | "structure_ax = plt.subplot(gs[1:40, 0:-10])\n",
336 | "structure_ax.imshow(plt.imread('penta_render_trimmed.png'),interpolation='nearest')\n",
337 | "structure_ax.axis('off')\n",
338 | "for a,lbl in zip([axts[0,0], axsd[0,0], axsd[0,1]], ('D', 'B', 'C')):\n",
339 | " a.text(-0.5, 1.10, lbl, transform=a.transAxes,\n",
340 | " fontsize=12, va='top')\n",
341 | "\n",
342 | "for a,lbl in zip([structure_ax], ('A')):\n",
343 | " a.text(-0.20, 0.95, lbl, transform=a.transAxes,\n",
344 | " fontsize=12, va='top')\n",
345 | " \n",
346 | "\n",
347 | "\n",
348 | "#fig.savefig('Fig3_re.pdf', dpi=600)"
349 | ]
350 | }
351 | ],
352 | "metadata": {
353 | "kernelspec": {
354 | "display_name": "Python 3",
355 | "language": "python",
356 | "name": "python3"
357 | },
358 | "language_info": {
359 | "codemirror_mode": {
360 | "name": "ipython",
361 | "version": 3
362 | },
363 | "file_extension": ".py",
364 | "mimetype": "text/x-python",
365 | "name": "python",
366 | "nbconvert_exporter": "python",
367 | "pygments_lexer": "ipython3",
368 | "version": "3.6.5"
369 | }
370 | },
371 | "nbformat": 4,
372 | "nbformat_minor": 2
373 | }
374 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 | from graphtime import __version__
3 | setup(
4 | name='graphtime',
5 | version = __version__,
6 | author='Simon Olsson',
7 | author_email='simon.olsson@fu-berlin.de',
8 | packages=['graphtime', 'graphtime.test'],
9 | scripts=[],
10 | url='http://127.0.0.1',
11 | license='LICENSE.txt',
12 | description='A module for learning encoding of transition probabilities with undirected graphical models',
13 | long_description=open('README.md').read(),
14 | install_requires=[
15 | "numpy >= 1.3",
16 | "scikit-learn >= 0.19.0",
17 | "scipy >= 1.1.0",
18 | "msmtools >= 1.2.1",
19 | "pyemma >= 2.5.2"
20 | ],
21 | )
--------------------------------------------------------------------------------