├── .gitignore
├── .idea
│   └── inspectionProfiles
│       ├── Project_Default.xml
│       └── profiles_settings.xml
├── Example Code.ipynb
├── LICENSE
├── Multivariate Example.ipynb
├── README.md
├── bayesian_changepoint_detection
│   ├── __init__.py
│   ├── cy_offline_changepoint_detection.py
│   ├── generate_data.py
│   ├── offline_changepoint_detection.py
│   └── online_changepoint_detection.py
├── example.py
├── setup.py
└── xuan_motivating_example.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 |
5 | # C extensions
6 | *.so
7 |
8 | # Distribution / packaging
9 | .Python
10 | env/
11 | bin/
12 | build/
13 | develop-eggs/
14 | dist/
15 | eggs/
16 | lib/
17 | lib64/
18 | parts/
19 | sdist/
20 | var/
21 | *.egg-info/
22 | .installed.cfg
23 | *.egg
24 |
25 | # Installer logs
26 | pip-log.txt
27 | pip-delete-this-directory.txt
28 |
29 | # Unit test / coverage reports
30 | htmlcov/
31 | .tox/
32 | .coverage
33 | .cache
34 | nosetests.xml
35 | coverage.xml
36 |
37 | # Translations
38 | *.mo
39 |
40 | # Mr Developer
41 | .mr.developer.cfg
42 | .project
43 | .pydevproject
44 |
45 | # Rope
46 | .ropeproject
47 |
48 | # Django stuff:
49 | *.log
50 | *.pot
51 |
52 | # Sphinx documentation
53 | docs/_build/
54 | *.ipynb
55 | .idea/*
56 |
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/Project_Default.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/.idea/inspectionProfiles/profiles_settings.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2014 Johannes Kulick
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | Bayesian Changepoint Detection
4 | ==============================
5 |
6 | Methods for computing the probability of a changepoint in a time series. Both online and offline methods are available. To really understand the methods, read these papers:
7 |
8 |
9 | [1] Paul Fearnhead, Exact and Efficient Bayesian Inference for Multiple
10 | Changepoint Problems, Statistics and Computing 16.2 (2006), pp. 203--213
11 |
12 | [2] Ryan P. Adams, David J.C. MacKay, Bayesian Online Changepoint Detection,
13 | arXiv 0710.3742 (2007)
14 |
15 | [3] Xiang Xuan, Kevin Murphy, Modeling Changing Dependency Structure in
16 | Multivariate Time Series, ICML (2007), pp. 1055--1062
17 |
18 | To see it in action have a look at the [example notebook](http://nbviewer.ipython.org/urls/raw.githubusercontent.com/hildensia/bayesian_changepoint_detection/master/Example%20Code.ipynb?create=1 "Example Code in an IPython Notebook").
19 |
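20 | Usage
21 | -----
22 | 
23 | A minimal offline-detection sketch (mirroring `example.py`; the prior and `truncate` values are simply the ones used there):
24 | 
25 | ```python
26 | from functools import partial
27 | import numpy as np
28 | import bayesian_changepoint_detection.offline_changepoint_detection as offcd
29 | import bayesian_changepoint_detection.generate_data as gd
30 | 
31 | # Seven Gaussian segments, each 50-200 points long
32 | partition, data = gd.generate_normal_time_series(7, 50, 200)
33 | 
34 | Q, P, Pcp = offcd.offline_changepoint_detection(
35 |     data,
36 |     partial(offcd.const_prior, l=(len(data) + 1)),
37 |     offcd.gaussian_obs_log_likelihood,
38 |     truncate=-20)
39 | 
40 | # Probability of a changepoint at each time step
41 | pcp_per_step = np.exp(Pcp).sum(0)
42 | ```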
--------------------------------------------------------------------------------
/bayesian_changepoint_detection/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | __version__ = '0.2.dev1'
4 |
--------------------------------------------------------------------------------
/bayesian_changepoint_detection/cy_offline_changepoint_detection.py:
--------------------------------------------------------------------------------
1 | # Compile the Cython implementation (cy_offline.pyx) on the fly via
2 | # pyximport, then re-export its functions.
3 | import pyximport
4 | pyximport.install()
5 | from cy_offline import *
--------------------------------------------------------------------------------
/bayesian_changepoint_detection/generate_data.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | import numpy as np
3 |
4 | def generate_normal_time_series(num, minl=50, maxl=1000):
5 |     data = np.array([], dtype=np.float64)
6 |     partition = np.random.randint(minl, maxl, num)
7 |     for p in partition:
8 |         mean = np.random.randn() * 10
9 |         # np.random.normal expects a non-negative standard deviation
10 |         std = np.abs(np.random.randn())
11 |         tdata = np.random.normal(mean, std, p)
12 |         data = np.concatenate((data, tdata))
13 |     # Return segment lengths and the series as an (n, 1) column vector
14 |     return partition, np.atleast_2d(data).T
15 |
16 | def generate_multinormal_time_series(num, dim, minl=50, maxl=1000):
17 | data = np.empty((1,dim), dtype=np.float64)
18 | partition = np.random.randint(minl, maxl, num)
19 | for p in partition:
20 | mean = np.random.standard_normal(dim)*10
21 | # Generate a random SPD matrix
22 | A = np.random.standard_normal((dim,dim))
23 | var = np.dot(A,A.T)
24 |
25 | tdata = np.random.multivariate_normal(mean, var, p)
26 | data = np.concatenate((data, tdata))
27 |     return partition, data[1:,:]  # drop the uninitialized seed row
28 |
29 | def generate_xuan_motivating_example(minl=50, maxl=1000):
30 | dim = 2
31 | num = 3
32 | partition = np.random.randint(minl, maxl, num)
33 | mu = np.zeros(dim)
34 | Sigma1 = np.asarray([[1.0,0.75],[0.75,1.0]])
35 | data = np.random.multivariate_normal(mu, Sigma1, partition[0])
36 | Sigma2 = np.asarray([[1.0,0.0],[0.0,1.0]])
37 | data = np.concatenate((data,np.random.multivariate_normal(mu, Sigma2, partition[1])))
38 | Sigma3 = np.asarray([[1.0,-0.75],[-0.75,1.0]])
39 | data = np.concatenate((data,np.random.multivariate_normal(mu, Sigma3, partition[2])))
40 | return partition, data
41 |
42 |
43 |
44 |
45 |
46 |
--------------------------------------------------------------------------------
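The generators above all return a `(partition, data)` pair: the random segment lengths plus the concatenated series. A brief usage sketch (the shapes follow from the return statements above; the call values are illustrative):

```python
import numpy as np
import bayesian_changepoint_detection.generate_data as gd

# 1-D: `partition` holds the random segment lengths; `data` is an
# (n, 1) column vector, n being the sum of the segment lengths.
partition, data = gd.generate_normal_time_series(7, minl=50, maxl=200)
assert data.shape == (partition.sum(), 1)

# The ground-truth changepoint positions are the cumulative lengths.
changes = np.cumsum(partition)

# Multivariate: `data` is (n, dim).
partition, data = gd.generate_multinormal_time_series(7, 4, minl=50, maxl=200)
assert data.shape == (partition.sum(), 4)
```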
/bayesian_changepoint_detection/offline_changepoint_detection.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | import numpy as np
3 | from scipy.special import gammaln, multigammaln, comb
4 | from decorator import decorator
5 |
6 | # This makes the code compatible with Python 3
7 | # without causing performance hits on Python 2
8 | try:
9 | xrange
10 | except NameError:
11 | xrange = range
12 |
13 |
14 | try:
15 | from sselogsumexp import logsumexp
16 | except ImportError:
17 |     from scipy.special import logsumexp
18 |     print("Using scipy logsumexp().")
19 | else:
20 |     print("Using SSE accelerated logsumexp().")
21 |
22 |
23 | def _dynamic_programming(f, *args, **kwargs):
24 | if f.data is None:
25 | f.data = args[0]
26 |
27 | if not np.array_equal(f.data, args[0]):
28 | f.cache = {}
29 | f.data = args[0]
30 |
31 | try:
32 | f.cache[args[1:3]]
33 | except KeyError:
34 | f.cache[args[1:3]] = f(*args, **kwargs)
35 | return f.cache[args[1:3]]
36 |
37 | def dynamic_programming(f):
38 | f.cache = {}
39 | f.data = None
40 | return decorator(_dynamic_programming, f)
41 |
42 |
43 | def offline_changepoint_detection(data, prior_func,
44 | observation_log_likelihood_function,
45 | truncate=-np.inf):
46 |     """Compute the likelihood of changepoints on data.
47 | 
48 |     Keyword arguments:
49 |     data -- the time series data
50 |     prior_func -- a function returning the prior probability of a changepoint
51 |         given the distance to the last one
52 |     observation_log_likelihood_function -- a function giving the log
53 |         likelihood of a data segment
54 |     truncate -- a log-space cutoff: stop summing over segment ends once a
55 |         summand falls this far below the running total (Fearnhead 2006, eq. 3)
56 |     """
57 |
58 | n = len(data)
59 | Q = np.zeros((n,))
60 | g = np.zeros((n,))
61 | G = np.zeros((n,))
62 | P = np.ones((n, n)) * -np.inf
63 |
64 | # save everything in log representation
65 | for t in range(n):
66 | g[t] = np.log(prior_func(t))
67 | if t == 0:
68 | G[t] = g[t]
69 | else:
70 | G[t] = np.logaddexp(G[t-1], g[t])
71 |
72 | P[n-1, n-1] = observation_log_likelihood_function(data, n-1, n)
73 | Q[n-1] = P[n-1, n-1]
74 |
75 | for t in reversed(range(n-1)):
76 | P_next_cp = -np.inf # == log(0)
77 | for s in range(t, n-1):
78 | P[t, s] = observation_log_likelihood_function(data, t, s+1)
79 |
80 | # compute recursion
81 | summand = P[t, s] + Q[s + 1] + g[s + 1 - t]
82 | P_next_cp = np.logaddexp(P_next_cp, summand)
83 |
84 | # truncate sum to become approx. linear in time (see
85 | # Fearnhead, 2006, eq. (3))
86 | if summand - P_next_cp < truncate:
87 | break
88 |
89 | P[t, n-1] = observation_log_likelihood_function(data, t, n)
90 |
91 |         # log(1 - exp(G)) is numerically stable until exp(G) reaches 1
92 |         if G[n-1-t] < -1e-15:  # exp(-1e-15) = .99999...
93 |             antiG = np.log(1 - np.exp(G[n-1-t]))
94 |         else:
95 |             # for log-probability G close to 0, 1 - exp(G) is approx. -G
96 |             antiG = np.log(-G[n-1-t])
97 |
98 | Q[t] = np.logaddexp(P_next_cp, P[t, n-1] + antiG)
99 |
100 | Pcp = np.ones((n-1, n-1)) * -np.inf
101 | for t in range(n-1):
102 | Pcp[0, t] = P[0, t] + Q[t + 1] + g[t] - Q[0]
103 | if np.isnan(Pcp[0, t]):
104 | Pcp[0, t] = -np.inf
105 | for j in range(1, n-1):
106 | for t in range(j, n-1):
107 | tmp_cond = Pcp[j-1, j-1:t] + P[j:t+1, t] + Q[t + 1] + g[0:t-j+1] - Q[j:t+1]
108 | Pcp[j, t] = logsumexp(tmp_cond.astype(np.float32))
109 | if np.isnan(Pcp[j, t]):
110 | Pcp[j, t] = -np.inf
111 |
112 | return Q, P, Pcp
113 |
114 | @dynamic_programming
115 | def gaussian_obs_log_likelihood(data, t, s):
116 | s += 1
117 | n = s - t
118 | mean = data[t:s].sum(0) / n
119 |
120 | muT = (n * mean) / (1 + n)
121 | nuT = 1 + n
122 | alphaT = 1 + n / 2
123 | betaT = 1 + 0.5 * ((data[t:s] - mean) ** 2).sum(0) + ((n)/(1 + n)) * (mean**2 / 2)
124 | scale = (betaT*(nuT + 1))/(alphaT * nuT)
125 |
126 |     # Splitting up the Student-t pdf is much faster (~ factor 20), and
127 |     # using np.sum instead of a Python for-loop is even more worthwhile.
128 | prob = np.sum(np.log(1 + (data[t:s] - muT)**2/(nuT * scale)))
129 | lgA = gammaln((nuT + 1) / 2) - np.log(np.sqrt(np.pi * nuT * scale)) - gammaln(nuT/2)
130 |
131 | return np.sum(n * lgA - (nuT + 1)/2 * prob)
132 |
133 | def ifm_obs_log_likelihood(data, t, s):
134 |     '''Independent features model from Xuan et al. (section 3.1)'''
135 | s += 1
136 | n = s - t
137 | x = data[t:s]
138 | if len(x.shape)==2:
139 | d = x.shape[1]
140 | else:
141 | d = 1
142 | x = np.atleast_2d(x).T
143 |
144 | N0 = d # weakest prior we can use to retain proper prior
145 | V0 = np.var(x)
146 | Vn = V0 + (x**2).sum(0)
147 |
148 | # sum over dimension and return (section 3.1 from Xuan paper):
149 | return d*( -(n/2)*np.log(np.pi) + (N0/2)*np.log(V0) - \
150 | gammaln(N0/2) + gammaln((N0+n)/2) ) - \
151 | ( ((N0+n)/2)*np.log(Vn) ).sum(0)
152 |
153 | def fullcov_obs_log_likelihood(data, t, s):
154 |     '''Full covariance model from Xuan et al. (section 3.2)'''
155 | s += 1
156 | n = s - t
157 | x = data[t:s]
158 | if len(x.shape)==2:
159 | dim = x.shape[1]
160 | else:
161 | dim = 1
162 | x = np.atleast_2d(x).T
163 |
164 | N0 = dim # weakest prior we can use to retain proper prior
165 | V0 = np.var(x)*np.eye(dim)
166 |
167 | # Improvement over np.outer
168 | # http://stackoverflow.com/questions/17437523/python-fast-way-to-sum-outer-products
169 | # Vn = V0 + np.array([np.outer(x[i], x[i].T) for i in xrange(x.shape[0])]).sum(0)
170 | Vn = V0 + np.einsum('ij,ik->jk', x, x)
171 |
172 | # section 3.2 from Xuan paper:
173 | return -(dim*n/2)*np.log(np.pi) + (N0/2)*np.linalg.slogdet(V0)[1] - \
174 | multigammaln(N0/2,dim) + multigammaln((N0+n)/2,dim) - \
175 | ((N0+n)/2)*np.linalg.slogdet(Vn)[1]
176 |
177 | def const_prior(r, l):
178 |     return 1 / l  # uniform prior over the l possible positions
179 |
180 | def geometric_prior(t, p):
181 | return p * ((1 - p) ** (t - 1))
182 |
183 | def neg_binominal_prior(t, k, p):
184 | return comb(t - k, k - 1) * p ** k * (1 - p) ** (t - k)
185 |
--------------------------------------------------------------------------------
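The priors at the end of this module are plain probability functions of the distance since the last changepoint; `functools.partial` binds their hyperparameters so they fit the one-argument `prior_func` interface. A sketch swapping the uniform prior for a geometric one (the parameter `p = 1/250`, i.e. an expected segment length of 250, is an illustrative choice, not a value fixed by this repo):

```python
from functools import partial
import bayesian_changepoint_detection.offline_changepoint_detection as offcd
import bayesian_changepoint_detection.generate_data as gd

partition, data = gd.generate_normal_time_series(7, 50, 200)

# Uniform prior over the l possible changepoint positions (as in example.py)
uniform = partial(offcd.const_prior, l=len(data) + 1)

# Geometric prior: segment lengths distributed Geometric(p),
# expected length 1/p = 250 (illustrative)
geometric = partial(offcd.geometric_prior, p=1.0 / 250)

Q, P, Pcp = offcd.offline_changepoint_detection(
    data, geometric, offcd.gaussian_obs_log_likelihood, truncate=-20)
```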
/bayesian_changepoint_detection/online_changepoint_detection.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | import numpy as np
3 | from scipy import stats
4 | import math
5 |
6 | def online_changepoint_detection(data, hazard_func, observation_likelihood):
7 | maxes = np.zeros(len(data) + 1)
8 |
9 | R = np.zeros((len(data) + 1, len(data) + 1))
10 | R[0, 0] = 1
11 |
12 | for t, x in enumerate(data):
13 | # Evaluate the predictive distribution for the new datum under each of
14 | # the parameters. This is the standard thing from Bayesian inference.
15 | predprobs = observation_likelihood.pdf(x)
16 |
17 | # Evaluate the hazard function for this interval
18 | H = hazard_func(np.array(range(t+1)))
19 |
20 | # Evaluate the growth probabilities - shift the probabilities down and to
21 | # the right, scaled by the hazard function and the predictive
22 | # probabilities.
23 | R[1:t+2, t+1] = R[0:t+1, t] * predprobs * (1-H)
24 |
25 | # Evaluate the probability that there *was* a changepoint and we're
26 | # accumulating the mass back down at r = 0.
27 | R[0, t+1] = np.sum( R[0:t+1, t] * predprobs * H)
28 |
29 | # Renormalize the run length probabilities for improved numerical
30 | # stability.
31 | R[:, t+1] = R[:, t+1] / np.sum(R[:, t+1])
32 |
33 | # Update the parameter sets for each possible run length.
34 | observation_likelihood.update_theta(x)
35 |
36 | maxes[t] = R[:, t].argmax()
37 | return R, maxes
38 |
39 |
40 | def constant_hazard(lam, r):
41 | return 1/lam * np.ones(r.shape)
42 |
43 |
44 | def gaussian_hazard(mu, sigma, r):
45 |     # mu and sigma parametrize a Gaussian over when the event occurs (here,
46 |     # reaching the cecum: ~12000 frames, i.e. the 6.5 min mark of a video).
47 |     # Hazard = pdf / survival = pdf / (1 - cdf), so greater run lengths get
48 |     # a greater hazard; each call returns one value per run length 0..t.
49 |     pdf = stats.norm.pdf(r, mu, sigma)
50 |     survival = 1 - stats.norm.cdf(r, mu, sigma)
51 |     hazard_arr = pdf / survival
52 |     return hazard_arr
53 |
54 | class StudentT:
55 | def __init__(self, alpha, beta, kappa, mu):
56 | self.alpha0 = self.alpha = np.array([alpha])
57 | self.beta0 = self.beta = np.array([beta])
58 | self.kappa0 = self.kappa = np.array([kappa])
59 | self.mu0 = self.mu = np.array([mu])
60 |
61 | def pdf(self, data):
62 | return stats.t.pdf(x=data,
63 | df=2*self.alpha,
64 | loc=self.mu,
65 | scale=np.sqrt(self.beta * (self.kappa+1) / (self.alpha *
66 | self.kappa)))
67 |
68 | def update_theta(self, data):
69 | muT0 = np.concatenate((self.mu0, (self.kappa * self.mu + data) / (self.kappa + 1)))
70 | kappaT0 = np.concatenate((self.kappa0, self.kappa + 1.))
71 | alphaT0 = np.concatenate((self.alpha0, self.alpha + 0.5))
72 | betaT0 = np.concatenate((self.beta0, self.beta + (self.kappa * (data -
73 | self.mu)**2) / (2. * (self.kappa + 1.))))
74 |
75 | self.mu = muT0
76 | self.kappa = kappaT0
77 | self.alpha = alphaT0
78 | self.beta = betaT0
79 |
80 | class NormalKnownPrecision:
81 |     # Normal model: use when the variance is known and only the mean of the
82 |     # incoming data is unknown / changing.
83 |     def __init__(self, mu, prec):
84 |         self.mu0 = self.mu = np.array([mu])
85 |         self.prec0 = self.prec = np.array([prec])
86 | 
87 |     def pdf(self, data):
88 |         return stats.norm.pdf(data, self.mu, 1/self.prec + 1)
89 | 
90 |     def update_theta(self, data):
91 |         # One offset per tracked run length: a run that has seen n points
92 |         # updates its posterior mean to (n * mean + data) / (n + 1).
93 |         offsets = np.arange(1, len(self.mu) + 1)
94 |         muT0 = np.concatenate((self.mu0, (offsets*self.mu + data)/(offsets+1)))
95 |         precT0 = np.concatenate((self.prec0, self.prec + self.prec0))
96 |         self.mu, self.prec = muT0, precT0
97 |
98 |
99 | class Poisson:
100 |     # Poisson observations with a conjugate Gamma(k, theta) prior;
101 |     # the posterior predictive is negative binomial.
102 |     def __init__(self, k, theta):
103 |         self.k0 = self.k = np.array([k])
104 |         self.theta0 = self.theta = np.array([theta])
105 | 
106 |     def pdf(self, data):
107 |         return stats.nbinom.pmf(data, self.k, 1/(1+self.theta))
108 | 
109 |     def update_theta(self, data):
110 |         kT0 = np.concatenate((self.k0, self.k + data))
111 |         thetaT0 = np.concatenate((self.theta0, self.theta/(1+self.theta)))
112 |         self.k, self.theta = kT0, thetaT0
113 |
--------------------------------------------------------------------------------
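A minimal online-detection sketch using `constant_hazard` bound via `partial` plus the `StudentT` model above. The hazard scale 250 and the StudentT hyperparameters are illustrative choices, not values fixed by this module:

```python
from functools import partial
import numpy as np
import bayesian_changepoint_detection.online_changepoint_detection as oncd
import bayesian_changepoint_detection.generate_data as gd

partition, data = gd.generate_normal_time_series(7, 50, 200)

R, maxes = oncd.online_changepoint_detection(
    data[:, 0],                              # 1-D series
    partial(oncd.constant_hazard, 250),      # expected run length 250
    oncd.StudentT(alpha=0.1, beta=0.01, kappa=1, mu=0))

# R[r, t] is the probability at time t that the current run has length r;
# mass at r = 0 means a changepoint probably just occurred.
p_changepoint = R[0, 1:]
```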
/example.py:
--------------------------------------------------------------------------------
1 | from __future__ import division
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | import seaborn
5 |
6 | import cProfile
7 | import bayesian_changepoint_detection.offline_changepoint_detection as offcd
8 | import bayesian_changepoint_detection.generate_data as gd
9 | from functools import partial
10 |
11 | if __name__ == '__main__':
12 | show_plot = True
13 | dim = 4
14 | if dim == 1:
15 | partition, data = gd.generate_normal_time_series(7, 50, 200)
16 | else:
17 | partition, data = gd.generate_multinormal_time_series(7, dim, 50, 200)
18 | changes = np.cumsum(partition)
19 |
20 | if show_plot:
21 | fig, ax = plt.subplots(figsize=[16,12])
22 | for p in changes:
23 | ax.plot([p,p],[np.min(data),np.max(data)],'r')
24 | for d in range(dim):
25 | ax.plot(data[:,d])
26 | plt.show()
27 |
28 |
29 | #Q, P, Pcp = offcd.offline_changepoint_detection(data,partial(offcd.const_prior, l=(len(data)+1)),offcd.gaussian_obs_log_likelihood, truncate=-20)
30 | #Q_ifm, P_ifm, Pcp_ifm = offcd.offline_changepoint_detection(data,partial(offcd.const_prior, l=(len(data)+1)),offcd.ifm_obs_log_likelihood,truncate=-20)
31 | Q_full, P_full, Pcp_full = offcd.offline_changepoint_detection(data,partial(offcd.const_prior, l=(len(data)+1)),offcd.fullcov_obs_log_likelihood, truncate=-50)
32 |
33 | if show_plot:
34 |         fig = plt.figure(figsize=[18, 16])
35 |         ax = fig.add_subplot(2, 1, 1)
36 | for p in changes:
37 | ax.plot([p,p],[np.min(data),np.max(data)],'r')
38 | for d in range(dim):
39 | ax.plot(data[:,d])
40 | ax = fig.add_subplot(2, 1, 2, sharex=ax)
41 | ax.plot(np.exp(Pcp_full).sum(0))
42 | plt.show()
43 |
44 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 |
3 | from distutils.core import setup
4 | import bayesian_changepoint_detection
5 |
6 | setup(name='bayesian_changepoint_detection',
7 | version=bayesian_changepoint_detection.__version__,
8 | description='Some Bayesian changepoint detection algorithms',
9 | author='Johannes Kulick',
10 | author_email='johannes.kulick@ipvs.uni-stuttgart.de',
11 | url='http://github.com/hildensia/bayesian_changepoint_detection',
12 | packages = ['bayesian_changepoint_detection'],
13 |       requires=['scipy', 'numpy', 'decorator']
14 | )
15 |
--------------------------------------------------------------------------------
/xuan_motivating_example.py:
--------------------------------------------------------------------------------
1 | ''' Example from Xiang Xuan's thesis: Section 3.2'''
2 | from __future__ import division
3 | import numpy as np
4 | import matplotlib.pyplot as plt
5 |
6 | import bayesian_changepoint_detection.offline_changepoint_detection as offcd
7 | import bayesian_changepoint_detection.generate_data as gd
8 | from functools import partial
9 |
10 | if __name__ == '__main__':
11 | show_plot = True
12 |
13 | partition, data = gd.generate_xuan_motivating_example(50,200)
14 | changes = np.cumsum(partition)
15 |
16 | Q_ifm, P_ifm, Pcp_ifm = offcd.offline_changepoint_detection(data,partial(offcd.const_prior, l=(len(data)+1)),offcd.ifm_obs_log_likelihood,truncate=-20)
17 | Q_full, P_full, Pcp_full = offcd.offline_changepoint_detection(data,partial(offcd.const_prior, l=(len(data)+1)),offcd.fullcov_obs_log_likelihood, truncate=-20)
18 |
19 | if show_plot:
20 |         fig = plt.figure(figsize=[18, 16])
21 |         ax = fig.add_subplot(3, 1, 1)
22 | for p in changes:
23 | ax.plot([p,p],[np.min(data),np.max(data)],'r')
24 | for d in range(2):
25 | ax.plot(data[:,d])
26 | plt.legend(['Raw data with Original Changepoints'])
27 | ax1 = fig.add_subplot(3, 1, 2, sharex=ax)
28 | ax1.plot(np.exp(Pcp_ifm).sum(0))
29 | plt.legend(['Independent Factor Model'])
30 | ax2 = fig.add_subplot(3, 1, 3, sharex=ax)
31 | ax2.plot(np.exp(Pcp_full).sum(0))
32 | plt.legend(['Full Covariance Model'])
33 | plt.show()
34 |
35 |
--------------------------------------------------------------------------------