├── .gitignore
├── LICENSE
├── README.md
├── matlab
│   ├── README.md
│   ├── example1_lgss.m
│   ├── example2_lgss.m
│   ├── example3_sv.m
│   ├── generateData.m
│   ├── kalmanFilter.m
│   ├── particleFilter.m
│   ├── particleFilterSVmodel.m
│   ├── particleMetropolisHastings.m
│   └── particleMetropolisHastingsSVmodel.m
├── python
│   ├── README.md
│   ├── example1-lgss.py
│   ├── example2-lgss.py
│   ├── example3-sv.py
│   └── helpers
│       ├── __init__.py
│       ├── dataGeneration.py
│       ├── parameterEstimation.py
│       └── stateEstimation.py
└── r
    ├── README.md
    ├── example1-lgss.R
    ├── example2-lgss.R
    ├── example3-sv.R
    ├── example4-sv.R
    ├── example5-sv.R
    ├── extra-code-for-tutorial
    │   ├── example1-lgss-plotData.R
    │   ├── example2-lgss-varyingT.R
    │   ├── example4-sv-plotProposals.R
    │   └── example4-sv-varyingN.R
    └── helpers
        ├── dataGeneration.R
        ├── parameterEstimation.R
        ├── plotting.R
        └── stateEstimation.R
/.gitignore:
--------------------------------------------------------------------------------
1 | .vscode/
2 | *.R~
3 | *.m~
4 | *.pyc
5 |
6 | # Byte-compiled / optimized / DLL files
7 | __pycache__/
8 | *.py[cod]
9 | *$py.class
10 |
11 | # C extensions
12 | *.so
13 |
14 | # Distribution / packaging
15 | .Python
16 | build/
17 | develop-eggs/
18 | dist/
19 | downloads/
20 | eggs/
21 | .eggs/
22 | lib/
23 | lib64/
24 | parts/
25 | sdist/
26 | var/
27 | wheels/
28 | *.egg-info/
29 | .installed.cfg
30 | *.egg
31 |
32 | # PyInstaller
33 | # Usually these files are written by a python script from a template
34 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
35 | *.manifest
36 | *.spec
37 |
38 | # Installer logs
39 | pip-log.txt
40 | pip-delete-this-directory.txt
41 |
42 | # Unit test / coverage reports
43 | htmlcov/
44 | .tox/
45 | .coverage
46 | .coverage.*
47 | .cache
48 | nosetests.xml
49 | coverage.xml
50 | *.cover
51 | .hypothesis/
52 |
53 | # Translations
54 | *.mo
55 | *.pot
56 |
57 | # Django stuff:
58 | *.log
59 | local_settings.py
60 |
61 | # Flask stuff:
62 | instance/
63 | .webassets-cache
64 |
65 | # Scrapy stuff:
66 | .scrapy
67 |
68 | # Sphinx documentation
69 | docs/_build/
70 |
71 | # PyBuilder
72 | target/
73 |
74 | # Jupyter Notebook
75 | .ipynb_checkpoints
76 |
77 | # pyenv
78 | .python-version
79 |
80 | # celery beat schedule file
81 | celerybeat-schedule
82 |
83 | # SageMath parsed files
84 | *.sage.py
85 |
86 | # Environments
87 | .env
88 | .venv
89 | env/
90 | venv/
91 | ENV/
92 |
93 | # Spyder project settings
94 | .spyderproject
95 | .spyproject
96 |
97 | # Rope project settings
98 | .ropeproject
99 |
100 | # mkdocs documentation
101 | /site
102 |
103 | # mypy
104 | .mypy_cache/
105 |
106 | # History files
107 | .Rhistory
108 | .Rapp.history
109 |
110 | # Session Data files
111 | .RData
112 |
113 | # Example code in package build process
114 | *-Ex.R
115 |
116 | # Output files from R CMD build
117 | /*.tar.gz
118 |
119 | # Output files from R CMD check
120 | /*.Rcheck/
121 |
122 | # RStudio files
123 | .Rproj.user/
124 |
125 | # produced vignettes
126 | vignettes/*.html
127 | vignettes/*.pdf
128 |
129 | # OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3
130 | .httr-oauth
131 |
132 | # knitr and R markdown default cache directories
133 | /*_cache/
134 | /cache/
135 |
136 | # Temporary files created by R markdown
137 | *.utf8.md
138 | *.knit.md
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | GNU GENERAL PUBLIC LICENSE
2 | Version 2, June 1991
3 |
4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
6 | Everyone is permitted to copy and distribute verbatim copies
7 | of this license document, but changing it is not allowed.
8 |
9 | Preamble
10 |
11 | The licenses for most software are designed to take away your
12 | freedom to share and change it. By contrast, the GNU General Public
13 | License is intended to guarantee your freedom to share and change free
14 | software--to make sure the software is free for all its users. This
15 | General Public License applies to most of the Free Software
16 | Foundation's software and to any other program whose authors commit to
17 | using it. (Some other Free Software Foundation software is covered by
18 | the GNU Lesser General Public License instead.) You can apply it to
19 | your programs, too.
20 |
21 | When we speak of free software, we are referring to freedom, not
22 | price. Our General Public Licenses are designed to make sure that you
23 | have the freedom to distribute copies of free software (and charge for
24 | this service if you wish), that you receive source code or can get it
25 | if you want it, that you can change the software or use pieces of it
26 | in new free programs; and that you know you can do these things.
27 |
28 | To protect your rights, we need to make restrictions that forbid
29 | anyone to deny you these rights or to ask you to surrender the rights.
30 | These restrictions translate to certain responsibilities for you if you
31 | distribute copies of the software, or if you modify it.
32 |
33 | For example, if you distribute copies of such a program, whether
34 | gratis or for a fee, you must give the recipients all the rights that
35 | you have. You must make sure that they, too, receive or can get the
36 | source code. And you must show them these terms so they know their
37 | rights.
38 |
39 | We protect your rights with two steps: (1) copyright the software, and
40 | (2) offer you this license which gives you legal permission to copy,
41 | distribute and/or modify the software.
42 |
43 | Also, for each author's protection and ours, we want to make certain
44 | that everyone understands that there is no warranty for this free
45 | software. If the software is modified by someone else and passed on, we
46 | want its recipients to know that what they have is not the original, so
47 | that any problems introduced by others will not reflect on the original
48 | authors' reputations.
49 |
50 | Finally, any free program is threatened constantly by software
51 | patents. We wish to avoid the danger that redistributors of a free
52 | program will individually obtain patent licenses, in effect making the
53 | program proprietary. To prevent this, we have made it clear that any
54 | patent must be licensed for everyone's free use or not licensed at all.
55 |
56 | The precise terms and conditions for copying, distribution and
57 | modification follow.
58 |
59 | GNU GENERAL PUBLIC LICENSE
60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
61 |
62 | 0. This License applies to any program or other work which contains
63 | a notice placed by the copyright holder saying it may be distributed
64 | under the terms of this General Public License. The "Program", below,
65 | refers to any such program or work, and a "work based on the Program"
66 | means either the Program or any derivative work under copyright law:
67 | that is to say, a work containing the Program or a portion of it,
68 | either verbatim or with modifications and/or translated into another
69 | language. (Hereinafter, translation is included without limitation in
70 | the term "modification".) Each licensee is addressed as "you".
71 |
72 | Activities other than copying, distribution and modification are not
73 | covered by this License; they are outside its scope. The act of
74 | running the Program is not restricted, and the output from the Program
75 | is covered only if its contents constitute a work based on the
76 | Program (independent of having been made by running the Program).
77 | Whether that is true depends on what the Program does.
78 |
79 | 1. You may copy and distribute verbatim copies of the Program's
80 | source code as you receive it, in any medium, provided that you
81 | conspicuously and appropriately publish on each copy an appropriate
82 | copyright notice and disclaimer of warranty; keep intact all the
83 | notices that refer to this License and to the absence of any warranty;
84 | and give any other recipients of the Program a copy of this License
85 | along with the Program.
86 |
87 | You may charge a fee for the physical act of transferring a copy, and
88 | you may at your option offer warranty protection in exchange for a fee.
89 |
90 | 2. You may modify your copy or copies of the Program or any portion
91 | of it, thus forming a work based on the Program, and copy and
92 | distribute such modifications or work under the terms of Section 1
93 | above, provided that you also meet all of these conditions:
94 |
95 | a) You must cause the modified files to carry prominent notices
96 | stating that you changed the files and the date of any change.
97 |
98 | b) You must cause any work that you distribute or publish, that in
99 | whole or in part contains or is derived from the Program or any
100 | part thereof, to be licensed as a whole at no charge to all third
101 | parties under the terms of this License.
102 |
103 | c) If the modified program normally reads commands interactively
104 | when run, you must cause it, when started running for such
105 | interactive use in the most ordinary way, to print or display an
106 | announcement including an appropriate copyright notice and a
107 | notice that there is no warranty (or else, saying that you provide
108 | a warranty) and that users may redistribute the program under
109 | these conditions, and telling the user how to view a copy of this
110 | License. (Exception: if the Program itself is interactive but
111 | does not normally print such an announcement, your work based on
112 | the Program is not required to print an announcement.)
113 |
114 | These requirements apply to the modified work as a whole. If
115 | identifiable sections of that work are not derived from the Program,
116 | and can be reasonably considered independent and separate works in
117 | themselves, then this License, and its terms, do not apply to those
118 | sections when you distribute them as separate works. But when you
119 | distribute the same sections as part of a whole which is a work based
120 | on the Program, the distribution of the whole must be on the terms of
121 | this License, whose permissions for other licensees extend to the
122 | entire whole, and thus to each and every part regardless of who wrote it.
123 |
124 | Thus, it is not the intent of this section to claim rights or contest
125 | your rights to work written entirely by you; rather, the intent is to
126 | exercise the right to control the distribution of derivative or
127 | collective works based on the Program.
128 |
129 | In addition, mere aggregation of another work not based on the Program
130 | with the Program (or with a work based on the Program) on a volume of
131 | a storage or distribution medium does not bring the other work under
132 | the scope of this License.
133 |
134 | 3. You may copy and distribute the Program (or a work based on it,
135 | under Section 2) in object code or executable form under the terms of
136 | Sections 1 and 2 above provided that you also do one of the following:
137 |
138 | a) Accompany it with the complete corresponding machine-readable
139 | source code, which must be distributed under the terms of Sections
140 | 1 and 2 above on a medium customarily used for software interchange; or,
141 |
142 | b) Accompany it with a written offer, valid for at least three
143 | years, to give any third party, for a charge no more than your
144 | cost of physically performing source distribution, a complete
145 | machine-readable copy of the corresponding source code, to be
146 | distributed under the terms of Sections 1 and 2 above on a medium
147 | customarily used for software interchange; or,
148 |
149 | c) Accompany it with the information you received as to the offer
150 | to distribute corresponding source code. (This alternative is
151 | allowed only for noncommercial distribution and only if you
152 | received the program in object code or executable form with such
153 | an offer, in accord with Subsection b above.)
154 |
155 | The source code for a work means the preferred form of the work for
156 | making modifications to it. For an executable work, complete source
157 | code means all the source code for all modules it contains, plus any
158 | associated interface definition files, plus the scripts used to
159 | control compilation and installation of the executable. However, as a
160 | special exception, the source code distributed need not include
161 | anything that is normally distributed (in either source or binary
162 | form) with the major components (compiler, kernel, and so on) of the
163 | operating system on which the executable runs, unless that component
164 | itself accompanies the executable.
165 |
166 | If distribution of executable or object code is made by offering
167 | access to copy from a designated place, then offering equivalent
168 | access to copy the source code from the same place counts as
169 | distribution of the source code, even though third parties are not
170 | compelled to copy the source along with the object code.
171 |
172 | 4. You may not copy, modify, sublicense, or distribute the Program
173 | except as expressly provided under this License. Any attempt
174 | otherwise to copy, modify, sublicense or distribute the Program is
175 | void, and will automatically terminate your rights under this License.
176 | However, parties who have received copies, or rights, from you under
177 | this License will not have their licenses terminated so long as such
178 | parties remain in full compliance.
179 |
180 | 5. You are not required to accept this License, since you have not
181 | signed it. However, nothing else grants you permission to modify or
182 | distribute the Program or its derivative works. These actions are
183 | prohibited by law if you do not accept this License. Therefore, by
184 | modifying or distributing the Program (or any work based on the
185 | Program), you indicate your acceptance of this License to do so, and
186 | all its terms and conditions for copying, distributing or modifying
187 | the Program or works based on it.
188 |
189 | 6. Each time you redistribute the Program (or any work based on the
190 | Program), the recipient automatically receives a license from the
191 | original licensor to copy, distribute or modify the Program subject to
192 | these terms and conditions. You may not impose any further
193 | restrictions on the recipients' exercise of the rights granted herein.
194 | You are not responsible for enforcing compliance by third parties to
195 | this License.
196 |
197 | 7. If, as a consequence of a court judgment or allegation of patent
198 | infringement or for any other reason (not limited to patent issues),
199 | conditions are imposed on you (whether by court order, agreement or
200 | otherwise) that contradict the conditions of this License, they do not
201 | excuse you from the conditions of this License. If you cannot
202 | distribute so as to satisfy simultaneously your obligations under this
203 | License and any other pertinent obligations, then as a consequence you
204 | may not distribute the Program at all. For example, if a patent
205 | license would not permit royalty-free redistribution of the Program by
206 | all those who receive copies directly or indirectly through you, then
207 | the only way you could satisfy both it and this License would be to
208 | refrain entirely from distribution of the Program.
209 |
210 | If any portion of this section is held invalid or unenforceable under
211 | any particular circumstance, the balance of the section is intended to
212 | apply and the section as a whole is intended to apply in other
213 | circumstances.
214 |
215 | It is not the purpose of this section to induce you to infringe any
216 | patents or other property right claims or to contest validity of any
217 | such claims; this section has the sole purpose of protecting the
218 | integrity of the free software distribution system, which is
219 | implemented by public license practices. Many people have made
220 | generous contributions to the wide range of software distributed
221 | through that system in reliance on consistent application of that
222 | system; it is up to the author/donor to decide if he or she is willing
223 | to distribute software through any other system and a licensee cannot
224 | impose that choice.
225 |
226 | This section is intended to make thoroughly clear what is believed to
227 | be a consequence of the rest of this License.
228 |
229 | 8. If the distribution and/or use of the Program is restricted in
230 | certain countries either by patents or by copyrighted interfaces, the
231 | original copyright holder who places the Program under this License
232 | may add an explicit geographical distribution limitation excluding
233 | those countries, so that distribution is permitted only in or among
234 | countries not thus excluded. In such case, this License incorporates
235 | the limitation as if written in the body of this License.
236 |
237 | 9. The Free Software Foundation may publish revised and/or new versions
238 | of the General Public License from time to time. Such new versions will
239 | be similar in spirit to the present version, but may differ in detail to
240 | address new problems or concerns.
241 |
242 | Each version is given a distinguishing version number. If the Program
243 | specifies a version number of this License which applies to it and "any
244 | later version", you have the option of following the terms and conditions
245 | either of that version or of any later version published by the Free
246 | Software Foundation. If the Program does not specify a version number of
247 | this License, you may choose any version ever published by the Free Software
248 | Foundation.
249 |
250 | 10. If you wish to incorporate parts of the Program into other free
251 | programs whose distribution conditions are different, write to the author
252 | to ask for permission. For software which is copyrighted by the Free
253 | Software Foundation, write to the Free Software Foundation; we sometimes
254 | make exceptions for this. Our decision will be guided by the two goals
255 | of preserving the free status of all derivatives of our free software and
256 | of promoting the sharing and reuse of software generally.
257 |
258 | NO WARRANTY
259 |
260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
268 | REPAIR OR CORRECTION.
269 |
270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
278 | POSSIBILITY OF SUCH DAMAGES.
279 |
280 | END OF TERMS AND CONDITIONS
281 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # pmh-tutorial
2 | This repository was downloaded from https://github.com/compops/pmh-tutorial and contains the code used to produce the results in the tutorial:
3 |
4 | J. Dahlin and T. B. Schön, **Getting started with particle Metropolis-Hastings for inference in nonlinear dynamical models**. Journal of Statistical Software, Code Snippets, Volume 88, Number 2, pp. 1-41, Foundation for Open Access Statistics, 2019.
5 |
6 | The tutorial is available as open access from [Journal of Statistical Software](https://doi.org/10.18637/jss.v088.c02). An R package implementing the tutorial is also provided on CRAN. Its source code (almost identical to the code in the subdirectory r/) is found at [pmh-tutorial-rpkg](https://github.com/compops/pmh-tutorial-rpkg).
7 |
8 | ## Included material
9 | **r/** The main implementation: the complete R code developed in the tutorial. This code was used to produce all the numerical illustrations in the tutorial, including the figures and tables. The workspaces from these runs are also provided as a [zip-file in the latest release of the code](https://github.com/compops/pmh-tutorial/releases/latest) to reproduce all the figures in the tutorial.
10 |
11 | **python/** Code for Python to implement the basic algorithms covered in the tutorial. Implementations for the advanced topics are not provided. Only simple plotting is implemented and no figures or saved data from runs are provided.
12 |
13 | **matlab/** Code for MATLAB to implement the basic algorithms covered in the tutorial. Implementations for the advanced topics are not provided. Only simple plotting is implemented and no figures or saved data from runs are provided.
14 |
15 | ## Generalisations
16 | There is source code available for Python that implements some of the generalisations discussed in the tutorial. See the README file under *python/* for more information.
17 |
18 | ## Copyright information
19 | See *LICENSE* for more information.
20 |
21 | ``` R
22 | ##############################################################################
23 | # This program is free software; you can redistribute it and/or modify
24 | # it under the terms of the GNU General Public License as published by
25 | # the Free Software Foundation; either version 2 of the License, or
26 | # (at your option) any later version.
27 | #
28 | # This program is distributed in the hope that it will be useful,
29 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
30 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
31 | # GNU General Public License for more details.
32 | #
33 | # You should have received a copy of the GNU General Public License along
34 | # with this program; if not, write to the Free Software Foundation, Inc.,
35 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
36 | ##############################################################################
37 | ```
38 |
--------------------------------------------------------------------------------
/matlab/README.md:
--------------------------------------------------------------------------------
1 | # MATLAB code for PMH tutorial
2 |
3 | This MATLAB code implements the Kalman filter (KF), particle filter (PF) and particle Metropolis-Hastings (PMH) algorithm for two different dynamical models: a linear Gaussian state-space (LGSS) model and a stochastic volatility (SV) model. Note that the Kalman filter can only be employed for the first of these two models. The details of the code are described in the [tutorial paper](https://doi.org/10.18637/jss.v088.c02).
4 |
5 | Note that the MATLAB code in this folder covers the basic implementations in the paper. The variable notation has been changed slightly compared with the tutorial paper to improve the readability of the code. However, it should be easy to translate between the two. See the R code in r/ for all the implementations and to recreate the results in the tutorial.
6 |
7 | ## Requirements
8 | The code is written and tested for MATLAB 2016b and makes use of the Statistics Toolbox and the Quandl package. See the [package documentation](https://github.com/quandl/Matlab) for installation instructions and to download the toolbox. Note that urlread2 is required by the Quandl toolbox and should be installed as detailed in its README file.
9 |
10 | ## Main script files
11 | These are the main script files that implement the various algorithms discussed in the tutorial:
12 |
13 | * **example1_lgss.m** State estimation in a LGSS model using the KF and a fully-adapted PF (faPF). The code is discussed in Section 3.1 and the results are presented in Section 3.2 as Figure 4 and Table 1.
14 |
15 | * **example2_lgss.m** Parameter estimation of one parameter in the LGSS model using PMH with the faPF as the likelihood estimator. The code is discussed in Section 4.1 and the results are presented in Section 4.2 as Figure 5.
16 |
17 | * **example3_sv.m** Parameter estimation of three parameters in the SV model using PMH with the bootstrap PF as the likelihood estimator. The code is discussed in Section 5.1 and the results are presented in Section 5.2 as Figure 6. The code takes about an hour to run.
18 |
19 | ## Supporting files
20 | * **generateData.m** Implements data generation for the LGSS model.
21 | * **kalmanFilter.m** Implements the Kalman filter for the LGSS model.
22 | * **particleFilter.m** Implements the faPF for the LGSS model.
23 | * **particleFilterSVmodel.m** Implements the bPF for the SV model.
24 | * **particleMetropolisHastings.m** Implements the PMH algorithm for the LGSS model.
25 | * **particleMetropolisHastingsSVmodel.m** Implements the PMH algorithm for the SV model.
26 |
27 | ## Adapting the code for another model
28 | See the discussion in *README.md* in the directory *r/*.
29 |
30 | ## Copyright information
31 | ``` R
32 | ##############################################################################
33 | # This program is free software; you can redistribute it and/or modify
34 | # it under the terms of the GNU General Public License as published by
35 | # the Free Software Foundation; either version 2 of the License, or
36 | # (at your option) any later version.
37 | #
38 | # This program is distributed in the hope that it will be useful,
39 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
40 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
41 | # GNU General Public License for more details.
42 | #
43 | # You should have received a copy of the GNU General Public License along
44 | # with this program; if not, write to the Free Software Foundation, Inc.,
45 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
46 | ##############################################################################
47 | ```
48 |
--------------------------------------------------------------------------------
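For quick reference, the two models implemented in this directory can be written out as follows (a summary reconstructed from the model comments in the example scripts and from the propagation and weighting steps of the particle filters):

``` latex
% LGSS model (example1_lgss.m, example2_lgss.m)
x_{t+1} = \phi x_t + \sigma_v v_t, \qquad v_t \sim \mathcal{N}(0, 1)
y_t = x_t + \sigma_e e_t, \qquad e_t \sim \mathcal{N}(0, 1)

% SV model (example3_sv.m, particleFilterSVmodel.m)
x_0 \sim \mathcal{N}\big(\mu,\, \sigma_v^2 / (1 - \phi^2)\big)
x_{t+1} = \mu + \phi (x_t - \mu) + \sigma_v v_t, \qquad v_t \sim \mathcal{N}(0, 1)
y_t \mid x_t \sim \mathcal{N}\big(0, \exp(x_t)\big)
```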
/matlab/example1_lgss.m:
--------------------------------------------------------------------------------
1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2 | % State estimation in a LGSS model using particle and Kalman filters
3 | %
4 | % Johan Dahlin
5 | % Documentation at https://github.com/compops/pmh-tutorial
6 | % Published under GNU General Public License
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 |
9 | % Set random seed
10 | rng(0)
11 |
12 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
13 | % Define the model and generate data
14 | % x[t + 1] = phi * x[t] + sigmav * v[t], v[t] ~ N(0, 1)
15 | % y[t] = x[t] + sigmae * e[t], e[t] ~ N(0, 1)
16 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
17 | phi = 0.75;
18 | sigmav = 1.00;
19 | sigmae = 0.10;
20 | parameters = [phi sigmav sigmae];
21 | noObservations = 250;
22 | initialState = 0;
23 |
24 | [states, observations] = generateData(parameters, noObservations, initialState);
25 |
26 | subplot(3,1,1);
27 | plot(observations(2:(noObservations + 1)), 'LineWidth', 1.5, 'Color', [27 158 119] / 256);
28 | xlabel('time');
29 | ylabel('measurement');
30 |
31 | subplot(3,1,2);
32 | plot(states(2:(noObservations + 1)), 'LineWidth', 1.5, 'Color', [217 95 2] / 256);
33 | xlabel('time');
34 | ylabel('latent state');
35 |
36 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
37 | % State estimation
38 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
39 |
40 | % Particle filter with N = 20 particles
41 | stateEstPF = particleFilter(observations, parameters, 20, initialState);
42 |
43 | % Kalman filter
44 | stateEstKF = kalmanFilter(observations, parameters, initialState, 0.01);
45 |
46 | subplot(3,1,3);
47 | difference = stateEstPF(2:noObservations) - stateEstKF(2:noObservations);
48 | plot(1:(noObservations - 1), difference, 'LineWidth', 1.5, 'Color', [117 112 179] / 256);
49 | xlabel('time');
50 | ylabel('difference in state estimate');
--------------------------------------------------------------------------------
/matlab/example2_lgss.m:
--------------------------------------------------------------------------------
1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2 | % Example of particle Metropolis-Hastings in a LGSS model.
3 | %
4 | % Johan Dahlin
5 | % Documentation at https://github.com/compops/pmh-tutorial
6 | % Published under GNU General Public License
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 |
9 | % Set random seed
10 | rng(0)
11 |
12 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
13 | % Define the model and generate data
14 | % x[t + 1] = phi * x[t] + sigmav * v[t], v[t] ~ N(0, 1)
15 | % y[t] = x[t] + sigmae * e[t], e[t] ~ N(0, 1)
16 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
17 | phi = 0.75;
18 | sigmav = 1.00;
19 | sigmae = 0.10;
20 | parameters = [phi sigmav sigmae];
21 | noObservations = 250;
22 | initialState = 0;
23 |
24 | [states, observations] = generateData(parameters, noObservations, initialState);
25 |
26 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
27 | % PMH
28 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
29 | initialPhi = 0.50;
30 | noParticles = 100; % Use noParticles ~ noObservations
31 | noBurnInIterations = 1000;
32 | noIterations = 5000;
33 | stepSize = 0.10;
34 |
35 | phiTrace = particleMetropolisHastings(observations, initialPhi, [sigmav sigmae], noParticles, initialState, noIterations, stepSize);
36 |
37 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
38 | % Plot the results
39 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
40 | noBins = floor(sqrt(noIterations - noBurnInIterations));
41 | grid = noBurnInIterations:noIterations;
42 | phiTrace = phiTrace(noBurnInIterations:noIterations);
43 |
44 | % Plot the parameter posterior estimate (solid black line = posterior mean)
45 | subplot(3, 1, 1);
46 | hist(phiTrace, noBins);
47 | xlabel('phi');
48 | ylabel('posterior density estimate');
49 |
50 | h = findobj(gca, 'Type', 'patch');
51 | set(h, 'FaceColor', [117 112 179] / 256, 'EdgeColor', 'w');
52 |
53 | hold on;
54 | plot([1 1] * mean(phiTrace), [0 200], 'LineWidth', 3);
55 | hold off;
56 |
57 | % Plot the trace of the Markov chain after burn-in (solid black line = posterior mean)
58 | subplot(3, 1, 2);
59 | plot(grid, phiTrace, 'Color', [117 112 179] / 256, 'LineWidth', 1);
60 | xlabel('iteration');
61 | ylabel('phi');
62 |
63 | hold on;
64 | plot([grid(1) grid(end)], [1 1] * mean(phiTrace), 'k', 'LineWidth', 3);
65 | hold off;
66 |
67 | % Plot ACF of the Markov chain after burn-in
68 | subplot(3, 1, 3);
69 | [acf, lags] = xcorr(phiTrace - mean(phiTrace), 100, 'coeff');
70 | stem(lags(101:200), acf(101:200), 'Color', [117 112 179] / 256, 'LineWidth', 2);
71 | xlabel('lag');
72 | ylabel('ACF of phi');
--------------------------------------------------------------------------------
/matlab/example3_sv.m:
--------------------------------------------------------------------------------
1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2 | % Example of particle Metropolis-Hastings in a stochastic volatility model
3 | %
4 | % Johan Dahlin
5 | % Documentation at https://github.com/compops/pmh-tutorial
6 | % Published under GNU General Public License
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 |
9 | % Set random seed
10 | rng(0)
11 |
12 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
13 | % Load data
14 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
15 | data = Quandl.get('NASDAQOMX/OMXS30', 'start_date', '2012-01-02', 'end_date', '2014-01-02', 'type', 'data');
16 | logReturns = 100 * diff(log(flipud(data(:, 2))));
17 | noObservations = length(logReturns);
18 |
19 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
20 | % PMH
21 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
22 | initialTheta = [0 0.9 0.2];
23 | noParticles = 500; % Use noParticles ~ noObservations
24 | noBurnInIterations = 2500;
25 | noIterations = 7500;
26 | stepSize = diag([0.10 0.01 0.05].^2);
27 |
28 | [parameterTrace, logVolatilityEstimate] = particleMetropolisHastingsSVmodel(logReturns, initialTheta, noParticles, noIterations, stepSize);
29 |
30 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
31 | % Plot the results
32 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
33 | grid = noBurnInIterations:noIterations;
34 | noBins = floor(sqrt(noIterations - noBurnInIterations));
35 | logVolatilityEstimate = logVolatilityEstimate(grid, 2:(noObservations + 1));
36 | parameterTrace = parameterTrace(grid, :);
37 |
38 | % Plot the log-returns
39 | subplot(5, 3, [1 2 3]);
40 | plot(logReturns, 'LineWidth', 1, 'Color', [27 158 119] / 256)
41 | xlabel('time');
42 | ylabel('log-return');
43 |
44 | % Plot the log-volatility
45 | subplot(5, 3, [4 5 6]);
46 | plot(mean(logVolatilityEstimate, 1), 'LineWidth', 1, 'Color', [217 95 2] / 256)
47 | xlabel('time');
48 | ylabel('log-volatility estimate');
49 |
50 | % Histogram of marginal parameter posterior of mu
51 | subplot(5, 3, 7);
52 | hist(parameterTrace(:, 1), noBins);
53 | xlabel('mu');
54 | ylabel('posterior density estimate');
55 |
56 | h = findobj(gca, 'Type', 'patch');
57 | set(h, 'FaceColor', [117 112 179] / 256, 'EdgeColor', 'w');
58 | hold on;
59 | plot([1 1] * mean(parameterTrace(:, 1)), [0 500], 'k');
60 | hold off;
61 |
62 | % Trace plot for mu
63 | subplot(5, 3, 8);
64 | plot(grid, parameterTrace(:, 1), 'Color', [117 112 179] / 256);
65 | hold on;
66 | plot([grid(1) grid(end)], [1 1] * mean(parameterTrace(:, 1)), 'k');
67 | hold off;
68 | xlabel('iteration');
69 | ylabel('trace of mu');
70 |
71 | % Plot ACF of the Markov chain for mu after burn-in
72 | subplot(5, 3, 9);
73 | [acf, lags] = xcorr(parameterTrace(:, 1) - mean(parameterTrace(:, 1)), 100, 'coeff');
74 | stem(lags(101:200), acf(101:200), 'Color', [117 112 179] / 256, 'LineWidth', 2);
75 | xlabel('lag');
76 | ylabel('ACF of mu');
77 |
78 | % Histogram of marginal parameter posterior of phi
79 | subplot(5, 3, 10);
80 | hist(parameterTrace(:, 2), noBins);
81 | xlabel('phi');
82 | ylabel('posterior density estimate');
83 |
84 | h = findobj(gca, 'Type', 'patch');
85 | set(h, 'FaceColor', [231 41 138] / 256, 'EdgeColor', 'w');
86 | hold on;
87 | plot([1 1] * mean(parameterTrace(:, 2)), [0 500], 'k');
88 | hold off;
89 |
90 | % Trace plot for phi
91 | subplot(5, 3, 11);
92 | plot(grid, parameterTrace(:, 2), 'Color', [231 41 138] / 256);
93 | xlabel('iteration');
94 | ylabel('trace of phi');
95 | hold on;
96 | plot([grid(1) grid(end)],[1 1] * mean(parameterTrace(:, 2)), 'k');
97 | hold off;
98 |
99 | % Plot ACF of the Markov chain for phi after burn-in
100 | subplot(5, 3, 12);
101 | [acf, lags] = xcorr(parameterTrace(:, 2) - mean(parameterTrace(:, 2)), 100, 'coeff');
102 | stem(lags(101:200), acf(101:200), 'Color', [231 41 138] / 256, 'LineWidth', 2);
103 | xlabel('lag');
104 | ylabel('ACF of phi');
105 |
106 | % Histogram of marginal parameter posterior of sigma_v
107 | subplot(5, 3, 13);
108 | hist(parameterTrace(:, 3), noBins);
109 | xlabel('sigmav');
110 | ylabel('posterior density estimate');
111 |
112 | h = findobj(gca, 'Type', 'patch');
113 | set(h, 'FaceColor', [102 166 30] / 256, 'EdgeColor', 'w');
114 | hold on;
115 | plot([1 1] * mean(parameterTrace(:, 3)), [0 500], 'k');
116 | hold off;
117 |
118 | % Trace plot of sigma_v
119 | subplot(5, 3, 14);
120 | plot(grid, parameterTrace(:, 3), 'Color', [102 166 30] / 256);
121 | hold on;
122 | plot([grid(1) grid(end)],[1 1] * mean(parameterTrace(:, 3)), 'k');
123 | hold off;
124 | xlabel('iteration');
125 | ylabel('trace of sigmav');
126 |
127 | % Plot ACF of the Markov chain of sigma_v after burn-in
128 | subplot(5, 3, 15);
129 | [acf, lags] = xcorr(parameterTrace(:, 3) - mean(parameterTrace(:, 3)), 100, 'coeff');
130 | stem(lags(101:200), acf(101:200), 'Color', [102 166 30] / 256, 'LineWidth', 2);
131 | xlabel('lag');
132 | ylabel('ACF of sigmav');
--------------------------------------------------------------------------------
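A note on the preprocessing step in example3_sv.m above: `flipud` puts the downloaded closing prices p_t into chronological order, and `100 * diff(log(...))` then forms the percentage log-returns used as observations,

``` latex
y_t = 100 \left[ \log p_t - \log p_{t-1} \right]
```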
/matlab/generateData.m:
--------------------------------------------------------------------------------
1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2 | % Generates data from the LGSS model
3 | %
4 | % Johan Dahlin
5 | % Documentation at https://github.com/compops/pmh-tutorial
6 | % Published under GNU General Public License
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | function[states, observations] = generateData(parameters, noObservations, initialState)
9 | states = zeros(noObservations+1, 1);
10 | observations = zeros(noObservations+1, 1);
11 |
12 | states(1) = initialState;
13 | phi = parameters(1);
14 | sigmav = parameters(2);
15 | sigmae = parameters(3);
16 |
17 | for t = 2:(noObservations + 1)
18 | states(t) = phi * states(t-1) + sigmav * normrnd(0, 1);
19 | observations(t) = states(t) + sigmae * normrnd(0, 1);
20 | end
21 | end
--------------------------------------------------------------------------------
/matlab/kalmanFilter.m:
--------------------------------------------------------------------------------
1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2 | % Kalman filtering
3 | %
4 | % Johan Dahlin
5 | % Documentation at https://github.com/compops/pmh-tutorial
6 | % Published under GNU General Public License
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | function xHatFiltered = kalmanFilter(observations, parameters, initialState, initialStateCov)
9 |
10 | noObservations = length(observations);
11 | A = parameters(1);
12 | C = 1;
13 | Q = parameters(2)^2;
14 | R = parameters(3)^2;
15 |
16 | xHatFiltered = initialState * ones(noObservations, 1);
17 | xHatPredicted = initialState * ones(noObservations + 1, 1); % preallocated for the t+1 prediction step below
18 | predictiveCovariance = initialStateCov;
19 |
20 | for t = 1:noObservations
21 | % Correction step
22 | S = C * predictiveCovariance * C + R;
23 | kalmanGain = predictiveCovariance * C / S;
24 | filteredCovariance = predictiveCovariance - kalmanGain * S * kalmanGain;
25 | xHatFiltered(t) = xHatPredicted(t) + kalmanGain * ( observations(t) - C * xHatPredicted(t) );
26 |
27 | % Prediction step
28 | xHatPredicted(t+1) = A * xHatFiltered(t);
29 | predictiveCovariance = A * filteredCovariance * A + Q;
30 | end
31 | end
--------------------------------------------------------------------------------
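For reference, the loop above implements the standard scalar Kalman recursions with A = phi, C = 1, Q = sigmav^2 and R = sigmae^2:

``` latex
S_t = C P_{t|t-1} C + R, \qquad K_t = P_{t|t-1} C / S_t
\hat{x}_{t|t} = \hat{x}_{t|t-1} + K_t \big( y_t - C \hat{x}_{t|t-1} \big), \qquad P_{t|t} = P_{t|t-1} - K_t S_t K_t
\hat{x}_{t+1|t} = A \hat{x}_{t|t}, \qquad P_{t+1|t} = A P_{t|t} A + Q
```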
/matlab/particleFilter.m:
--------------------------------------------------------------------------------
1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2 | % Fully-adapted particle filter for the linear Gaussian SSM
3 | %
4 | % Johan Dahlin
5 | % Documentation at https://github.com/compops/pmh-tutorial
6 | % Published under GNU General Public License
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | function[xHatFiltered, logLikelihood] = particleFilter(observations, parameters, noParticles, initialState)
9 |
10 | noObservations = length(observations) - 1;
11 | phi = parameters(1);
12 | sigmav = parameters(2);
13 | sigmae = parameters(3);
14 |
15 | particles = zeros(noParticles, noObservations + 1);
16 | ancestorIndices = zeros(noParticles, noObservations + 1);
17 | weights = ones(noParticles, noObservations + 1);
18 | normalisedWeights = ones(noParticles, noObservations + 1) / noParticles;
19 | xHatFiltered = zeros(noObservations + 1, 1);
20 |
21 | logLikelihood = 0;
22 | ancestorIndices(:, 1)= 1:noParticles;
23 | xHatFiltered(1) = initialState;
24 | particles(:, 1) = initialState;
25 |
26 | for t = 2:noObservations
27 | % Resample (multinomial)
28 | newAncestors = randsample(noParticles, noParticles, true, normalisedWeights(:,t - 1));
29 | ancestorIndices(:, 1:(t - 1)) = ancestorIndices(newAncestors, 1:(t - 1));
30 | ancestorIndices(:, t) = newAncestors;
31 |
32 | % Propagate
33 | part1 = ( sigmav^(-2) + sigmae^(-2) )^(-1);
34 | part2 = sigmae^(-2) .* observations(t);
35 | part2 = part2 + sigmav^(-2) .* phi .* particles(newAncestors, t - 1);
36 | particles(:, t) = part1 .* part2 + sqrt(part1) .* normrnd(0, 1, noParticles, 1);
37 |
38 | % Compute weights
39 | weights(:, t) = dnorm(observations(t + 1), phi .* particles(:, t), sqrt(sigmae^2 + sigmav^2));
40 |
41 | maxWeight = max(weights(:, t));
42 | weights(:, t) = exp(weights(:, t) - maxWeight);
43 | sumWeights = sum(weights(:, t));
44 | normalisedWeights(:, t) = weights(:, t) / sumWeights;
45 |
46 | % Estimate the log-likelihood
47 | predictiveLikelihood = maxWeight + log(sumWeights) - log(noParticles);
48 | logLikelihood = logLikelihood + predictiveLikelihood;
49 |
50 | % Estimate the state
51 | xHatFiltered(t) = mean(particles(:,t));
52 | end
53 | end
54 |
55 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
56 | % Helper for computing the logarithm of the Gaussian density
57 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
58 | function[out] = dnorm(x, mu, sigma)
59 | out = -0.5 .* log(2 * pi) - 0.5 .* log(sigma.^2) - 0.5 ./ sigma.^2 .* (x - mu).^2;
60 | end
--------------------------------------------------------------------------------
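The propagation step above (part1 and part2) samples from the locally optimal proposal, which is available in closed form for this LGSS model; this is what makes the filter fully adapted. The weights are then the one-step predictive density:

``` latex
p(x_t \mid x_{t-1}, y_t) = \mathcal{N}\big(x_t;\; \sigma^2 [\sigma_e^{-2} y_t + \sigma_v^{-2} \phi x_{t-1}],\; \sigma^2\big), \qquad \sigma^2 = \big(\sigma_v^{-2} + \sigma_e^{-2}\big)^{-1}
p(y_{t+1} \mid x_t) = \mathcal{N}\big(y_{t+1};\; \phi x_t,\; \sigma_v^2 + \sigma_e^2\big)
```

The subtraction of maxWeight before exponentiation is the usual log-sum-exp trick for numerical stability; with log-weights \tilde{w}_t^i it yields the per-step log-likelihood contribution

``` latex
\hat{\ell}_t = \max_i \tilde{w}_t^i + \log \sum_{i=1}^{N} \exp\big(\tilde{w}_t^i - \max_j \tilde{w}_t^j\big) - \log N
```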
/matlab/particleFilterSVmodel.m:
--------------------------------------------------------------------------------
1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2 | % Bootstrap particle filter for the SV model
3 | %
4 | % Johan Dahlin
5 | % Documentation at https://github.com/compops/pmh-tutorial
6 | % Published under GNU General Public License
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | function[xHatFiltered, logLikelihood] = particleFilterSVmodel(observations, parameters, noParticles)
9 |
10 | noObservations = length(observations);
11 | mu = parameters(1);
12 | phi = parameters(2);
13 | sigmav = parameters(3);
14 |
15 | particles = zeros(noParticles, noObservations + 1);
16 | ancestorIndices = zeros(noParticles, noObservations + 1);
17 | weights = ones(noParticles, noObservations + 1);
18 | normalisedWeights = ones(noParticles, noObservations + 1) / noParticles;
19 |
20 | logLikelihood = 0;
21 | ancestorIndices(:, 1)= 1:noParticles;
22 | particles(:, 1) = mu + sigmav / sqrt(1 - phi^2) * normrnd(0, 1, noParticles, 1);
23 | xHatFiltered(1) = mean(particles(:, 1));
24 |
25 | for t = 2:(noObservations + 1)
26 | % Resample (multinomial)
27 | newAncestors = randsample(noParticles, noParticles, true, normalisedWeights(:, t - 1));
28 | ancestorIndices(:, 1:(t - 1)) = ancestorIndices(newAncestors, 1:(t - 1));
29 | ancestorIndices(:, t) = newAncestors;
30 |
31 | % Propagate
32 | part1 = mu + phi * (particles(newAncestors, t - 1) - mu);
33 | part2 = sigmav * normrnd(0, 1, noParticles, 1);
34 | particles(:, t) = part1 + part2;
35 |
36 | % Compute weights
37 | weights(:, t) = dnorm(observations(t - 1), 0, exp(particles(:, t) / 2));
38 |
39 | maxWeight = max(weights(:, t));
40 | weights(:, t) = exp(weights(:, t) - maxWeight);
41 | sumWeights = sum(weights(:, t));
42 | normalisedWeights(:, t) = weights(:, t) / sumWeights;
43 |
44 | % Estimate the log-likelihood
45 | predictiveLikelihood = maxWeight + log(sumWeights) - log(noParticles);
46 | logLikelihood = logLikelihood + predictiveLikelihood;
47 | end
48 |
49 | % Sample the state estimate using the weights at t = T
50 | xHatFiltered = zeros(1, noObservations + 1);
51 | ancestorIndex = randsample(noParticles, 1, true, normalisedWeights(:, noObservations + 1)); % final weights are in column T + 1 (column 1 holds t = 0)
52 |
53 | for t = 2:(noObservations + 1)
54 | xHatFiltered(t) = particles(ancestorIndices(ancestorIndex, t), t);
55 | end
56 | end
57 |
58 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
59 | % Helper for computing the logarithm of N(x; mu, sigma^2)
60 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
61 | function[out] = dnorm(x, mu, sigma)
62 | out = -0.5 .* log(2 * pi) - 0.5 .* log(sigma.^2) - 0.5 ./ sigma.^2 .* (x - mu).^2;
63 | end
--------------------------------------------------------------------------------
/matlab/particleMetropolisHastings.m:
--------------------------------------------------------------------------------
1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2 | % Particle Metropolis-Hastings (PMH) for the LGSS model
3 | %
4 | % Johan Dahlin
5 | % Documentation at https://github.com/compops/pmh-tutorial
6 | % Published under GNU General Public License
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | function[phi] = particleMetropolisHastings(observations, initialPhi, parameters, noParticles, initialState, noIterations, stepSize)
9 |
10 | sigmav = parameters(1);
11 | sigmae = parameters(2);
12 |
13 | phi = zeros(noIterations, 1);
14 | phiProposed = zeros(noIterations, 1);
15 | logLikelihood = zeros(noIterations, 1);
16 | logLikelihoodProposed = zeros(noIterations, 1);
17 | proposedPhiAccepted = zeros(noIterations, 1);
18 |
19 | % Set the initial parameter and estimate the initial log-likelihood
20 | phi(1) = initialPhi;
21 | parameters = [phi(1) sigmav sigmae];
22 | [~, logLikelihood(1)] = particleFilter(observations, parameters, noParticles, initialState);
23 |
24 | for k = 2:noIterations
25 | % Propose a new parameter
26 | phiProposed(k) = phi(k-1) + stepSize * normrnd(0, 1);
27 |
28 | % Estimate the log-likelihood (don't run if unstable system)
29 | if (abs(phiProposed(k)) < 1.0)
30 | thetaProposed = [phiProposed(k), sigmav, sigmae];
31 | [~, logLikelihoodProposed(k)] = particleFilter(observations, thetaProposed, noParticles, initialState);
32 | end
33 |
34 | % Compute the acceptance probability (reject if unstable system)
35 | prior = dnorm(phiProposed(k), 0, 1) - dnorm(phi(k - 1), 0, 1);
36 | likelihoodDifference = logLikelihoodProposed(k) - logLikelihood(k - 1);
37 | acceptProbability = exp(prior + likelihoodDifference);
38 | acceptProbability = acceptProbability * (abs(phiProposed(k)) < 1.0);
39 |
40 | % Accept / reject step
41 | uniformRandomVariable = unifrnd(0, 1);
42 | if (uniformRandomVariable < acceptProbability)
43 | % Accept the parameter
44 | phi(k) = phiProposed(k);
45 | logLikelihood(k) = logLikelihoodProposed(k);
46 | proposedPhiAccepted(k) = 1.0;
47 | else
48 | % Reject the parameter
49 | phi(k) = phi(k - 1);
50 | logLikelihood(k) = logLikelihood(k - 1);
51 | proposedPhiAccepted(k) = 0.0;
52 | end
53 |
54 | % Write out progress
55 | if ( rem(k, 100) == 0 )
56 | disp(['#####################################################################']);
57 | disp([' Iteration: ', num2str(k), ' of ', num2str(noIterations), ' completed.']);
58 | disp([' Current state of the Markov chain: ', num2str(phi(k), 2)]);
59 | disp([' Proposed next state of the Markov chain: ', num2str(phiProposed(k), 2)]);
60 | disp([' Current posterior mean: ', num2str(mean(phi(1:k)), 2)]);
61 | disp([' Current acceptance rate: ', num2str(mean(proposedPhiAccepted(1:k)), 2)]);
62 | disp(['#####################################################################']);
63 | end
64 | end
65 | end
66 |
67 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
68 | % Helper for computing the logarithm of N(x; mu, sigma^2)
69 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
70 | function[out] = dnorm(x, mu, sigma)
71 | out = -0.5 .* log(2 * pi) - 0.5 .* log(sigma.^2) - 0.5 ./ sigma.^2 .* (x - mu).^2;
72 | end
73 |
--------------------------------------------------------------------------------
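The accept/reject step above implements the pseudo-marginal Metropolis-Hastings acceptance probability with a standard Gaussian prior on phi, restricted to the stability region |phi| < 1:

``` latex
\alpha(\phi' \mid \phi) = \min\left\{ 1,\; \frac{p(\phi')\, \hat{L}(\phi')}{p(\phi)\, \hat{L}(\phi)} \right\}
```

where \hat{L} denotes the particle filter estimate of the likelihood. The code evaluates the ratio in log-space and compares a uniform draw against it, so the min is handled implicitly (ratios above one are always accepted).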
/matlab/particleMetropolisHastingsSVmodel.m:
--------------------------------------------------------------------------------
1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2 | % Particle Metropolis-Hastings (PMH) for the SV model
3 | %
4 | % Johan Dahlin
5 | % Documentation at https://github.com/compops/pmh-tutorial
6 | % Published under GNU General Public License
7 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
8 | function[theta, xHatFiltered] = particleMetropolisHastingsSVmodel(observations, initialParameters, noParticles, noIterations, stepSize)
9 | noObservations = length(observations);
10 |
11 | theta = zeros(noIterations, 3);
12 | thetaProposed = zeros(noIterations, 3);
13 | xHatFiltered = zeros(noIterations, noObservations + 1);
14 | xHatFilteredProposed = zeros(noIterations, noObservations + 1);
15 | logLikelihood = zeros(noIterations, 1);
16 | logLikelihoodProposed = zeros(noIterations, 1);
17 | proposedThetaAccepted = zeros(noIterations, 1);
18 |
19 | % Set the initial parameter and estimate the initial log-likelihood
20 | theta(1, :) = initialParameters;
21 | [xHatFiltered(1, :), logLikelihood(1)] = particleFilterSVmodel(observations, theta(1, :), noParticles);
22 |
23 | for k = 2:noIterations
24 | % Propose a new parameter
25 | thetaProposed(k, :) = mvnrnd(theta(k-1, :), stepSize);
26 |
27 | % Estimate the log-likelihood (don't run if unstable system)
28 | if (abs(thetaProposed(k, 2)) < 1.0) && (thetaProposed(k, 3) > 0.0)
29 | [xHatFilteredProposed(k, :), logLikelihoodProposed(k)] = particleFilterSVmodel(observations, thetaProposed(k, :), noParticles);
30 | end
31 |
32 | % Compute the acceptance probability (reject if unstable)
33 | prior = dnorm(thetaProposed(k, 1), 0, 1);
34 | prior = prior - dnorm(theta(k - 1, 1), 0, 1);
35 | prior = prior + dnorm(thetaProposed(k, 2), 0.95, 0.05);
36 | prior = prior - dnorm(theta(k - 1, 2), 0.95, 0.05);
37 | prior = prior + dgamma(thetaProposed(k, 3), 2, 10);
38 | prior = prior - dgamma(theta(k - 1, 3), 2, 10);
39 | likelihoodDifference = logLikelihoodProposed(k) - logLikelihood(k - 1);
40 | acceptProbability = exp(prior + likelihoodDifference);
41 | acceptProbability = acceptProbability * (abs(thetaProposed(k, 2)) < 1.0);
42 | acceptProbability = acceptProbability * (thetaProposed(k, 3) > 0.0);
43 |
44 | % Accept / reject step
45 | uniformRandomVariable = unifrnd(0, 1);
46 | if (uniformRandomVariable < acceptProbability)
47 | % Accept the parameter
48 | theta(k, :) = thetaProposed(k, :);
49 | xHatFiltered(k, :) = xHatFilteredProposed(k, :);
50 | logLikelihood(k) = logLikelihoodProposed(k);
51 | proposedThetaAccepted(k) = 1.0;
52 | else
53 | % Reject the parameter
54 | theta(k, :) = theta(k - 1, :);
55 | xHatFiltered(k, :) = xHatFiltered(k - 1, :);
56 | logLikelihood(k) = logLikelihood(k - 1);
57 | proposedThetaAccepted(k) = 0.0;
58 | end
59 |
60 | % Write out progress
61 | if ( rem(k, 100) == 0 )
62 | disp(['#####################################################################################']);
63 | disp([' Iteration: ', num2str(k), ' of ', num2str(noIterations), ' completed.']);
64 | disp([' Current state of the Markov chain: ', num2str(theta(k, :), 3)]);
65 | disp([' Proposed next state of the Markov chain: ', num2str(thetaProposed(k, :), 3)]);
66 | disp([' Current posterior mean: ', num2str(mean(theta(1:k, :), 1), 3)]);
67 | disp([' Current acceptance rate: ', num2str(mean(proposedThetaAccepted(1:k)), 3)]);
68 | disp(['#####################################################################################']);
69 | end
70 | end
71 | end
72 |
73 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
74 | % Helper for computing the logarithm of N(x; mu, sigma^2)
75 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
76 | function[out] = dnorm(x, mu, sigma)
77 | out = -0.5 .* log(2 * pi) - 0.5 .* log(sigma.^2) - 0.5 ./ sigma.^2 .* (x - mu).^2;
78 | end
79 |
80 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
81 | % Helper for computing the logarithm of Gamma(x; a, b) with mean a/b
82 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
83 | function[out] = dgamma(x, a, b)
84 | out = a * log(b) - gammaln(a) + (a-1) * log(x) - b * x;
85 | end
86 |
--------------------------------------------------------------------------------
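The prior terms accumulated above correspond to the following choices, with the Gamma prior in its rate parameterisation (mean a/b = 0.2 here, matching the dgamma helper):

``` latex
\mu \sim \mathcal{N}(0, 1), \qquad \phi \sim \mathcal{N}(0.95, 0.05^2), \qquad \sigma_v \sim \mathcal{G}(a = 2, b = 10)
\log \mathcal{G}(x; a, b) = a \log b - \log \Gamma(a) + (a - 1) \log x - b x
```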
/python/README.md:
--------------------------------------------------------------------------------
1 | # Python code for PMH tutorial
2 |
3 | This Python code implements the Kalman filter (KF), particle filter (PF) and particle Metropolis-Hastings (PMH) algorithm for two different dynamical models: a linear Gaussian state-space (LGSS) model and a stochastic volatility (SV) model. Note that the Kalman filter can only be employed for the first of these two models. The details of the code are described in [the tutorial paper](https://doi.org/10.18637/jss.v088.c02).
4 |
5 | Note that the Python code in this folder covers the basic implementations in the paper. The notation of the variables has been changed slightly compared with the tutorial paper to improve readability of the code. However, it should be easy to translate between the two. See the R code in r/ for all the implementations and to recreate the results in the tutorial.
6 |
7 | ## Requirements
8 | The code is written and tested for `Python 2.7.6/3.6` together with `NumPy 1.9.2/1.11.3`, `SciPy 0.15.1/0.18.1`, `Matplotlib 1.4.3/2.0.0` and `Quandl 2.8.9/3.1.0`. These packages are easily available via [Anaconda](https://docs.continuum.io/anaconda/install): install the distribution for your preferred Python version and then execute
9 | ``` bash
10 | conda install numpy scipy matplotlib quandl
11 | ```
12 | For more information about the Quandl library, see [the documentation](https://www.quandl.com/tools/python).
13 |
14 | ## Main script files
15 | These are the main script files that implement the various algorithms discussed in the tutorial.
16 |
17 | * **example1-lgss.py** State estimation in a LGSS model using the KF and a fully-adapted PF (faPF). The code is discussed in Section 3.1 and the results are presented in Section 3.2 as Figure 4 and Table 1.
18 |
19 | * **example2-lgss.py** Parameter estimation of one parameter in the LGSS model using PMH with the faPF as the likelihood estimator. The code is discussed in Section 4.1 and the results are presented in Section 4.2 as Figure 5.
20 |
21 | * **example3-sv.py** Parameter estimation of three parameters in the SV model using PMH with the bootstrap PF as the likelihood estimator. The code is discussed in Section 5.1 and the results are presented in Section 5.2 as Figure 6. The code takes about an hour to run.
22 |
23 | ## Supporting files (helpers/)
24 | * **dataGeneration.py** Generates data from a LGSS model.
25 |
26 | * **parameterEstimation.py** Implements the PMH algorithm for the LGSS model (particleMetropolisHastings) and the SV model (particleMetropolisHastingsSVModel).
27 |
28 | * **stateEstimation.py** Implements the faPF for the LGSS model (particleFilter), the Kalman filter for the LGSS model (kalmanFilter) and the bPF for the SV model (particleFilterSVmodel).
29 |
30 |
31 | ## Adapting the code for another model
32 | See the discussion in *README.md* in the directory *r/*.
33 |
34 | ## Generalisations
35 | Some generalisations and improvements of this code are discussed in the tutorial; see the last paragraph of Section 7. Python code for PMH1 and PMH2 is available in the repo [pmh-stco2015](https://github.com/compops/pmh-stco2015), Python code for qPMH2 is available in the repo [qpmh2-sysid2015](https://github.com/compops/qpmh2-sysid2015) and Python code for correlated pseudo-marginal Metropolis-Hastings is available in the repo [pmmh-correlated2015](https://github.com/compops/pmmh-correlated2015). These are excellent resources for getting up to speed with the current frontier in research connected to PMH.
36 |
37 | ## Copyright information
38 | ``` R
39 | ##############################################################################
40 | # This program is free software; you can redistribute it and/or modify
41 | # it under the terms of the GNU General Public License as published by
42 | # the Free Software Foundation; either version 2 of the License, or
43 | # (at your option) any later version.
44 | #
45 | # This program is distributed in the hope that it will be useful,
46 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
47 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
48 | # GNU General Public License for more details.
49 | #
50 | # You should have received a copy of the GNU General Public License along
51 | # with this program; if not, write to the Free Software Foundation, Inc.,
52 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
53 | ##############################################################################
54 | ```
55 |
--------------------------------------------------------------------------------
/python/example1-lgss.py:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # State estimation in a LGSS model using particle and Kalman filters
3 | #
4 | # Johan Dahlin
5 | # Documentation at https://github.com/compops/pmh-tutorial
6 | # Published under GNU General Public License
7 | ##############################################################################
8 |
9 | from __future__ import print_function, division
10 | import matplotlib.pylab as plt
11 | import numpy as np
12 |
13 | from helpers.dataGeneration import generateData
14 | from helpers.stateEstimation import particleFilter, kalmanFilter
15 |
16 | # Set the random seed to replicate results in tutorial
17 | np.random.seed(10)
18 |
19 | ##############################################################################
20 | # Define the model and generate data
21 | # x[t + 1] = phi * x[t] + sigmav * v[t], v[t] ~ N(0, 1)
22 | # y[t] = x[t] + sigmae * e[t], e[t] ~ N(0, 1)
23 | ##############################################################################
24 | parameters = np.zeros(3) # theta = (phi, sigmav, sigmae)
25 | parameters[0] = 0.75
26 | parameters[1] = 1.00
27 | parameters[2] = 0.10
28 | noObservations = 250
29 | initialState = 0
30 |
31 | state, observations = generateData(parameters, noObservations, initialState)
32 |
33 | # Plot data
34 | plt.subplot(3, 1, 1)
35 | plt.plot(observations, color='#1B9E77', linewidth=1.5)
36 | plt.xlabel("time")
37 | plt.ylabel("measurement")
38 |
39 | plt.subplot(3, 1, 2)
40 | plt.plot(state, color='#D95F02', linewidth=1.5)
41 | plt.xlabel("time")
42 | plt.ylabel("latent state")
43 |
44 | ##############################################################################
45 | # State estimation
46 | ##############################################################################
47 |
48 | # Particle filter with 20 particles
49 | xHatPF, _ = particleFilter(observations, parameters, 20, initialState)
50 |
51 | # Kalman filter
52 | xHatKF = kalmanFilter(observations, parameters, initialState, 0.01)
53 |
54 | # Plot state estimate
55 | plt.subplot(3, 1, 3)
56 | plt.plot(xHatKF[1:noObservations] - xHatPF[0:noObservations-1], color='#7570B3', linewidth=1.5)
57 | plt.xlabel("time")
58 | plt.ylabel("difference in estimate")
59 | plt.show()
--------------------------------------------------------------------------------
/python/example2-lgss.py:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # Parameter estimation using particle Metropolis-Hastings in a LGSS model.
3 | #
4 | # Johan Dahlin
5 | # Documentation at https://github.com/compops/pmh-tutorial
6 | # Published under GNU General Public License
7 | ##############################################################################
8 |
9 | from __future__ import print_function, division
10 | import matplotlib.pylab as plt
11 | import numpy as np
12 |
13 | from helpers.dataGeneration import generateData
14 | from helpers.stateEstimation import particleFilter, kalmanFilter
15 | from helpers.parameterEstimation import particleMetropolisHastings
16 |
17 | # Set the random seed to replicate results in tutorial
18 | np.random.seed(10)
19 |
20 | ##############################################################################
21 | # Define the model and generate data
22 | # x[t + 1] = phi * x[t] + sigmav * v[t], v[t] ~ N(0, 1)
23 | # y[t] = x[t] + sigmae * e[t], e[t] ~ N(0, 1)
24 | ##############################################################################
25 | parameters = np.zeros(3) # theta = (phi, sigmav, sigmae)
26 | parameters[0] = 0.75
27 | parameters[1] = 1.00
28 | parameters[2] = 0.10
29 | noObservations = 250
30 | initialState = 0
31 |
32 | state, observations = generateData(parameters, noObservations, initialState)
33 |
34 | ##############################################################################
35 | # PMH
36 | ##############################################################################
37 | initialPhi = 0.50
38 | noParticles = 500 # Use noParticles ~ noObservations
39 | noBurnInIterations = 1000
40 | noIterations = 5000
41 | stepSize = 0.10
42 |
43 | phiTrace = particleMetropolisHastings(
44 | observations, initialPhi, parameters, noParticles,
45 | initialState, particleFilter, noIterations, stepSize)
46 |
47 | ##############################################################################
48 | # Plot the results
49 | ##############################################################################
50 | noBins = int(np.floor(np.sqrt(noIterations - noBurnInIterations)))
51 | grid = np.arange(noBurnInIterations, noIterations, 1)
52 | phiTrace = phiTrace[noBurnInIterations:noIterations]
53 |
54 | # Plot the parameter posterior estimate (solid black line = posterior mean)
55 | plt.subplot(3, 1, 1)
56 | plt.hist(phiTrace, noBins, normed=1, facecolor='#7570B3')
57 | plt.xlabel("phi")
58 | plt.ylabel("posterior density estimate")
59 | plt.axvline(np.mean(phiTrace), color='k')
60 |
61 | # Plot the trace of the Markov chain after burn-in (solid black line = posterior mean)
62 | plt.subplot(3, 1, 2)
63 | plt.plot(grid, phiTrace, color='#7570B3')
64 | plt.xlabel("iteration")
65 | plt.ylabel("phi")
66 | plt.axhline(np.mean(phiTrace), color='k')
67 |
68 | # Plot the autocorrelation function
69 | plt.subplot(3, 1, 3)
70 | macf = np.correlate(phiTrace - np.mean(phiTrace), phiTrace - np.mean(phiTrace), mode='full')
71 | idx = int(macf.size/2)
72 | macf = macf[idx:]
73 | macf = macf[0:100]
74 | macf /= macf[0]
75 | grid = range(len(macf))
76 | plt.plot(grid, macf, color='#7570B3')
77 | plt.xlabel("lag")
78 | plt.ylabel("ACF of phi")
79 |
80 | plt.show()
--------------------------------------------------------------------------------
/python/example3-sv.py:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # Parameter estimation using particle Metropolis-Hastings
3 | # in a stochastic volatility model
4 | #
5 | # Johan Dahlin
6 | # Documentation at https://github.com/compops/pmh-tutorial
7 | # Published under GNU General Public License
8 | ##############################################################################
9 |
10 | from __future__ import print_function, division
11 | import matplotlib.pylab as plt
12 | import quandl
13 | import numpy as np
14 |
15 | from helpers.stateEstimation import particleFilterSVmodel
16 | from helpers.parameterEstimation import particleMetropolisHastingsSVModel
17 |
18 | # Set the random seed to replicate results in tutorial
19 | np.random.seed(10)
20 |
21 | ##############################################################################
22 | # Load data
23 | ##############################################################################
24 | data = quandl.get("NASDAQOMX/OMXS30", trim_start="2012-01-02", trim_end="2014-01-02")
25 | logReturns = 100 * np.diff(np.log(data['Index Value']))
26 | noLogReturns = len(logReturns)
27 |
28 | ##############################################################################
29 | # PMH
30 | ##############################################################################
31 | initialTheta = np.array((0.0, 0.9, 0.2))  # Initial guess of theta = (mu, phi, sigmav)
32 | noParticles = 500 # Choose noParticles ~ noLogReturns
33 | noBurnInIterations = 2500
34 | noIterations = 7500
35 | stepSize = np.diag((0.10**2, 0.01**2, 0.05**2))
36 |
37 | logVolatilityEst, parameterTrace = particleMetropolisHastingsSVModel(
38 | logReturns, initialTheta, noParticles,
39 | particleFilterSVmodel, noIterations, stepSize)
40 |
41 | ##############################################################################
42 | # Plot the results
43 | ##############################################################################
44 | noBins = int(np.floor(np.sqrt(noIterations - noBurnInIterations)))
45 | grid = np.arange(noBurnInIterations, noIterations, 1)
46 | logVolatilityEst = logVolatilityEst[noBurnInIterations:noIterations, :]
47 | parameterEst = parameterTrace[noBurnInIterations:noIterations, :]
48 |
49 | plt.figure(1)
50 |
51 | plt.subplot(5, 3, (1, 3))
52 | plt.plot(logReturns, color='#1B9E77', linewidth=1.5)
53 | plt.xlabel("time")
54 | plt.ylabel("log-return")
55 |
56 | plt.subplot(5, 3, (4, 6))
57 | plt.plot(np.mean(logVolatilityEst, axis=0), color='#D95F02', linewidth=1.5)
58 | plt.xlabel("time")
59 | plt.ylabel("log-volatility estimate")
60 |
61 | # Histogram of marginal parameter posterior of mu
62 | plt.subplot(5, 3, 7)
63 | plt.hist(parameterEst[:, 0], noBins, normed=1, facecolor='#7570B3')
64 | plt.xlabel("mu")
65 | plt.ylabel("posterior density estimate")
66 | plt.axvline(np.mean(parameterEst[:, 0]), linewidth=1.5, color='k')
67 |
68 | # Trace plot of mu
69 | plt.subplot(5, 3, 8)
70 | plt.plot(grid, parameterEst[:, 0], color='#7570B3')
71 | plt.xlabel("iteration")
72 | plt.ylabel("trace of mu")
73 | plt.axhline(np.mean(parameterEst[:, 0]), linewidth=1.5, color='k')
74 |
75 | # Autocorrelation function for mu
76 | plt.subplot(5, 3, 9)
77 | detrended_trace = parameterEst[:, 0] - np.mean(parameterEst[:, 0])
78 | macf = np.correlate(detrended_trace, detrended_trace, mode='full')
79 | idx = int(macf.size/2)
80 | macf = macf[idx:]
81 | macf = macf[0:100]
82 | macf /= macf[0]
83 | grid_acf = range(len(macf))
84 | plt.plot(grid_acf, macf, color='#7570B3')
85 | plt.xlabel("lag")
86 | plt.ylabel("ACF of mu")
87 |
88 | # Histogram of marginal parameter posterior of phi
89 | plt.subplot(5, 3, 10)
90 | plt.hist(parameterEst[:, 1], noBins, normed=1, facecolor='#E7298A')
91 | plt.xlabel("phi")
92 | plt.ylabel("posterior density estimate")
93 | plt.axvline(np.mean(parameterEst[:, 1]), linewidth=1.5, color='k')
94 |
95 | # Trace plot of phi
96 | plt.subplot(5, 3, 11)
97 | plt.plot(grid, parameterEst[:, 1], color='#E7298A')
98 | plt.xlabel("iteration")
99 | plt.ylabel("trace of phi")
100 | plt.axhline(np.mean(parameterEst[:, 1]), linewidth=1.5, color='k')
101 |
102 | # Autocorrelation function for phi
103 | plt.subplot(5, 3, 12)
104 | detrended_trace = parameterEst[:, 1] - np.mean(parameterEst[:, 1])
105 | macf = np.correlate(detrended_trace, detrended_trace, mode='full')
106 | idx = int(macf.size/2)
107 | macf = macf[idx:]
108 | macf = macf[0:100]
109 | macf /= macf[0]
110 | grid_acf = range(len(macf))
111 | plt.plot(grid_acf, macf, color='#E7298A')
112 | plt.xlabel("lag")
113 | plt.ylabel("ACF of phi")
114 |
115 | # Histogram of marginal parameter posterior of sigma
116 | plt.subplot(5, 3, 13)
117 | plt.hist(parameterEst[:, 2], noBins, normed=1, facecolor='#66A61E')
118 | plt.xlabel("sigmav")
119 | plt.ylabel("posterior density estimate")
120 | plt.axvline(np.mean(parameterEst[:, 2]), linewidth=1.5, color='k')
121 |
122 | # Trace plot of sigma
123 | plt.subplot(5, 3, 14)
124 | plt.plot(grid, parameterEst[:, 2], color='#66A61E')
125 | plt.xlabel("iteration")
126 | plt.ylabel("trace of sigmav")
127 | plt.axhline(np.mean(parameterEst[:, 2]), linewidth=1.5, color='k')
128 |
129 | # Autocorrelation function for sigma
130 | plt.subplot(5, 3, 15)
131 | detrended_trace = parameterEst[:, 2] - np.mean(parameterEst[:, 2])
132 | macf = np.correlate(detrended_trace, detrended_trace, mode='full')
133 | idx = int(macf.size/2)
134 | macf = macf[idx:]
135 | macf = macf[0:100]
136 | macf /= macf[0]
137 | grid_acf = range(len(macf))
138 | plt.plot(grid_acf, macf, color='#66A61E')
139 | plt.xlabel("lag")
140 | plt.ylabel("ACF of sigmav")
141 |
142 | plt.show()
--------------------------------------------------------------------------------
/python/helpers/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/compops/pmh-tutorial/ec188a501950d814d3ebe05a98cf67e622cb7136/python/helpers/__init__.py
--------------------------------------------------------------------------------
/python/helpers/dataGeneration.py:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # Generates data from the LGSS model
3 | #
4 | # Johan Dahlin
5 | # Documentation at https://github.com/compops/pmh-tutorial
6 | # Published under GNU General Public License
7 | ##############################################################################
8 | from __future__ import print_function, division
9 | import numpy as np
10 | from numpy.random import randn
11 |
12 | def generateData(theta, noObservations, initialState):
13 | phi = theta[0]
14 | sigmav = theta[1]
15 | sigmae = theta[2]
16 |
17 | state = np.zeros(noObservations + 1)
18 | observation = np.zeros(noObservations)
19 | state[0] = initialState
20 |
21 | for t in range(1, noObservations):
22 | state[t] = phi * state[t - 1] + sigmav * randn()
23 | observation[t] = state[t] + sigmae * randn()
24 |
25 | return(state, observation)
26 |
--------------------------------------------------------------------------------
/python/helpers/parameterEstimation.py:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # Particle Metropolis-Hastings for LGSS and SV models
3 | #
4 | # Johan Dahlin
5 | # Documentation at https://github.com/compops/pmh-tutorial
6 | # Published under GNU General Public License
7 | ##############################################################################
8 |
9 | from __future__ import print_function, division
10 | import numpy as np
11 | from numpy.random import randn, uniform, multivariate_normal
12 | from scipy.stats import gamma, norm
13 |
14 | ##############################################################################
15 | # Particle Metropolis-Hastings (PMH) for the LGSS model
16 | ##############################################################################
17 | def particleMetropolisHastings(observations, initialPhi, parameters, noParticles,
18 | initialState, particleFilter, noIterations, stepSize):
19 |
20 | phi = np.zeros(noIterations)
21 | phiProposed = np.zeros(noIterations)
22 | logLikelihood = np.zeros(noIterations)
23 | logLikelihoodProposed = np.zeros(noIterations)
24 | proposedPhiAccepted = np.zeros(noIterations)
25 |
26 | # Set the initial parameter and estimate the initial log-likelihood
27 | phi[0] = initialPhi
28 | _, logLikelihood[0] = particleFilter(observations, (phi[0], parameters[1], parameters[2]), noParticles, initialState)
29 |
30 | for k in range(1, noIterations):
31 | # Propose a new parameter
32 | phiProposed[k] = phi[k - 1] + stepSize * randn()
33 |
34 | # Estimate the log-likelihood if the proposed phi results in a stable model
35 | if (np.abs(phiProposed[k]) < 1.0):
36 | _, logLikelihoodProposed[k] = particleFilter(observations, (phiProposed[k], parameters[1], parameters[2]), noParticles, initialState)
37 |
38 | # Compute the acceptance probability
39 | acceptProbability = np.min((1.0, np.exp(logLikelihoodProposed[k] - logLikelihood[k - 1])))
40 | acceptProbability *= np.abs(phiProposed[k]) < 1.0
41 |
42 | # Accept / reject step
43 | uniformRandomVariable = uniform()
44 | if uniformRandomVariable < acceptProbability:
45 | # Accept the parameter
46 | phi[k] = phiProposed[k]
47 | logLikelihood[k] = logLikelihoodProposed[k]
48 | proposedPhiAccepted[k] = 1.0
49 | else:
50 | # Reject the parameter
51 | phi[k] = phi[k - 1]
52 | logLikelihood[k] = logLikelihood[k - 1]
53 | proposedPhiAccepted[k] = 0.0
54 |
55 | # Write out progress
56 | if np.remainder(k, 100) == 0:
57 | print("#####################################################################")
58 | print(" Iteration: " + str(k) + " of : " + str(noIterations) + " completed.")
59 | print("")
60 | print(" Current state of the Markov chain: " + "%.4f" % phi[k] + ".")
61 | print(" Proposed next state of the Markov chain: " + "%.4f" % phiProposed[k] + ".")
62 | print(" Current posterior mean: " + "%.4f" % np.mean(phi[0:k]) + ".")
63 | print(" Current acceptance rate: " + "%.4f" % np.mean(proposedPhiAccepted[0:k]) + ".")
64 | print("#####################################################################")
65 |
66 | return phi
67 |
68 | ##############################################################################
69 | # Particle Metropolis-Hastings (PMH) for the SV model
70 | ##############################################################################
71 | def particleMetropolisHastingsSVModel(observations, initialTheta,
72 | noParticles, particleFilter, noIterations, stepSize):
73 |
74 | noObservations = len(observations)
75 |
76 | theta = np.zeros((noIterations, 3))
77 | thetaProposed = np.zeros((noIterations, 3))
78 | logLikelihood = np.zeros(noIterations)
79 | logLikelihoodProposed = np.zeros(noIterations)
80 | xHatFiltered = np.zeros((noIterations, noObservations))
81 | xHatFilteredProposed = np.zeros((noIterations, noObservations))
82 | proposedThetaAccepted = np.zeros(noIterations)
83 |
84 | # Set the initial parameter and estimate the initial log-likelihood
85 | theta[0, :] = initialTheta
86 | (xHatFiltered[0, :], logLikelihood[0]) = particleFilter(observations, theta[0, :], noParticles)
87 |
88 | for k in range(1, noIterations):
89 |
90 | # Propose a new parameter
91 | thetaProposed[k, :] = theta[k - 1, :] + multivariate_normal(mean = np.zeros(3), cov = stepSize)
92 |
93 | # Estimate the log-likelihood if the proposed theta results in a stable model
94 | if ((np.abs(thetaProposed[k, 1]) < 1.0) & (thetaProposed[k, 2] > 0.0)):
95 | (xHatFilteredProposed[k, :], logLikelihoodProposed[k]) = particleFilter(observations, thetaProposed[k, :], noParticles)
96 |
97 | # Compute the ratio between the prior distributions (in log-form)
98 | prior = norm.logpdf(thetaProposed[k, 0], 0, 1)
99 | prior -= norm.logpdf(theta[k - 1, 0], 0, 1)
100 |
101 | prior += norm.logpdf(thetaProposed[k, 1], 0.95, 0.05)
102 | prior -= norm.logpdf(theta[k - 1, 1], 0.95, 0.05)
103 |
104 | prior += gamma.logpdf(thetaProposed[k, 2], 2, 1.0 / 10.0)
105 | prior -= gamma.logpdf(theta[k - 1, 2], 2, 1.0 / 10.0)
106 |
107 | # Compute the acceptance probability
108 | acceptProbability = np.min((1.0, np.exp(prior + logLikelihoodProposed[k] - logLikelihood[k - 1])))
109 | acceptProbability *= np.abs(thetaProposed[k, 1]) < 1.0
110 | acceptProbability *= thetaProposed[k, 2] > 0.0
111 |
112 | # Accept / reject step
113 | uniformRandomVariable = uniform()
114 | if (uniformRandomVariable < acceptProbability):
115 | # Accept the parameter
116 | theta[k, :] = thetaProposed[k, :]
117 | xHatFiltered[k, :] = xHatFilteredProposed[k, :]
118 | logLikelihood[k] = logLikelihoodProposed[k]
119 | proposedThetaAccepted[k] = 1.0
120 | else:
121 | # Reject the parameter
122 | theta[k, :] = theta[k - 1, :]
123 | xHatFiltered[k, :] = xHatFiltered[k - 1, :]
124 | logLikelihood[k] = logLikelihood[k - 1]
125 | proposedThetaAccepted[k] = 0.0
126 |
127 | # Write out progress
128 | if np.remainder(k, 100) == 0:
129 | print("#####################################################################")
130 | print(" Iteration: " + str(k) + " of : " + str(noIterations) + " completed.")
131 | print("")
132 | print(" Current state of the Markov chain: " + "%.4f" % theta[k, 0] + " " + "%.4f" % theta[k, 1] + " " + "%.4f" % theta[k, 2] + ".")
133 | print(" Proposed next state of the Markov chain: " + "%.4f" % thetaProposed[k, 0] + " " + "%.4f" % thetaProposed[k, 1] + " " + "%.4f" % thetaProposed[k, 2] + ".")
134 | print(" Current posterior mean: " + "%.4f" % np.mean( theta[0:k, 0]) + " " + "%.4f" % np.mean(theta[0:k, 1]) + " " + "%.4f" % np.mean(theta[0:k, 2]) + ".")
135 | print(" Current acceptance rate: " + "%.4f" % np.mean(proposedThetaAccepted[0:k]) + ".")
136 | print("#####################################################################")
137 |
138 | return (xHatFiltered, theta)
139 |
--------------------------------------------------------------------------------
/python/helpers/stateEstimation.py:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # State estimation in LGSS and SV models using Kalman and particle filters
3 | #
4 | # Johan Dahlin
5 | # Documentation at https://github.com/compops/pmh-tutorial
6 | # Published under GNU General Public License
7 | ##############################################################################
8 |
9 | from __future__ import print_function, division
10 | import numpy as np
11 | from numpy.random import randn, choice
12 | from scipy.stats import norm
13 |
14 | ##############################################################################
15 | # Kalman filter for the linear Gaussian SSM
16 | ##############################################################################
17 | def kalmanFilter(observations, parameters, initialState, initialStateCov):
18 |
19 | noObservations = len(observations)
20 | A = parameters[0]
21 | C = 1
22 | Q = parameters[1]**2
23 | R = parameters[2]**2
24 |
25 | predictiveCov = initialStateCov
26 | xHatPredicted = initialState * np.ones((noObservations + 1, 1))
27 | xHatFiltered = initialState * np.ones((noObservations, 1))
28 |
29 | for t in range(0, noObservations):
30 | # Correction step
31 | S = C * predictiveCov * C + R
32 | kalmanGain = predictiveCov * C / S
33 | filteredCovariance = predictiveCov - kalmanGain * S * kalmanGain
34 | yHatPredicted = C * xHatPredicted[t]
35 | xHatFiltered[t] = xHatPredicted[t] + kalmanGain * (observations[t - 1] - yHatPredicted)
36 |
37 | # Prediction step
38 | xHatPredicted[t + 1] = A * xHatFiltered[t]
39 | predictiveCov = A * filteredCovariance * A + Q
40 |
41 | return xHatFiltered
42 |
43 | ##############################################################################
44 | # Fully-adapted particle filter for the linear Gaussian SSM
45 | ##############################################################################
46 | def particleFilter(observations, parameters, noParticles, initialState):
47 |
48 | noObservations = len(observations) - 1
49 | phi = parameters[0]
50 | sigmav = parameters[1]
51 | sigmae = parameters[2]
52 |
53 | particles = np.zeros((noParticles, noObservations))
54 | ancestorIndices = np.zeros((noParticles, noObservations))
55 | weights = np.zeros((noParticles, noObservations))
56 | normalisedWeights = np.zeros((noParticles, noObservations))
57 | xHatFiltered = np.zeros((noObservations, 1))
58 |
59 | # Set the initial state and weights
60 | ancestorIndices[: , 0] = range(noParticles)
61 | particles[:, 0] = initialState
62 | xHatFiltered[0] = initialState
63 | normalisedWeights[:, 0] = 1.0 / noParticles
64 | logLikelihood = 0
65 |
66 | for t in range(1, noObservations):
67 | # Resample (multinomial)
68 | newAncestors = choice(noParticles, noParticles, p=normalisedWeights[:, t - 1], replace=True)
69 | ancestorIndices[:, 1:t - 1] = ancestorIndices[newAncestors, 1:t - 1]
70 | ancestorIndices[:, t] = newAncestors
71 |
72 | # Propagate
73 | part1 = (sigmav**(-2) + sigmae**(-2))**(-1)
74 | part2 = sigmae**(-2) * observations[t]
75 | part2 = part2 + sigmav**(-2) * phi * particles[newAncestors, t - 1]
76 | particles[:, t] = part1 * part2 + np.sqrt(part1) * randn(1, noParticles)
77 |
78 | # Compute weights
79 |         yhatMean = phi * particles[:, t]
80 |         yhatStdDev = np.sqrt(sigmav**2 + sigmae**2)  # standard deviation of p(y[t+1] | x[t])
81 |         weights[:, t] = norm.logpdf(observations[t + 1], yhatMean, yhatStdDev)
82 |
83 | maxWeight = np.max(weights[:, t])
84 | weights[:, t] = np.exp(weights[:, t] - maxWeight)
85 | sumWeights = np.sum(weights[:, t])
86 | normalisedWeights[:, t] = weights[:, t] / sumWeights
87 |
88 | # Estimate the state
89 | xHatFiltered[t] = np.sum(normalisedWeights[:, t] * particles[:, t])
90 |
91 | # Estimate log-likelihood
92 | predictiveLikelihood = maxWeight + np.log(sumWeights) - np.log(noParticles)
93 | logLikelihood += predictiveLikelihood
94 |
95 | return xHatFiltered, logLikelihood
96 |
97 | ##############################################################################
98 | # Bootstrap particle filter for the stochastic volatility model
99 | ##############################################################################
100 | def particleFilterSVmodel(observations, parameters, noParticles):
101 |
102 | noObservations = len(observations)
103 | mu = parameters[0]
104 | phi = parameters[1]
105 | sigmav = parameters[2]
106 |
107 | particles = np.zeros((noParticles, noObservations))
108 | ancestorIndices = np.zeros((noParticles, noObservations))
109 | weights = np.zeros((noParticles, noObservations))
110 | normalisedWeights = np.zeros((noParticles, noObservations))
111 | xHatFiltered = np.zeros((noObservations, 1))
112 |
113 | # Set the initial state and weights
114 | particles[:, 0] = mu + sigmav / np.sqrt(1.0 - phi**2) * randn(1, noParticles)
115 | normalisedWeights[:, 0] = 1.0 / noParticles
116 | weights[:, 0] = 1.0
117 | logLikelihood = 0
118 |
119 | for t in range(1, noObservations):
120 | # Resample particles
121 | newAncestors = choice(noParticles, noParticles, p=normalisedWeights[:, t - 1], replace=True)
122 | ancestorIndices[:, 1:t - 1] = ancestorIndices[newAncestors, 1:t - 1]
123 | ancestorIndices[:, t] = newAncestors
124 |
125 | # Propagate particles
126 | particles[:, t] = mu + phi * (particles[newAncestors, t - 1] - mu) + sigmav * randn(1, noParticles)
127 |
128 | # Weight particles
129 | weights[:, t] = norm.logpdf(observations[t - 1], 0, np.exp(particles[:, t] / 2))
130 |
131 | maxWeight = np.max(weights[:, t])
132 | weights[:, t] = np.exp(weights[:, t] - maxWeight)
133 | sumWeights = np.sum(weights[:, t])
134 | normalisedWeights[:, t] = weights[:, t] / sumWeights
135 |
136 | # Estimate the filtered state
137 | xHatFiltered[t] = np.sum(normalisedWeights[:, t] * particles[:, t])
138 |
139 | # Estimate log-likelihood
140 | predictiveLikelihood = maxWeight + np.log(sumWeights) - np.log(noParticles)
141 | logLikelihood += predictiveLikelihood
142 |
143 |
144 | # Sample the state estimate using the weights at t=T
145 | ancestorIndex = choice(noParticles, 1, p=normalisedWeights[:, noObservations - 1])
146 | stateTrajectory = particles[ancestorIndices[ancestorIndex, noObservations - 1].astype(int), :]
147 |
148 | return stateTrajectory, logLikelihood
--------------------------------------------------------------------------------
/r/README.md:
--------------------------------------------------------------------------------
1 | # R code for PMH tutorial
2 |
3 | This R code implements the Kalman filter (KF), particle filter (PF) and particle Metropolis-Hastings (PMH) algorithm for two different dynamical models: a linear Gaussian state-space (LGSS) model and a stochastic volatility (SV) model. Note that the Kalman filter can only be employed for the first of these two models. The details of the code are described in [the tutorial paper](https://doi.org/10.18637/jss.v088.c02).
4 |
5 | ## Requirements
6 | The code is written and tested for `R 3.2.2` and makes use of the packages `Quandl` and `mvtnorm`. These can be installed in R by executing the command:
7 | ``` R
8 | install.packages(c("mvtnorm", "Quandl"))
9 | ```
10 |
11 | ## Main script files
12 | These are the main script files that implement the various algorithms discussed in the tutorial.
13 |
14 | * **example1-lgss.R** State estimation in a LGSS model using the KF and a fully-adapted PF (faPF). The code is discussed in Section 3.1 and the results are presented in Section 3.2 as Figure 4 and Table 1.
15 |
16 | * **example2-lgss.R** Parameter estimation of one parameter in the LGSS model using PMH with the faPF as the likelihood estimator. The code is discussed in Section 4.1 and the results are presented in Section 4.2 as Figure 5.
17 |
18 | * **example3-sv.R** Parameter estimation of three parameters in the SV model using PMH with the bootstrap PF as the likelihood estimator. The code is discussed in Section 5.1 and the results are presented in Section 5.2 as Figure 6. The code takes about an hour to run.
19 |
20 | * **example4-sv.R** Modified version of the code in *example3-sv.R* to make use of a better tailored parameter proposal. The details are discussed in Section 6.3.2 and the results are presented in the same section as Figures 7 and 8. Note that the only difference in the code is that the variable stepSize is changed.
21 |
22 | * **example5-sv.R** Modified version of the code in *example3-sv.R* to make use of another parameterisation of the model and a better tailored parameter proposal. The details are discussed in Section 6.3.3 and the results are presented in the same section. Note that the differences in the code are the use of another implementation of PMH and that the variable stepSize is changed.
23 |
24 |
25 | ## Additional script files for creating plots for tutorial (extra-code-for-tutorial/)
26 | These are some additional files to recreate some extra results discussed in the tutorial.
27 |
28 | * **example1-lgss-plotData.R** Some sample code to generate data and recreate the plot of the data presented as Figure 3.
29 |
30 | * **example2-lgss-varyingT.R** An extended version of *example2-lgss.R* that makes several runs while changing the number of observations. The results are presented in Section 3.2 as Table 1.
31 |
32 | * **example4-sv-plotProposals.R** Some (ugly) code to plot the estimates of the posterior distribution and the proposal distribution using the output from a run of *example3-sv.R*. This code generates Figure 7 in Section 6.3.2.
33 |
34 |
35 | ## Supporting files (helpers/)
36 | * **dataGeneration.R** Generates data from a LGSS model.
37 |
38 | * **parameterEstimation.R** Implements the PMH algorithm for the LGSS model (particleMetropolisHastings), the SV model (particleMetropolisHastingsSVModel) and the reparameterised SV model (particleMetropolisHastingsSVModelReparameterised).
39 |
40 | * **stateEstimation.R** Implements the faPF for the LGSS model (particleFilter), the Kalman filter for the LGSS model (kalmanFilter) and the bPF for the SV model (particleFilterSVmodel).
41 |
42 | * **plotting.R** Generates the figures presented in the paper using the output of the PMH algorithm for the SV model.
43 |
44 | ## Saved results (savedWorkspaces/ and figures/)
45 | These directories are placeholders for the output from running the code. The workspaces and plots used in the tutorial are found as a zip-file in the [latest release of the code](https://github.com/compops/pmh-tutorial/releases/latest), as binaries are not usually version controlled by Git and the workspaces are quite large (ca. 80 MB).
46 |
47 | * **savedWorkspaces/** Saved copies of the workspace after running the corresponding code. These outputs are used to generate all the results in the paper. They can be used to directly recreate the plots in the tutorial by setting the flags loadSavedWorkspace and savePlotToFile to TRUE.
48 |
49 | * **figures/** Saved plots from running the files.
50 |
51 | ## Adapting the code for another model
52 | The code provided in *helpers/stateEstimation.R* and *helpers/parameterEstimation.R* is quite general. To adapt this code for your own model, you can start with the code in *example3-sv.R* together with the functions *particleFilterSVmodel* and *particleMetropolisHastingsSVmodel* from the helpers.
53 |
54 | ### Particle filter
55 | In the particle filter, you need to change the lines connected to: (i) the sampling of the initial state, (ii) the propagation of particles and (iii) the computation of the weights. For (i), you need to rewrite:
56 | ``` R
57 | particles[, 1] <- rnorm(noParticles, mu, sigmav / sqrt(1 - phi^2))
58 | ```
59 | to fit your model. Two simple choices are to make use of the stationary distribution of the state (as is done for the SV model) computed by hand or to initialize all particles to some value (as is done in the LGSS model) by:
60 | ``` R
61 | particles[, 1] <- initialState
62 | ```
63 | where *initialState* is provided by the user. In this case, the particle filter usually recovers from the crude initialisation quite rapidly, as long as the state process quickly forgets its past (mixes well).
64 |
65 | For (ii), you need to change:
66 | ``` R
67 | part1 <- mu + phi * (particles[newAncestors, t - 1] - mu)
68 | part2 <- rnorm(noParticles, 0, sigmav)
69 | particles[, t] <- part1 + part2
70 | ```
71 | to something else. For the bPF, this corresponds to the state process of your state-space model.
72 |
73 | For (iii), you need to change:
74 | ``` R
75 | weights[, t] <- dnorm(y[t - 1], 0, exp(particles[, t] / 2), log = TRUE)
76 | ```
77 | to something else. For the bPF, this corresponds to the observation process of your state-space model.
78 |
79 | Finally, note that this particle filter implementation can only be used for state-space models where the state and observation are scalar. However, it is quite straightforward to make use of particle filtering when the state and/or observations are multivariate; it is basically only bookkeeping. If the dimension of the state is larger than, say, 5, good proposals are usually required to avoid running into the curse of dimensionality. This is currently a hot research topic in the particle filtering literature.
80 |
81 | ### Particle Metropolis-Hastings
82 | The implementation of the PMH algorithm is general and does not require any major changes if the model is changed. The dimensionality of the variables *xHatFiltered*, *xHatFilteredProposed*, *theta* and *thetaProposed* needs to be altered to match the dimensionality of the state and the number of parameters in the new state-space model. Moreover, the initial value of theta and the proposal distribution need to be calibrated for your new model. The simplest way to do this is by so-called pilot runs: set the initial value to something reasonable and stepSize to a diagonal matrix with quite small elements, so that at least some proposed values are accepted. After the pilot run, adapt the proposal as discussed in Section 6.3.2 and initialise the PMH algorithm at the estimated posterior mean. Repeat this one or two more times, or until you are satisfied.
83 |
84 | It is known that this simple version of PMH performs badly when the number of parameters is larger than about 5. To circumvent this problem, see the suggestions in Sections 4.3 and 6, where it is also discussed how to choose the number of particles *noParticles* and the number of iterations *noIterations* to use in the PMH algorithm. *noBurnInIterations* can be selected by inspecting the trace plot for when the Markov chain has reached its steady state (stationarity). I usually use *noIterations* as 10,000 or 30,000 (with *noBurnInIterations* as 3,000 or 10,000) to get good posterior estimates, but these runs take time. Also, choosing *noParticles* somewhere between *T* and 2*T* is a good place to start.
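
As a minimal sketch of such a pilot-run adaptation (variable names as in *example3-sv.R*; the scaling 2.562^2 / 3 is the rule-of-thumb used in *example4-sv.R* for a model with three parameters):
``` R
# Discard the burn-in from the pilot run (res is the output of the PMH run)
resTh <- res$theta[noBurnInIterations:noIterations, ]

# Estimate the posterior mean and covariance from the pilot run
thhat <- colMeans(resTh)
estCov <- var(resTh)

# Scale the estimated covariance to obtain the new proposal covariance
stepSize <- 2.562^2 / 3 * estCov

# Re-run PMH initialised at the estimated posterior mean with the adapted proposal
res <- particleMetropolisHastingsSVmodel(y, thhat, noParticles, noIterations, stepSize)
```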
85 |
86 | ## Copyright information
87 | ``` R
88 | ##############################################################################
89 | # This program is free software; you can redistribute it and/or modify
90 | # it under the terms of the GNU General Public License as published by
91 | # the Free Software Foundation; either version 2 of the License, or
92 | # (at your option) any later version.
93 | #
94 | # This program is distributed in the hope that it will be useful,
95 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
96 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
97 | # GNU General Public License for more details.
98 | #
99 | # You should have received a copy of the GNU General Public License along
100 | # with this program; if not, write to the Free Software Foundation, Inc.,
101 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
102 | ##############################################################################
103 | ```
104 |
--------------------------------------------------------------------------------
/r/example1-lgss.R:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # State estimation in a LGSS model using particle and Kalman filters
3 | #
4 | # Johan Dahlin
5 | # Documentation at https://github.com/compops/pmh-tutorial
6 | # Published under GNU General Public License
7 | ##############################################################################
8 |
9 | source("helpers/dataGeneration.R")
10 | source("helpers/stateEstimation.R")
11 |
12 | # Set the random seed to replicate results in tutorial
13 | set.seed(10)
14 |
15 | # Should the results be loaded from file (to quickly generate plots)
16 | loadSavedWorkspace <- FALSE
17 |
18 | # Save plot to file
19 | savePlotToFile <- FALSE
20 |
21 | ##############################################################################
22 | # Define the model and generate data
23 | # x[t + 1] = phi * x[t] + sigmav * v[t], v[t] ~ N(0, 1)
24 | # y[t] = x[t] + sigmae * e[t], e[t] ~ N(0, 1)
25 | ##############################################################################
26 | phi <- 0.75
27 | sigmav <- 1.00
28 | sigmae <- 0.10
29 | T <- 250
30 | initialState <- 0
31 |
32 | data <- generateData(c(phi, sigmav, sigmae), T, initialState)
33 | x <- data$x
34 | y <- data$y
35 |
36 | # Export plot to file
37 | if (savePlotToFile) {
38 | cairo_pdf("figures/example1-lgss.pdf",
39 | height = 10,
40 | width = 8)
41 | }
42 |
43 | # Plot the latent state and observations
44 | layout(matrix(c(1, 1, 2, 2, 3, 4), 3, 2, byrow = TRUE))
45 | par (mar = c(4, 5, 0, 0))
46 |
47 | grid <- seq(0, T)
48 |
49 | plot(
50 | grid,
51 | y,
52 | col = "#1B9E77",
53 | type = "l",
54 | xlab = "time",
55 | ylab = "observation",
56 | ylim = c(-6, 6),
57 | bty = "n"
58 | )
59 | polygon(c(grid, rev(grid)),
60 | c(y, rep(-6, T + 1)),
61 | border = NA,
62 | col = rgb(t(col2rgb("#1B9E77")) / 256, alpha = 0.25))
63 |
64 |
65 | ##############################################################################
66 | # State estimation using the particle filter and Kalman filter
67 | ##############################################################################
68 | if (loadSavedWorkspace) {
69 | load("savedWorkspaces/example1-lgss.RData")
70 | } else {
71 | # Using noParticles = 20 particles and plot the estimate of the latent state
72 | noParticles <- 20
73 | outputPF <-
74 | particleFilter(y, c(phi, sigmav, sigmae), noParticles, initialState)
75 | outputKF <-
76 | kalmanFilter(y, c(phi, sigmav, sigmae), initialState, 0.01)
77 | difference <-
78 | outputPF$xHatFiltered - outputKF$xHatFiltered[-(T + 1)]
79 | }
80 |
81 | grid <- seq(0, T - 1)
82 | plot(
83 | grid,
84 | difference,
85 | col = "#7570B3",
86 | type = "l",
87 | xlab = "time",
88 | ylab = "error in state estimate",
89 | ylim = c(-0.1, 0.1),
90 | bty = "n"
91 | )
92 | polygon(
93 | c(grid, rev(grid)),
94 | c(difference, rep(-0.1, T)),
95 | border = NA,
96 | col = rgb(t(col2rgb("#7570B3")) / 256, alpha = 0.25)
97 | )
98 |
99 | # Compute bias and MSE
100 | logBiasMSE <- matrix(0, nrow = 7, ncol = 2)
101 | gridN <- c(10, 20, 50, 100, 200, 500, 1000)
102 |
103 | for (ii in 1:length(gridN)) {
104 | pfEstimate <-
105 | particleFilter(y, c(phi, sigmav, sigmae), gridN[ii], initialState)
106 | pfEstimate <- pfEstimate$xHatFiltered
107 | kfEstimate <- outputKF$xHatFiltered[-(T + 1)]
108 |
109 | logBiasMSE[ii, 1] <- log(mean(abs(pfEstimate - kfEstimate)))
110 | logBiasMSE[ii, 2] <- log(mean((pfEstimate - kfEstimate) ^ 2))
111 | }
112 |
113 | ##############################################################################
114 | # Plot the bias and MSE for comparison
115 | ##############################################################################
116 | plot(
117 | gridN,
118 | logBiasMSE[, 1],
119 | col = "#E7298A",
120 | type = "l",
121 | xlab = "no. particles (N)",
122 | ylab = "log-bias",
123 | ylim = c(-7,-3),
124 | bty = "n"
125 | )
126 | polygon(
127 | c(gridN, rev(gridN)),
128 | c(logBiasMSE[, 1], rep(-7, length(gridN))),
129 | border = NA,
130 | col = rgb(t(col2rgb("#E7298A")) / 256, alpha = 0.25)
131 | )
132 | points(gridN, logBiasMSE[, 1], col = "#E7298A", pch = 19)
133 |
134 | plot(
135 | gridN,
136 | logBiasMSE[, 2],
137 | col = "#66A61E",
138 | lwd = 1.5,
139 | type = "l",
140 | xlab = "no. particles (N)",
141 | ylab = "log-MSE",
142 | ylim = c(-12,-6),
143 | bty = "n"
144 | )
145 | polygon(
146 | c(gridN, rev(gridN)),
147 | c(logBiasMSE[, 2], rep(-12, length(gridN))),
148 | border = NA,
149 | col = rgb(t(col2rgb("#66A61E")) / 256, alpha = 0.25)
150 | )
151 | points(gridN, logBiasMSE[, 2], col = "#66A61E", pch = 19)
152 |
153 | # Close the plotting device
154 | if (savePlotToFile) {
155 | dev.off()
156 | }
157 |
158 | # Print a table (no. particles, log-bias, log-mse)
159 | print(t(rbind(gridN, t(logBiasMSE))))
160 |
161 | # gridN
162 | # [1,] 10 -3.696997 -6.938594
163 | # [2,] 20 -3.964671 -7.493297
164 | # [3,] 50 -4.567552 -8.718346
165 | # [4,] 100 -4.850363 -9.294468
166 | # [5,] 200 -5.192173 -9.905719
167 | # [6,] 500 -5.668407 -10.866745
168 | # [7,] 1000 -6.077648 -11.671646
169 |
170 | # Save the workspace to file
171 | if (!loadSavedWorkspace) {
172 | save.image("savedWorkspaces/example1-lgss.RData")
173 | }
--------------------------------------------------------------------------------
/r/example2-lgss.R:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # Parameter estimation using particle Metropolis-Hastings in a LGSS model.
3 | #
4 | # Johan Dahlin
5 | # Documentation at https://github.com/compops/pmh-tutorial
6 | # Published under GNU General Public License
7 | ##############################################################################
8 |
9 | source("helpers/dataGeneration.R")
10 | source("helpers/stateEstimation.R")
11 | source("helpers/parameterEstimation.R")
12 |
13 | # Set the random seed to replicate results in tutorial
14 | set.seed(10)
15 |
16 | # Should the results be loaded from file (to quickly generate plots)
17 | loadSavedWorkspace <- FALSE
18 |
19 | # Save plot to file
20 | savePlotToFile <- FALSE
21 |
22 | ##############################################################################
23 | # Define the model and generate data
24 | # x[t + 1] = phi * x[t] + sigmav * v[t], v[t] ~ N(0, 1)
25 | # y[t] = x[t] + sigmae * e[t], e[t] ~ N(0, 1)
26 | ##############################################################################
27 | phi <- 0.75
28 | sigmav <- 1.00
29 | sigmae <- 0.10
30 | T <- 250
31 | initialState <- 0
32 |
33 | data <- generateData(c(phi, sigmav, sigmae), T, initialState)
34 |
35 | ##############################################################################
36 | # PMH
37 | ##############################################################################
38 | initialPhi <- 0.50
39 | noParticles <- 100
40 | noBurnInIterations <- 1000
41 | noIterations <- 5000
42 |
43 | if (loadSavedWorkspace) {
44 | load("savedWorkspaces/example2-lgss.RData")
45 | } else {
46 | res1 <- particleMetropolisHastings(
47 | data$y,
48 | initialPhi,
49 | sigmav,
50 | sigmae,
51 | noParticles,
52 | initialState,
53 | noIterations,
54 | stepSize = 0.01
55 | )
56 | res2 <- particleMetropolisHastings(
57 | data$y,
58 | initialPhi,
59 | sigmav,
60 | sigmae,
61 | noParticles,
62 | initialState,
63 | noIterations,
64 | stepSize = 0.10
65 | )
66 | res3 <- particleMetropolisHastings(
67 | data$y,
68 | initialPhi,
69 | sigmav,
70 | sigmae,
71 | noParticles,
72 | initialState,
73 | noIterations,
74 | stepSize = 0.50
75 | )
76 | }
77 |
78 | ##############################################################################
79 | # Plot the results
80 | ##############################################################################
81 | resTh1 <- res1[noBurnInIterations:noIterations,]
82 | resTh2 <- res2[noBurnInIterations:noIterations,]
83 | resTh3 <- res3[noBurnInIterations:noIterations,]
84 |
85 | # Estimate the KDE of the marginal posteriors
86 | kde1 <- density(resTh1,
87 | kernel = "e",
88 | from = 0.5,
89 | to = 0.8)
90 | kde2 <- density(resTh2,
91 | kernel = "e",
92 | from = 0.5,
93 | to = 0.8)
94 | kde3 <- density(resTh3,
95 | kernel = "e",
96 | from = 0.5,
97 | to = 0.8)
98 |
99 | # Export plot to file
100 | if (savePlotToFile) {
101 | cairo_pdf("figures/example2-lgss.pdf",
102 | height = 10,
103 | width = 8)
104 | }
105 |
106 | layout(matrix(1:9, 3, 3, byrow = TRUE))
107 | par (mar = c(4, 5, 0, 0))
108 |
109 | # Plot the parameter posterior estimate
110 | hist(
111 | resTh1,
112 | breaks = floor(sqrt(noIterations - noBurnInIterations)),
113 | col = rgb(t(col2rgb("#7570B3")) / 256, alpha = 0.25),
114 | border = NA,
115 | xlab = expression(phi),
116 | ylab = "posterior estimate",
117 | main = "",
118 | xlim = c(0.5, 0.8),
119 | ylim = c(0, 12),
120 | freq = FALSE
121 | )
122 | lines(kde1, lwd = 2, col = "#7570B3")
123 | abline(v = mean(resTh1),
124 | lwd = 1,
125 | lty = "dotted")
126 |
127 | hist(
128 | resTh2,
129 | breaks = floor(sqrt(noIterations - noBurnInIterations)),
130 | col = rgb(t(col2rgb("#E7298A")) / 256, alpha = 0.25),
131 | border = NA,
132 | xlab = expression(phi),
133 | ylab = "posterior estimate",
134 | main = "",
135 | xlim = c(0.5, 0.8),
136 | ylim = c(0, 12),
137 | freq = FALSE
138 | )
139 | lines(kde2, lwd = 2, col = "#E7298A")
140 | abline(v = mean(resTh2),
141 | lwd = 1,
142 | lty = "dotted")
143 |
144 | hist(
145 | resTh3,
146 | breaks = floor(sqrt(noIterations - noBurnInIterations)),
147 | col = rgb(t(col2rgb("#66A61E")) / 256, alpha = 0.25),
148 | border = NA,
149 | xlab = expression(phi),
150 | ylab = "posterior estimate",
151 | main = "",
152 | xlim = c(0.5, 0.8),
153 | ylim = c(0, 12),
154 | freq = FALSE
155 | )
156 | lines(kde3, lwd = 2, col = "#66A61E")
157 | abline(v = mean(resTh3),
158 | lwd = 1,
159 | lty = "dotted")
160 |
161 | # Plot the trace of the Markov chain during 1000 iterations after the burn-in
162 | grid <- seq(noBurnInIterations, noBurnInIterations + 1000 - 1, 1)
163 |
164 | plot(
165 | grid,
166 | resTh1[1:1000],
167 | col = '#7570B3',
168 | type = "l",
169 | xlab = "iteration",
170 | ylab = expression(phi),
171 | ylim = c(0.4, 0.8),
172 | bty = "n"
173 | )
174 | abline(h = mean(resTh1),
175 | lwd = 1,
176 | lty = "dotted")
177 | polygon(
178 | c(grid, rev(grid)),
179 | c(resTh1[1:1000], rep(0.4, 1000)),
180 | border = NA,
181 | col = rgb(t(col2rgb("#7570B3")) / 256, alpha = 0.25)
182 | )
183 |
184 | plot(
185 | grid,
186 | resTh2[1:1000],
187 | col = '#E7298A',
188 | type = "l",
189 | xlab = "iteration",
190 | ylab = expression(phi),
191 | ylim = c(0.4, 0.8),
192 | bty = "n"
193 | )
194 | abline(h = mean(resTh2),
195 | lwd = 1,
196 | lty = "dotted")
197 | polygon(
198 | c(grid, rev(grid)),
199 | c(resTh2[1:1000], rep(0.4, 1000)),
200 | border = NA,
201 | col = rgb(t(col2rgb("#E7298A")) / 256, alpha = 0.25)
202 | )
203 |
204 | plot(
205 | grid,
206 | resTh3[1:1000],
207 | col = '#66A61E',
208 | type = "l",
209 | xlab = "iteration",
210 | ylab = expression(phi),
211 | ylim = c(0.4, 0.8),
212 | bty = "n"
213 | )
214 | abline(h = mean(resTh3),
215 | lwd = 1,
216 | lty = "dotted")
217 | polygon(
218 | c(grid, rev(grid)),
219 | c(resTh3[1:1000], rep(0.4, 1000)),
220 | border = NA,
221 | col = rgb(t(col2rgb("#66A61E")) / 256, alpha = 0.25)
222 | )
223 |
224 | # Plot the ACF of the Markov chain
225 |
226 | res1ACF <- acf(resTh1, plot = FALSE, lag.max = 60)
227 | plot(
228 | res1ACF$lag,
229 | res1ACF$acf,
230 | col = '#7570B3',
231 | type = "l",
232 | xlab = "iteration",
233 | ylab = "ACF",
234 | ylim = c(-0.2, 1),
235 | bty = "n"
236 | )
237 | polygon(
238 | c(res1ACF$lag, rev(res1ACF$lag)),
239 | c(res1ACF$acf, rep(0, length(res1ACF$lag))),
240 | border = NA,
241 | col = rgb(t(col2rgb("#7570B3")) / 256, alpha = 0.25)
242 | )
243 | abline(h = 1.96 / sqrt(length(grid)), lty = "dotted")
244 | abline(h = -1.96 / sqrt(length(grid)), lty = "dotted")
245 |
246 | res2ACF <- acf(resTh2, plot = FALSE, lag.max = 60)
247 | plot(
248 | res2ACF$lag,
249 | res2ACF$acf,
250 | col = '#E7298A',
251 | type = "l",
252 | xlab = "iteration",
253 | ylab = "ACF",
254 | ylim = c(-0.2, 1),
255 | bty = "n"
256 | )
257 | polygon(
258 | c(res2ACF$lag, rev(res2ACF$lag)),
259 | c(res2ACF$acf, rep(0, length(res2ACF$lag))),
260 | border = NA,
261 | col = rgb(t(col2rgb("#E7298A")) / 256, alpha = 0.25)
262 | )
263 | abline(h = 1.96 / sqrt(length(grid)), lty = "dotted")
264 | abline(h = -1.96 / sqrt(length(grid)), lty = "dotted")
265 |
266 | res3ACF <- acf(resTh3, plot = FALSE, lag.max = 60)
267 | plot(
268 | res3ACF$lag,
269 | res3ACF$acf,
270 | col = '#66A61E',
271 | type = "l",
272 | xlab = "iteration",
273 | ylab = "ACF",
274 | ylim = c(-0.2, 1),
275 | bty = "n"
276 | )
277 | polygon(
278 | c(res3ACF$lag, rev(res3ACF$lag)),
279 | c(res3ACF$acf, rep(0, length(res3ACF$lag))),
280 | border = NA,
281 | col = rgb(t(col2rgb("#66A61E")) / 256, alpha = 0.25)
282 | )
283 | abline(h = 1.96 / sqrt(length(grid)), lty = "dotted")
284 | abline(h = -1.96 / sqrt(length(grid)), lty = "dotted")
285 |
286 | # Close the plotting device
287 | if (savePlotToFile) {
288 | dev.off()
289 | }
290 |
291 | # Estimate the parameter posterior mean
292 | mean(res1[grid])
293 | mean(res2[grid])
294 | mean(res3[grid])
295 |
296 | # Save the workspace to file
297 | if (!loadSavedWorkspace) {
298 | save.image("savedWorkspaces/example2-lgss.RData")
299 | }
--------------------------------------------------------------------------------
/r/example3-sv.R:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # Parameter estimation using particle Metropolis-Hastings in a SV model
3 | #
4 | # Johan Dahlin
5 | # Documentation at https://github.com/compops/pmh-tutorial
6 | # Published under GNU General Public License
7 | ##############################################################################
8 |
9 | library("Quandl")
10 | library("mvtnorm")
11 | source("helpers/stateEstimation.R")
12 | source("helpers/parameterEstimation.R")
13 | source("helpers/plotting.R")
14 |
15 | # Set the random seed to replicate results in tutorial
16 | set.seed(10)
17 |
18 | # Should the results be loaded from file (to quickly generate plots)
19 | loadSavedWorkspace <- FALSE
20 |
21 | # Save plot to file
22 | savePlotToFile <- FALSE
23 | nPlot <- 2500
24 |
25 | ##############################################################################
26 | # Load data
27 | ##############################################################################
28 | d <-
29 | Quandl(
30 | "NASDAQOMX/OMXS30",
31 | start_date = "2012-01-02",
32 | end_date = "2014-01-02",
33 | type = "zoo"
34 | )
35 | y <- as.numeric(100 * diff(log(d$"Index Value")))
36 |
37 | ##############################################################################
38 | # PMH
39 | ##############################################################################
40 | initialTheta <- c(0, 0.9, 0.2)
41 | noParticles <- 500
42 | noBurnInIterations <- 2500
43 | noIterations <- 7500
44 | stepSize <- diag(c(0.10, 0.01, 0.05) ^ 2)
45 |
46 | if (loadSavedWorkspace) {
47 | load("savedWorkspaces/example3-sv.RData")
48 | } else {
49 | res <- particleMetropolisHastingsSVmodel(y, initialTheta, noParticles, noIterations, stepSize)
50 | }
51 |
52 | ##############################################################################
53 | # Plot the results
54 | ##############################################################################
55 | if (savePlotToFile) {
56 | cairo_pdf("figures/example3-sv.pdf",
57 | height = 10,
58 | width = 8)
59 | }
60 |
61 | iact <- makePlotsParticleMetropolisHastingsSVModel(y, res, noBurnInIterations, noIterations, nPlot)
62 |
63 | # Close the plotting device
64 | if (savePlotToFile) {
65 | dev.off()
66 | }
67 |
68 | # Print the estimate of the posterior mean and standard deviation
69 | resTh <- res$theta[noBurnInIterations:noIterations, ]
70 | thhat <- colMeans(resTh)
71 | thhatSD <- apply(resTh, 2, sd)
72 |
73 | print(thhat)
74 | print(thhatSD)
75 |
76 | #[1] -0.2337134 0.9708399 0.1498914
77 | #[1] 0.37048000 0.02191359 0.05595271
78 |
79 | # Compute an estimate of the IACT using the first 100 ACF coefficients
80 | print(iact)
81 | # [1] 135.19084 85.98935 65.80120
82 |
83 | # Estimate the covariance of the posterior to tune the proposal
84 | estCov <- var(resTh)
85 | # [,1] [,2] [,3]
86 | # [1,] 0.137255431 -0.0016258103 0.0015047492
87 | # [2,] -0.001625810 0.0004802053 -0.0009973058
88 | # [3,] 0.001504749 -0.0009973058 0.0031307062
89 |
90 | # Save the workspace to file
91 | if (!loadSavedWorkspace) {
92 | save.image("savedWorkspaces/example3-sv.RData")
93 | }
--------------------------------------------------------------------------------
/r/example4-sv.R:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # Parameter estimation using particle Metropolis-Hastings in a SV
3 | # with a proposal adapted from a pilot run.
4 | #
5 | # Johan Dahlin
6 | # Documentation at https://github.com/compops/pmh-tutorial
7 | # Published under GNU General Public License
8 | ##############################################################################
9 |
10 | library("Quandl")
11 | library("mvtnorm")
12 | source("helpers/stateEstimation.R")
13 | source("helpers/parameterEstimation.R")
14 | source("helpers/plotting.R")
15 |
16 | # Set the random seed to replicate results in tutorial
17 | set.seed(10)
18 |
19 | # Should the results be loaded from file (to quickly generate plots)
20 | loadSavedWorkspace <- FALSE
21 |
22 | # Save plot to file
23 | savePlotToFile <- TRUE
24 | nPlot <- 2500
25 |
26 | ##############################################################################
27 | # Load data
28 | ##############################################################################
29 | d <-
30 | Quandl(
31 | "NASDAQOMX/OMXS30",
32 | start_date = "2012-01-02",
33 | end_date = "2014-01-02",
34 | type = "zoo"
35 | )
36 | y <- as.numeric(100 * diff(log(d$"Index Value")))
37 |
38 |
39 | ##############################################################################
40 | # PMH
41 | ##############################################################################
42 |
43 | initialTheta <- c(0, 0.9, 0.2)
44 | noParticles <- 500
45 | noBurnInIterations <- 2500
46 | noIterations <- 7500
47 | stepSize <- matrix(
48 | c(
49 | 0.137255431,-0.0016258103,
50 | 0.0015047492,-0.0016258103,
51 | 0.0004802053,-0.0009973058,
52 | 0.0015047492,-0.0009973058,
53 | 0.0031307062
54 | ),
55 | ncol = 3,
56 | nrow = 3
57 | )
58 | stepSize <- 2.562^2 / 3 * stepSize
59 |
60 | if (loadSavedWorkspace) {
61 | load("savedWorkspaces/example4-sv.RData")
62 | } else {
63 | res <- particleMetropolisHastingsSVmodel(y, initialTheta, noParticles, noIterations, stepSize)
64 | }
65 |
66 | ##############################################################################
67 | # Plot the results
68 | ##############################################################################
69 | if (savePlotToFile) {
70 | cairo_pdf("figures/example4-sv.pdf",
71 | height = 10,
72 | width = 8)
73 | }
74 |
75 | iact <- makePlotsParticleMetropolisHastingsSVModel(y, res, noBurnInIterations, noIterations, nPlot)
76 |
77 | # Close the plotting device
78 | if (savePlotToFile) {
79 | dev.off()
80 | }
81 |
82 | ##############################################################################
83 | # Compute and save the results
84 | ##############################################################################
85 |
86 | # Print the estimate of the posterior mean and standard deviation
87 | resTh <- res$theta[noBurnInIterations:noIterations, ]
88 | thhat <- colMeans(resTh)
89 | thhatSD <- apply(resTh, 2, sd)
90 |
91 | print(thhat)
92 | print(thhatSD)
93 |
94 | #[1] -0.0997589 0.9723418 0.1492119
95 | #[1] 0.27266581 0.01792217 0.04535608
96 |
97 | # Compute an estimate of the IACT using the first 100 ACF coefficients
98 | print(iact)
99 | # [1] 31.94972 32.07775 28.36988
100 |
101 | # Save the workspace to file
102 | if (!loadSavedWorkspace) {
103 | save.image("savedWorkspaces/example4-sv.RData")
104 | }
105 |
--------------------------------------------------------------------------------
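
The IACT values printed above are computed inside helpers/plotting.R as one plus twice the sum of the first 100 sample autocorrelations (note that R's acf includes the lag-zero coefficient in the sum). A stand-alone sketch of the same estimator; the function name estimateIACT is illustrative:

# Sketch: IACT estimate from the first maxLag autocorrelation coefficients,
# mirroring helpers/plotting.R (acf returns the lag-0 coefficient as well).
estimateIACT <- function(chain, maxLag = 100) {
  acfRes <- acf(chain, plot = FALSE, lag.max = maxLag)
  1 + 2 * sum(acfRes$acf)
}

Comparing with example3-sv.R, the tuned proposal cuts the IACT from roughly (135, 86, 66) to (32, 32, 28), so each iteration carries several times more information about the posterior.
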
/r/example5-sv.R:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # Parameter estimation using particle Metropolis-Hastings in a reparameterised version of a
3 | # stochastic volatility model with a proposal adapted from a pilot run.
4 | #
5 | # Johan Dahlin
6 | # Documentation at https://github.com/compops/pmh-tutorial
7 | # Published under GNU General Public License
8 | ##############################################################################
9 |
10 | library("Quandl")
11 | library("mvtnorm")
12 | source("helpers/stateEstimation.R")
13 | source("helpers/parameterEstimation.R")
14 | source("helpers/plotting.R")
15 |
16 | # Set the random seed to replicate results in tutorial
17 | set.seed(10)
18 |
19 | # Should the results be loaded from file (to quickly generate plots)
20 | loadSavedWorkspace <- FALSE
21 |
22 | # Save plot to file
23 | savePlotToFile <- FALSE
24 | nPlot <- 2500
25 |
26 | ##############################################################################
27 | # Load data
28 | ##############################################################################
29 | d <-
30 | Quandl(
31 | "NASDAQOMX/OMXS30",
32 | start_date = "2012-01-02",
33 | end_date = "2014-01-02",
34 | type = "zoo"
35 | )
36 | y <- as.numeric(100 * diff(log(d$"Index Value")))
37 |
38 |
39 | ##############################################################################
40 | # PMH
41 | ##############################################################################
42 | initialTheta <- c(0, 0.9, 0.2)
43 | noParticles <- 500
44 | noBurnInIterations <- 2500
45 | noIterations <- 7500
46 | stepSize <- matrix(
47 | c(
48 | 0.041871682,-0.001200581,-0.002706803,-0.001200581,
49 | 0.054894707,-0.056321320,-0.002706803,-0.056321320,
50 | 0.087342276
51 | ),
52 | ncol = 3,
53 | nrow = 3
54 | )
55 | stepSize <- 2.562^2 / 3 * stepSize
56 |
57 | if (loadSavedWorkspace) {
58 | load("savedWorkspaces/example5-sv.RData")
59 | } else {
60 | res <- particleMetropolisHastingsSVmodelReparameterised(y, initialTheta, noParticles, noIterations, stepSize)
61 | }
62 |
63 | ##############################################################################
64 | # Plot the results
65 | ##############################################################################
66 | if (savePlotToFile) {
67 | cairo_pdf("figures/example5-sv.pdf", height = 10, width = 8)
68 | }
69 |
70 | iact <- makePlotsParticleMetropolisHastingsSVModel(y, res, noBurnInIterations, noIterations, nPlot)
71 |
72 | # Close the plotting device
73 | if (savePlotToFile) {
74 | dev.off()
75 | }
76 |
77 | ##############################################################################
78 | # Compute and save the results
79 | ##############################################################################
80 |
81 | # Print the estimate of the posterior mean and standard deviation
82 | resTh <- res$theta[noBurnInIterations:noIterations, ]
83 | thhat <- colMeans(resTh)
84 | thhatSD <- apply(resTh, 2, sd)
85 | print(thhat); print(thhatSD)
86 | #[1] -0.1550373 0.9601144 0.1742736
87 | #[1] 0.23637116 0.02239614 0.05701460
88 |
89 | # Compute an estimate of the IACT using the first 100 ACF coefficients
90 | print(iact)
91 | # [1] 21.93670 28.96783 16.65938
92 |
93 | # Estimate the covariance of the posterior to tune the proposal
94 | resThTransformed <- res$thetaTransformed[noBurnInIterations:noIterations,]
95 | estCov <- var(resThTransformed)
96 |
97 | # Save the workspace to file
98 | if (!loadSavedWorkspace) {
99 | save.image("savedWorkspaces/example5-sv.RData")
100 | }
--------------------------------------------------------------------------------
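
The reparameterised sampler works on (mu, atanh(phi), log(sigma_v)), so the Gaussian random walk can move freely while phi stays in (-1, 1) and sigma_v stays positive. A minimal sketch of the two maps applied inside particleMetropolisHastingsSVmodelReparameterised; the helper names are illustrative:

# Sketch: the transformation used in example5-sv.R and its inverse.
toUnconstrained <- function(theta) {
  c(theta[1], atanh(theta[2]), log(theta[3]))
}
toConstrained <- function(thetaTransformed) {
  c(thetaTransformed[1], tanh(thetaTransformed[2]), exp(thetaTransformed[3]))
}
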
/r/extra-code-for-tutorial/example1-lgss-plotData.R:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # Generates and plots data from a LGSS model.
3 | #
4 | # Johan Dahlin
5 | # Documentation at https://github.com/compops/pmh-tutorial
6 | # Published under GNU General Public License
7 | ##############################################################################
8 |
9 | source("../helpers/dataGeneration.R")
10 | source("../helpers/stateEstimation.R")
11 |
12 | # Set the random seed to replicate results in tutorial
13 | set.seed(10)
14 |
15 | # Save plot to file
16 | savePlotToFile <- TRUE
17 |
18 | ##############################################################################
19 | # Define the model and generate data
20 | # x[t + 1] = phi * x[t] + sigmav * v[t], v[t] ~ N(0, 1)
21 | # y[t] = x[t] + sigmae * e[t], e[t] ~ N(0, 1)
22 | ##############################################################################
23 | phi <- 0.75
24 | sigmav <- 1.00
25 | sigmae <- 0.10
26 | T <- 250
27 | initialState <- 0
28 |
29 | data <- generateData(c(phi, sigmav, sigmae), T, initialState)
30 | x <- data$x
31 | y <- data$y
32 |
33 | ##############################################################################
34 | # Plotting
35 | ##############################################################################
36 |
37 | # Export plot to file
38 | if (savePlotToFile) {
39 | cairo_pdf("../figures/lgss-data.pdf",
40 | height = 3,
41 | width = 8)
42 | }
43 |
44 | grid <- seq(0, T)
45 |
46 | # Plot the latent state and observations
47 | layout(matrix(1:3, 1, 3, byrow = TRUE))
48 | par(mar = c(4, 5, 0, 0))
49 |
50 | plot(
51 | grid,
52 | x,
53 | col = "#D95F02",
54 | lwd = 1,
55 | type = "l",
56 | xlab = "time",
57 | ylab = expression("latent state " * x[t]),
58 | bty = "n",
59 | ylim = c(-4, 6)
60 | )
61 | polygon(c(grid, rev(grid)),
62 | c(x, rep(-4, T + 1)),
63 | border = NA,
64 | col = rgb(t(col2rgb("#D95F02")) / 256, alpha = 0.25))
65 |
66 | plot(
67 | grid[-1],
68 | y[-1],
69 | col = "#1B9E77",
70 | lwd = 1,
71 | type = "l",
72 | xlab = "time",
73 | ylab = expression("observation " * y[t]),
74 | bty = "n",
75 | ylim = c(-4, 6)
76 | )
77 | polygon(c(grid[-1], rev(grid[-1])),
78 | c(y[-1], rep(-4, T)),
79 | border = NA,
80 | col = rgb(t(col2rgb("#1B9E77")) / 256, alpha = 0.25))
81 |
82 | foo <- acf(y[-1], plot = FALSE, lag.max = 25)
83 |
84 | plot(
85 | foo$lag,
86 | foo$acf,
87 | col = "#66A61E",
88 | lwd = 1.5,
89 | type = "l",
90 | xlab = "time",
91 | ylab = expression("ACF of " * y[t]),
92 | bty = "n",
93 | ylim = c(-0.2, 1),
94 | xlim = c(0, 25)
95 | )
96 | polygon(
97 | c(foo$lag, rev(foo$lag)),
98 | c(foo$acf, rep(0.0, length(foo$lag))),
99 | border = NA,
100 | col = rgb(t(col2rgb("#66A61E")) / 256, alpha = 0.25)
101 | )
102 | abline(h = -1.96 / sqrt(T), lty = "dotted")
103 | abline(h = 1.96 / sqrt(T), lty = "dotted")
104 |
105 |
106 | if (savePlotToFile) {
107 | dev.off()
108 | }
--------------------------------------------------------------------------------
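
The dotted lines in the ACF panel are the standard 95% band for white noise: for an uncorrelated series of length T the sample autocorrelations are approximately N(0, 1/T), so the band sits at plus/minus 1.96 / sqrt(T). A quick check with T = 250 as above:

# Sketch: the 95% white-noise band drawn by abline above.
T <- 250
band <- qnorm(0.975) / sqrt(T)   # ~1.96 / sqrt(250), about 0.124
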
/r/extra-code-for-tutorial/example2-lgss-varyingT.R:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # Runs the particle Metropolis-Hastings algorithm for different numbers
3 | # of observations generated from a LGSS model.
4 | #
5 | # Johan Dahlin
6 | # Documentation at https://github.com/compops/pmh-tutorial
7 | # Published under GNU General Public License
8 | ##############################################################################
9 |
10 | source("../helpers/dataGeneration.R")
11 | source("../helpers/stateEstimation.R")
12 | source("../helpers/parameterEstimation.R")
13 |
14 | # Should the results be loaded from file (to quickly generate plots)
15 | loadSavedWorkspace <- FALSE
16 |
17 | ##############################################################################
18 | # Define the model and generate data
19 | # x[t + 1] = phi * x[t] + sigmav * v[t], v[t] ~ N(0, 1)
20 | # y[t] = x[t] + sigmae * e[t], e[t] ~ N(0, 1)
21 | ##############################################################################
22 | phi <- 0.75
23 | sigmav <- 1.00
24 | sigmae <- 0.10
25 | T <- 250
26 | initialState <- 0
27 |
28 | ##############################################################################
29 | # PMH
30 | ##############################################################################
31 |
32 | initialPhi <- 0.50
33 | noParticles <- 100
34 | noBurnInIterations <- 1000
35 | noIterations <- 5000
36 | stepSize <- 0.10
37 |
38 | # Loop over different data lengths
39 | TT <- c(10, 20, 50, 100, 200, 500)
40 | Tmean <- matrix(0, nrow = length(TT), ncol = 1)
41 | Tvar <- matrix(0, nrow = length(TT), ncol = 1)
42 |
43 | if (loadSavedWorkspace) {
44 | load("../savedWorkspaces/example2-lgss-varyingT.RData")
45 | } else {
46 | for (i in 1:length(TT)) {
47 |
48 | set.seed(10)
49 | data <- generateData(c(phi, sigmav, sigmae), TT[i], initialState)
50 | res <-
51 | particleMetropolisHastings(
52 | data$y,
53 | initialPhi,
54 | sigmav,
55 | sigmae,
56 | noParticles,
57 | initialState,
58 | noIterations,
59 | stepSize
60 | )
61 |
62 | Tmean[i] <- mean(res[noBurnInIterations:noIterations])
63 | Tvar[i] <- var(res[noBurnInIterations:noIterations])
64 | }
65 | }
66 |
67 | ##############################################################################
68 | # Save workspace and print results
69 | ##############################################################################
70 | if (!loadSavedWorkspace) {
71 | save.image("../savedWorkspaces/example2-lgss-varyingT.RData")
72 | }
73 |
74 | # Print the results to screen (no. observations, posterior mean, posterior variance)
75 | cbind(TT, Tmean, Tvar)
76 |
77 | # [1,] 10 0.5955020 0.0399332238
78 | # [2,] 20 0.7943218 0.0127682838
79 | # [3,] 50 0.7649620 0.0089581720
80 | # [4,] 100 0.7269762 0.0060643002
81 | # [5,] 200 0.6960883 0.0026939445
82 | # [6,] 500 0.7185719 0.0009992732
--------------------------------------------------------------------------------
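
The table suggests the posterior variance decays at roughly the rate 1/T, in line with standard asymptotics. A quick check using the values printed above: the product T * variance should be roughly stable as T grows.

# Sketch: check the 1/T decay of the posterior variance using the table above.
TT <- c(10, 20, 50, 100, 200, 500)
Tvar <- c(0.0399332238, 0.0127682838, 0.0089581720,
          0.0060643002, 0.0026939445, 0.0009992732)
round(TT * Tvar, 2)   # roughly constant once T is moderate
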
/r/extra-code-for-tutorial/example4-sv-plotProposals.R:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # Ugly code to plot the estimate of the posterior distribution and the
3 | # proposal distribution adapted from a pilot run of particle
4 | # Metropolis-Hastings.
5 | #
6 | # Johan Dahlin
7 | # Documentation at https://github.com/compops/pmh-tutorial
8 | # Published under GNU General Public License
9 | ##############################################################################
10 |
11 | # Import helpers
12 | library("MASS")
13 | library("mvtnorm")
14 |
15 | # Save plot to file
16 | savePlotToFile <- FALSE
17 |
18 | # Load the run
19 | load("../savedWorkspaces/example3-sv.RData")
20 |
21 | ##############################################################################
22 | # Parameter proposals
23 | ##############################################################################
24 | # The unadapted proposal
25 | stepSize1 <- diag(c(0.10, 0.01, 0.05) ^ 2)
26 |
27 | # The adapted proposal
28 | stepSize2 <- matrix(
29 | c(
30 | 0.137255431,-0.0016258103,
31 | 0.0015047492,-0.0016258103,
32 | 0.0004802053,-0.0009973058,
33 | 0.0015047492,-0.0009973058,
34 | 0.0031307062
35 | ),
36 | ncol = 3,
37 | nrow = 3
38 | )
39 | stepSize2 <- 0.8 * stepSize2
40 |
41 | ##############################################################################
42 | # Create grids
43 | ##############################################################################
44 | # Estimate the posterior mean and covariance
45 | resTh <- res$theta[noBurnInIterations:noIterations, ]
46 | estThe <- colMeans(resTh)
47 | estCov <- var(resTh)
48 |
49 | # Create a grid for each parameter
50 | gridth1 <- seq(-1, 1, 0.01)
51 | gridth2 <- seq(0.90, 1.05, 0.01)
52 | gridth3 <- seq(0.01, 0.35, 0.01)
53 |
54 | #------------------------------------------------------------------------------
55 | # Make a grid of all pairs of parameters
56 | #------------------------------------------------------------------------------
57 |
58 | grid1 <- matrix(0, length(gridth1) * length(gridth2), 2)
59 | grid2 <- matrix(0, length(gridth1) * length(gridth3), 2)
60 | grid3 <- matrix(0, length(gridth2) * length(gridth3), 2)
61 |
62 | kk <- 1
63 | for (ii in 1:length(gridth1)) {
64 | for (jj in 1:length(gridth2)) {
65 | grid1[kk, ] <- c(gridth1[ii], gridth2[jj])
66 | kk <- kk + 1
67 | }
68 | }
69 |
70 | kk <- 1
71 | for (ii in 1:length(gridth1)) {
72 | for (jj in 1:length(gridth3)) {
73 | grid2[kk, ] <- c(gridth1[ii], gridth3[jj])
74 | kk <- kk + 1
75 | }
76 | }
77 |
78 | kk <- 1
79 | for (ii in 1:length(gridth2)) {
80 | for (jj in 1:length(gridth3)) {
81 | grid3[kk, ] <- c(gridth2[ii], gridth3[jj])
82 | kk <- kk + 1
83 | }
84 | }
85 |
86 |
87 | ##############################################################################
88 | # Evaluate the proposal distribution over the grid centered at the
89 | # posterior mean
90 | ##############################################################################
91 |
92 | dgrid1 <- matrix(
93 | dmvnorm(grid1, mean = estThe[-3], sigma = stepSize1[-3, -3]),
94 | length(gridth1),
95 | length(gridth2),
96 | byrow = TRUE
97 | )
98 |
99 | dgrid2 <- matrix(
100 | dmvnorm(grid2, mean = estThe[-2], sigma = stepSize1[-2, -2]),
101 | length(gridth1),
102 | length(gridth3),
103 | byrow = TRUE
104 | )
105 |
106 | dgrid3 <- matrix(
107 | dmvnorm(grid3, mean = estThe[-1], sigma = stepSize1[-1, -1]),
108 | length(gridth2),
109 | length(gridth3),
110 | byrow = TRUE
111 | )
112 |
113 | dgrid4 <- matrix(
114 | dmvnorm(grid1, mean = estThe[-3], sigma = stepSize2[-3, -3]),
115 | length(gridth1),
116 | length(gridth2),
117 | byrow = TRUE
118 | )
119 |
120 | dgrid5 <- matrix(
121 | dmvnorm(grid2, mean = estThe[-2], sigma = stepSize2[-2, -2]),
122 | length(gridth1),
123 | length(gridth3),
124 | byrow = TRUE
125 | )
126 |
127 | dgrid6 <- matrix(
128 | dmvnorm(grid3, mean = estThe[-1], sigma = stepSize2[-1, -1]),
129 | length(gridth2),
130 | length(gridth3),
131 | byrow = TRUE
132 | )
133 |
134 |
135 | ##############################################################################
136 | # Compute the 2-dimensional kernel density estimate of the posterior
137 | ##############################################################################
138 |
139 | foo1 <- kde2d(resTh[, 1], resTh[, 2], n = 50)
140 | foo2 <- kde2d(resTh[, 1], resTh[, 3], n = 50)
141 | foo3 <- kde2d(resTh[, 2], resTh[, 3], n = 50)
142 |
143 |
144 | ##############################################################################
145 | # Create the plot
146 | ##############################################################################
147 |
148 | if (savePlotToFile) {
149 | cairo_pdf("../figures/example4-sv-plotProposals.pdf",
150 | height = 6,
151 | width = 8)
152 | }
153 |
154 | layout(matrix(1:6, 2, 3, byrow = TRUE))
155 | par(mar = c(4, 5, 0, 0))
156 |
157 | #------------------------------------------------------------------------------
158 | # Mu versus phi (old proposal)
159 | #------------------------------------------------------------------------------
160 |
161 | contour(
162 | foo1,
163 | xlim = c(-1, 1),
164 | ylim = c(0.88, 1.05),
165 | labels = NULL,
166 | bty = "n",
167 | col = "#7570B3",
168 | lwd = 1.5,
169 | labcex = 0.001,
170 | xlab = expression(mu),
171 | ylab = expression(phi)
172 | )
173 |
174 | contour(
175 | gridth1,
176 | gridth2,
177 | dgrid1,
178 | labels = NULL,
179 | nlevels = 5,
180 |   add = TRUE,
181 | col = "grey20",
182 | labcex = 0.001,
183 | lwd = 2
184 | )
185 |
186 | #------------------------------------------------------------------------------
187 | # Mu versus sigma_v (old proposal)
188 | #------------------------------------------------------------------------------
189 |
190 | contour(
191 | foo2,
192 | xlim = c(-1, 1),
193 | ylim = c(0.00, 0.35),
194 | labels = NULL,
195 | bty = "n",
196 | col = "#E7298A",
197 | lwd = 1.5,
198 | labcex = 0.001,
199 | xlab = expression(mu),
200 | ylab = expression(sigma[v])
201 | )
202 |
203 | contour(
204 | gridth1,
205 | gridth3,
206 | dgrid2,
207 | labels = NULL,
208 | nlevels = 5,
209 |   add = TRUE,
210 | col = "grey20",
211 | labcex = 0.001,
212 | lwd = 2
213 | )
214 |
215 | #------------------------------------------------------------------------------
216 | # Phi versus sigma_v (old proposal)
217 | #------------------------------------------------------------------------------
218 |
219 | contour(
220 | foo3,
221 | xlim = c(0.88, 1.05),
222 | ylim = c(0.00, 0.35),
223 | labels = NULL,
224 | bty = "n",
225 | col = "#66A61E",
226 | lwd = 1.5,
227 | labcex = 0.001,
228 | xlab = expression(phi),
229 | ylab = expression(sigma[v])
230 | )
231 |
232 | contour(
233 | gridth2,
234 | gridth3,
235 | dgrid3,
236 | labels = NULL,
237 | nlevels = 5,
238 |   add = TRUE,
239 | col = "grey20",
240 | labcex = 0.001,
241 | lwd = 2
242 | )
243 |
244 | #------------------------------------------------------------------------------
245 | # Mu versus phi (new proposal)
246 | #------------------------------------------------------------------------------
247 | contour(
248 | foo1,
249 | xlim = c(-1, 1),
250 | ylim = c(0.88, 1.05),
251 | labels = NULL,
252 | bty = "n",
253 | col = "#7570B3",
254 | lwd = 1.5,
255 | labcex = 0.001,
256 | xlab = expression(mu),
257 | ylab = expression(phi)
258 | )
259 |
260 | contour(
261 | gridth1,
262 | gridth2,
263 | dgrid4,
264 | labels = NULL,
265 | nlevels = 5,
266 |   add = TRUE,
267 | col = "grey20",
268 | labcex = 0.001,
269 | lwd = 2
270 | )
271 |
272 | #------------------------------------------------------------------------------
273 | # Mu versus sigma_v (new proposal)
274 | #------------------------------------------------------------------------------
275 |
276 | contour(
277 | foo2,
278 | xlim = c(-1, 1),
279 | ylim = c(0.00, 0.35),
280 | labels = NULL,
281 | bty = "n",
282 | col = "#E7298A",
283 | lwd = 1.5,
284 | labcex = 0.001,
285 | xlab = expression(mu),
286 | ylab = expression(sigma[v])
287 | )
288 |
289 | contour(
290 | gridth1,
291 | gridth3,
292 | dgrid5,
293 | labels = NULL,
294 | nlevels = 5,
295 |   add = TRUE,
296 | col = "grey20",
297 | labcex = 0.001,
298 | lwd = 2
299 | )
300 |
301 | #------------------------------------------------------------------------------
302 | # Phi versus sigma_v (new proposal)
303 | #------------------------------------------------------------------------------
304 |
305 | contour(
306 | foo3,
307 | xlim = c(0.88, 1.05),
308 | ylim = c(0.00, 0.35),
309 | labels = NULL,
310 | bty = "n",
311 | col = "#66A61E",
312 | lwd = 1.5,
313 | labcex = 0.001,
314 | xlab = expression(phi),
315 | ylab = expression(sigma[v])
316 | )
317 |
318 | contour(
319 | gridth2,
320 | gridth3,
321 | dgrid6,
322 | labels = NULL,
323 | nlevels = 5,
324 |   add = TRUE,
325 | col = "grey20",
326 | labcex = 0.001,
327 | lwd = 2
328 | )
329 |
330 | if (savePlotToFile) {
331 | dev.off()
332 | }
--------------------------------------------------------------------------------
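
The three nested loops above enumerate all parameter pairs by hand; base R's expand.grid builds the same matrices in one call. expand.grid varies its first argument fastest, hence the argument order and the column swap in this sketch, which should reproduce the loop output up to column names:

# Sketch: the pair grids via expand.grid instead of nested loops.
grid1 <- as.matrix(expand.grid(gridth2, gridth1))[, 2:1]
grid2 <- as.matrix(expand.grid(gridth3, gridth1))[, 2:1]
grid3 <- as.matrix(expand.grid(gridth3, gridth2))[, 2:1]
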
/r/extra-code-for-tutorial/example4-sv-varyingN.R:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # Example of particle Metropolis-Hastings in a stochastic volatility model
3 | # The effect on mixing while varying N.
4 | #
5 | # Johan Dahlin
6 | # Documentation at https://github.com/compops/pmh-tutorial
7 | # Published under GNU General Public License
8 | ##############################################################################
9 |
10 | library("Quandl")
11 | library("mvtnorm")
12 | source("../helpers/stateEstimation.R")
13 | source("../helpers/parameterEstimation.R")
14 | source("../helpers/plotting.R")
15 |
16 | # Set the random seed to replicate results in tutorial
17 | set.seed(10)
18 |
19 | # Should the results be loaded from file (to quickly generate plots)
20 | loadSavedWorkspace <- FALSE
21 |
22 | # Should the proposals be tuned by a pilot run
23 | tuneProposals <- FALSE
24 |
25 | # Should we use the tuned proposals (requires "../savedWorkspaces/example4-sv-varyingN-proposals.RData")
26 | useTunedProposals <- FALSE
27 |
28 | ##############################################################################
29 | # Load data
30 | ##############################################################################
31 | d <-
32 | Quandl(
33 | "NASDAQOMX/OMXS30",
34 | start_date = "2012-01-02",
35 | end_date = "2014-01-02",
36 | type = "zoo"
37 | )
38 | y <- as.numeric(100 * diff(log(d$"Index Value")))
39 |
40 |
41 | ##############################################################################
42 | # Likelihood estimation using particle filter
43 | ##############################################################################
44 | # Parameters fixed at the posterior mean estimated in example5-sv.R
45 | theta <- c(-0.12, 0.96, 0.17)
46 |
47 | # No. particles in the particle filter to try out
48 | noParticles <- c(50, 100, 200, 300, 400, 500)
49 |
50 | # No. repetitions of log-likelihood estimate
51 | noSimulations <- 1000
52 |
53 | logLikelihoodEstimates <- matrix(0, nrow = length(noParticles), ncol = noSimulations)
54 | logLikelihoodVariance <- rep(0, length(noParticles))
55 | computationalTimePerSample <- rep(0, length(noParticles))
56 |
57 | if (!loadSavedWorkspace) {
58 | for (k in 1:length(noParticles)) {
59 | # Save the current time
60 | ptm <- proc.time()
61 |
62 | for (i in 1:noSimulations) {
63 | # Run the particle filter
64 | res <- particleFilterSVmodel(y, theta, noParticles[k])
65 |
66 |       # Save the log-likelihood estimate
67 | logLikelihoodEstimates[k, i] <- res$logLikelihood
68 | }
69 |
70 | # Compute the variance of the log-likelihood and computational time per sample
71 | logLikelihoodVariance[k] <- var(logLikelihoodEstimates[k, ])
72 | computationalTimePerSample[k] <- (proc.time() - ptm)[3] / noSimulations
73 |
74 | # Print to screen
75 |     print(paste0("Simulation: ", k, " of ", length(noParticles), " completed."))
76 |     print(paste0("No. particles: ", noParticles[k], " requires ", computationalTimePerSample[k], " seconds for computing one sample."))
77 | }
78 | }
79 |
80 | ##############################################################################
81 | # PMH
82 | ##############################################################################
83 | # The initial guess of the parameter (use the estimate of the posterior mean to
84 | # accelerate the algorithm, i.e., so fewer PMH iterations are needed).
85 | initialTheta <- theta
86 |
87 | # The length of the burn-in and the no. iterations of PMH ( noBurnInIterations < noIterations )
88 | noBurnInIterations <- 2500
89 | noIterations <- 7500
90 |
91 | # The standard deviation in the random walk proposal
92 | if (useTunedProposals) {
93 | load(file = "../savedWorkspaces/example4-sv-varyingN-proposals.RData")
94 | } else {
95 | proposals <- array(0, dim = c(length(noParticles), 3, 3))
96 | for (k in 1:length(noParticles)) {
97 | proposals[k, , ] <- diag(c(0.10, 0.01, 0.05) ^ 2)
98 | }
99 | }
100 |
101 | if (loadSavedWorkspace) {
102 | load("../savedWorkspaces/example4-sv-varyingN.RData")
103 | } else {
104 | resTheta <- array(0, dim = c(length(noParticles), noIterations - noBurnInIterations + 1, 3))
105 | computationalTimePerIteration <- rep(0, length(noParticles))
106 | acceptProbability <- rep(0, length(noParticles))
107 |
108 | for (k in 1:length(noParticles)) {
109 | # Save the current time
110 | ptm <- proc.time()
111 |
112 | # Run the PMH algorithm
113 | res <- particleMetropolisHastingsSVmodel(y, initialTheta, noParticles[k], noIterations, stepSize = proposals[k, ,])
114 |
115 | # Save the parameter trace
116 | resTheta[k, ,] <- res$theta[noBurnInIterations:noIterations,]
117 |
118 | # Compute acceptance probability and computational time per sample
119 | computationalTimePerIteration[k] <- (proc.time() - ptm)[3] / noIterations
120 | acceptProbability[k] <- mean(res$proposedThetaAccepted[noBurnInIterations:noIterations])
121 |
122 | # Print to screen
123 |     print(paste0("Simulation: ", k, " of ", length(noParticles), " completed."))
124 | }
125 | }
126 |
127 | ##############################################################################
128 | # Post-processing (computing IACT and IACT * time)
129 | ##############################################################################
130 | resThetaIACT <- matrix(0, nrow = length(noParticles), ncol = 3)
131 | resThetaIACTperSecond <- matrix(0, nrow = length(noParticles), ncol = 3)
132 |
133 | for (k in 1:length(noParticles)) {
134 | acf_mu <- acf(resTheta[k, , 1], plot = FALSE, lag.max = 250)
135 | acf_phi <- acf(resTheta[k, , 2], plot = FALSE, lag.max = 250)
136 | acf_sigmav <- acf(resTheta[k, , 3], plot = FALSE, lag.max = 250)
137 |
138 | resThetaIACT[k, ] <- 1 + 2 * c(sum(acf_mu$acf), sum(acf_phi$acf), sum(acf_sigmav$acf))
139 | resThetaIACTperSecond[k, ] <- resThetaIACT[k, ] / computationalTimePerIteration[k]
140 | }
141 |
142 | table <- rbind(noParticles, sqrt(logLikelihoodVariance), 100 * acceptProbability, apply(resThetaIACT, 1, max), apply(resThetaIACT, 1, max) * computationalTimePerIteration, computationalTimePerIteration)
143 | table <- round(table, 2)
144 | print(table)
145 |
146 | ##############################################################################
147 | # Tune the PMH proposal using a pilot run
148 | ##############################################################################
149 | if (tuneProposals) {
150 | proposals <- array(0, dim = c(length(noParticles), 3, 3))
151 |
152 | for (k in 1:length(noParticles)) {
153 | proposals[k, , ] <- cov(resTheta[k, , ]) * 2.562^2 / 3
154 | }
155 | save(proposals, file = "../savedWorkspaces/example4-sv-varyingN-proposals.RData")
156 | }
157 |
158 | # Save the workspace to file
159 | if (!loadSavedWorkspace) {
160 | save.image("../savedWorkspaces/example4-sv-varyingN.RData")
161 | }
--------------------------------------------------------------------------------
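
The table above collects the standard deviation of the log-likelihood estimate, the acceptance rate, the maximum IACT, and IACT times cost for each N. A common rule of thumb in the PMH literature is to choose the smallest N that brings the standard deviation of the log-likelihood estimate down to around 1.0-1.7; a sketch of that selection, assuming logLikelihoodVariance and noParticles as computed above (the threshold is a guideline, not part of this script):

# Sketch: rule-of-thumb choice of N based on sd of the log-likelihood estimate.
logLikelihoodStd <- sqrt(logLikelihoodVariance)
candidates <- noParticles[logLikelihoodStd < 1.5]
if (length(candidates) > 0) print(min(candidates))   # smallest N meeting the target
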
/r/helpers/dataGeneration.R:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # Generating data from a LGSS model
3 | #
4 | # Johan Dahlin
5 | # Documentation at https://github.com/compops/pmh-tutorial
6 | # Published under GNU General Public License
7 | ##############################################################################
8 | generateData <- function(theta, noObservations, initialState)
9 | {
10 | phi <- theta[1]
11 | sigmav <- theta[2]
12 | sigmae <- theta[3]
13 |
14 | state <- matrix(0, nrow = noObservations + 1, ncol = 1)
15 | observation <- matrix(0, nrow = noObservations + 1, ncol = 1)
16 |
17 | state[1] <- initialState
18 | observation[1] <- NA
19 |
20 | for (t in 2:(noObservations + 1)) {
21 | state[t] <- phi * state[t - 1] + sigmav * rnorm(1)
22 | observation[t] <- state[t] + sigmae * rnorm(1)
23 | }
24 |
25 | list(x = state, y = observation)
26 | }
--------------------------------------------------------------------------------
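
A minimal usage sketch, with parameter values taken from the LGSS examples and assuming it is run from the r/ directory: generateData returns the latent state x and the observations y, each of length noObservations + 1, with y[1] = NA since the initial state is not observed.

# Sketch: simulate T = 250 observations from the LGSS model.
source("helpers/dataGeneration.R")
set.seed(10)
data <- generateData(theta = c(0.75, 1.00, 0.10), noObservations = 250, initialState = 0)
str(data)   # list of x (251 x 1) and y (251 x 1); y[1] is NA
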
/r/helpers/parameterEstimation.R:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # Particle Metropolis-Hastings implementations for LGSS and SV models
3 | #
4 | # Johan Dahlin
5 | # Documentation at https://github.com/compops/pmh-tutorial
6 | # Published under GNU General Public License
7 | ##############################################################################
8 |
9 | # Particle Metropolis-Hastings (LGSS model)
10 | particleMetropolisHastings <- function(y, initialPhi, sigmav, sigmae,
11 | noParticles, initialState, noIterations, stepSize) {
12 |
13 | phi <- matrix(0, nrow = noIterations, ncol = 1)
14 | phiProposed <- matrix(0, nrow = noIterations, ncol = 1)
15 | logLikelihood <- matrix(0, nrow = noIterations, ncol = 1)
16 | logLikelihoodProposed <- matrix(0, nrow = noIterations, ncol = 1)
17 | proposedPhiAccepted <- matrix(0, nrow = noIterations, ncol = 1)
18 |
19 | # Set the initial parameter and estimate the initial log-likelihood
20 | phi[1] <- initialPhi
21 | theta <- c(phi[1], sigmav, sigmae)
22 | outputPF <- particleFilter(y, theta, noParticles, initialState)
23 |   logLikelihood[1] <- outputPF$logLikelihood
24 |
25 | for (k in 2:noIterations) {
26 | # Propose a new parameter
27 | phiProposed[k] <- phi[k - 1] + stepSize * rnorm(1)
28 |
29 |     # Estimate the log-likelihood (skip if the proposed system is unstable)
30 | if (abs(phiProposed[k]) < 1.0) {
31 | theta <- c(phiProposed[k], sigmav, sigmae)
32 | outputPF <- particleFilter(y, theta, noParticles, initialState)
33 | logLikelihoodProposed[k] <- outputPF$logLikelihood
34 | }
35 |
36 | # Compute the acceptance probability
37 | priorPart <- dnorm(phiProposed[k], log = TRUE)
38 | priorPart <- priorPart - dnorm(phi[k - 1], log = TRUE)
39 | likelihoodDifference <- logLikelihoodProposed[k] - logLikelihood[k - 1]
40 | acceptProbability <- exp(priorPart + likelihoodDifference)
41 | acceptProbability <- acceptProbability * (abs(phiProposed[k]) < 1.0)
42 |
43 | # Accept / reject step
44 | uniformRandomVariable <- runif(1)
45 | if (uniformRandomVariable < acceptProbability) {
46 | # Accept the parameter
47 | phi[k] <- phiProposed[k]
48 | logLikelihood[k] <- logLikelihoodProposed[k]
49 | proposedPhiAccepted[k] <- 1
50 | } else {
51 | # Reject the parameter
52 | phi[k] <- phi[k - 1]
53 | logLikelihood[k] <- logLikelihood[k - 1]
54 | proposedPhiAccepted[k] <- 0
55 | }
56 |
57 | # Write out progress
58 | if (k %% 100 == 0) {
59 | cat(
60 | sprintf(
61 | "#####################################################################\n"
62 | )
63 | )
64 |       cat(sprintf(" Iteration: %d of %d completed.\n\n", k, noIterations))
65 | cat(sprintf(" Current state of the Markov chain: %.4f \n", phi[k]))
66 | cat(sprintf(" Proposed next state of the Markov chain: %.4f \n", phiProposed[k]))
67 |       cat(sprintf(" Current posterior mean: %.4f \n", mean(phi[1:k])))
68 |       cat(sprintf(" Current acceptance rate: %.4f \n", mean(proposedPhiAccepted[1:k])))
69 | cat(
70 | sprintf(
71 | "#####################################################################\n"
72 | )
73 | )
74 | }
75 | }
76 |
77 | phi
78 | }
79 |
80 | ##############################################################################
81 | # Particle Metropolis-Hastings (SV model)
82 | ##############################################################################
83 | particleMetropolisHastingsSVmodel <- function(y, initialTheta, noParticles,
84 | noIterations, stepSize) {
85 |
86 | T <- length(y) - 1
87 |
88 | xHatFiltered <- matrix(0, nrow = noIterations, ncol = T + 1)
89 | xHatFilteredProposed <- matrix(0, nrow = noIterations, ncol = T + 1)
90 | theta <- matrix(0, nrow = noIterations, ncol = 3)
91 | thetaProposed <- matrix(0, nrow = noIterations, ncol = 3)
92 | logLikelihood <- matrix(0, nrow = noIterations, ncol = 1)
93 | logLikelihoodProposed <- matrix(0, nrow = noIterations, ncol = 1)
94 | proposedThetaAccepted <- matrix(0, nrow = noIterations, ncol = 1)
95 |
96 | # Set the initial parameter and estimate the initial log-likelihood
97 | theta[1, ] <- initialTheta
98 | res <- particleFilterSVmodel(y, theta[1, ], noParticles)
99 | logLikelihood[1] <- res$logLikelihood
100 | xHatFiltered[1, ] <- res$xHatFiltered
101 |
102 | for (k in 2:noIterations) {
103 | # Propose a new parameter
104 | thetaProposed[k, ] <- rmvnorm(1, mean = theta[k - 1, ], sigma = stepSize)
105 |
106 |     # Estimate the log-likelihood (skip if the proposed system is unstable)
107 | if ((abs(thetaProposed[k, 2]) < 1.0) && (thetaProposed[k, 3] > 0.0)) {
108 | res <- particleFilterSVmodel(y, thetaProposed[k, ], noParticles)
109 | logLikelihoodProposed[k] <- res$logLikelihood
110 | xHatFilteredProposed[k, ] <- res$xHatFiltered
111 | }
112 |
113 | # Compute difference in the log-priors
114 | priorMu <- dnorm(thetaProposed[k, 1], 0, 1, log = TRUE)
115 | priorMu <- priorMu - dnorm(theta[k - 1, 1], 0, 1, log = TRUE)
116 | priorPhi <- dnorm(thetaProposed[k, 2], 0.95, 0.05, log = TRUE)
117 | priorPhi <- priorPhi - dnorm(theta[k - 1, 2], 0.95, 0.05, log = TRUE)
118 | priorSigmaV <- dgamma(thetaProposed[k, 3], 2, 10, log = TRUE)
119 | priorSigmaV <- priorSigmaV - dgamma(theta[k - 1, 3], 2, 10, log = TRUE)
120 | prior <- priorMu + priorPhi + priorSigmaV
121 |
122 | # Compute the acceptance probability
123 | likelihoodDifference <- logLikelihoodProposed[k] - logLikelihood[k - 1]
124 | acceptProbability <- exp(prior + likelihoodDifference)
125 |
126 | acceptProbability <- acceptProbability * (abs(thetaProposed[k, 2]) < 1.0)
127 | acceptProbability <- acceptProbability * (thetaProposed[k, 3] > 0.0)
128 |
129 | # Accept / reject step
130 | uniformRandomVariable <- runif(1)
131 | if (uniformRandomVariable < acceptProbability) {
132 | # Accept the parameter
133 | theta[k, ] <- thetaProposed[k, ]
134 | logLikelihood[k] <- logLikelihoodProposed[k]
135 | xHatFiltered[k, ] <- xHatFilteredProposed[k, ]
136 | proposedThetaAccepted[k] <- 1
137 | } else {
138 | # Reject the parameter
139 | theta[k, ] <- theta[k - 1, ]
140 | logLikelihood[k] <- logLikelihood[k - 1]
141 | xHatFiltered[k, ] <- xHatFiltered[k - 1, ]
142 | proposedThetaAccepted[k] <- 0
143 | }
144 |
145 | # Write out progress
146 | if (k %% 100 == 0) {
147 | cat(
148 | sprintf(
149 | "#####################################################################\n"
150 | )
151 | )
152 |       cat(sprintf(" Iteration: %d of %d completed.\n\n", k, noIterations))
153 |
154 | cat(sprintf(
155 | " Current state of the Markov chain: %.4f %.4f %.4f \n",
156 | theta[k, 1],
157 | theta[k, 2],
158 | theta[k, 3]
159 | ))
160 | cat(
161 | sprintf(
162 | " Proposed next state of the Markov chain: %.4f %.4f %.4f \n",
163 | thetaProposed[k, 1],
164 | thetaProposed[k, 2],
165 | thetaProposed[k, 3]
166 | )
167 | )
168 | cat(sprintf(
169 | " Current posterior mean: %.4f %.4f %.4f \n",
170 |         mean(theta[1:k, 1]),
171 |         mean(theta[1:k, 2]),
172 |         mean(theta[1:k, 3])
173 | ))
174 |       cat(sprintf(" Current acceptance rate: %.4f \n", mean(proposedThetaAccepted[1:k])))
175 | cat(
176 | sprintf(
177 | "#####################################################################\n"
178 | )
179 | )
180 | }
181 | }
182 |
183 | list(theta = theta, xHatFiltered = xHatFiltered, proposedThetaAccepted = proposedThetaAccepted)
184 | }
185 |
186 | ##############################################################################
187 | # Particle Metropolis-Hastings (reparameterised SV model)
188 | ##############################################################################
189 | particleMetropolisHastingsSVmodelReparameterised <- function(y, initialTheta,
190 | noParticles, noIterations, stepSize) {
191 |
192 | T <- length(y) - 1
193 |
194 | xHatFiltered <- matrix(0, nrow = noIterations, ncol = T + 1)
195 | xHatFilteredProposed <- matrix(0, nrow = noIterations, ncol = T + 1)
196 | theta <- matrix(0, nrow = noIterations, ncol = 3)
197 | thetaProposed <- matrix(0, nrow = noIterations, ncol = 3)
198 | thetaTransformed <- matrix(0, nrow = noIterations, ncol = 3)
199 | thetaTransformedProposed <- matrix(0, nrow = noIterations, ncol = 3)
200 | logLikelihood <- matrix(0, nrow = noIterations, ncol = 1)
201 | logLikelihoodProposed <- matrix(0, nrow = noIterations, ncol = 1)
202 | proposedThetaAccepted <- matrix(0, nrow = noIterations, ncol = 1)
203 |
204 | # Set the initial parameter and estimate the initial log-likelihood
205 | theta[1, ] <- initialTheta
206 | res <- particleFilterSVmodel(y, theta[1, ], noParticles)
207 | thetaTransformed[1, ] <- c(theta[1, 1], atanh(theta[1, 2]), log(theta[1, 3]))
208 | logLikelihood[1] <- res$logLikelihood
209 | xHatFiltered[1, ] <- res$xHatFiltered
210 |
211 | for (k in 2:noIterations) {
212 | # Propose a new parameter
213 | thetaTransformedProposed[k, ] <- rmvnorm(1, mean = thetaTransformed[k - 1, ], sigma = stepSize)
214 |
215 | # Run the particle filter
216 | thetaProposed[k, ] <- c(thetaTransformedProposed[k, 1], tanh(thetaTransformedProposed[k, 2]), exp(thetaTransformedProposed[k, 3]))
217 | res <- particleFilterSVmodel(y, thetaProposed[k, ], noParticles)
218 | xHatFilteredProposed[k, ] <- res$xHatFiltered
219 | logLikelihoodProposed[k] <- res$logLikelihood
220 |
221 | # Compute the acceptance probability
222 | logPrior1 <- dnorm(thetaProposed[k, 1], log = TRUE) - dnorm(theta[k - 1, 1], log = TRUE)
223 |     logPrior2 <- dnorm(thetaProposed[k, 2], 0.95, 0.05, log = TRUE) - dnorm(theta[k - 1, 2], 0.95, 0.05, log = TRUE)
224 | logPrior3 <- dgamma(thetaProposed[k, 3], 3, 10, log = TRUE) - dgamma(theta[k - 1, 3], 3, 10, log = TRUE)
225 | logPrior <- logPrior1 + logPrior2 + logPrior3
226 |
227 | logJacob1 <- log(abs(1 - thetaProposed[k, 2]^2)) - log(abs(1 - theta[k - 1, 2]^2))
228 | logJacob2 <- log(abs(thetaProposed[k, 3])) - log(abs(theta[k - 1, 3]))
229 | logJacob <- logJacob1 + logJacob2
230 |
231 | acceptProbability <- exp(logPrior + logLikelihoodProposed[k] - logLikelihood[k - 1] + logJacob)
232 |
233 | # Accept / reject step
234 | uniformRandomVariable <- runif(1)
235 | if (uniformRandomVariable < acceptProbability) {
236 | # Accept the parameter
237 | theta[k, ] <- thetaProposed[k, ]
238 | thetaTransformed[k, ] <- thetaTransformedProposed[k, ]
239 | logLikelihood[k] <- logLikelihoodProposed[k]
240 | xHatFiltered[k, ] <- xHatFilteredProposed[k, ]
241 | proposedThetaAccepted[k] <- 1
242 | } else {
243 | # Reject the parameter
244 | theta[k, ] <- theta[k - 1, ]
245 | thetaTransformed[k, ] <- thetaTransformed[k - 1, ]
246 | logLikelihood[k] <- logLikelihood[k - 1]
247 | xHatFiltered[k, ] <- xHatFiltered[k - 1, ]
248 | proposedThetaAccepted[k] <- 0
249 | }
250 |
251 | # Write out progress
252 | if (k %% 100 == 0) {
253 | cat(
254 | sprintf(
255 | "#####################################################################\n"
256 | )
257 | )
258 |       cat(sprintf(" Iteration: %d of %d completed.\n\n", k, noIterations))
259 | cat(sprintf(
260 | " Current state of the Markov chain: %.4f %.4f %.4f \n",
261 | thetaTransformed[k, 1],
262 | thetaTransformed[k, 2],
263 | thetaTransformed[k, 3]
264 | ))
265 | cat(
266 | sprintf(
267 | " Proposed next state of the Markov chain: %.4f %.4f %.4f \n",
268 | thetaTransformedProposed[k, 1],
269 | thetaTransformedProposed[k, 2],
270 | thetaTransformedProposed[k, 3]
271 | )
272 | )
273 | cat(sprintf(
274 | " Current posterior mean: %.4f %.4f %.4f \n",
275 |         mean(theta[1:k, 1]),
276 |         mean(theta[1:k, 2]),
277 |         mean(theta[1:k, 3])
278 | ))
279 |       cat(sprintf(" Current acceptance rate: %.4f \n", mean(proposedThetaAccepted[1:k])))
280 | cat(
281 | sprintf(
282 | "#####################################################################\n"
283 | )
284 | )
285 |
286 | }
287 | }
288 |
289 | list(theta = theta,
290 | xHatFiltered = xHatFiltered,
291 | thetaTransformed = thetaTransformed)
292 | }
--------------------------------------------------------------------------------
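
The log-Jacobian terms in the reparameterised sampler come from d tanh(u)/du = 1 - tanh(u)^2 and d exp(u)/du = exp(u), which evaluate to log(1 - phi^2) and log(sigma_v) at the current parameters. A quick finite-difference check of the first identity:

# Sketch: numerical check of the Jacobian for phi = tanh(phiTransformed).
phiTransformed <- 0.5
h <- 1e-6
(tanh(phiTransformed + h) - tanh(phiTransformed)) / h   # numerical derivative
1 - tanh(phiTransformed)^2                              # analytical, agrees to ~6 digits
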
/r/helpers/plotting.R:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # Make plots for tutorial
3 | #
4 | # Johan Dahlin
5 | # Documentation at https://github.com/compops/pmh-tutorial
6 | # Published under GNU General Public License
7 | ##############################################################################
8 | makePlotsParticleMetropolisHastingsSVModel <- function(y, res, noBurnInIterations, noIterations, nPlot) {
9 |
10 | # Extract the states after burn-in
11 | resTh <- res$theta[noBurnInIterations:noIterations, ]
12 | resXh <- res$xHatFiltered[noBurnInIterations:noIterations, ]
13 |
14 | # Estimate the posterior mean and the corresponding standard deviation
15 | thhat <- colMeans(resTh)
16 | thhatSD <- apply(resTh, 2, sd)
17 |
18 |   # Estimate the log-volatility and the corresponding standard deviation
19 | xhat <- colMeans(resXh)
20 | xhatSD <- apply(resXh, 2, sd)
21 |
22 |   # Plot the parameter posterior estimate; the solid black line indicates the posterior mean
23 |   # Plot the trace of the Markov chain after burn-in; the solid black line indicates the posterior mean
24 | layout(matrix(c(1, 1, 1, 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11), 5, 3, byrow = TRUE))
25 | par(mar = c(4, 5, 0, 0))
26 |
27 | # Grid for plotting the data and log-volatility
28 | gridy <- seq(1, length(y))
29 | gridx <- seq(1, length(y) - 1)
30 |
31 | #---------------------------------------------------------------------------
32 | # Observations
33 | #---------------------------------------------------------------------------
34 | plot(
35 | y,
36 | col = "#1B9E77",
37 | lwd = 1,
38 | type = "l",
39 | xlab = "time",
40 | ylab = "log-returns",
41 | ylim = c(-5, 5),
42 | bty = "n"
43 | )
44 | polygon(
45 | c(gridy, rev(gridy)),
46 | c(y, rep(-5, length(gridy))),
47 | border = NA,
48 | col = rgb(t(col2rgb("#1B9E77")) / 256, alpha = 0.25)
49 | )
50 |
51 | #---------------------------------------------------------------------------
52 | # Log-volatility
53 | #---------------------------------------------------------------------------
54 | plot(
55 | xhat[-1],
56 | col = "#D95F02",
57 | lwd = 1.5,
58 | type = "l",
59 | xlab = "time",
60 | ylab = "log-volatility estimate",
61 | ylim = c(-2, 2),
62 | bty = "n"
63 | )
64 | xhat_upper <- xhat[-1] + 1.96 * xhatSD[-1]
65 | xhat_lower <- xhat[-1] - 1.96 * xhatSD[-1]
66 |
67 | polygon(
68 | c(gridx, rev(gridx)),
69 | c(xhat_upper, rev(xhat_lower)),
70 | border = NA,
71 | col = rgb(t(col2rgb("#D95F02")) / 256, alpha = 0.25)
72 | )
73 |
74 | #---------------------------------------------------------------------------
75 | # Parameter posteriors
76 | #---------------------------------------------------------------------------
77 |
78 | grid <- seq(noBurnInIterations, noBurnInIterations + nPlot - 1, 1)
79 | parameterNames <- c(expression(mu), expression(phi), expression(sigma[v]))
80 | parameterACFnames <- c(expression("ACF of " * mu), expression("ACF of " * phi), expression("ACF of " * sigma[v]))
81 | parameterScales <- c(-1, 1, 0.88, 1.0, 0, 0.4)
82 | parameterScales <- matrix(parameterScales, nrow = 3, ncol = 2, byrow = TRUE)
83 | parameterColors <- c("#7570B3", "#E7298A", "#66A61E")
84 | iact <- c()
85 |
86 | for (k in 1:3) {
87 |
88 | # Histogram of the posterior
89 | hist(
90 | resTh[, k],
91 | breaks = floor(sqrt(noIterations - noBurnInIterations)),
92 | col = rgb(t(col2rgb(parameterColors[k])) / 256, alpha = 0.25),
93 | border = NA,
94 | xlab = parameterNames[k],
95 | ylab = "posterior estimate",
96 | main = "",
97 | xlim = parameterScales[k,],
98 | freq = FALSE
99 | )
100 |
101 | # Add lines for the kernel density estimate of the posterior
102 | kde <- density(resTh[, k], kernel = "e", from = parameterScales[k, 1], to = parameterScales[k, 2])
103 | lines(kde, lwd = 2, col = parameterColors[k])
104 |
105 | # Plot the estimate of the posterior mean
106 | abline(v = thhat[k], lwd = 1, lty = "dotted")
107 |
108 | # Add lines for prior
109 | prior_grid <- seq(parameterScales[k, 1], parameterScales[k, 2], 0.01)
110 |     if (k == 1) {prior_values <- dnorm(prior_grid, 0, 1)}
111 |     if (k == 2) {prior_values <- dnorm(prior_grid, 0.95, 0.05)}
112 |     if (k == 3) {prior_values <- dgamma(prior_grid, 2, 10)}
113 | lines(prior_grid, prior_values, col = "darkgrey")
114 |
115 | # Plot trace of the Markov chain
116 | plot(
117 | grid,
118 | resTh[1:nPlot, k],
119 | col = parameterColors[k],
120 | type = "l",
121 | xlab = "iteration",
122 | ylab = parameterNames[k],
123 | ylim = parameterScales[k,],
124 | bty = "n"
125 | )
126 | polygon(
127 | c(grid, rev(grid)),
128 | c(resTh[1:nPlot, k], rep(-1, length(grid))),
129 | border = NA,
130 | col = rgb(t(col2rgb(parameterColors[k])) / 256, alpha = 0.25)
131 | )
132 | abline(h = thhat[k], lwd = 1, lty = "dotted")
133 |
134 | # Plot the autocorrelation function
135 | acf_res <- acf(resTh[, k], plot = FALSE, lag.max = 100)
136 | plot(
137 | acf_res$lag,
138 | acf_res$acf,
139 | col = parameterColors[k],
140 | type = "l",
141 | xlab = "iteration",
142 | ylab = parameterACFnames[k],
143 | lwd = 2,
144 | ylim = c(-0.2, 1),
145 | bty = "n"
146 | )
147 | polygon(
148 | c(acf_res$lag, rev(acf_res$lag)),
149 | c(acf_res$acf, rep(0, length(acf_res$lag))),
150 | border = NA,
151 | col = rgb(t(col2rgb(parameterColors[k])) / 256, alpha = 0.25)
152 | )
153 | abline(h = 1.96 / sqrt(noIterations - noBurnInIterations), lty = "dotted")
154 | abline(h = -1.96 / sqrt(noIterations - noBurnInIterations), lty = "dotted")
155 |
156 | iact <- c(iact, 1 + 2 * sum(acf_res$acf))
157 | }
158 |
159 | iact
160 | }
--------------------------------------------------------------------------------
/r/helpers/stateEstimation.R:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | # State estimation in LGSS and SV models using Kalman and particle filters.
3 | #
4 | # Johan Dahlin
5 | # Documentation at https://github.com/compops/pmh-tutorial
6 | # Published under GNU General Public License
7 | ##############################################################################
8 |
9 | ##############################################################################
10 | # Fully-adapted particle filter for the linear Gaussian SSM
11 | ##############################################################################
12 | particleFilter <- function(y, theta, noParticles, initialState) {
13 |
14 | T <- length(y) - 1
15 | phi <- theta[1]
16 | sigmav <- theta[2]
17 | sigmae <- theta[3]
18 |
19 | # Initialise variables
20 | particles <- matrix(0, nrow = noParticles, ncol = T + 1)
21 | ancestorIndices <- matrix(0, nrow = noParticles, ncol = T + 1)
22 | weights <- matrix(1, nrow = noParticles, ncol = T + 1)
23 | normalisedWeights <- matrix(0, nrow = noParticles, ncol = T + 1)
24 | xHatFiltered <- matrix(0, nrow = T, ncol = 1)
25 | logLikelihood <- 0
26 |
27 | ancestorIndices[, 1] <- 1:noParticles
28 |   particles[, 1] <- initialState
29 |   xHatFiltered[1] <- initialState
30 |   normalisedWeights[, 1] <- 1 / noParticles
31 |
32 | for (t in 2:T) {
33 | # Resample ( multinomial )
34 | newAncestors <- sample(noParticles, replace = TRUE, prob = normalisedWeights[, t - 1])
35 | ancestorIndices[, 1:(t - 1)] <- ancestorIndices[newAncestors, 1:(t - 1)]
36 | ancestorIndices[, t] <- newAncestors
37 |
38 | # Propagate
39 | part1 <- (sigmav^(-2) + sigmae^(-2))^(-1)
40 | part2 <- sigmae^(-2) * y[t]
41 | part2 <- part2 + sigmav^(-2) * phi * particles[newAncestors, t - 1]
42 | particles[, t] <- part1 * part2 + rnorm(noParticles, 0, sqrt(part1))
43 |
44 | # Compute weights
45 | yhatMean <- phi * particles[, t]
46 |     yhatStdDev <- sqrt(sigmae^2 + sigmav^2)
47 |     weights[, t] <- dnorm(y[t + 1], yhatMean, yhatStdDev, log = TRUE)
48 |
49 | maxWeight <- max(weights[, t])
50 | weights[, t] <- exp(weights[, t] - maxWeight)
51 |
52 | sumWeights <- sum(weights[, t])
53 | normalisedWeights[, t] <- weights[, t] / sumWeights
54 |
55 | # Estimate the state
56 | xHatFiltered[t] <- mean(particles[, t])
57 |
58 | # Estimate the log-likelihood
59 | predictiveLikelihood <- maxWeight + log(sumWeights) - log(noParticles)
60 | logLikelihood <- logLikelihood + predictiveLikelihood
61 |
62 | }
63 |
64 | list(xHatFiltered = xHatFiltered,
65 | logLikelihood = logLikelihood,
66 | particles = particles,
67 | weights = normalisedWeights)
68 |
69 | }
70 |
71 | ##############################################################################
72 | # Kalman filter for the linear Gaussian SSM
73 | ##############################################################################
74 | kalmanFilter <- function(y, theta, initialState, initialStateCovariance) {
75 |
76 | T <- length(y)
77 | yHatPredicted <- matrix(initialState, nrow = T, ncol = 1)
78 | xHatFiltered <- matrix(initialState, nrow = T, ncol = 1)
79 | xHatPredicted <- matrix(initialState, nrow = T + 1, ncol = 1)
80 | predictedStateCovariance <- initialStateCovariance
81 | logLikelihood <- 0
82 |
83 | A <- theta[1]
84 | C <- 1
85 | Q <- theta[2] ^ 2
86 | R <- theta[3] ^ 2
87 |
88 | for (t in 2:T) {
89 | # Correction step
90 | S <- C * predictedStateCovariance * C + R
91 | kalmanGain <- predictedStateCovariance * C / S
92 | filteredStateCovariance <- predictedStateCovariance - kalmanGain * S * kalmanGain
93 |
94 | yHatPredicted[t] <- C * xHatPredicted[t]
95 | xHatFiltered[t] <- xHatPredicted[t] + kalmanGain * (y[t] - yHatPredicted[t])
96 |
97 | # Prediction step
98 | xHatPredicted[t + 1] <- A * xHatFiltered[t]
99 | predictedStateCovariance <- A * filteredStateCovariance * A + Q
100 |
101 | # Estimate loglikelihood (not in the last iteration, to be able to compare with faPF)
102 | if (t < T) {
103 |       logLikelihood <- logLikelihood + dnorm(y[t], yHatPredicted[t], sqrt(S), log = TRUE)
104 | }
105 | }
106 |
107 | list(xHatFiltered = xHatFiltered, logLikelihood = logLikelihood)
108 | }
109 |
110 | ##############################################################################
111 | # Bootstrap particle filter for the stochastic volatility model
112 | ##############################################################################
113 | particleFilterSVmodel <- function(y, theta, noParticles) {
114 |
115 | T <- length(y) - 1
116 | mu <- theta[1]
117 | phi <- theta[2]
118 | sigmav <- theta[3]
119 |
120 | particles <- matrix(0, nrow = noParticles, ncol = T + 1)
121 | ancestorIndices <- matrix(0, nrow = noParticles, ncol = T + 1)
122 | weights <- matrix(1, nrow = noParticles, ncol = T + 1)
123 | normalisedWeights <- matrix(0, nrow = noParticles, ncol = T + 1)
124 | xHatFiltered <- matrix(0, nrow = T, ncol = 1)
125 | logLikelihood <- 0
126 |
127 | ancestorIndices[, 1] <- 1:noParticles
128 |   normalisedWeights[, 1] <- 1 / noParticles
129 |
130 | # Generate initial state
131 | particles[, 1] <- rnorm(noParticles, mu, sigmav / sqrt(1 - phi^2))
132 |
133 | for (t in 2:(T + 1)) {
134 | # Resample ( multinomial )
135 | newAncestors <- sample(noParticles, replace = TRUE, prob = normalisedWeights[, t - 1])
136 | ancestorIndices[, 1:(t - 1)] <- ancestorIndices[newAncestors, 1:(t - 1)]
137 | ancestorIndices[, t] <- newAncestors
138 |
139 | # Propagate
140 | part1 <- mu + phi * (particles[newAncestors, t - 1] - mu)
141 | particles[, t] <- part1 + rnorm(noParticles, 0, sigmav)
142 |
143 | # Compute weights
144 | yhatMean <- 0
145 |     yhatStdDev <- exp(particles[, t] / 2)
146 |     weights[, t] <- dnorm(y[t - 1], yhatMean, yhatStdDev, log = TRUE)
147 |
148 | maxWeight <- max(weights[, t])
149 | weights[, t] <- exp(weights[, t] - maxWeight)
150 |
151 | sumWeights <- sum(weights[, t])
152 | normalisedWeights[, t] <- weights[, t] / sumWeights
153 |
154 | # Estimate the log-likelihood
155 | logLikelihood <- logLikelihood + maxWeight + log(sumWeights) - log(noParticles)
156 |
157 | }
158 |
159 |   # Sample a state trajectory using the normalised weights at the final time step
160 |   ancestorIndex <- sample(noParticles, 1, prob = normalisedWeights[, T + 1])
161 | xHatFiltered <- particles[cbind(ancestorIndices[ancestorIndex, ], 1:(T + 1))]
162 |
163 | list(xHatFiltered = xHatFiltered, logLikelihood = logLikelihood)
164 | }
--------------------------------------------------------------------------------
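
Both particle filters convert log-weights into a likelihood contribution with the max-subtraction (log-sum-exp) trick: subtracting the largest log-weight before exponentiating avoids numerical underflow, and the subtracted constant is added back on the log scale. A stand-alone sketch; the function name logMeanExp is illustrative:

# Sketch: numerically stable log(mean(exp(logWeights))), as used in the
# logLikelihood updates above.
logMeanExp <- function(logWeights) {
  maxWeight <- max(logWeights)
  maxWeight + log(sum(exp(logWeights - maxWeight))) - log(length(logWeights))
}
logMeanExp(c(-1000, -1001, -1002))   # finite, whereas log(mean(exp(.))) underflows to -Inf
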