├── .github
│   └── workflows
│       └── python-app.yml
├── .gitignore
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── experiments
│   ├── README.md
│   ├── co2
│   │   ├── co2_mm_mlo.txt
│   │   ├── co2_weekly_mlo.txt
│   │   └── mcmc.sh
│   ├── sunspots
│   │   ├── map.sh
│   │   ├── mcmc.sh
│   │   └── sunspots.csv
│   └── toy_models
│       ├── mcmc.sh
│       ├── run_all.sh
│       └── speed_and_stability.sh
├── notebooks
│   ├── PSSGP101.ipynb
│   └── data
│       └── regression_1D.csv
├── pssgp
│   ├── __init__.py
│   ├── config.py
│   ├── experiments
│   │   ├── __init__.py
│   │   ├── co2
│   │   │   ├── __init__.py
│   │   │   ├── common.py
│   │   │   ├── mcmc.py
│   │   │   └── speed_and_stability.py
│   │   ├── common.py
│   │   ├── sunspot
│   │   │   ├── __init__.py
│   │   │   ├── common.py
│   │   │   ├── map.py
│   │   │   ├── mcmc.py
│   │   │   └── speed_and_stability.py
│   │   └── toy_models
│   │       ├── __init__.py
│   │       ├── common.py
│   │       ├── mcmc.py
│   │       └── speed_and_stability.py
│   ├── kalman
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── parallel.py
│   │   └── sequential.py
│   ├── kernels
│   │   ├── __init__.py
│   │   ├── base.py
│   │   ├── matern
│   │   │   ├── __init__.py
│   │   │   ├── common.py
│   │   │   ├── matern12.py
│   │   │   ├── matern32.py
│   │   │   └── matern52.py
│   │   ├── math_utils.py
│   │   ├── periodic.py
│   │   └── rbf.py
│   ├── misc_utils.py
│   ├── model.py
│   └── toymodels
│       ├── README.md
│       ├── __init__.py
│       └── data_funcs.py
├── requirements.txt
├── setup.py
└── tests
    ├── test_gp_vs_kfs.py
    ├── test_periodic.py
    └── test_rbf.py

/.github/workflows/python-app.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a single version of Python
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 | 
4 | name: Python application
5 | 
6 | on:
7 |   push:
8 |     branches: [ main ]
9 |   pull_request:
10 |     branches: [ main ]
11 | 
12 | jobs:
13 |   build:
14 | 
15 |     runs-on: ubuntu-latest
16 | 
17 |     steps:
18 |     - uses: actions/checkout@v2
19 |     - name: Set up Python 3.8
20 |       uses: actions/setup-python@v2
21 |       with:
22 |         python-version: 3.8
23 |     - name: Install
24 |       run: |
25 |         python -m pip install --upgrade pip
26 |         pip install flake8 pytest
27 |         if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
28 |         python setup.py develop
29 |     - name: Lint with flake8
30 |       run: |
31 |         # stop the build if there are Python syntax errors or undefined names
32 |         flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
33 |         # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
34 |         flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
35 |     - name: Test with pytest
36 |       run: |
37 |         # Runs all tests
38 |         pytest tests
39 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | /matlab-reference/
2 | .idea/
3 | experiments/results/
4 | venv/
5 | 
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to contribute
2 | 
3 | We welcome any contribution to the code, with the only caveat that all PRs should be accompanied by an issue.
4 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2021 EEA-sensors
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # parallel-gps
2 | 
3 | Companion code leveraging [GPflow](https://gpflow.readthedocs.io/en/master/) for the paper Temporal Gaussian Process Regression in Logarithmic Time.
4 | 
5 | Please cite the following paper [(arXiv preprint)](https://arxiv.org/abs/2102.09964) when using the code:
6 | 
7 | ```
8 | @inproceedings{corenflos2022temporal,
9 |     title={Temporal {G}aussian Process Regression in Logarithmic Time},
10 |     author={Corenflos, Adrien and Zhao, Zheng and S{\"a}rkk{\"a}, Simo},
11 |     booktitle={2022 25th International Conference on Information Fusion (FUSION)},
12 |     pages={1--5},
13 |     year={2022},
14 |     organization={IEEE}
15 | }
16 | ```
17 | 
18 | What is it?
19 | -----------
20 | 
21 | This is an implementation of temporally parallelized and sequential state-space Gaussian processes, with CPU and GPU
22 | support, leveraging GPflow as a framework and TensorFlow as a computational backend.
23 | 
24 | Supported covariance functions
25 | ------------------------------
26 | 
27 | * Matern 12, 32, 52
28 | * RBF
29 | * Quasi-periodic
30 | * Sum and product of the above
31 | 
32 | Installation
33 | ------------
34 | Clone or download the project.
35 | Run `python setup.py [install|develop]` depending on the level of installation you want.
36 | Note that in order to use the GPU capabilities you will need to install a TensorFlow-compatible CUDA version.
37 | Note that the `requirements.txt` file is a superset of what is actually necessary to use the library and also contains packages
38 | required for unit testing only.
39 | 
40 | Example
41 | -------
42 | 
43 | ```python
44 | from pssgp.kernels import RBF
45 | from pssgp.model import StateSpaceGP
46 | from gpflow.models import GPR
47 | 
48 | data = ...  # Same format as for GPflow
49 | noise_variance = 1.
50 | lengthscale = 1.
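# (Clarifying note: data is a tuple (t, y) of column vectors, as for GPflow's GPR;
#  noise_variance is the observation-noise variance, while lengthscale and the
#  variance below are the RBF kernel hyperparameters.)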
51 | variance = 0.1
52 | 
53 | order = 6  # Order of the RBF approximation for (P)SSGP, will not be used if the GP model is GPR
54 | balancing_iter = 5  # Number of balancing steps for the resulting SDE to make it more stable, will not be used if the GP model is GPR
55 | 
56 | cov_function = RBF(variance=variance, lengthscales=lengthscale, order=order, balancing_iter=balancing_iter)
57 | 
58 | gp = GPR(data=data, kernel=cov_function, noise_variance=noise_variance)
59 | ssgp = StateSpaceGP(data=data, kernel=cov_function, noise_variance=noise_variance, parallel=False)
60 | 
61 | pssgp = StateSpaceGP(data=data, kernel=cov_function, noise_variance=noise_variance, parallel=True, max_parallel=1000)
62 | # max_parallel should be greater than n_training + n_pred
63 | 
64 | for model in [gp, ssgp, pssgp]:
65 |     print(model.maximum_log_likelihood_objective())
66 | 
67 | ```
68 | For more examples, see the notebooks or the runnable scripts in the experiments folder, which reproduce the results of our paper.
--------------------------------------------------------------------------------
/experiments/README.md:
--------------------------------------------------------------------------------
1 | This folder contains the data and shell scripts for the experiments, together with the results.
--------------------------------------------------------------------------------
/experiments/co2/co2_mm_mlo.txt:
--------------------------------------------------------------------------------
1 | # --------------------------------------------------------------------
2 | # USE OF NOAA ESRL DATA
3 | # 
4 | # These data are made freely available to the public and the
5 | # scientific community in the belief that their wide dissemination
6 | # will lead to greater understanding and new scientific insights.
7 | # The availability of these data does not constitute publication
8 | # of the data. NOAA relies on the ethics and integrity of the user to
9 | # ensure that GML receives fair credit for their work. If the data
10 | # are obtained for potential use in a publication or presentation,
11 | # GML should be informed at the outset of the nature of this work.
12 | # If the GML data are essential to the work, or if an important
13 | # result or conclusion depends on the GML data, co-authorship
14 | # may be appropriate. This should be discussed at an early stage in
15 | # the work. Manuscripts using the GML data should be sent to GML
16 | # for review before they are submitted for publication so we can
17 | # ensure that the quality and limitations of the data are accurately
18 | # represented.
19 | # 
20 | # Contact: Pieter Tans (303 497 6678; pieter.tans@noaa.gov)
21 | # 
22 | # File Creation: Fri Feb 5 07:55:42 2021
23 | # 
24 | # RECIPROCITY
25 | # 
26 | # Use of these data implies an agreement to reciprocate.
27 | # Laboratories making similar measurements agree to make their
28 | # own data available to the general public and to the scientific
29 | # community in an equally complete and easily accessible form.
30 | # Modelers are encouraged to make available to the community,
31 | # upon request, their own tools used in the interpretation
32 | # of the GML data, namely well documented model code, transport
33 | # fields, and additional information necessary for other
34 | # scientists to repeat the work and to run modified versions.
35 | # Model availability includes collaborative support for new
36 | # users of the models.
37 | # -------------------------------------------------------------------- 38 | # 39 | # 40 | # See www.esrl.noaa.gov/gmd/ccgg/trends/ for additional details. 41 | # 42 | # Data from March 1958 through April 1974 have been obtained by C. David Keeling 43 | # of the Scripps Institution of Oceanography (SIO) and were obtained from the 44 | # Scripps website (scrippsco2.ucsd.edu). 45 | # Monthly mean CO2 constructed from daily mean values 46 | # Scripps data downloaded from http://scrippsco2.ucsd.edu/data/atmospheric_co2 47 | # Monthly values are corrected to center of month based on average seasonal 48 | # cycle. Missing days can be asymmetric which would produce a high or low bias. 49 | # Missing months have been interpolated, for NOAA data indicated by negative stdev 50 | # and uncertainty. We have no information for SIO data about Ndays, stdv, unc 51 | # so that they are also indicated by negative numbers 52 | # decimal monthly de-season #days st.dev unc. of 53 | # date average alized of days mon mean 54 | 1958 3 1958.2027 315.70 314.43 -1 -9.99 -0.99 55 | 1958 4 1958.2877 317.45 315.16 -1 -9.99 -0.99 56 | 1958 5 1958.3699 317.51 314.71 -1 -9.99 -0.99 57 | 1958 6 1958.4548 317.24 315.14 -1 -9.99 -0.99 58 | 1958 7 1958.5370 315.86 315.18 -1 -9.99 -0.99 59 | 1958 8 1958.6219 314.93 316.18 -1 -9.99 -0.99 60 | 1958 9 1958.7068 313.20 316.08 -1 -9.99 -0.99 61 | 1958 10 1958.7890 312.43 315.41 -1 -9.99 -0.99 62 | 1958 11 1958.8740 313.33 315.20 -1 -9.99 -0.99 63 | 1958 12 1958.9562 314.67 315.43 -1 -9.99 -0.99 64 | 1959 1 1959.0411 315.58 315.55 -1 -9.99 -0.99 65 | 1959 2 1959.1260 316.48 315.86 -1 -9.99 -0.99 66 | 1959 3 1959.2027 316.65 315.38 -1 -9.99 -0.99 67 | 1959 4 1959.2877 317.72 315.41 -1 -9.99 -0.99 68 | 1959 5 1959.3699 318.29 315.49 -1 -9.99 -0.99 69 | 1959 6 1959.4548 318.15 316.03 -1 -9.99 -0.99 70 | 1959 7 1959.5370 316.54 315.86 -1 -9.99 -0.99 71 | 1959 8 1959.6219 314.80 316.06 -1 -9.99 -0.99 72 | 1959 9 1959.7068 313.84 316.73 -1 -9.99 -0.99 73 | 1959 10 1959.7890 313.33 316.33 -1 -9.99 -0.99 74 | 1959 11 1959.8740 314.81 316.68 -1 -9.99 -0.99 75 | 1959 12 1959.9562 315.58 316.35 -1 -9.99 -0.99 76 | 1960 1 1960.0410 316.43 316.40 -1 -9.99 -0.99 77 | 1960 2 1960.1257 316.98 316.36 -1 -9.99 -0.99 78 | 1960 3 1960.2049 317.58 316.28 -1 -9.99 -0.99 79 | 1960 4 1960.2896 319.03 316.70 -1 -9.99 -0.99 80 | 1960 5 1960.3716 320.04 317.22 -1 -9.99 -0.99 81 | 1960 6 1960.4563 319.59 317.47 -1 -9.99 -0.99 82 | 1960 7 1960.5383 318.18 317.52 -1 -9.99 -0.99 83 | 1960 8 1960.6230 315.90 317.19 -1 -9.99 -0.99 84 | 1960 9 1960.7077 314.17 317.08 -1 -9.99 -0.99 85 | 1960 10 1960.7896 313.83 316.83 -1 -9.99 -0.99 86 | 1960 11 1960.8743 315.00 316.88 -1 -9.99 -0.99 87 | 1960 12 1960.9563 316.19 316.96 -1 -9.99 -0.99 88 | 1961 1 1961.0411 316.89 316.86 -1 -9.99 -0.99 89 | 1961 2 1961.1260 317.70 317.08 -1 -9.99 -0.99 90 | 1961 3 1961.2027 318.54 317.26 -1 -9.99 -0.99 91 | 1961 4 1961.2877 319.48 317.16 -1 -9.99 -0.99 92 | 1961 5 1961.3699 320.58 317.76 -1 -9.99 -0.99 93 | 1961 6 1961.4548 319.77 317.63 -1 -9.99 -0.99 94 | 1961 7 1961.5370 318.57 317.88 -1 -9.99 -0.99 95 | 1961 8 1961.6219 316.79 318.06 -1 -9.99 -0.99 96 | 1961 9 1961.7068 314.99 317.90 -1 -9.99 -0.99 97 | 1961 10 1961.7890 315.31 318.32 -1 -9.99 -0.99 98 | 1961 11 1961.8740 316.10 317.99 -1 -9.99 -0.99 99 | 1961 12 1961.9562 317.01 317.79 -1 -9.99 -0.99 100 | 1962 1 1962.0411 317.94 317.91 -1 -9.99 -0.99 101 | 1962 2 1962.1260 318.55 317.92 -1 -9.99 -0.99 102 | 1962 3 1962.2027 319.68 318.39 -1 -9.99 -0.99 103 | 1962 4 
1962.2877 320.57 318.24 -1 -9.99 -0.99 104 | 1962 5 1962.3699 321.02 318.18 -1 -9.99 -0.99 105 | 1962 6 1962.4548 320.62 318.47 -1 -9.99 -0.99 106 | 1962 7 1962.5370 319.61 318.92 -1 -9.99 -0.99 107 | 1962 8 1962.6219 317.40 318.68 -1 -9.99 -0.99 108 | 1962 9 1962.7068 316.25 319.17 -1 -9.99 -0.99 109 | 1962 10 1962.7890 315.42 318.45 -1 -9.99 -0.99 110 | 1962 11 1962.8740 316.69 318.58 -1 -9.99 -0.99 111 | 1962 12 1962.9562 317.70 318.47 -1 -9.99 -0.99 112 | 1963 1 1963.0411 318.74 318.70 -1 -9.99 -0.99 113 | 1963 2 1963.1260 319.07 318.44 -1 -9.99 -0.99 114 | 1963 3 1963.2027 319.86 318.57 -1 -9.99 -0.99 115 | 1963 4 1963.2877 321.38 319.05 -1 -9.99 -0.99 116 | 1963 5 1963.3699 322.25 319.40 -1 -9.99 -0.99 117 | 1963 6 1963.4548 321.48 319.32 -1 -9.99 -0.99 118 | 1963 7 1963.5370 319.74 319.05 -1 -9.99 -0.99 119 | 1963 8 1963.6219 317.77 319.05 -1 -9.99 -0.99 120 | 1963 9 1963.7068 316.21 319.14 -1 -9.99 -0.99 121 | 1963 10 1963.7890 315.99 319.02 -1 -9.99 -0.99 122 | 1963 11 1963.8740 317.07 318.97 -1 -9.99 -0.99 123 | 1963 12 1963.9562 318.35 319.13 -1 -9.99 -0.99 124 | 1964 1 1964.0410 319.57 319.54 -1 -9.99 -0.99 125 | 1964 2 1964.1257 320.01 319.37 -1 -9.99 -0.99 126 | 1964 3 1964.2049 320.74 319.41 -1 -9.99 -0.99 127 | 1964 4 1964.2896 321.84 319.45 -1 -9.99 -0.99 128 | 1964 5 1964.3716 322.26 319.40 -1 -9.99 -0.99 129 | 1964 6 1964.4563 321.89 319.75 -1 -9.99 -0.99 130 | 1964 7 1964.5383 320.44 319.77 -1 -9.99 -0.99 131 | 1964 8 1964.6230 318.69 320.00 -1 -9.99 -0.99 132 | 1964 9 1964.7077 316.70 319.66 -1 -9.99 -0.99 133 | 1964 10 1964.7896 316.87 319.91 -1 -9.99 -0.99 134 | 1964 11 1964.8743 317.68 319.58 -1 -9.99 -0.99 135 | 1964 12 1964.9563 318.71 319.49 -1 -9.99 -0.99 136 | 1965 1 1965.0411 319.44 319.40 -1 -9.99 -0.99 137 | 1965 2 1965.1260 320.44 319.81 -1 -9.99 -0.99 138 | 1965 3 1965.2027 320.89 319.59 -1 -9.99 -0.99 139 | 1965 4 1965.2877 322.14 319.78 -1 -9.99 -0.99 140 | 1965 5 1965.3699 322.17 319.30 -1 -9.99 -0.99 141 | 1965 6 1965.4548 321.87 319.70 -1 -9.99 -0.99 142 | 1965 7 1965.5370 321.21 320.51 -1 -9.99 -0.99 143 | 1965 8 1965.6219 318.87 320.15 -1 -9.99 -0.99 144 | 1965 9 1965.7068 317.81 320.77 -1 -9.99 -0.99 145 | 1965 10 1965.7890 317.30 320.36 -1 -9.99 -0.99 146 | 1965 11 1965.8740 318.87 320.78 -1 -9.99 -0.99 147 | 1965 12 1965.9562 319.42 320.20 -1 -9.99 -0.99 148 | 1966 1 1966.0411 320.62 320.59 -1 -9.99 -0.99 149 | 1966 2 1966.1260 321.60 320.96 -1 -9.99 -0.99 150 | 1966 3 1966.2027 322.39 321.08 -1 -9.99 -0.99 151 | 1966 4 1966.2877 323.70 321.34 -1 -9.99 -0.99 152 | 1966 5 1966.3699 324.08 321.20 -1 -9.99 -0.99 153 | 1966 6 1966.4548 323.75 321.57 -1 -9.99 -0.99 154 | 1966 7 1966.5370 322.38 321.68 -1 -9.99 -0.99 155 | 1966 8 1966.6219 320.36 321.65 -1 -9.99 -0.99 156 | 1966 9 1966.7068 318.64 321.60 -1 -9.99 -0.99 157 | 1966 10 1966.7890 318.10 321.17 -1 -9.99 -0.99 158 | 1966 11 1966.8740 319.78 321.70 -1 -9.99 -0.99 159 | 1966 12 1966.9562 321.03 321.81 -1 -9.99 -0.99 160 | 1967 1 1967.0411 322.33 322.29 -1 -9.99 -0.99 161 | 1967 2 1967.1260 322.50 321.86 -1 -9.99 -0.99 162 | 1967 3 1967.2027 323.04 321.73 -1 -9.99 -0.99 163 | 1967 4 1967.2877 324.42 322.04 -1 -9.99 -0.99 164 | 1967 5 1967.3699 325.00 322.12 -1 -9.99 -0.99 165 | 1967 6 1967.4548 324.09 321.91 -1 -9.99 -0.99 166 | 1967 7 1967.5370 322.54 321.84 -1 -9.99 -0.99 167 | 1967 8 1967.6219 320.92 322.21 -1 -9.99 -0.99 168 | 1967 9 1967.7068 319.25 322.23 -1 -9.99 -0.99 169 | 1967 10 1967.7890 319.39 322.47 -1 -9.99 -0.99 170 | 1967 11 1967.8740 320.73 322.65 -1 -9.99 -0.99 171 | 1967 12 
1967.9562 321.96 322.75 -1 -9.99 -0.99 172 | 1968 1 1968.0410 322.57 322.54 -1 -9.99 -0.99 173 | 1968 2 1968.1257 323.15 322.51 -1 -9.99 -0.99 174 | 1968 3 1968.2049 323.89 322.55 -1 -9.99 -0.99 175 | 1968 4 1968.2896 325.02 322.62 -1 -9.99 -0.99 176 | 1968 5 1968.3716 325.57 322.68 -1 -9.99 -0.99 177 | 1968 6 1968.4563 325.36 323.19 -1 -9.99 -0.99 178 | 1968 7 1968.5383 324.14 323.46 -1 -9.99 -0.99 179 | 1968 8 1968.6230 322.11 323.43 -1 -9.99 -0.99 180 | 1968 9 1968.7077 320.33 323.32 -1 -9.99 -0.99 181 | 1968 10 1968.7896 320.25 323.33 -1 -9.99 -0.99 182 | 1968 11 1968.8743 321.32 323.25 -1 -9.99 -0.99 183 | 1968 12 1968.9563 322.89 323.69 -1 -9.99 -0.99 184 | 1969 1 1969.0411 324.00 323.97 -1 -9.99 -0.99 185 | 1969 2 1969.1260 324.42 323.77 -1 -9.99 -0.99 186 | 1969 3 1969.2027 325.63 324.31 -1 -9.99 -0.99 187 | 1969 4 1969.2877 326.66 324.27 -1 -9.99 -0.99 188 | 1969 5 1969.3699 327.38 324.48 -1 -9.99 -0.99 189 | 1969 6 1969.4548 326.71 324.51 -1 -9.99 -0.99 190 | 1969 7 1969.5370 325.88 325.17 -1 -9.99 -0.99 191 | 1969 8 1969.6219 323.66 324.97 -1 -9.99 -0.99 192 | 1969 9 1969.7068 322.38 325.37 -1 -9.99 -0.99 193 | 1969 10 1969.7890 321.78 324.88 -1 -9.99 -0.99 194 | 1969 11 1969.8740 322.86 324.79 -1 -9.99 -0.99 195 | 1969 12 1969.9562 324.12 324.91 -1 -9.99 -0.99 196 | 1970 1 1970.0411 325.06 325.03 -1 -9.99 -0.99 197 | 1970 2 1970.1260 325.98 325.34 -1 -9.99 -0.99 198 | 1970 3 1970.2027 326.93 325.61 -1 -9.99 -0.99 199 | 1970 4 1970.2877 328.13 325.74 -1 -9.99 -0.99 200 | 1970 5 1970.3699 328.08 325.16 -1 -9.99 -0.99 201 | 1970 6 1970.4548 327.67 325.46 -1 -9.99 -0.99 202 | 1970 7 1970.5370 326.34 325.63 -1 -9.99 -0.99 203 | 1970 8 1970.6219 324.69 325.99 -1 -9.99 -0.99 204 | 1970 9 1970.7068 323.10 326.10 -1 -9.99 -0.99 205 | 1970 10 1970.7890 323.06 326.18 -1 -9.99 -0.99 206 | 1970 11 1970.8740 324.01 325.95 -1 -9.99 -0.99 207 | 1970 12 1970.9562 325.13 325.93 -1 -9.99 -0.99 208 | 1971 1 1971.0411 326.17 326.14 -1 -9.99 -0.99 209 | 1971 2 1971.1260 326.68 326.03 -1 -9.99 -0.99 210 | 1971 3 1971.2027 327.17 325.85 -1 -9.99 -0.99 211 | 1971 4 1971.2877 327.79 325.38 -1 -9.99 -0.99 212 | 1971 5 1971.3699 328.93 326.00 -1 -9.99 -0.99 213 | 1971 6 1971.4548 328.57 326.36 -1 -9.99 -0.99 214 | 1971 7 1971.5370 327.36 326.65 -1 -9.99 -0.99 215 | 1971 8 1971.6219 325.43 326.74 -1 -9.99 -0.99 216 | 1971 9 1971.7068 323.36 326.37 -1 -9.99 -0.99 217 | 1971 10 1971.7890 323.56 326.69 -1 -9.99 -0.99 218 | 1971 11 1971.8740 324.80 326.75 -1 -9.99 -0.99 219 | 1971 12 1971.9562 326.01 326.82 -1 -9.99 -0.99 220 | 1972 1 1972.0410 326.77 326.73 -1 -9.99 -0.99 221 | 1972 2 1972.1257 327.63 326.98 -1 -9.99 -0.99 222 | 1972 3 1972.2049 327.75 326.39 -1 -9.99 -0.99 223 | 1972 4 1972.2896 329.72 327.29 -1 -9.99 -0.99 224 | 1972 5 1972.3716 330.07 327.14 -1 -9.99 -0.99 225 | 1972 6 1972.4563 329.09 326.88 -1 -9.99 -0.99 226 | 1972 7 1972.5383 328.04 327.36 -1 -9.99 -0.99 227 | 1972 8 1972.6230 326.32 327.67 -1 -9.99 -0.99 228 | 1972 9 1972.7077 324.84 327.87 -1 -9.99 -0.99 229 | 1972 10 1972.7896 325.20 328.33 -1 -9.99 -0.99 230 | 1972 11 1972.8743 326.50 328.45 -1 -9.99 -0.99 231 | 1972 12 1972.9563 327.55 328.36 -1 -9.99 -0.99 232 | 1973 1 1973.0411 328.55 328.51 -1 -9.99 -0.99 233 | 1973 2 1973.1260 329.56 328.91 -1 -9.99 -0.99 234 | 1973 3 1973.2027 330.30 328.96 -1 -9.99 -0.99 235 | 1973 4 1973.2877 331.50 329.08 -1 -9.99 -0.99 236 | 1973 5 1973.3699 332.48 329.54 -1 -9.99 -0.99 237 | 1973 6 1973.4548 332.07 329.84 -1 -9.99 -0.99 238 | 1973 7 1973.5370 330.87 330.15 -1 -9.99 -0.99 239 | 1973 8 
1973.6219 329.31 330.63 -1 -9.99 -0.99 240 | 1973 9 1973.7068 327.51 330.55 -1 -9.99 -0.99 241 | 1973 10 1973.7890 327.18 330.32 -1 -9.99 -0.99 242 | 1973 11 1973.8740 328.16 330.13 -1 -9.99 -0.99 243 | 1973 12 1973.9562 328.64 329.45 -1 -9.99 -0.99 244 | 1974 1 1974.0411 329.35 329.32 -1 -9.99 -0.99 245 | 1974 2 1974.1260 330.71 330.05 -1 -9.99 -0.99 246 | 1974 3 1974.2027 331.48 330.14 -1 -9.99 -0.99 247 | 1974 4 1974.2877 332.65 330.22 -1 -9.99 -0.99 248 | 1974 5 1974.3750 333.19 330.21 14 0.30 0.15 249 | 1974 6 1974.4583 332.16 329.75 26 0.40 0.15 250 | 1974 7 1974.5417 331.07 330.23 24 0.24 0.09 251 | 1974 8 1974.6250 329.12 330.51 27 0.33 0.12 252 | 1974 9 1974.7083 327.32 330.42 24 0.46 0.18 253 | 1974 10 1974.7917 327.28 330.53 24 0.22 0.09 254 | 1974 11 1974.8750 328.30 330.50 27 0.43 0.16 255 | 1974 12 1974.9583 329.58 330.55 28 0.29 0.11 256 | 1975 1 1975.0417 330.73 330.84 29 0.43 0.15 257 | 1975 2 1975.1250 331.46 330.85 26 0.45 0.17 258 | 1975 3 1975.2083 331.90 330.31 18 0.35 0.16 259 | 1975 4 1975.2917 333.17 330.57 25 0.59 0.23 260 | 1975 5 1975.3750 333.94 330.96 28 0.35 0.13 261 | 1975 6 1975.4583 333.45 331.04 26 0.45 0.17 262 | 1975 7 1975.5417 331.97 331.13 24 0.45 0.18 263 | 1975 8 1975.6250 329.95 331.34 24 0.47 0.18 264 | 1975 9 1975.7083 328.50 331.60 23 0.52 0.21 265 | 1975 10 1975.7917 328.34 331.60 12 0.19 0.10 266 | 1975 11 1975.8750 329.37 331.57 19 0.30 0.13 267 | 1975 12 1975.9583 330.64 331.62 -1 -9.99 -0.99 268 | 1976 1 1976.0417 331.59 331.69 20 0.25 0.11 269 | 1976 2 1976.1250 332.75 332.13 22 0.48 0.20 270 | 1976 3 1976.2083 333.52 331.94 20 0.51 0.22 271 | 1976 4 1976.2917 334.64 332.05 19 0.72 0.32 272 | 1976 5 1976.3750 334.77 331.79 22 0.56 0.23 273 | 1976 6 1976.4583 333.99 331.58 17 0.23 0.11 274 | 1976 7 1976.5417 333.06 332.22 16 0.24 0.11 275 | 1976 8 1976.6250 330.68 332.07 23 0.51 0.20 276 | 1976 9 1976.7083 328.95 332.05 13 0.69 0.37 277 | 1976 10 1976.7917 328.75 332.01 20 0.58 0.25 278 | 1976 11 1976.8750 330.15 332.35 25 0.36 0.14 279 | 1976 12 1976.9583 331.62 332.60 20 0.39 0.17 280 | 1977 1 1977.0417 332.66 332.74 24 0.38 0.15 281 | 1977 2 1977.1250 333.13 332.54 19 0.33 0.14 282 | 1977 3 1977.2083 334.95 333.38 23 0.52 0.21 283 | 1977 4 1977.2917 336.13 333.51 21 0.48 0.20 284 | 1977 5 1977.3750 336.93 333.98 20 0.32 0.14 285 | 1977 6 1977.4583 336.17 333.80 22 0.40 0.16 286 | 1977 7 1977.5417 334.88 334.01 21 0.22 0.09 287 | 1977 8 1977.6250 332.56 333.91 18 0.46 0.21 288 | 1977 9 1977.7083 331.29 334.35 19 0.46 0.20 289 | 1977 10 1977.7917 331.27 334.51 23 0.29 0.12 290 | 1977 11 1977.8750 332.41 334.64 21 0.32 0.13 291 | 1977 12 1977.9583 333.60 334.59 26 0.35 0.13 292 | 1978 1 1978.0417 334.95 335.01 22 0.53 0.21 293 | 1978 2 1978.1250 335.25 334.58 25 0.51 0.19 294 | 1978 3 1978.2083 336.66 335.00 28 0.59 0.21 295 | 1978 4 1978.2917 337.69 335.06 18 0.44 0.20 296 | 1978 5 1978.3750 338.03 335.07 26 0.46 0.17 297 | 1978 6 1978.4583 338.01 335.60 17 0.29 0.13 298 | 1978 7 1978.5417 336.40 335.57 21 0.41 0.17 299 | 1978 8 1978.6250 334.42 335.87 19 0.31 0.14 300 | 1978 9 1978.7083 332.37 335.51 17 0.75 0.35 301 | 1978 10 1978.7917 332.41 335.68 23 0.36 0.14 302 | 1978 11 1978.8750 333.75 335.99 24 0.26 0.10 303 | 1978 12 1978.9583 334.90 335.87 27 0.35 0.13 304 | 1979 1 1979.0417 336.14 336.22 27 0.55 0.20 305 | 1979 2 1979.1250 336.68 335.99 25 0.29 0.11 306 | 1979 3 1979.2083 338.27 336.54 21 0.61 0.25 307 | 1979 4 1979.2917 338.95 336.24 21 0.54 0.23 308 | 1979 5 1979.3750 339.21 336.21 12 0.53 0.30 309 | 1979 6 1979.4583 339.26 
336.84 19 0.34 0.15 310 | 1979 7 1979.5417 337.54 336.72 26 0.59 0.22 311 | 1979 8 1979.6250 335.75 337.24 23 0.59 0.24 312 | 1979 9 1979.7083 333.98 337.20 19 0.65 0.29 313 | 1979 10 1979.7917 334.19 337.53 24 0.31 0.12 314 | 1979 11 1979.8750 335.31 337.57 27 0.30 0.11 315 | 1979 12 1979.9583 336.81 337.79 22 0.23 0.09 316 | 1980 1 1980.0417 337.90 338.09 29 0.57 0.20 317 | 1980 2 1980.1250 338.34 337.83 26 0.49 0.19 318 | 1980 3 1980.2083 340.01 338.43 25 0.61 0.23 319 | 1980 4 1980.2917 340.93 338.30 24 0.31 0.12 320 | 1980 5 1980.3750 341.48 338.43 25 0.54 0.21 321 | 1980 6 1980.4583 341.32 338.83 21 0.39 0.16 322 | 1980 7 1980.5417 339.40 338.54 21 0.65 0.27 323 | 1980 8 1980.6250 337.70 339.12 17 1.02 0.47 324 | 1980 9 1980.7083 336.19 339.33 17 0.68 0.32 325 | 1980 10 1980.7917 336.15 339.42 25 0.27 0.10 326 | 1980 11 1980.8750 337.27 339.42 24 0.24 0.10 327 | 1980 12 1980.9583 338.32 339.26 19 0.17 0.07 328 | 1981 1 1981.0417 339.29 339.38 28 0.40 0.14 329 | 1981 2 1981.1250 340.55 339.94 25 0.65 0.25 330 | 1981 3 1981.2083 341.62 340.06 25 0.50 0.19 331 | 1981 4 1981.2917 342.53 339.93 24 0.38 0.15 332 | 1981 5 1981.3750 343.03 339.98 30 0.20 0.07 333 | 1981 6 1981.4583 342.54 340.07 25 0.29 0.11 334 | 1981 7 1981.5417 340.78 339.92 24 0.40 0.16 335 | 1981 8 1981.6250 338.44 339.86 26 0.49 0.18 336 | 1981 9 1981.7083 336.95 340.17 27 0.55 0.20 337 | 1981 10 1981.7917 337.08 340.43 28 0.37 0.13 338 | 1981 11 1981.8750 338.58 340.74 25 0.32 0.12 339 | 1981 12 1981.9583 339.88 340.79 19 0.27 0.12 340 | 1982 1 1982.0417 340.96 341.10 27 0.30 0.11 341 | 1982 2 1982.1250 341.73 341.11 23 0.48 0.19 342 | 1982 3 1982.2083 342.81 341.20 18 0.41 0.19 343 | 1982 4 1982.2917 343.97 341.36 8 0.40 0.27 344 | 1982 5 1982.3750 344.63 341.55 26 0.35 0.13 345 | 1982 6 1982.4583 343.79 341.35 26 0.35 0.13 346 | 1982 7 1982.5417 342.32 341.54 28 0.34 0.12 347 | 1982 8 1982.6250 340.09 341.51 24 0.57 0.22 348 | 1982 9 1982.7083 338.28 341.47 21 0.59 0.25 349 | 1982 10 1982.7917 338.29 341.65 26 0.44 0.16 350 | 1982 11 1982.8750 339.60 341.73 25 0.42 0.16 351 | 1982 12 1982.9583 340.90 341.79 26 0.30 0.11 352 | 1983 1 1983.0417 341.68 341.84 28 0.47 0.17 353 | 1983 2 1983.1250 342.90 342.33 24 0.37 0.15 354 | 1983 3 1983.2083 343.33 341.81 26 0.81 0.31 355 | 1983 4 1983.2917 345.25 342.64 24 0.29 0.11 356 | 1983 5 1983.3750 346.03 342.86 28 0.54 0.19 357 | 1983 6 1983.4583 345.63 343.16 20 0.30 0.13 358 | 1983 7 1983.5417 344.19 343.44 20 0.52 0.22 359 | 1983 8 1983.6250 342.27 343.67 16 0.77 0.37 360 | 1983 9 1983.7083 340.35 343.50 15 0.51 0.25 361 | 1983 10 1983.7917 340.38 343.72 20 0.32 0.14 362 | 1983 11 1983.8750 341.59 343.72 26 0.24 0.09 363 | 1983 12 1983.9583 343.05 343.96 18 0.26 0.12 364 | 1984 1 1984.0417 344.10 344.21 23 0.41 0.16 365 | 1984 2 1984.1250 344.79 344.23 23 0.32 0.13 366 | 1984 3 1984.2083 345.52 344.07 19 0.29 0.13 367 | 1984 4 1984.2917 346.98 344.37 2 -9.99 -0.99 368 | 1984 5 1984.3750 347.63 344.44 21 0.42 0.18 369 | 1984 6 1984.4583 346.98 344.52 21 0.32 0.13 370 | 1984 7 1984.5417 345.53 344.77 21 0.33 0.14 371 | 1984 8 1984.6250 343.55 344.95 12 0.45 0.25 372 | 1984 9 1984.7083 341.40 344.58 14 0.71 0.36 373 | 1984 10 1984.7917 341.67 345.01 12 0.35 0.19 374 | 1984 11 1984.8750 343.07 345.17 17 0.40 0.19 375 | 1984 12 1984.9583 344.70 345.58 12 0.56 0.31 376 | 1985 1 1985.0417 345.21 345.32 23 0.30 0.12 377 | 1985 2 1985.1250 346.16 345.62 17 0.37 0.17 378 | 1985 3 1985.2083 347.74 346.36 16 0.45 0.21 379 | 1985 4 1985.2917 348.34 345.75 19 0.61 0.27 380 | 1985 5 
1985.3750 349.06 345.90 24 0.53 0.21 381 | 1985 6 1985.4583 348.38 345.95 23 0.39 0.16 382 | 1985 7 1985.5417 346.71 345.90 18 0.33 0.15 383 | 1985 8 1985.6250 345.02 346.35 18 0.57 0.26 384 | 1985 9 1985.7083 343.27 346.40 25 0.56 0.21 385 | 1985 10 1985.7917 343.13 346.42 20 0.27 0.12 386 | 1985 11 1985.8750 344.49 346.61 22 0.39 0.16 387 | 1985 12 1985.9583 345.88 346.82 25 0.63 0.24 388 | 1986 1 1986.0417 346.56 346.59 25 0.31 0.12 389 | 1986 2 1986.1250 347.28 346.75 25 0.45 0.17 390 | 1986 3 1986.2083 348.07 346.73 16 0.69 0.33 391 | 1986 4 1986.2917 349.80 347.23 20 0.37 0.16 392 | 1986 5 1986.3750 350.44 347.31 17 0.32 0.15 393 | 1986 6 1986.4583 349.93 347.53 17 0.24 0.11 394 | 1986 7 1986.5417 348.16 347.33 20 0.48 0.20 395 | 1986 8 1986.6250 346.07 347.41 18 0.48 0.21 396 | 1986 9 1986.7083 345.22 348.36 17 0.62 0.29 397 | 1986 10 1986.7917 344.52 347.78 25 0.31 0.12 398 | 1986 11 1986.8750 345.92 348.05 22 0.32 0.13 399 | 1986 12 1986.9583 347.22 348.14 24 0.36 0.14 400 | 1987 1 1987.0417 348.46 348.42 25 0.47 0.18 401 | 1987 2 1987.1250 348.73 348.03 25 0.62 0.24 402 | 1987 3 1987.2083 349.73 348.28 22 0.39 0.16 403 | 1987 4 1987.2917 351.31 348.73 26 0.69 0.26 404 | 1987 5 1987.3750 352.06 348.98 28 0.36 0.13 405 | 1987 6 1987.4583 351.52 349.19 24 0.21 0.08 406 | 1987 7 1987.5417 350.11 349.39 16 0.78 0.37 407 | 1987 8 1987.6250 348.03 349.46 14 0.86 0.44 408 | 1987 9 1987.7083 346.52 349.71 23 0.61 0.24 409 | 1987 10 1987.7917 346.59 349.86 22 0.41 0.17 410 | 1987 11 1987.8750 347.96 350.08 22 0.34 0.14 411 | 1987 12 1987.9583 349.16 350.06 27 0.21 0.08 412 | 1988 1 1988.0417 350.39 350.39 24 0.22 0.09 413 | 1988 2 1988.1250 351.64 350.94 24 0.56 0.22 414 | 1988 3 1988.2083 352.40 350.86 25 0.78 0.30 415 | 1988 4 1988.2917 353.69 351.01 27 0.47 0.17 416 | 1988 5 1988.3750 354.21 351.06 28 0.36 0.13 417 | 1988 6 1988.4583 353.75 351.39 26 0.29 0.11 418 | 1988 7 1988.5417 352.69 352.02 27 0.49 0.18 419 | 1988 8 1988.6250 350.40 351.91 26 0.61 0.23 420 | 1988 9 1988.7083 348.92 352.13 27 0.46 0.17 421 | 1988 10 1988.7917 349.13 352.41 26 0.31 0.12 422 | 1988 11 1988.8750 350.20 352.34 25 0.21 0.08 423 | 1988 12 1988.9583 351.41 352.35 28 0.37 0.13 424 | 1989 1 1989.0417 352.91 352.85 27 0.46 0.17 425 | 1989 2 1989.1250 353.28 352.55 25 0.39 0.15 426 | 1989 3 1989.2083 353.97 352.47 29 0.54 0.19 427 | 1989 4 1989.2917 355.64 352.96 28 0.47 0.17 428 | 1989 5 1989.3750 355.86 352.67 28 0.50 0.18 429 | 1989 6 1989.4583 355.37 352.97 26 0.43 0.16 430 | 1989 7 1989.5417 353.99 353.30 25 0.41 0.16 431 | 1989 8 1989.6250 351.81 353.38 24 0.45 0.18 432 | 1989 9 1989.7083 350.05 353.32 23 0.69 0.27 433 | 1989 10 1989.7917 350.25 353.52 25 0.35 0.13 434 | 1989 11 1989.8750 351.49 353.65 27 0.36 0.13 435 | 1989 12 1989.9583 352.85 353.81 27 0.48 0.18 436 | 1990 1 1990.0417 353.80 353.75 25 0.34 0.13 437 | 1990 2 1990.1250 355.04 354.33 28 0.67 0.24 438 | 1990 3 1990.2083 355.73 354.23 28 0.60 0.22 439 | 1990 4 1990.2917 356.31 353.68 28 0.55 0.20 440 | 1990 5 1990.3750 357.32 354.17 29 0.30 0.11 441 | 1990 6 1990.4583 356.34 353.96 29 0.40 0.14 442 | 1990 7 1990.5417 354.84 354.19 30 0.89 0.31 443 | 1990 8 1990.6250 353.01 354.61 22 0.61 0.25 444 | 1990 9 1990.7083 351.31 354.61 27 0.71 0.26 445 | 1990 10 1990.7917 351.62 354.89 28 0.30 0.11 446 | 1990 11 1990.8750 353.07 355.13 24 0.19 0.08 447 | 1990 12 1990.9583 354.33 355.19 28 0.51 0.19 448 | 1991 1 1991.0417 354.84 354.82 28 0.50 0.18 449 | 1991 2 1991.1250 355.73 355.02 26 0.55 0.20 450 | 1991 3 1991.2083 357.23 355.68 30 0.72 0.25 
451 | 1991 4 1991.2917 358.66 356.02 30 0.65 0.23 452 | 1991 5 1991.3750 359.13 356.01 29 0.52 0.18 453 | 1991 6 1991.4583 358.13 355.79 29 0.30 0.11 454 | 1991 7 1991.5417 356.22 355.61 23 0.45 0.18 455 | 1991 8 1991.6250 353.87 355.48 24 0.37 0.14 456 | 1991 9 1991.7083 352.25 355.56 27 0.37 0.14 457 | 1991 10 1991.7917 352.35 355.62 27 0.24 0.09 458 | 1991 11 1991.8750 353.81 355.80 28 0.25 0.09 459 | 1991 12 1991.9583 355.12 355.93 30 0.34 0.12 460 | 1992 1 1992.0417 356.25 356.20 31 0.60 0.21 461 | 1992 2 1992.1250 357.11 356.38 27 0.55 0.20 462 | 1992 3 1992.2083 357.86 356.27 24 0.72 0.28 463 | 1992 4 1992.2917 359.09 356.39 27 0.53 0.20 464 | 1992 5 1992.3750 359.59 356.41 26 0.73 0.28 465 | 1992 6 1992.4583 359.33 356.97 30 0.50 0.17 466 | 1992 7 1992.5417 357.01 356.44 26 0.62 0.23 467 | 1992 8 1992.6250 354.94 356.62 23 0.55 0.22 468 | 1992 9 1992.7083 352.95 356.29 26 0.97 0.37 469 | 1992 10 1992.7917 353.32 356.63 29 0.55 0.20 470 | 1992 11 1992.8750 354.32 356.38 29 0.34 0.12 471 | 1992 12 1992.9583 355.57 356.39 31 0.32 0.11 472 | 1993 1 1993.0417 357.00 356.96 28 0.58 0.21 473 | 1993 2 1993.1250 357.31 356.44 28 0.49 0.18 474 | 1993 3 1993.2083 358.47 356.76 30 0.72 0.25 475 | 1993 4 1993.2917 359.27 356.59 25 0.53 0.20 476 | 1993 5 1993.3750 360.19 357.03 30 0.45 0.16 477 | 1993 6 1993.4583 359.52 357.12 28 0.35 0.13 478 | 1993 7 1993.5417 357.33 356.75 25 0.78 0.30 479 | 1993 8 1993.6250 355.64 357.32 27 0.61 0.23 480 | 1993 9 1993.7083 354.03 357.39 23 0.73 0.29 481 | 1993 10 1993.7917 354.12 357.49 28 0.29 0.11 482 | 1993 11 1993.8750 355.41 357.54 29 0.25 0.09 483 | 1993 12 1993.9583 356.91 357.80 30 0.27 0.10 484 | 1994 1 1994.0417 358.24 358.13 27 0.33 0.12 485 | 1994 2 1994.1250 358.92 358.09 25 0.50 0.19 486 | 1994 3 1994.2083 359.99 358.29 29 0.82 0.29 487 | 1994 4 1994.2917 361.23 358.46 28 0.49 0.18 488 | 1994 5 1994.3750 361.65 358.46 30 0.45 0.16 489 | 1994 6 1994.4583 360.81 358.44 27 0.30 0.11 490 | 1994 7 1994.5417 359.38 358.78 31 0.41 0.14 491 | 1994 8 1994.6250 357.46 359.16 24 0.43 0.17 492 | 1994 9 1994.7083 355.73 359.17 24 0.58 0.23 493 | 1994 10 1994.7917 356.07 359.50 28 0.28 0.10 494 | 1994 11 1994.8750 357.53 359.68 28 0.51 0.19 495 | 1994 12 1994.9583 358.98 359.83 28 0.46 0.17 496 | 1995 1 1995.0417 359.92 359.79 30 0.47 0.16 497 | 1995 2 1995.1250 360.86 360.05 28 0.53 0.19 498 | 1995 3 1995.2083 361.83 360.22 29 0.78 0.28 499 | 1995 4 1995.2917 363.30 360.62 29 0.64 0.23 500 | 1995 5 1995.3750 363.69 360.58 29 0.66 0.23 501 | 1995 6 1995.4583 363.19 360.84 27 0.37 0.14 502 | 1995 7 1995.5417 361.64 360.97 28 0.36 0.13 503 | 1995 8 1995.6250 359.12 360.73 25 0.83 0.32 504 | 1995 9 1995.7083 358.17 361.55 24 0.68 0.27 505 | 1995 10 1995.7917 357.99 361.37 29 0.27 0.09 506 | 1995 11 1995.8750 359.45 361.59 27 0.25 0.09 507 | 1995 12 1995.9583 360.68 361.53 30 0.36 0.13 508 | 1996 1 1996.0417 362.07 361.85 29 0.37 0.13 509 | 1996 2 1996.1250 363.24 362.35 27 0.56 0.21 510 | 1996 3 1996.2083 364.17 362.53 27 0.68 0.25 511 | 1996 4 1996.2917 364.57 361.87 29 0.59 0.21 512 | 1996 5 1996.3750 365.13 362.10 30 0.57 0.20 513 | 1996 6 1996.4583 364.92 362.69 30 0.38 0.13 514 | 1996 7 1996.5417 363.55 362.85 31 0.32 0.11 515 | 1996 8 1996.6250 361.39 362.99 27 0.55 0.20 516 | 1996 9 1996.7083 359.54 362.99 25 0.75 0.29 517 | 1996 10 1996.7917 359.58 362.97 29 0.32 0.11 518 | 1996 11 1996.8750 360.89 363.03 29 0.29 0.10 519 | 1996 12 1996.9583 362.24 363.08 29 0.36 0.13 520 | 1997 1 1997.0417 363.09 362.88 31 0.40 0.14 521 | 1997 2 1997.1250 364.03 363.22 
27 0.59 0.22 522 | 1997 3 1997.2083 364.51 362.88 31 0.40 0.14 523 | 1997 4 1997.2917 366.35 363.68 21 0.46 0.19 524 | 1997 5 1997.3750 366.64 363.74 29 0.51 0.18 525 | 1997 6 1997.4583 365.59 363.42 27 0.23 0.09 526 | 1997 7 1997.5417 364.31 363.60 24 0.47 0.18 527 | 1997 8 1997.6250 362.25 363.84 25 0.57 0.22 528 | 1997 9 1997.7083 360.29 363.68 26 0.63 0.24 529 | 1997 10 1997.7917 360.82 364.12 27 0.32 0.12 530 | 1997 11 1997.8750 362.49 364.56 30 0.31 0.11 531 | 1997 12 1997.9583 364.38 365.15 30 0.41 0.14 532 | 1998 1 1998.0417 365.26 365.07 30 0.43 0.15 533 | 1998 2 1998.1250 365.98 365.16 28 0.62 0.23 534 | 1998 3 1998.2083 367.24 365.60 31 0.82 0.28 535 | 1998 4 1998.2917 368.66 366.03 29 0.63 0.22 536 | 1998 5 1998.3750 369.42 366.55 30 0.77 0.27 537 | 1998 6 1998.4583 368.99 366.80 28 0.24 0.09 538 | 1998 7 1998.5417 367.82 367.14 23 0.65 0.26 539 | 1998 8 1998.6250 365.95 367.55 30 0.30 0.10 540 | 1998 9 1998.7083 364.02 367.37 28 0.40 0.14 541 | 1998 10 1998.7917 364.40 367.67 30 0.26 0.09 542 | 1998 11 1998.8750 365.52 367.56 23 0.24 0.10 543 | 1998 12 1998.9583 367.13 367.88 26 0.36 0.14 544 | 1999 1 1999.0417 368.18 367.96 27 0.47 0.17 545 | 1999 2 1999.1250 369.12 368.31 21 0.47 0.20 546 | 1999 3 1999.2083 369.68 368.07 25 0.81 0.31 547 | 1999 4 1999.2917 370.99 368.45 29 0.67 0.24 548 | 1999 5 1999.3750 370.96 368.15 26 0.58 0.22 549 | 1999 6 1999.4583 370.30 368.13 26 0.44 0.16 550 | 1999 7 1999.5417 369.45 368.77 27 0.63 0.23 551 | 1999 8 1999.6250 366.90 368.48 25 0.38 0.14 552 | 1999 9 1999.7083 364.81 368.14 28 0.73 0.26 553 | 1999 10 1999.7917 365.37 368.64 31 0.28 0.10 554 | 1999 11 1999.8750 366.72 368.71 28 0.25 0.09 555 | 1999 12 1999.9583 368.10 368.77 26 0.28 0.11 556 | 2000 1 2000.0417 369.29 369.08 26 0.47 0.18 557 | 2000 2 2000.1250 369.55 368.83 19 0.48 0.21 558 | 2000 3 2000.2083 370.60 369.09 30 0.46 0.16 559 | 2000 4 2000.2917 371.82 369.28 27 0.58 0.21 560 | 2000 5 2000.3750 371.58 368.71 28 0.53 0.19 561 | 2000 6 2000.4583 371.70 369.50 28 0.24 0.09 562 | 2000 7 2000.5417 369.86 369.20 25 0.31 0.12 563 | 2000 8 2000.6250 368.13 369.72 27 0.41 0.15 564 | 2000 9 2000.7083 367.00 370.30 26 0.36 0.13 565 | 2000 10 2000.7917 367.03 370.27 30 0.27 0.09 566 | 2000 11 2000.8750 368.37 370.32 25 0.30 0.12 567 | 2000 12 2000.9583 369.67 370.30 30 0.38 0.13 568 | 2001 1 2001.0417 370.59 370.43 30 0.56 0.19 569 | 2001 2 2001.1250 371.51 370.78 26 0.61 0.23 570 | 2001 3 2001.2083 372.46 370.89 25 0.47 0.18 571 | 2001 4 2001.2917 373.37 370.81 29 0.56 0.20 572 | 2001 5 2001.3750 373.84 370.93 24 0.41 0.16 573 | 2001 6 2001.4583 373.22 370.99 26 0.37 0.14 574 | 2001 7 2001.5417 371.50 370.90 25 0.62 0.24 575 | 2001 8 2001.6250 369.61 371.22 27 0.60 0.22 576 | 2001 9 2001.7083 368.18 371.44 28 0.49 0.18 577 | 2001 10 2001.7917 368.45 371.68 31 0.33 0.11 578 | 2001 11 2001.8750 369.76 371.74 24 0.24 0.09 579 | 2001 12 2001.9583 371.24 371.92 29 0.40 0.14 580 | 2002 1 2002.0417 372.53 372.30 28 0.52 0.19 581 | 2002 2 2002.1250 373.20 372.32 28 0.66 0.24 582 | 2002 3 2002.2083 374.12 372.44 24 0.62 0.24 583 | 2002 4 2002.2917 375.02 372.38 29 0.55 0.20 584 | 2002 5 2002.3750 375.76 372.82 29 0.56 0.20 585 | 2002 6 2002.4583 375.52 373.30 28 0.46 0.17 586 | 2002 7 2002.5417 374.01 373.42 26 0.46 0.17 587 | 2002 8 2002.6250 371.85 373.52 28 0.64 0.23 588 | 2002 9 2002.7083 370.75 374.11 23 0.74 0.29 589 | 2002 10 2002.7917 370.55 373.88 31 0.62 0.21 590 | 2002 11 2002.8750 372.25 374.34 29 0.43 0.15 591 | 2002 12 2002.9583 373.79 374.54 31 0.46 0.16 592 | 2003 1 2003.0417 
374.88 374.63 30 0.51 0.18 593 | 2003 2 2003.1250 375.64 374.77 27 0.58 0.21 594 | 2003 3 2003.2083 376.45 374.80 28 0.63 0.23 595 | 2003 4 2003.2917 377.73 375.06 27 0.38 0.14 596 | 2003 5 2003.3750 378.60 375.55 30 0.78 0.27 597 | 2003 6 2003.4583 378.28 376.04 25 0.39 0.15 598 | 2003 7 2003.5417 376.70 376.19 29 0.70 0.25 599 | 2003 8 2003.6250 374.38 376.08 23 0.57 0.23 600 | 2003 9 2003.7083 373.17 376.48 25 0.37 0.14 601 | 2003 10 2003.7917 373.14 376.47 30 0.33 0.12 602 | 2003 11 2003.8750 374.66 376.81 26 0.45 0.17 603 | 2003 12 2003.9583 375.99 376.75 27 0.39 0.14 604 | 2004 1 2004.0417 377.00 376.79 30 0.45 0.16 605 | 2004 2 2004.1250 377.87 377.02 29 0.74 0.26 606 | 2004 3 2004.2083 378.88 377.23 27 0.84 0.31 607 | 2004 4 2004.2917 380.35 377.62 26 0.52 0.19 608 | 2004 5 2004.3750 380.62 377.48 28 0.61 0.22 609 | 2004 6 2004.4583 379.69 377.39 21 0.46 0.19 610 | 2004 7 2004.5417 377.47 376.94 25 0.50 0.19 611 | 2004 8 2004.6250 376.01 377.74 16 0.45 0.21 612 | 2004 9 2004.7083 374.25 377.62 15 0.57 0.28 613 | 2004 10 2004.7917 374.46 377.82 29 0.20 0.07 614 | 2004 11 2004.8750 376.16 378.31 29 0.62 0.22 615 | 2004 12 2004.9583 377.51 378.31 30 0.28 0.10 616 | 2005 1 2005.0417 378.46 378.21 31 0.33 0.11 617 | 2005 2 2005.1250 379.73 378.93 24 0.60 0.24 618 | 2005 3 2005.2083 380.77 379.27 26 1.16 0.43 619 | 2005 4 2005.2917 382.29 379.66 26 0.52 0.20 620 | 2005 5 2005.3750 382.45 379.31 31 0.60 0.21 621 | 2005 6 2005.4583 382.21 379.88 28 0.22 0.08 622 | 2005 7 2005.5417 380.74 380.19 29 0.37 0.13 623 | 2005 8 2005.6250 378.74 380.42 26 0.53 0.20 624 | 2005 9 2005.7083 376.70 380.01 26 0.51 0.19 625 | 2005 10 2005.7917 377.00 380.30 14 0.15 0.08 626 | 2005 11 2005.8750 378.35 380.49 23 0.45 0.18 627 | 2005 12 2005.9583 380.11 380.89 26 0.39 0.15 628 | 2006 1 2006.0417 381.38 381.14 24 0.31 0.12 629 | 2006 2 2006.1250 382.20 381.38 25 0.51 0.19 630 | 2006 3 2006.2083 382.67 381.14 30 0.55 0.19 631 | 2006 4 2006.2917 384.61 381.92 25 0.48 0.18 632 | 2006 5 2006.3750 385.03 381.87 24 0.45 0.17 633 | 2006 6 2006.4583 384.05 381.74 28 0.43 0.16 634 | 2006 7 2006.5417 382.46 381.91 24 0.32 0.12 635 | 2006 8 2006.6250 380.41 382.07 27 0.47 0.17 636 | 2006 9 2006.7083 378.85 382.16 27 0.41 0.15 637 | 2006 10 2006.7917 379.13 382.46 23 0.40 0.16 638 | 2006 11 2006.8750 380.18 382.35 28 0.35 0.13 639 | 2006 12 2006.9583 381.82 382.64 27 0.38 0.14 640 | 2007 1 2007.0417 382.89 382.67 24 0.76 0.30 641 | 2007 2 2007.1250 383.90 383.00 21 0.81 0.34 642 | 2007 3 2007.2083 384.58 382.95 26 0.64 0.24 643 | 2007 4 2007.2917 386.50 383.72 26 0.75 0.28 644 | 2007 5 2007.3750 386.56 383.34 29 0.65 0.23 645 | 2007 6 2007.4583 386.10 383.84 26 0.43 0.16 646 | 2007 7 2007.5417 384.50 384.02 27 0.46 0.17 647 | 2007 8 2007.6250 381.99 383.70 22 0.62 0.25 648 | 2007 9 2007.7083 380.96 384.32 21 0.45 0.19 649 | 2007 10 2007.7917 381.12 384.47 29 0.18 0.07 650 | 2007 11 2007.8750 382.45 384.64 30 0.31 0.11 651 | 2007 12 2007.9583 383.94 384.82 21 0.35 0.15 652 | 2008 1 2008.0417 385.52 385.28 31 0.57 0.19 653 | 2008 2 2008.1250 385.81 384.95 26 0.58 0.22 654 | 2008 3 2008.2083 386.03 384.48 30 0.60 0.21 655 | 2008 4 2008.2917 387.09 384.47 22 1.22 0.50 656 | 2008 5 2008.3750 388.54 385.45 25 0.57 0.22 657 | 2008 6 2008.4583 387.76 385.46 23 0.51 0.20 658 | 2008 7 2008.5417 386.36 385.80 10 0.96 0.58 659 | 2008 8 2008.6250 384.09 385.75 25 0.66 0.25 660 | 2008 9 2008.7083 383.19 386.46 26 0.34 0.13 661 | 2008 10 2008.7917 382.99 386.27 23 0.27 0.11 662 | 2008 11 2008.8750 384.19 386.36 28 0.29 0.11 663 | 2008 
12 2008.9583 385.56 386.41 29 0.27 0.10 664 | 2009 1 2009.0417 386.94 386.63 30 0.38 0.13 665 | 2009 2 2009.1250 387.48 386.59 26 0.49 0.18 666 | 2009 3 2009.2083 388.82 387.32 28 0.68 0.25 667 | 2009 4 2009.2917 389.55 386.93 29 0.85 0.30 668 | 2009 5 2009.3750 390.14 387.02 30 0.51 0.18 669 | 2009 6 2009.4583 389.48 387.24 29 0.60 0.21 670 | 2009 7 2009.5417 388.03 387.55 22 0.31 0.13 671 | 2009 8 2009.6250 386.11 387.80 27 0.60 0.22 672 | 2009 9 2009.7083 384.74 388.00 28 0.56 0.20 673 | 2009 10 2009.7917 384.43 387.68 30 0.31 0.11 674 | 2009 11 2009.8750 386.02 388.16 30 0.30 0.10 675 | 2009 12 2009.9583 387.42 388.23 20 0.47 0.20 676 | 2010 1 2010.0417 388.71 388.41 30 0.92 0.32 677 | 2010 2 2010.1250 390.20 389.26 20 1.32 0.56 678 | 2010 3 2010.2083 391.17 389.65 25 1.06 0.41 679 | 2010 4 2010.2917 392.46 389.91 26 0.65 0.24 680 | 2010 5 2010.3750 393.00 389.88 28 0.66 0.24 681 | 2010 6 2010.4583 392.15 389.88 28 0.44 0.16 682 | 2010 7 2010.5417 390.20 389.72 29 0.47 0.17 683 | 2010 8 2010.6250 388.35 390.01 26 0.41 0.15 684 | 2010 9 2010.7083 386.85 390.14 29 0.55 0.19 685 | 2010 10 2010.7917 387.24 390.54 31 0.27 0.09 686 | 2010 11 2010.8750 388.67 390.79 28 0.43 0.15 687 | 2010 12 2010.9583 389.79 390.60 29 0.47 0.17 688 | 2011 1 2011.0417 391.33 391.03 29 0.94 0.33 689 | 2011 2 2011.1250 391.86 390.93 28 0.47 0.17 690 | 2011 3 2011.2083 392.60 391.08 29 0.97 0.35 691 | 2011 4 2011.2917 393.25 390.64 28 0.73 0.26 692 | 2011 5 2011.3750 394.19 391.03 30 0.94 0.33 693 | 2011 6 2011.4583 393.74 391.43 28 0.44 0.16 694 | 2011 7 2011.5417 392.51 392.03 26 0.71 0.27 695 | 2011 8 2011.6250 390.13 391.83 27 0.42 0.15 696 | 2011 9 2011.7083 389.08 392.40 26 0.31 0.12 697 | 2011 10 2011.7917 388.99 392.32 30 0.17 0.06 698 | 2011 11 2011.8750 390.28 392.43 28 0.27 0.10 699 | 2011 12 2011.9583 391.86 392.66 27 0.35 0.13 700 | 2012 1 2012.0417 393.12 392.88 30 0.77 0.27 701 | 2012 2 2012.1250 393.86 393.03 26 1.19 0.45 702 | 2012 3 2012.2083 394.40 392.81 30 0.62 0.22 703 | 2012 4 2012.2917 396.18 393.46 29 0.59 0.21 704 | 2012 5 2012.3750 396.74 393.54 30 0.49 0.17 705 | 2012 6 2012.4583 395.71 393.44 28 0.59 0.21 706 | 2012 7 2012.5417 394.36 393.92 26 0.31 0.11 707 | 2012 8 2012.6250 392.39 394.16 30 0.53 0.19 708 | 2012 9 2012.7083 391.13 394.55 26 0.42 0.16 709 | 2012 10 2012.7917 391.05 394.41 28 0.22 0.08 710 | 2012 11 2012.8750 392.98 395.01 29 0.53 0.19 711 | 2012 12 2012.9583 394.34 395.04 29 0.44 0.16 712 | 2013 1 2013.0417 395.55 395.40 28 0.60 0.22 713 | 2013 2 2013.1250 396.80 396.01 25 0.57 0.22 714 | 2013 3 2013.2083 397.43 395.85 30 0.71 0.25 715 | 2013 4 2013.2917 398.41 395.56 22 0.60 0.24 716 | 2013 5 2013.3750 399.78 396.41 28 0.37 0.13 717 | 2013 6 2013.4583 398.60 396.28 26 0.42 0.16 718 | 2013 7 2013.5417 397.32 396.93 21 0.52 0.22 719 | 2013 8 2013.6250 395.20 397.08 27 0.45 0.16 720 | 2013 9 2013.7083 393.45 396.98 27 0.43 0.16 721 | 2013 10 2013.7917 393.70 397.03 28 0.16 0.06 722 | 2013 11 2013.8750 395.16 397.14 30 0.60 0.21 723 | 2013 12 2013.9583 396.84 397.59 30 0.48 0.17 724 | 2014 1 2014.0417 397.85 397.55 31 0.49 0.17 725 | 2014 2 2014.1250 398.01 397.20 26 0.39 0.15 726 | 2014 3 2014.2083 399.71 398.20 22 0.84 0.34 727 | 2014 4 2014.2917 401.33 398.46 26 0.50 0.19 728 | 2014 5 2014.3750 401.78 398.38 22 0.51 0.21 729 | 2014 6 2014.4583 401.25 398.92 28 0.36 0.13 730 | 2014 7 2014.5417 399.11 398.68 25 0.55 0.21 731 | 2014 8 2014.6250 397.03 398.91 21 0.24 0.10 732 | 2014 9 2014.7083 395.38 398.96 21 0.55 0.23 733 | 2014 10 2014.7917 396.07 399.47 25 0.74 
0.28 734 | 2014 11 2014.8750 397.28 399.36 27 0.37 0.14 735 | 2014 12 2014.9583 398.91 399.64 29 0.61 0.22 736 | 2015 1 2015.0417 399.98 399.73 30 0.55 0.19 737 | 2015 2 2015.1250 400.35 399.58 27 0.63 0.23 738 | 2015 3 2015.2083 401.52 400.02 25 1.01 0.39 739 | 2015 4 2015.2917 403.15 400.27 26 0.85 0.32 740 | 2015 5 2015.3750 403.96 400.51 30 0.32 0.11 741 | 2015 6 2015.4583 402.80 400.48 29 0.47 0.17 742 | 2015 7 2015.5417 401.29 400.92 24 0.57 0.22 743 | 2015 8 2015.6250 398.93 400.85 28 0.74 0.27 744 | 2015 9 2015.7083 397.63 401.25 25 0.32 0.12 745 | 2015 10 2015.7917 398.29 401.68 28 0.57 0.21 746 | 2015 11 2015.8750 400.16 402.11 26 0.77 0.29 747 | 2015 12 2015.9583 401.85 402.51 30 0.67 0.23 748 | 2016 1 2016.0417 402.50 402.23 26 0.51 0.19 749 | 2016 2 2016.1250 404.07 403.24 25 1.12 0.43 750 | 2016 3 2016.2083 404.87 403.38 28 0.81 0.29 751 | 2016 4 2016.2917 407.42 404.59 23 1.04 0.41 752 | 2016 5 2016.3750 407.72 404.23 29 0.50 0.18 753 | 2016 6 2016.4583 406.81 404.41 26 0.60 0.23 754 | 2016 7 2016.5417 404.40 404.05 28 0.88 0.32 755 | 2016 8 2016.6250 402.26 404.21 24 0.60 0.24 756 | 2016 9 2016.7083 401.05 404.67 24 0.45 0.18 757 | 2016 10 2016.7917 401.60 405.03 29 0.30 0.11 758 | 2016 11 2016.8750 403.53 405.53 27 0.72 0.27 759 | 2016 12 2016.9583 404.44 405.13 29 0.44 0.16 760 | 2017 1 2017.0417 406.17 405.86 27 0.68 0.25 761 | 2017 2 2017.1250 406.46 405.63 26 0.73 0.27 762 | 2017 3 2017.2083 407.23 405.76 23 0.87 0.35 763 | 2017 4 2017.2917 409.03 406.18 26 0.85 0.32 764 | 2017 5 2017.3750 409.69 406.17 27 0.57 0.21 765 | 2017 6 2017.4583 408.89 406.48 26 0.53 0.20 766 | 2017 7 2017.5417 407.13 406.81 28 0.61 0.22 767 | 2017 8 2017.6250 405.12 407.10 29 0.32 0.12 768 | 2017 9 2017.7083 403.37 406.97 26 0.37 0.14 769 | 2017 10 2017.7917 403.63 407.01 27 0.30 0.11 770 | 2017 11 2017.8750 405.12 407.15 26 0.41 0.15 771 | 2017 12 2017.9583 406.81 407.52 31 0.57 0.20 772 | 2018 1 2018.0417 407.96 407.69 29 0.55 0.19 773 | 2018 2 2018.1250 408.32 407.47 28 0.52 0.19 774 | 2018 3 2018.2083 409.39 407.91 29 0.65 0.23 775 | 2018 4 2018.2917 410.25 407.45 21 0.90 0.38 776 | 2018 5 2018.3750 411.24 407.74 24 0.85 0.33 777 | 2018 6 2018.4583 410.79 408.39 29 0.61 0.22 778 | 2018 7 2018.5417 408.70 408.35 27 0.46 0.17 779 | 2018 8 2018.6250 406.97 408.89 31 0.28 0.10 780 | 2018 9 2018.7083 405.52 409.10 29 0.46 0.16 781 | 2018 10 2018.7917 406.00 409.41 30 0.32 0.11 782 | 2018 11 2018.8750 408.02 410.04 24 0.55 0.22 783 | 2018 12 2018.9583 409.08 409.81 30 0.50 0.17 784 | 2019 1 2019.0417 410.83 410.56 26 1.25 0.47 785 | 2019 2 2019.1250 411.75 410.89 27 1.14 0.42 786 | 2019 3 2019.2083 411.97 410.49 28 1.11 0.40 787 | 2019 4 2019.2917 413.33 410.52 27 0.60 0.22 788 | 2019 5 2019.3750 414.64 411.14 28 0.50 0.18 789 | 2019 6 2019.4583 413.93 411.52 27 0.36 0.13 790 | 2019 7 2019.5417 411.74 411.39 25 0.81 0.31 791 | 2019 8 2019.6250 409.95 411.87 29 0.32 0.11 792 | 2019 9 2019.7083 408.54 412.13 29 0.35 0.13 793 | 2019 10 2019.7917 408.52 411.93 29 0.31 0.11 794 | 2019 11 2019.8750 410.25 412.27 26 0.40 0.15 795 | 2019 12 2019.9583 411.76 412.49 31 0.40 0.14 796 | 2020 1 2020.0417 413.39 413.12 29 0.73 0.26 797 | 2020 2 2020.1250 414.11 413.25 28 0.69 0.25 798 | 2020 3 2020.2083 414.51 413.03 26 0.33 0.12 799 | 2020 4 2020.2917 416.21 413.41 28 0.65 0.24 800 | 2020 5 2020.3750 417.07 413.57 27 0.61 0.23 801 | 2020 6 2020.4583 416.38 413.98 27 0.45 0.16 802 | 2020 7 2020.5417 414.38 414.03 31 0.56 0.19 803 | 2020 8 2020.6250 412.55 414.47 25 0.25 0.10 804 | 2020 9 2020.7083 411.29 
414.88 29 0.31 0.11 805 | 2020 10 2020.7917 411.28 414.69 30 0.22 0.08 806 | 2020 11 2020.8750 412.89 414.91 27 0.80 0.30 807 | 2020 12 2020.9583 414.02 414.76 30 0.46 0.16 808 | 2021 1 2021.0417 415.28 415.01 29 0.42 0.15 809 | -------------------------------------------------------------------------------- /experiments/co2/mcmc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash parallel-gps 2 | source ~/pycharm-2020.3.3/bin/activate parallel-gps 3 | 4 | py_script=pssgp.experiments.co2.mcmc 5 | step_size=0.01 6 | noise_variance=0.05 7 | mcmc=HMC 8 | data_dir=~/PycharmProjects/parallel-gps/experiments/co2 9 | 10 | 11 | for qp_order in 3; do 12 | python -m $py_script --step_size=$step_size --mcmc=$mcmc --model=GP --qp_order=$qp_order --noise_variance=$noise_variance --dtype=float64 --device="/gpu:1" --data_dir=$data_dir 13 | python -m $py_script --step_size=$step_size --mcmc=$mcmc --model=PSSGP --qp_order=$qp_order --noise_variance=$noise_variance --dtype=float64 --device="/gpu:1" --data_dir=$data_dir 14 | python -m $py_script --step_size=$step_size --mcmc=$mcmc --model=SSGP --qp_order=$qp_order --noise_variance=$noise_variance --dtype=float64 --device="/cpu:0" --data_dir=$data_dir 15 | done -------------------------------------------------------------------------------- /experiments/sunspots/map.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash parallel-gps 2 | source ~/pycharm-2020.3.3/bin/activate parallel-gps 3 | 4 | py_script=pssgp.experiments.sunspot.map 5 | noise_variance=350. 6 | data_dir=~/PycharmProjects/parallel-gps/experiments/ 7 | python -m $py_script --model=PSSGP --noise_variance=$noise_variance --dtype=float64 --device="/gpu:1" --data_dir=$data_dir 8 | #sleep 10s 9 | python -m $py_script --model=SSGP --noise_variance=$noise_variance --dtype=float64 --device="/cpu:0" --data_dir=$data_dir 10 | #sleep 10s 11 | python -m $py_script --model=GP --noise_variance=$noise_variance --dtype=float64 --device="/gpu:1" --data_dir=$data_dir 12 | -------------------------------------------------------------------------------- /experiments/sunspots/mcmc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash parallel-gps 2 | source ~/pycharm-2020.3.3/bin/activate parallel-gps 3 | 4 | py_script=pssgp.experiments.sunspot.mcmc 5 | step_size=1. 6 | noise_variance=300. 
7 | data_dir=~/PycharmProjects/parallel-gps/experiments/ 8 | mcmc=HMC 9 | 10 | python -m $py_script --step_size=$step_size --mcmc=$mcmc --model=PSSGP --noise_variance=$noise_variance --dtype=float64 --device="/gpu:0" --data_dir=$data_dir 11 | #sleep 10s 12 | #python -m $py_script --step_size=$step_size --mcmc=$mcmc --model=SSGP --noise_variance=$noise_variance --dtype=float32 --device="/cpu:0" --data_dir=$data_dir 13 | #sleep 10s 14 | #python -m $py_script --step_size=$step_size --mcmc=$mcmc --model=GP --noise_variance=$noise_variance --dtype=float64 --device="/gpu:0" --data_dir=$data_dir 15 | sleep 10s 16 | -------------------------------------------------------------------------------- /experiments/toy_models/mcmc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash parallel-gps 2 | source ~/pycharm-2020.3.3/bin/activate parallel-gps 3 | 4 | py_script=pssgp.experiments.toy_models.mcmc 5 | step_size=0.01 6 | n_runs=10 7 | 8 | for mcmc in HMC MALA NUTS; do 9 | for cov in Matern32 Matern52 RBF; do 10 | python -m $py_script --step_size=$step_size --n_runs=$n_runs --mcmc=$mcmc --model=PSSGP --cov=$cov --rbf_order=6 --rbf_balance_iter=10 --qp_order=6 --data_model=SINE --noise_variance=0.1 --dtype=float32 --device="/gpu:0" 11 | sleep 10s 12 | python -m $py_script --step_size=$step_size --n_runs=$n_runs --mcmc=$mcmc --model=SSGP --cov=$cov --rbf_order=6 --rbf_balance_iter=10 --qp_order=6 --data_model=SINE --noise_variance=0.1 --dtype=float32 --device="/cpu:0" 13 | sleep 10s 14 | python -m $py_script --step_size=$step_size --n_runs=$n_runs --mcmc=$mcmc --model=GP --cov=$cov --rbf_order=6 --rbf_balance_iter=10 --qp_order=6 --data_model=SINE --noise_variance=0.1 --dtype=float64 --device="/gpu:0" 15 | sleep 10s 16 | done 17 | done 18 | -------------------------------------------------------------------------------- /experiments/toy_models/run_all.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash parallel-gps 2 | 3 | bash ./speed_and_stability.sh 4 | sleep 1m 5 | bash ./mcmc.sh -------------------------------------------------------------------------------- /experiments/toy_models/speed_and_stability.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash parallel-gps 2 | source ~/pycharm-2020.3.3/bin/activate parallel-gps 3 | 4 | mesh_size=10 5 | py_script=pssgp.experiments.toy_models.speed_and_stability 6 | 7 | for cov in Matern32 Matern52 RBF; do 8 | python -m $py_script --model=SSGP --cov=$cov --rbf_order=6 --rbf_balance_iter=10 --qp_order=6 --data_model=SINE --noise_variance=0.1 --dtype=float64 --device="/cpu:0" --mesh_size=$mesh_size 9 | python -m $py_script --model=PSSGP --cov=$cov --rbf_order=6 --rbf_balance_iter=10 --qp_order=6 --data_model=SINE --noise_variance=0.1 --dtype=float64 --device="/gpu:0" --mesh_size=$mesh_size 10 | python -m $py_script --model=GP --cov=$cov --rbf_order=6 --rbf_balance_iter=10 --qp_order=6 --data_model=SINE --noise_variance=0.1 --dtype=float64 --device="/gpu:1" --mesh_size=$mesh_size 11 | sleep 10s 12 | done 13 | -------------------------------------------------------------------------------- /notebooks/data/regression_1D.csv: -------------------------------------------------------------------------------- 1 | 7.044577166530285872e-03,3.790447985799683117e+00 2 | 2.633173728814863779e-02,3.868970757495299839e+00 3 | 2.859010213410595469e-02,3.704594906114498265e+00 4 | 1.479047835465483463e-01,3.368763948379832396e+00 5 | 
1.718859661709991604e-01,3.493356575175871281e+00
6 | 2.432357456111999827e-01,3.871535091569236364e+00
7 | 6.661700880180961848e-01,3.643420296823000282e+00
8 | 7.714303440386238719e-01,3.774442382979624977e+00
9 | 8.049218148148531427e-01,3.010885733911660811e+00
10 | 8.658165855998894989e-01,1.525531433714437224e+00
11 | 8.666105548197428066e-01,1.550645204060850268e+00
12 | 8.897812990554012647e-01,1.428453882063584146e+00
13 | 
--------------------------------------------------------------------------------
/pssgp/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EEA-sensors/parallel-gps/f2bf8d9d3f0f0557b4b8ec9ae7f1f951a2b75ad9/pssgp/__init__.py
--------------------------------------------------------------------------------
/pssgp/config.py:
--------------------------------------------------------------------------------
1 | """
2 | This is a workaround to be able to "pass" the number of balancing steps to the kernels as an argument. It would probably
3 | be beneficial to wrap the GPflow config instead, but I can't see the current approach being detrimental in any way for
4 | the time being.
5 | """
6 | NUMBER_OF_BALANCING_STEPS = 10
7 | 
8 | 
9 | def set_number_balancing_steps(n_balancing_steps):
10 |     """
11 |     Sets the default number of balancing steps used in the construction of the GP-equivalent state-space model.
12 |     This is used in particular for combinations of kernels such as sums and products. In practice 10 is more than enough,
13 |     but it is possible to iterate further.
14 |     """
15 |     global NUMBER_OF_BALANCING_STEPS
16 |     NUMBER_OF_BALANCING_STEPS = n_balancing_steps
17 | 
--------------------------------------------------------------------------------
/pssgp/experiments/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EEA-sensors/parallel-gps/f2bf8d9d3f0f0557b4b8ec9ae7f1f951a2b75ad9/pssgp/experiments/__init__.py
--------------------------------------------------------------------------------
/pssgp/experiments/co2/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EEA-sensors/parallel-gps/f2bf8d9d3f0f0557b4b8ec9ae7f1f951a2b75ad9/pssgp/experiments/co2/__init__.py
--------------------------------------------------------------------------------
/pssgp/experiments/co2/common.py:
--------------------------------------------------------------------------------
1 | import enum
2 | import os
3 | 
4 | import numpy as np
5 | import tensorflow as tf
6 | from absl import flags
7 | from gpflow import default_float
8 | 
9 | from pssgp.experiments.common import ModelEnum
10 | 
11 | 
12 | class DataEnum(enum.Enum):
13 |     SINE = "SINE"
14 |     COMPOSITE_SINE = "COMPOSITE_SINE"
15 |     RECT = "RECT"
16 | 
17 | 
18 | FLAGS = flags.FLAGS
19 | flags.DEFINE_string('model', ModelEnum.SSGP.value, 'Select the model to run. Options are GP, SSGP, and PSSGP.')
20 | flags.DEFINE_string('data_model', DataEnum.SINE.value, 'The model for the data.')
21 | flags.DEFINE_string('data_dir', "", 'Directory of the data.')
22 | flags.DEFINE_string('dtype', "float64", 'GPflow default float type.')
23 | flags.DEFINE_integer('qp_order', 3, 'Order of the state-space quasi-periodic approximation.', lower_bound=1)
24 | flags.DEFINE_float('noise_variance', 0.05, 'Variance of the noise.', lower_bound=1e-4)
25 | 
26 | 
27 | # TODO: Put a flag for results dumping.
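# Usage sketch (illustrative, added for clarity): these flags are set by the driver
# scripts under experiments/co2, e.g.
#   python -m pssgp.experiments.co2.mcmc --model=PSSGP --qp_order=3 \
#       --noise_variance=0.05 --dtype=float64 --data_dir=path/to/experiments/co2
# (see experiments/co2/mcmc.sh; the data_dir path above is a placeholder).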
28 |
29 |
30 |
31 | def get_data(n_training):
32 | dtype = default_float()
33 |
34 | weekly = np.loadtxt(os.path.join(FLAGS.data_dir, 'co2_weekly_mlo.txt'))[:, 3:5]
35 | monthly = np.loadtxt(os.path.join(FLAGS.data_dir, 'co2_mm_mlo.txt'))[:, 2:4]
36 | data = np.concatenate([weekly, monthly], axis=0).astype(dtype)
37 |
38 | # Remove invalid data -999.99 in co2 column
39 | rm_mask = np.any(data < 0, axis=1)
40 | data = data[~rm_mask]
41 |
42 | # Sort data in temporal order
43 | idx = np.argsort(data, axis=0)
44 | idx[:, 1] = idx[:, 0]  # reuse the time ordering for the CO2 column
45 | data = tf.constant(np.take_along_axis(data, idx, axis=0), dtype=dtype)
46 |
47 | # Split training and validation data
48 | train_data = data[-n_training:]
49 |
50 | # Return t, y as tf.Tensors
51 | return train_data[:, 0, None], train_data[:, 1, None]
52 |
--------------------------------------------------------------------------------
/pssgp/experiments/co2/mcmc.py:
--------------------------------------------------------------------------------
1 | # MCMC regression experiments on the Mauna Loa CO2 data.
2 | # Corresponds to the *** of paper.
3 | import os
4 |
5 | import gpflow as gpf
6 | import numpy as np
7 | import tensorflow as tf
8 | import tqdm
9 | from absl import app, flags
10 | from gpflow import set_trainable
11 | from gpflow.kernels import SquaredExponential
12 | from gpflow.models import GPModel
13 | from tensorflow_probability.python.distributions import Normal
14 |
15 | from pssgp.experiments.co2.common import get_data, FLAGS
16 | from pssgp.experiments.common import ModelEnum, get_model, \
17 | run_one_mcmc, MCMC
18 | from pssgp.kernels import Matern32, Periodic
19 |
20 | flags.DEFINE_integer('np_seed', 42, "data model seed")
21 | flags.DEFINE_integer('tf_seed', 31415, "mcmc model seed")
22 | flags.DEFINE_integer('n_runs', 10, "number of training-set sizes in the logspace sweep")
23 | flags.DEFINE_string('mcmc', MCMC.HMC.value, "MCMC method enum")
24 | flags.DEFINE_integer('n_samples', 1000, "Number of samples required")
25 | flags.DEFINE_integer('n_burnin', 100, "Number of burnin samples")
26 | flags.DEFINE_float('step_size', 0.01, "Step size for the gradient based chain")
27 | flags.DEFINE_integer('n_leapfrogs', 10, "Number of leapfrog steps for HMC")
28 |
29 | flags.DEFINE_boolean('plot', False, "Plot the result")
30 | flags.DEFINE_boolean('run', True, "Run the result or load the data")
31 |
32 |
33 |
34 |
35 | def set_gp_priors(gp_model: GPModel):
36 | if FLAGS.model == ModelEnum.GP.value:
37 | set_trainable(gp_model.likelihood.variance, False)
38 | else:
39 | set_trainable(gp_model.noise_variance, False)
40 |
41 |
42 | def get_covariance_function():
43 | gp_dtype = gpf.config.default_float()
44 | # Matern 32
45 | m32_cov = Matern32(variance=1, lengthscales=100.)
46 | m32_cov.variance.prior = Normal(gp_dtype(1.), gp_dtype(0.1))
47 | m32_cov.lengthscales.prior = Normal(gp_dtype(100.), gp_dtype(50.))
48 |
49 | # Periodic base kernel
50 | periodic_base_cov = SquaredExponential(variance=5., lengthscales=1.)
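# The remaining lines assemble the usual quasi-periodic CO2 construction:
# the periodic kernel (built on this SE base) is damped by a slowly varying
# Matern32 for the seasonal cycle, and the long-lengthscale Matern32 trend
# defined above is added on top.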
51 | set_trainable(periodic_base_cov.variance, False) 52 | periodic_base_cov.lengthscales.prior = Normal(gp_dtype(5.), gp_dtype(1.)) 53 | 54 | # Periodic 55 | periodic_cov = Periodic(periodic_base_cov, period=1., order=FLAGS.qp_order) 56 | set_trainable(periodic_cov.period, False) 57 | 58 | # Periodic damping 59 | periodic_damping_cov = Matern32(variance=1e-1, lengthscales=50) 60 | periodic_damping_cov.variance.prior = Normal(gp_dtype(1e-1), gp_dtype(1e-3)) 61 | periodic_damping_cov.lengthscales.prior = Normal(gp_dtype(50), gp_dtype(10.)) 62 | 63 | # Final covariance 64 | co2_cov = periodic_cov * periodic_damping_cov + m32_cov 65 | return co2_cov 66 | 67 | 68 | def run(): 69 | gpf.config.set_default_float(getattr(np, FLAGS.dtype)) 70 | 71 | tf.random.set_seed(FLAGS.tf_seed) 72 | f_times = os.path.join("results", f"mcmc-times-{FLAGS.model}-{FLAGS.mcmc}") 73 | # TODO: we need a flag for this directory really. 74 | f_posterior = os.path.join("results", f"mcmc-posterior-{FLAGS.model}-{FLAGS.mcmc}") 75 | 76 | n_training_logspace = [3192] 77 | 78 | if FLAGS.run: 79 | cov_fun = get_covariance_function() 80 | 81 | times = np.empty(len(n_training_logspace), dtype=float) 82 | 83 | for i, n_training in tqdm.tqdm(enumerate(n_training_logspace), total=len(n_training_logspace)): 84 | t, y = get_data(n_training) 85 | gp_model = get_model(ModelEnum(FLAGS.model), (t, y), FLAGS.noise_variance, cov_fun, 86 | t.shape[0]) 87 | set_gp_priors(gp_model) 88 | 89 | run_time, params_res = run_one_mcmc(n_training, gp_model) 90 | times[i] = run_time 91 | np.savez(f_posterior + f"-{n_training}", **params_res) 92 | np.save(f_times, np.stack([n_training_logspace, times], axis=1)) 93 | 94 | 95 | def main(_): 96 | device = tf.device(FLAGS.device) 97 | with device: 98 | run() 99 | 100 | 101 | if __name__ == '__main__': 102 | if not os.path.exists("results"): 103 | os.makedirs('results') 104 | app.run(main) 105 | -------------------------------------------------------------------------------- /pssgp/experiments/co2/speed_and_stability.py: -------------------------------------------------------------------------------- 1 | # Regression experiments on sinusoidal signals. 2 | # Corresponds to the *** of paper. 
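# NB: FLAGS and get_data below are imported from
# pssgp.experiments.toy_models.common, so this speed/stability sweep runs on
# the synthetic toy data rather than on the CO2 series itself.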
3 | import os
4 | import time
5 | from itertools import product
6 |
7 | import gpflow as gpf
8 | import matplotlib.pyplot as plt
9 | import numpy as np
10 | import tensorflow as tf
11 | import tqdm
12 | from absl import app, flags
13 | from gpflow.models.util import data_input_to_tensor
14 | from scipy.stats import gaussian_kde
15 |
16 | from pssgp.experiments.common import ModelEnum, CovarianceEnum, get_simple_covariance_function, get_model
17 | from pssgp.experiments.toy_models.common import FLAGS, get_data
18 | from pssgp.misc_utils import rmse
19 |
20 | flags.DEFINE_integer('n_seeds', 21, "Number of random seeds (repetitions) per grid point")
21 | flags.DEFINE_integer('mesh_size', 10, "Size of the mesh for prediction")
22 | flags.DEFINE_boolean('plot', False, "Plot the result")
23 | flags.DEFINE_boolean('run', True, "Run the result or load the data")
24 |
25 |
26 | def run_one(seed, covariance_function, gp_model, n_training, n_pred):
27 | t, ft, t_pred, ft_pred, y = get_data(seed, n_training, n_pred)
28 | gp_dtype = gpf.config.default_float()
29 |
30 | if gp_model is None:
31 | model_name = ModelEnum(FLAGS.model)
32 | gp_model = get_model(model_name, (t, y), FLAGS.noise_variance, covariance_function,
33 | t.shape[0] + t_pred.shape[0])
34 | else:
35 | gp_model.data = data_input_to_tensor((t, y))
36 |
37 | tensor_t_pred = tf.convert_to_tensor(t_pred, dtype=gp_dtype)
38 | y_pred, _ = gp_model.predict_f(tensor_t_pred)
39 | error = rmse(y_pred, ft_pred)
40 | return error, gp_model
41 |
42 |
43 | def ridgeline(ax, data, overlap=0, fill=True, fill_color="b", n_points=150):
44 | """
45 | Adapted from https://glowingpython.blogspot.com/2020/03/ridgeline-plots-in-pure-matplotlib.html
46 | """
47 | if overlap > 1 or overlap < 0:
48 | raise ValueError('overlap must be in [0 1]')
49 | xx = np.linspace(np.min(np.concatenate(data)),
50 | np.max(np.concatenate(data)), n_points)
51 | ys = []
52 | for i, d in enumerate(data):
53 | pdf = gaussian_kde(d)
54 | y = i * (1.0 - overlap)
55 | ys.append(y)
56 | curve = pdf(xx)
57 | if fill:
58 | ax.fill_between(xx, np.ones(n_points) * y,
59 | curve + y, zorder=len(data) - i + 1, color=fill_color, alpha=0.5)
60 | ax.plot(xx, curve + y, zorder=len(data) - i + 1, color=fill_color)
61 |
62 |
63 | def run():
64 | f_stability = os.path.join("results", f"stability-matrix-{FLAGS.cov}-{FLAGS.model}")
65 | f_time = os.path.join("results", f"time-matrix-{FLAGS.cov}-{FLAGS.model}")
66 |
67 | if FLAGS.run:
68 | gpf.config.set_default_float(getattr(np, FLAGS.dtype))
69 | cov_name = CovarianceEnum(FLAGS.cov)
70 | cov_fun = get_simple_covariance_function(cov_name)
71 | errors = np.empty((FLAGS.mesh_size, FLAGS.mesh_size, FLAGS.n_seeds), dtype=float)
72 | times = np.empty((FLAGS.mesh_size, FLAGS.mesh_size, FLAGS.n_seeds), dtype=float)
73 | n_training_logspace = n_test_logspace = np.logspace(12, 15, FLAGS.mesh_size, base=2, dtype=int)
74 |
75 | for (i, n_training), (j, n_pred) in tqdm.tqdm(product(enumerate(n_training_logspace),
76 | enumerate(n_test_logspace)),
77 | total=FLAGS.mesh_size ** 2,
78 | desc=FLAGS.model):
79 | model = None
80 | for seed in tqdm.trange(FLAGS.n_seeds, leave=False):
81 | try:
82 | tic = time.time()
83 | error, model = run_one(seed, cov_fun, model, n_training, n_pred)
84 | toc = time.time()
85 | # the only reason we return the model is so that we don't have to recompile every time
86 | errors[i, j, seed] = error
87 | times[i, j, seed] = toc - tic
88 | except Exception as e:  # noqa: It's not clear what the error returned by TF could be, so well...
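# On failure (e.g. out of memory or a Cholesky breakdown), record NaNs
# so that the rest of the sweep can continue.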
89 | errors[i, j, seed] = float("nan") 90 | times[i, j, seed] = float("nan") 91 | print( 92 | f"{FLAGS.model}-{FLAGS.cov} failed with n_training,n_pred={n_training, n_pred} and error: \n {e}") 93 | 94 | np.save(f_stability, errors) 95 | np.save(f_time, times) 96 | elif FLAGS.plot: 97 | errors = np.load(f_stability + ".npy") 98 | fig, axes = plt.subplots(ncols=3, nrows=2, figsize=(17, 12)) 99 | for i, ax in enumerate(axes[0, :]): 100 | ridgeline(ax, errors[(i + 1) * FLAGS.mesh_size // 3 - 1]) 101 | for j, ax in enumerate(axes[1, :]): 102 | ridgeline(ax, errors[:, (j + 1) * FLAGS.mesh_size // 3 - 1]) 103 | fig.show() 104 | 105 | 106 | def main(_): 107 | device = tf.device(FLAGS.device) 108 | with device: 109 | run() 110 | 111 | 112 | if __name__ == '__main__': 113 | if not os.path.exists("results"): 114 | os.makedirs('results') 115 | app.run(main) 116 | -------------------------------------------------------------------------------- /pssgp/experiments/common.py: -------------------------------------------------------------------------------- 1 | import enum 2 | import time 3 | 4 | import gpflow as gpf 5 | import numpy as np 6 | import tensorflow as tf 7 | from absl import flags 8 | from absl.flags import FLAGS 9 | from gpflow import config 10 | from gpflow.kernels import SquaredExponential 11 | from gpflow.models import GPR 12 | from tensorflow_probability.python.experimental.mcmc import ProgressBarReducer, WithReductions, \ 13 | make_tqdm_progress_bar_fn 14 | from tensorflow_probability.python.mcmc import HamiltonianMonteCarlo, MetropolisAdjustedLangevinAlgorithm, \ 15 | NoUTurnSampler, sample_chain 16 | 17 | from pssgp.kernels import Matern12, Matern32, Matern52, RBF, Periodic 18 | from pssgp.model import StateSpaceGP 19 | 20 | 21 | class MCMC(enum.Enum): 22 | HMC = "HMC" 23 | MALA = "MALA" 24 | NUTS = "NUTS" 25 | 26 | 27 | class ModelEnum(enum.Enum): 28 | GP = "GP" 29 | SSGP = "SSGP" 30 | PSSGP = "PSSGP" 31 | 32 | 33 | class CovarianceEnum(enum.Enum): 34 | Matern12 = 'Matern12' 35 | Matern32 = 'Matern32' 36 | Matern52 = 'Matern52' 37 | RBF = "RBF" 38 | QP = "QP" 39 | 40 | 41 | flags.DEFINE_string("device", "/cpu:0", "Device on which to run") 42 | 43 | 44 | def get_simple_covariance_function(covariance_enum, **kwargs): 45 | if not isinstance(covariance_enum, CovarianceEnum): 46 | covariance_enum = CovarianceEnum(covariance_enum) 47 | if covariance_enum == CovarianceEnum.Matern12: 48 | return Matern12(**kwargs) 49 | if covariance_enum == CovarianceEnum.Matern32: 50 | return Matern32(**kwargs) 51 | if covariance_enum == CovarianceEnum.Matern52: 52 | return Matern52(**kwargs) 53 | if covariance_enum == CovarianceEnum.RBF: 54 | return RBF(**kwargs) 55 | if covariance_enum == CovarianceEnum.QP: 56 | base_kernel = SquaredExponential(kwargs.pop("variance", 1.), kwargs.pop("lengthscales", 1.)) 57 | return Periodic(base_kernel, **kwargs) 58 | 59 | 60 | def get_model(model_enum, data, noise_variance, covariance_function, max_parallel=10000): 61 | if not isinstance(model_enum, ModelEnum): 62 | model_enum = ModelEnum(model_enum) 63 | if model_enum == ModelEnum.GP: 64 | gp_model = GPR(data, covariance_function, None, noise_variance) 65 | elif model_enum == ModelEnum.SSGP: 66 | gp_model = StateSpaceGP(data, covariance_function, noise_variance, parallel=False) 67 | elif model_enum == ModelEnum.PSSGP: 68 | gp_model = StateSpaceGP(data, covariance_function, noise_variance, parallel=True, max_parallel=max_parallel) 69 | else: 70 | raise ValueError("model not supported") 71 | return gp_model 72 | 73 | 74 | 
def run_one_mcmc(n_training, gp_model): 75 | num_burnin_steps = FLAGS.n_burnin 76 | num_samples = FLAGS.n_samples 77 | 78 | mcmc_helper, run_chain_fn = get_run_chain_fn(gp_model, num_samples, num_burnin_steps) 79 | try: 80 | tic = time.time() 81 | result, is_accepted = run_chain_fn() 82 | print(np.mean(is_accepted)) 83 | run_time = time.time() - tic 84 | parameter_samples = mcmc_helper.convert_to_constrained_values(result) 85 | 86 | except Exception as e: # noqa: It's not clear what the error returned by TF could be, so well... 87 | run_time = float("nan") 88 | parameter_samples = [np.nan * np.ones((num_samples,), dtype=config.default_float()) for _ in 89 | gp_model.trainable_parameters] 90 | print(f"{FLAGS.model}-{FLAGS.cov} failed with n_training={n_training} and error: \n {e}") 91 | 92 | return run_time, dict(zip(gpf.utilities.parameter_dict(gp_model), parameter_samples)) 93 | 94 | 95 | def get_run_chain_fn(gp_model, num_samples, num_burnin_steps): 96 | mcmc_helper = gpf.optimizers.SamplingHelper( 97 | gp_model.log_posterior_density, gp_model.trainable_parameters) 98 | 99 | if FLAGS.mcmc == MCMC.HMC.value: 100 | mcmc = HamiltonianMonteCarlo( 101 | target_log_prob_fn=mcmc_helper.target_log_prob_fn, 102 | num_leapfrog_steps=FLAGS.n_leapfrogs, 103 | step_size=FLAGS.step_size 104 | ) 105 | elif FLAGS.mcmc == MCMC.MALA.value: 106 | mcmc = MetropolisAdjustedLangevinAlgorithm( 107 | target_log_prob_fn=mcmc_helper.target_log_prob_fn, 108 | step_size=FLAGS.step_size 109 | ) 110 | elif FLAGS.mcmc == MCMC.NUTS.value: 111 | mcmc = NoUTurnSampler( 112 | target_log_prob_fn=mcmc_helper.target_log_prob_fn, 113 | step_size=FLAGS.step_size 114 | ) 115 | else: 116 | raise ValueError(f"mcmc must be a {MCMC} enum, {FLAGS.mcmc} was passed") 117 | pbar = ProgressBarReducer(num_samples + num_burnin_steps, 118 | make_tqdm_progress_bar_fn(f"{FLAGS.model}-{FLAGS.mcmc}", True)) 119 | pbar.initialize(None) 120 | 121 | mcmc = WithReductions(mcmc, pbar) 122 | 123 | @tf.function 124 | def run_chain_fn(): 125 | return sample_chain( 126 | num_results=num_samples, 127 | num_burnin_steps=num_burnin_steps, 128 | current_state=mcmc_helper.current_state, 129 | kernel=mcmc, 130 | trace_fn=lambda _, pkr: pkr.inner_results.is_accepted 131 | ) 132 | 133 | return mcmc_helper, run_chain_fn 134 | -------------------------------------------------------------------------------- /pssgp/experiments/sunspot/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EEA-sensors/parallel-gps/f2bf8d9d3f0f0557b4b8ec9ae7f1f951a2b75ad9/pssgp/experiments/sunspot/__init__.py -------------------------------------------------------------------------------- /pssgp/experiments/sunspot/common.py: -------------------------------------------------------------------------------- 1 | import enum 2 | import os 3 | 4 | import numpy as np 5 | import pandas as pd 6 | import tensorflow as tf 7 | from absl import flags 8 | from gpflow import default_float 9 | 10 | from pssgp.experiments.common import ModelEnum 11 | 12 | 13 | class DataEnum(enum.Enum): 14 | SINE = "SINE" 15 | COMPOSITE_SINE = "COMPOSITE_SINE" 16 | RECT = "RECT" 17 | 18 | 19 | FLAGS = flags.FLAGS 20 | flags.DEFINE_string('model', ModelEnum.SSGP.value, 'Select model to run. 
Options are GP, SSGP, and PSSGP.')
21 | flags.DEFINE_string('data_model', DataEnum.SINE.value, 'Generative model for the data.')
22 | flags.DEFINE_string('data_dir', "", 'Directory of the data.')
23 | flags.DEFINE_string('dtype', "float64", 'GPFLOW default float type.')
24 | flags.DEFINE_float('noise_variance', 10., 'Variance of the noise.', lower_bound=1e-4)
25 |
26 |
27 | # TODO: Put a flag for results dumping.
28 |
29 | def get_data(n_training):
30 | data = pd.read_csv(os.path.join(FLAGS.data_dir, 'sunspots.csv'), parse_dates=[1], index_col=0, header=0)
31 | t = ((data["date"] - data.loc[0, "date"]) / np.timedelta64(1, "Y")).values
32 | vals = data["sunspots"].values
33 | return t[-n_training:, None], vals[-n_training:, None]
34 |
--------------------------------------------------------------------------------
/pssgp/experiments/sunspot/map.py:
--------------------------------------------------------------------------------
1 | # MAP estimation experiments on the sunspot data.
2 | # Corresponds to the *** of paper.
3 | import os
4 | import time
5 |
6 | import gpflow as gpf
7 | import matplotlib.pyplot as plt
8 | import numpy as np
9 | import tensorflow as tf
10 | import tqdm
11 | from absl import app, flags
12 | from gpflow.models import GPModel
13 | from scipy.optimize import minimize
14 | from tensorflow_probability.python.distributions import Normal
15 |
16 | from pssgp.experiments.common import ModelEnum, get_model
17 | from pssgp.experiments.sunspot.common import get_data, FLAGS
18 | from pssgp.kernels import Matern32
19 |
20 | flags.DEFINE_integer('np_seed', 666, "data model seed")
21 | flags.DEFINE_integer('tf_seed', 31415, "mcmc model seed")
22 | flags.DEFINE_integer('n_runs', 10, "number of training-set sizes in the logspace sweep")
23 | flags.DEFINE_integer('n_samples', 250, "Number of samples required")
24 | flags.DEFINE_integer('n_burnin', 250, "Number of burnin samples")
25 | flags.DEFINE_float('step_size', 0.1, "Step size for the gradient based chain")
26 | flags.DEFINE_integer('n_leapfrogs', 10, "Number of leapfrog steps for HMC")
27 |
28 | flags.DEFINE_boolean('plot', True, "Plot the result")
29 | flags.DEFINE_boolean('run', False, "Run the result or load the data")
30 |
31 |
32 | def set_gp_priors(gp_model: GPModel):
33 | gp_dtype = gpf.config.default_float()
34 | variance_prior = Normal(gp_dtype(FLAGS.noise_variance), gp_dtype(FLAGS.noise_variance))
35 | if FLAGS.model == ModelEnum.GP.value:
36 | gp_model.likelihood.variance.prior = variance_prior
37 | else:
38 | gp_model.noise_variance.prior = variance_prior
39 | # set_trainable(gp_model.noise_variance, False)
40 |
41 |
42 | def get_covariance_function():
43 | gp_dtype = gpf.config.default_float()
44 |
45 | matern_variance = 5500.
46 | matern_lengthscales = 5.
47 |
48 | m32_cov = Matern32(variance=matern_variance, lengthscales=matern_lengthscales)
49 |
50 | m32_cov.variance.prior = Normal(gp_dtype(matern_variance), gp_dtype(matern_variance))
51 | m32_cov.lengthscales.prior = Normal(gp_dtype(matern_lengthscales), gp_dtype(matern_lengthscales))
52 |
53 | return m32_cov
54 |
55 |
56 | def run():
57 | gpf.config.set_default_float(getattr(np, FLAGS.dtype))
58 | dtype = gpf.config.default_float()
59 | tf.random.set_seed(FLAGS.tf_seed)
60 | f_times = os.path.join("results", f"map-times-{FLAGS.model}")
61 | # TODO: we need a flag for this directory really.
62 | f_posterior = os.path.join("results", f"map-posterior-{FLAGS.model}")
63 |
64 | n_training_logspace = [1200, 2200, 3200]  # np.logspace(2, np.log10(3200), num=4, dtype=int)
65 | cov_fun = get_covariance_function()
66 | if FLAGS.run:
67 | times = np.empty(len(n_training_logspace), dtype=dtype)
68 | for i, n_training in tqdm.tqdm(enumerate(n_training_logspace), total=len(n_training_logspace)):
69 | t, y = get_data(n_training)
70 | gp_model = get_model(ModelEnum(FLAGS.model), (t.astype(dtype), y.astype(dtype)), FLAGS.noise_variance,
71 | cov_fun,
72 | t.shape[0])
73 | set_gp_priors(gp_model)
74 | opt = gpf.optimizers.Scipy()
75 | eval_func = opt.eval_func(gp_model.training_loss, gp_model.trainable_variables, compile=True)
76 |
77 | x0 = opt.initial_parameters(gp_model.trainable_variables).numpy()
78 |
79 | _ = eval_func(x0)  # warm-up call so that compilation time is not counted below
80 | tic = time.time()
81 | opt_log = minimize(eval_func, x0, jac=True, options=dict(maxiter=100, disp=1))
82 | times[i] = time.time() - tic
83 | print(opt_log)
84 | params_res = gpf.utilities.parameter_dict(gp_model)
85 | for param in gp_model.trainable_parameters:
86 | print(param.name)
87 | np.savez(f_posterior + f"-{n_training}", **params_res)
88 | np.save(f_times, np.stack([n_training_logspace, times], axis=1))
89 |
90 | if FLAGS.plot:
91 | T = 3200 * 30
92 | for n_training in n_training_logspace[-1:]:
93 | result = np.load(f_posterior + f"-{n_training}.npz")
94 | t, y = get_data(n_training)
95 | # rng = np.random.RandomState(FLAGS.np_seed)
96 | interpolation_times = np.linspace(t[0, 0], t[-1, 0], T).astype(gpf.config.default_float())[:, None]
97 | print(interpolation_times.shape)
98 | gp_model = get_model(ModelEnum(FLAGS.model), (t, y), FLAGS.noise_variance, cov_fun,
99 | T + n_training)
100 | ax = plt.subplot()
101 | ax.scatter(t[:, 0], y[:, 0], s=1, marker="x", color="k")
102 |
103 | for param_name, param_val in result.items():
104 | print(param_name, param_val)
105 | param = eval(f"gp_model{param_name}")
106 | param.assign(param_val)
107 | interpolated_points, interpolation_cov = gp_model.predict_f(interpolation_times)  # warm-up / compile
108 | tic = time.time()
109 | interpolated_points, interpolation_cov = gp_model.predict_f(interpolation_times)
110 | print(time.time() - tic)
111 | print(interpolated_points.shape)
112 | ax.plot(interpolation_times[:, 0], interpolated_points[:, 0], alpha=0.25, color="blue")
113 | ax.fill_between(interpolation_times[:, 0],
114 | interpolated_points[:, 0] - 1.96 * np.sqrt(interpolation_cov[:, 0]),
115 | interpolated_points[:, 0] + 1.96 * np.sqrt(interpolation_cov[:, 0]), alpha=0.25,
116 | color="blue")
117 | plt.show()
118 |
119 |
120 | def main(_):
121 | device = tf.device(FLAGS.device)
122 | with device:
123 | run()
124 |
125 |
126 | if __name__ == '__main__':
127 | if not os.path.exists("results"):
128 | os.makedirs('results')
129 | app.run(main)
130 |
--------------------------------------------------------------------------------
/pssgp/experiments/sunspot/mcmc.py:
--------------------------------------------------------------------------------
1 | # MCMC regression experiments on the sunspot data.
2 | # Corresponds to the *** of paper.
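# Example invocation (a sketch with illustrative flag values; --data_dir must
# point at a directory containing sunspots.csv, cf. sunspot/common.py):
#
#     python -m pssgp.experiments.sunspot.mcmc --model=PSSGP --mcmc=HMC \
#         --step_size=0.1 --data_dir=experiments/sunspots --device="/gpu:0"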
3 | import os
4 |
5 | import gpflow as gpf
6 | import matplotlib.pyplot as plt
7 | import numpy as np
8 | import tensorflow as tf
9 | import tqdm
10 | from absl import app, flags
11 | from gpflow.models import GPModel
12 | from tensorflow_probability.python.distributions import Normal
13 |
14 | from pssgp.experiments.common import ModelEnum, get_model, \
15 | run_one_mcmc, MCMC
16 | from pssgp.experiments.sunspot.common import get_data, FLAGS
17 | from pssgp.kernels import Matern32
18 |
19 | flags.DEFINE_integer('np_seed', 666, "data model seed")
20 | flags.DEFINE_integer('tf_seed', 31415, "mcmc model seed")
21 | flags.DEFINE_integer('n_runs', 10, "number of training-set sizes in the logspace sweep")
22 | flags.DEFINE_string('mcmc', MCMC.HMC.value, "MCMC method enum")
23 | flags.DEFINE_integer('n_samples', 1000, "Number of samples required")
24 | flags.DEFINE_integer('n_burnin', 100, "Number of burnin samples")
25 | flags.DEFINE_float('step_size', 0.1, "Step size for the gradient based chain")
26 | flags.DEFINE_integer('n_leapfrogs', 10, "Number of leapfrog steps for HMC")
27 |
28 | flags.DEFINE_boolean('plot', True, "Plot the result")
29 | flags.DEFINE_boolean('run', True, "Run the result or load the data")
30 |
31 |
32 | def set_gp_priors(gp_model: GPModel):
33 | gp_dtype = gpf.config.default_float()
34 | variance_prior = Normal(gp_dtype(FLAGS.noise_variance), gp_dtype(FLAGS.noise_variance))
35 | if FLAGS.model == ModelEnum.GP.value:
36 | gp_model.likelihood.variance.prior = variance_prior
37 | else:
38 | gp_model.noise_variance.prior = variance_prior
39 |
40 |
41 | def get_covariance_function():
42 | gp_dtype = gpf.config.default_float()
43 |
44 | matern_variance = 5500.
45 | matern_lengthscales = 5.
46 |
47 | m32_cov = Matern32(variance=matern_variance, lengthscales=matern_lengthscales)
48 |
49 | m32_cov.variance.prior = Normal(gp_dtype(matern_variance), gp_dtype(matern_variance))
50 | m32_cov.lengthscales.prior = Normal(gp_dtype(matern_lengthscales), gp_dtype(matern_lengthscales))
51 |
52 | return m32_cov
53 |
54 |
55 | def run():
56 | gpf.config.set_default_float(getattr(np, FLAGS.dtype))
57 |
58 | tf.random.set_seed(FLAGS.tf_seed)
59 | f_times = os.path.join("results", f"mcmc-times-{FLAGS.model}-{FLAGS.mcmc}")
60 | # TODO: we need a flag for this directory really.
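# Posterior samples are dumped per training size as
# results/mcmc-posterior-<model>-<mcmc>-<n_training>.npz (see np.savez below).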
61 | f_posterior = os.path.join("results", f"mcmc-posterior-{FLAGS.model}-{FLAGS.mcmc}")
62 |
63 | n_training_logspace = [3000, 300, 50]
64 | cov_fun = get_covariance_function()
65 | if FLAGS.run:
66 | times = np.empty(len(n_training_logspace), dtype=float)
67 | for i, n_training in tqdm.tqdm(enumerate(n_training_logspace), total=len(n_training_logspace)):
68 | t, y = get_data(n_training)
69 | gp_model = get_model(ModelEnum(FLAGS.model), (t, y), FLAGS.noise_variance, cov_fun,
70 | t.shape[0])
71 | set_gp_priors(gp_model)
72 |
73 | run_time, params_res = run_one_mcmc(n_training, gp_model)
74 | times[i] = run_time
75 | np.savez(f_posterior + f"-{n_training}", **params_res)
76 | np.save(f_times, np.stack([n_training_logspace, times], axis=1))
77 | if FLAGS.plot:
78 | T = 2000
79 | for n_training in n_training_logspace:
80 | result = np.load(f_posterior + f"-{n_training}.npz")
81 | t, y = get_data(n_training)
82 | rng = np.random.RandomState(FLAGS.np_seed)
83 | interpolation_times = np.sort(rng.uniform(t[0], t[-1], T))[:, None]
84 | gp_model = get_model(ModelEnum(FLAGS.model), (t, y), FLAGS.noise_variance, cov_fun,
85 | T + n_training)
86 | ax = plt.subplot()
87 | ax.scatter(t[:, 0], y[:, 0], s=1, marker="x", color="k")
88 | for param_name, sample in result.items():
89 | param = eval(f"gp_model{param_name}")
90 | print(param, np.mean(sample))
91 |
92 | for i in rng.choice(FLAGS.n_samples, 10, replace=True):
93 | for param, sample in zip(gp_model.trainable_variables, result.values()):
94 | # param = eval(f"gp_model{param_name}")
95 | param.assign(sample[i])
96 | interpolated_points, interpolation_cov = gp_model.predict_f(interpolation_times)
97 | ax.plot(interpolation_times[:, 0], interpolated_points[:, 0], alpha=0.25, color="blue")
98 |
99 | plt.show()
100 |
101 |
102 | def main(_):
103 | device = tf.device(FLAGS.device)
104 | with device:
105 | run()
106 |
107 |
108 | if __name__ == '__main__':
109 | if not os.path.exists("results"):
110 | os.makedirs('results')
111 | app.run(main)
112 |
--------------------------------------------------------------------------------
/pssgp/experiments/sunspot/speed_and_stability.py:
--------------------------------------------------------------------------------
1 | # Regression experiments on sinusoidal signals.
2 | # Corresponds to the *** of paper.
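# NB: as with the CO2 variant, FLAGS and get_data below come from
# pssgp.experiments.toy_models.common, so this sweep runs on synthetic toy
# data rather than on the sunspot series.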
3 | import os
4 | import time
5 | from itertools import product
6 |
7 | import gpflow as gpf
8 | import matplotlib.pyplot as plt
9 | import numpy as np
10 | import tensorflow as tf
11 | import tqdm
12 | from absl import app, flags
13 | from gpflow.models.util import data_input_to_tensor
14 | from scipy.stats import gaussian_kde
15 |
16 | from pssgp.experiments.common import ModelEnum, CovarianceEnum, get_simple_covariance_function, get_model
17 | from pssgp.experiments.toy_models.common import FLAGS, get_data
18 | from pssgp.misc_utils import rmse
19 |
20 | flags.DEFINE_integer('n_seeds', 21, "Number of random seeds (repetitions) per grid point")
21 | flags.DEFINE_integer('mesh_size', 10, "Size of the mesh for prediction")
22 | flags.DEFINE_boolean('plot', False, "Plot the result")
23 | flags.DEFINE_boolean('run', True, "Run the result or load the data")
24 |
25 |
26 | def run_one(seed, covariance_function, gp_model, n_training, n_pred):
27 | t, ft, t_pred, ft_pred, y = get_data(seed, n_training, n_pred)
28 | gp_dtype = gpf.config.default_float()
29 |
30 | if gp_model is None:
31 | model_name = ModelEnum(FLAGS.model)
32 | gp_model = get_model(model_name, (t, y), FLAGS.noise_variance, covariance_function,
33 | t.shape[0] + t_pred.shape[0])
34 | else:
35 | gp_model.data = data_input_to_tensor((t, y))
36 |
37 | tensor_t_pred = tf.convert_to_tensor(t_pred, dtype=gp_dtype)
38 | y_pred, _ = gp_model.predict_f(tensor_t_pred)
39 | error = rmse(y_pred, ft_pred)
40 | return error, gp_model
41 |
42 |
43 | def ridgeline(ax, data, overlap=0, fill=True, fill_color="b", n_points=150):
44 | """
45 | Adapted from https://glowingpython.blogspot.com/2020/03/ridgeline-plots-in-pure-matplotlib.html
46 | """
47 | if overlap > 1 or overlap < 0:
48 | raise ValueError('overlap must be in [0 1]')
49 | xx = np.linspace(np.min(np.concatenate(data)),
50 | np.max(np.concatenate(data)), n_points)
51 | ys = []
52 | for i, d in enumerate(data):
53 | pdf = gaussian_kde(d)
54 | y = i * (1.0 - overlap)
55 | ys.append(y)
56 | curve = pdf(xx)
57 | if fill:
58 | ax.fill_between(xx, np.ones(n_points) * y,
59 | curve + y, zorder=len(data) - i + 1, color=fill_color, alpha=0.5)
60 | ax.plot(xx, curve + y, zorder=len(data) - i + 1, color=fill_color)
61 |
62 |
63 | def run():
64 | f_stability = os.path.join("results", f"stability-matrix-{FLAGS.cov}-{FLAGS.model}")
65 | f_time = os.path.join("results", f"time-matrix-{FLAGS.cov}-{FLAGS.model}")
66 |
67 | if FLAGS.run:
68 | gpf.config.set_default_float(getattr(np, FLAGS.dtype))
69 | cov_name = CovarianceEnum(FLAGS.cov)
70 | cov_fun = get_simple_covariance_function(cov_name)
71 | errors = np.empty((FLAGS.mesh_size, FLAGS.mesh_size, FLAGS.n_seeds), dtype=float)
72 | times = np.empty((FLAGS.mesh_size, FLAGS.mesh_size, FLAGS.n_seeds), dtype=float)
73 | n_training_logspace = n_test_logspace = np.logspace(12, 15, FLAGS.mesh_size, base=2, dtype=int)
74 |
75 | for (i, n_training), (j, n_pred) in tqdm.tqdm(product(enumerate(n_training_logspace),
76 | enumerate(n_test_logspace)),
77 | total=FLAGS.mesh_size ** 2,
78 | desc=FLAGS.model):
79 | model = None
80 | for seed in tqdm.trange(FLAGS.n_seeds, leave=False):
81 | try:
82 | tic = time.time()
83 | error, model = run_one(seed, cov_fun, model, n_training, n_pred)
84 | toc = time.time()
85 | # the only reason we return the model is so that we don't have to recompile every time
86 | errors[i, j, seed] = error
87 | times[i, j, seed] = toc - tic
88 | except Exception as e:  # noqa: It's not clear what the error returned by TF could be, so well...
89 | errors[i, j, seed] = float("nan")
90 | times[i, j, seed] = float("nan")
91 | print(
92 | f"{FLAGS.model}-{FLAGS.cov} failed with n_training,n_pred={n_training, n_pred} and error: \n {e}")
93 |
94 | np.save(f_stability, errors)
95 | np.save(f_time, times)
96 | elif FLAGS.plot:
97 | errors = np.load(f_stability + ".npy")
98 | fig, axes = plt.subplots(ncols=3, nrows=2, figsize=(17, 12))
99 | for i, ax in enumerate(axes[0, :]):
100 | ridgeline(ax, errors[(i + 1) * FLAGS.mesh_size // 3 - 1])
101 | for j, ax in enumerate(axes[1, :]):
102 | ridgeline(ax, errors[:, (j + 1) * FLAGS.mesh_size // 3 - 1])
103 | fig.show()
104 |
105 |
106 | def main(_):
107 | device = tf.device(FLAGS.device)
108 | with device:
109 | run()
110 |
111 |
112 | if __name__ == '__main__':
113 | if not os.path.exists("results"):
114 | os.makedirs('results')
115 | app.run(main)
116 |
--------------------------------------------------------------------------------
/pssgp/experiments/toy_models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/EEA-sensors/parallel-gps/f2bf8d9d3f0f0557b4b8ec9ae7f1f951a2b75ad9/pssgp/experiments/toy_models/__init__.py
--------------------------------------------------------------------------------
/pssgp/experiments/toy_models/common.py:
--------------------------------------------------------------------------------
1 | import enum
2 |
3 | import numpy as np
4 | from absl import flags
5 | from gpflow import default_float
6 |
7 | from pssgp.experiments.common import ModelEnum, CovarianceEnum
8 | from pssgp.toymodels import sinu, comp_sinu, rect, obs_noise
9 |
10 |
11 | class DataEnum(enum.Enum):
12 | SINE = "SINE"
13 | COMPOSITE_SINE = "COMPOSITE_SINE"
14 | RECT = "RECT"
15 |
16 |
17 | FLAGS = flags.FLAGS
18 | flags.DEFINE_string('model', ModelEnum.SSGP.value, 'Select model to run. Options are GP, SSGP, and PSSGP.')
19 | flags.DEFINE_string('cov', CovarianceEnum.Matern32.value, 'Covariance function.')
20 | flags.DEFINE_string('data_model', DataEnum.SINE.value, 'Generative model for the data.')
21 | flags.DEFINE_string('dtype', "float32", 'GPFLOW default float type.')
22 | flags.DEFINE_integer('rbf_order', 6, 'Order of ss-RBF approximation.', lower_bound=1)
23 | flags.DEFINE_integer('rbf_balance_iter', 10, 'Iterations of RBF balancing.', lower_bound=1)
24 | flags.DEFINE_integer('qp_order', 6, 'Order of ss-quasiperiodic approximation.', lower_bound=1)
25 | flags.DEFINE_float('noise_variance', 0.5, 'Variance of the noise.', lower_bound=1e-4)
26 |
27 |
28 | def get_data(seed, n_training, n_pred):
29 | dtype = default_float()
30 |
31 | t = np.linspace(0, 4, n_training, dtype=dtype)
32 | t_pred = np.linspace(0, 4, n_pred, dtype=dtype)
33 | data_model = DataEnum(FLAGS.data_model)
34 | if data_model == DataEnum.SINE:
35 | data_fun = sinu
36 | elif data_model == DataEnum.COMPOSITE_SINE:
37 | data_fun = comp_sinu
38 | elif data_model == DataEnum.RECT:
39 | data_fun = rect
40 | else:
41 | raise ValueError(f"Unknown data model: {FLAGS.data_model}")
42 | ft = data_fun(t)
43 | ft_pred = data_fun(t_pred)
44 |
45 | y = obs_noise(ft, FLAGS.noise_variance, seed)
46 | return t.reshape(-1, 1), ft.reshape(-1, 1), t_pred.reshape(-1, 1), ft_pred.reshape(-1, 1), y.reshape(-1, 1)
47 |
--------------------------------------------------------------------------------
/pssgp/experiments/toy_models/mcmc.py:
--------------------------------------------------------------------------------
1 | # Corresponds to the *** of paper.
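# Example invocation, mirroring experiments/toy_models/mcmc.sh:
#
#     python -m pssgp.experiments.toy_models.mcmc --step_size=0.01 --n_runs=10 \
#         --mcmc=HMC --model=PSSGP --cov=Matern32 --rbf_order=6 \
#         --rbf_balance_iter=10 --qp_order=6 --data_model=SINE \
#         --noise_variance=0.1 --dtype=float32 --device="/gpu:0"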
2 | import os
3 |
4 | import gpflow as gpf
5 | import numpy as np
6 | import tensorflow as tf
7 | import tqdm
8 | from absl import app, flags
9 | from gpflow.base import PriorOn
10 | from gpflow.models import GPModel
11 | from tensorflow_probability.python.distributions import Normal
12 |
13 | from pssgp.experiments.common import ModelEnum, CovarianceEnum, get_simple_covariance_function, get_model, MCMC, \
14 | run_one_mcmc
15 | from pssgp.experiments.toy_models.common import FLAGS, get_data
16 |
17 | flags.DEFINE_integer('np_seed', 42, "data model seed")
18 | flags.DEFINE_integer('tf_seed', 31415, "mcmc model seed")
19 | flags.DEFINE_integer('n_runs', 10, "number of training-set sizes in the logspace sweep")
20 | flags.DEFINE_integer('n_samples', 1000, "Number of samples required")
21 | flags.DEFINE_integer('n_burnin', 100, "Number of burnin samples")
22 | flags.DEFINE_string('mcmc', MCMC.HMC.value, "Which MCMC algo")
23 | flags.DEFINE_float('step_size', 0.05, "Step size for the gradient based chain")
24 | flags.DEFINE_integer('n_leapfrogs', 10, "Number of leapfrog steps for HMC")
25 |
26 | flags.DEFINE_boolean('plot', False, "Plot the result")
27 | flags.DEFINE_boolean('run', True, "Run the result or load the data")
28 |
29 |
30 | def set_priors(gp_model: GPModel):
31 | to_dtype = gpf.utilities.to_default_float
32 | if FLAGS.model == ModelEnum.GP.value:
33 | gp_model.likelihood.variance.prior = Normal(to_dtype(0.1), to_dtype(1.))
34 | gp_model.likelihood.variance.prior_on = PriorOn.UNCONSTRAINED
35 | else:
36 | gp_model.noise_variance.prior = Normal(to_dtype(0.1), to_dtype(1.))
37 | gp_model.noise_variance.prior_on = PriorOn.UNCONSTRAINED
38 |
39 | gp_model.kernel.variance.prior = Normal(to_dtype(1.), to_dtype(3.))
40 | gp_model.kernel.variance.prior_on = PriorOn.UNCONSTRAINED
41 |
42 | gp_model.kernel.lengthscales.prior = Normal(to_dtype(1.), to_dtype(3.))
43 | gp_model.kernel.lengthscales.prior_on = PriorOn.UNCONSTRAINED
44 | return gp_model
45 |
46 |
47 | def run():
48 | gpf.config.set_default_float(getattr(np, FLAGS.dtype))
49 |
50 | tf.random.set_seed(FLAGS.tf_seed)
51 | f_times = os.path.join("results", f"mcmc-times-{FLAGS.cov}-{FLAGS.model}-{FLAGS.mcmc}")
52 | f_posterior = os.path.join("results", f"mcmc-posterior-{FLAGS.cov}-{FLAGS.model}-{FLAGS.mcmc}")
53 | if FLAGS.cov == CovarianceEnum.QP.value:
54 | raise NotImplementedError("Quasiperiodic is not supported for this experiment")
55 | n_training_logspace = np.logspace(7, 14, FLAGS.n_runs, base=2, dtype=int)
56 |
57 | if FLAGS.run:
58 | times = np.empty(FLAGS.n_runs, dtype=float)
59 | cov_fun = get_simple_covariance_function(FLAGS.cov)
60 | for i, n_training in tqdm.tqdm(enumerate(n_training_logspace), total=FLAGS.n_runs):
61 | t, *_, y = get_data(FLAGS.np_seed, n_training, 1)
62 | gp_model = get_model(ModelEnum(FLAGS.model), (t, y), FLAGS.noise_variance, cov_fun,
63 | t.shape[0])
64 | gp_model = set_priors(gp_model)
65 | run_time, params_res = run_one_mcmc(n_training, gp_model)
66 | times[i] = run_time
67 | np.savez(f_posterior + f"-{n_training}", **params_res)
68 | np.save(f_times, np.stack([n_training_logspace, times], axis=1))
69 |
70 |
71 | def main(_):
72 | device = tf.device(FLAGS.device)
73 | with device:
74 | run()
75 |
76 |
77 | if __name__ == '__main__':
78 | if not os.path.exists("results"):
79 | os.makedirs('results')
80 | app.run(main)
81 |
--------------------------------------------------------------------------------
/pssgp/experiments/toy_models/speed_and_stability.py:
--------------------------------------------------------------------------------
1 | # Regression experiments on sinusoidal signals.
2 | # Corresponds to the *** of paper.
3 | import os
4 | import time
5 | from itertools import product
6 |
7 | import gpflow as gpf
8 | import matplotlib.pyplot as plt
9 | import numpy as np
10 | import tensorflow as tf
11 | import tqdm
12 | from absl import app, flags
13 | from gpflow.models.util import data_input_to_tensor
14 | from scipy.stats import gaussian_kde
15 |
16 | from pssgp.experiments.common import ModelEnum, CovarianceEnum, get_simple_covariance_function, get_model
17 | from pssgp.experiments.toy_models.common import FLAGS, get_data
18 | from pssgp.misc_utils import rmse
19 |
20 | flags.DEFINE_integer('n_seeds', 21, "Number of random seeds (repetitions) per grid point")
21 | flags.DEFINE_integer('mesh_size', 10, "Size of the mesh for prediction")
22 | flags.DEFINE_boolean('plot', False, "Plot the result")
23 | flags.DEFINE_boolean('run', True, "Run the result or load the data")
24 |
25 |
26 | def run_one(seed, covariance_function, gp_model, n_training, n_pred):
27 | t, ft, t_pred, ft_pred, y = get_data(seed, n_training, n_pred)
28 | gp_dtype = gpf.config.default_float()
29 |
30 | if gp_model is None:
31 | model_name = ModelEnum(FLAGS.model)
32 | gp_model = get_model(model_name, (t, y), FLAGS.noise_variance, covariance_function,
33 | t.shape[0] + t_pred.shape[0])
34 | else:
35 | gp_model.data = data_input_to_tensor((t, y))
36 |
37 | tensor_t_pred = tf.convert_to_tensor(t_pred, dtype=gp_dtype)
38 | y_pred, _ = gp_model.predict_f(tensor_t_pred)
39 | error = rmse(y_pred, ft_pred)
40 | return error, gp_model
41 |
42 |
43 | def ridgeline(ax, data, overlap=0, fill=True, fill_color="b", n_points=150):
44 | """
45 | Adapted from https://glowingpython.blogspot.com/2020/03/ridgeline-plots-in-pure-matplotlib.html
46 | """
47 | if overlap > 1 or overlap < 0:
48 | raise ValueError('overlap must be in [0 1]')
49 | xx = np.linspace(np.min(np.concatenate(data)),
50 | np.max(np.concatenate(data)), n_points)
51 | ys = []
52 | for i, d in enumerate(data):
53 | pdf = gaussian_kde(d)
54 | y = i * (1.0 - overlap)
55 | ys.append(y)
56 | curve = pdf(xx)
57 | if fill:
58 | ax.fill_between(xx, np.ones(n_points) * y,
59 | curve + y, zorder=len(data) - i + 1, color=fill_color, alpha=0.5)
60 | ax.plot(xx, curve + y, zorder=len(data) - i + 1, color=fill_color)
61 |
62 |
63 | def run():
64 | f_stability = os.path.join("results", f"stability-matrix-{FLAGS.cov}-{FLAGS.model}")
65 | f_time = os.path.join("results", f"time-matrix-{FLAGS.cov}-{FLAGS.model}")
66 |
67 | if FLAGS.run:
68 | gpf.config.set_default_float(getattr(np, FLAGS.dtype))
69 | cov_name = CovarianceEnum(FLAGS.cov)
70 | cov_fun = get_simple_covariance_function(cov_name)
71 | errors = np.empty((FLAGS.mesh_size, FLAGS.mesh_size, FLAGS.n_seeds), dtype=float)
72 | times = np.empty((FLAGS.mesh_size, FLAGS.mesh_size, FLAGS.n_seeds), dtype=float)
73 | n_training_logspace = n_test_logspace = np.logspace(12, 15, FLAGS.mesh_size, base=2, dtype=int)
74 |
75 | for (i, n_training), (j, n_pred) in tqdm.tqdm(product(enumerate(n_training_logspace),
76 | enumerate(n_test_logspace)),
77 | total=FLAGS.mesh_size ** 2,
78 | desc=FLAGS.model):
79 | model = None
80 | for seed in tqdm.trange(FLAGS.n_seeds, leave=False):
81 | try:
82 | tic = time.time()
83 | error, model = run_one(seed, cov_fun, model, n_training, n_pred)
84 | toc = time.time()
85 | # the only reason we return the model is so that we don't have to recompile every time
86 | errors[i, j,
seed] = error 87 | times[i, j, seed] = toc - tic 88 | except Exception as e: # noqa: It's not clear what the error returned by TF could be, so well... 89 | errors[i, j, seed] = float("nan") 90 | times[i, j, seed] = float("nan") 91 | print( 92 | f"{FLAGS.model}-{FLAGS.cov} failed with n_training,n_pred={n_training, n_pred} and error: \n {e}") 93 | 94 | np.save(f_stability, errors) 95 | np.save(f_time, times) 96 | elif FLAGS.plot: 97 | errors = np.load(f_stability + ".npy") 98 | fig, axes = plt.subplots(ncols=3, nrows=2, figsize=(17, 12)) 99 | for i, ax in enumerate(axes[0, :]): 100 | ridgeline(ax, errors[(i + 1) * FLAGS.mesh_size // 3 - 1]) 101 | for j, ax in enumerate(axes[1, :]): 102 | ridgeline(ax, errors[:, (j + 1) * FLAGS.mesh_size // 3 - 1]) 103 | fig.show() 104 | 105 | 106 | def main(_): 107 | device = tf.device(FLAGS.device) 108 | with device: 109 | run() 110 | 111 | 112 | if __name__ == '__main__': 113 | if not os.path.exists("results"): 114 | os.makedirs('results') 115 | app.run(main) 116 | -------------------------------------------------------------------------------- /pssgp/kalman/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/EEA-sensors/parallel-gps/f2bf8d9d3f0f0557b4b8ec9ae7f1f951a2b75ad9/pssgp/kalman/__init__.py -------------------------------------------------------------------------------- /pssgp/kalman/base.py: -------------------------------------------------------------------------------- 1 | from collections import namedtuple 2 | 3 | LGSSM = namedtuple("LGSSM", ["P0", "Fs", "Qs", "H", "R"]) 4 | -------------------------------------------------------------------------------- /pssgp/kalman/parallel.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import tensorflow as tf 4 | from tensorflow_probability.python.distributions import MultivariateNormalTriL 5 | from tensorflow_probability.python.math import scan_associative 6 | 7 | __all__ = ["pkf", "pks", "pkfs"] 8 | 9 | mv = tf.linalg.matvec 10 | mm = tf.linalg.matmul 11 | 12 | 13 | def first_filtering_element(m0, P0, F, Q, H, R, y): 14 | def _res_nan(): 15 | A = tf.zeros_like(F) 16 | b = m0 17 | C = P0 18 | eta = tf.zeros_like(m0) 19 | J = tf.zeros_like(F) 20 | 21 | return A, b, C, J, eta 22 | 23 | def _res_not_nan(): 24 | S1 = H @ mm(P0, H, transpose_b=True) + R 25 | S1_chol = tf.linalg.cholesky(S1) 26 | K1t = tf.linalg.cholesky_solve(S1_chol, H @ P0) 27 | 28 | A = tf.zeros_like(F) 29 | b = m0 + mv(K1t, y - mv(H, m0), transpose_a=True) 30 | C = P0 - mm(K1t, S1, transpose_a=True) @ K1t 31 | 32 | S = H @ mm(Q, H, transpose_b=True) + R 33 | chol = tf.linalg.cholesky(S) 34 | HF = H @ F 35 | eta = mv(HF, 36 | tf.squeeze(tf.linalg.cholesky_solve(chol, tf.expand_dims(y, 1)), 1), 37 | transpose_a=True) 38 | J = mm(HF, tf.linalg.cholesky_solve(chol, H @ F), transpose_a=True) 39 | 40 | return A, b, C, J, eta 41 | 42 | res = tf.cond(tf.math.is_nan(y), _res_nan, _res_not_nan) 43 | return res 44 | 45 | 46 | def _generic_filtering_element_nan(F, Q): 47 | A = F 48 | b = tf.zeros(tf.shape(F)[:2], dtype=F.dtype) 49 | C = Q 50 | eta = tf.zeros(tf.shape(F)[:2], dtype=F.dtype) 51 | J = tf.zeros_like(F) 52 | 53 | return A, b, C, J, eta 54 | 55 | 56 | def _generic_filtering_element(F, Q, H, R, y): 57 | S = H @ mm(Q, H, transpose_b=True) + tf.expand_dims(R, 0) 58 | chol = tf.linalg.cholesky(S) 59 | 60 | Kt = tf.linalg.cholesky_solve(chol, H @ Q) 61 | A = F - mm(Kt, H, transpose_a=True) @ F 62 | b = mv(Kt, 
y, transpose_a=True) 63 | C = Q - mm(Kt, H, transpose_a=True) @ Q 64 | 65 | HF = H @ F 66 | eta = mv(HF, 67 | tf.squeeze(tf.linalg.cholesky_solve(chol, tf.expand_dims(y, 1)), -1), 68 | transpose_a=True) 69 | 70 | J = mm(HF, tf.linalg.cholesky_solve(chol, HF), transpose_a=True) 71 | 72 | return A, b, C, J, eta 73 | 74 | 75 | def _combine_nan_and_ok(ok_elem, nan_elem, ok_indices, nan_indices, n): 76 | elem_shape = (n,) + tuple(s for s in nan_elem.shape[1:]) 77 | elem = tf.zeros(elem_shape, dtype=ok_elem.dtype) 78 | elem = tf.tensor_scatter_nd_update(elem, nan_indices, nan_elem) 79 | elem = tf.tensor_scatter_nd_update(elem, ok_indices, ok_elem) 80 | return elem 81 | 82 | 83 | def make_associative_filtering_elements(m0, P0, Fs, Qs, H, R, observations): 84 | init_res = first_filtering_element(m0, P0, Fs[0], Qs[0], H, R, observations[0]) 85 | 86 | nan_ys = tf.reshape(tf.math.is_nan(observations), (-1,)) 87 | nan_res = _generic_filtering_element_nan(Fs, Qs) 88 | ok_res = _generic_filtering_element(Fs, Qs, H, R, observations) 89 | 90 | gen_res = [] 91 | for nan_elem, ok_elem in zip(nan_res, ok_res): 92 | ndim = len(nan_elem.shape) 93 | gen_res.append(tf.where(tf.reshape(nan_ys, (-1,) + (1,) * (ndim - 1)), 94 | nan_elem, 95 | ok_elem)) 96 | return tuple(tf.tensor_scatter_nd_update(gen_es, [[0]], tf.expand_dims(first_e, 0)) 97 | for first_e, gen_es in zip(init_res, gen_res)) 98 | 99 | 100 | def filtering_operator(elem1, elem2): 101 | A1, b1, C1, J1, eta1 = elem1 102 | A2, b2, C2, J2, eta2 = elem2 103 | 104 | n, dim = tf.shape(A1)[0], A1.shape[1] 105 | I = tf.eye(dim, dtype=A1.dtype, batch_shape=(n,)) 106 | 107 | temp = tf.linalg.solve(I + C1 @ J2, tf.transpose(A2, perm=[0, 2, 1]), adjoint=True) 108 | A = mm(temp, A1, transpose_a=True) 109 | b = mv(temp, b1 + mv(C1, eta2), transpose_a=True) + b2 110 | C = mm(temp, mm(C1, A2, transpose_b=True), transpose_a=True) + C2 111 | 112 | temp = tf.linalg.solve(I + J2 @ C1, A1, adjoint=True) 113 | eta = mv(temp, eta2 - mv(J2, b1), transpose_a=True) + eta1 114 | J = mm(temp, J2 @ A1, transpose_a=True) + J1 115 | 116 | C = 0.5 * (C + tf.transpose(C, [0, 2, 1])) 117 | J = 0.5 * (J + tf.transpose(J, [0, 2, 1])) 118 | return A, b, C, J, eta 119 | 120 | 121 | def pkf(lgssm, observations, return_loglikelihood=False, max_parallel=10000): 122 | with tf.name_scope("parallel_filter"): 123 | P0, Fs, Qs, H, R = lgssm 124 | dtype = P0.dtype 125 | m0 = tf.zeros(tf.shape(P0)[0], dtype=dtype) 126 | 127 | max_num_levels = math.ceil(math.log2(max_parallel)) 128 | 129 | initial_elements = make_associative_filtering_elements(m0, P0, Fs, Qs, H, R, observations) 130 | 131 | final_elements = scan_associative(filtering_operator, 132 | initial_elements, 133 | max_num_levels=max_num_levels) 134 | 135 | if return_loglikelihood: 136 | filtered_means = tf.concat([tf.expand_dims(m0, 0), final_elements[1][:-1]], axis=0) 137 | filtered_cov = tf.concat([tf.expand_dims(P0, 0), final_elements[2][:-1]], axis=0) 138 | predicted_means = mv(Fs, filtered_means) 139 | predicted_covs = mm(Fs, mm(filtered_cov, Fs, transpose_b=True)) + Qs 140 | obs_means = mv(H, predicted_means) 141 | obs_covs = mm(H, mm(predicted_covs, H, transpose_b=True)) + tf.expand_dims(R, 0) 142 | 143 | dists = MultivariateNormalTriL(obs_means, tf.linalg.cholesky(obs_covs)) 144 | # TODO: some logic could be added here to avoid handling the covariance of non-nan models, but no impact for GPs 145 | logprobs = dists.log_prob(observations) 146 | 147 | logprobs_without_nans = tf.where(tf.math.is_nan(logprobs), 148 | 
tf.zeros_like(logprobs), 149 | logprobs) 150 | total_log_prob = tf.reduce_sum(logprobs_without_nans) 151 | return final_elements[1], final_elements[2], total_log_prob 152 | return final_elements[1], final_elements[2] 153 | 154 | 155 | def last_smoothing_element(m, P): 156 | return tf.zeros_like(P), m, P 157 | 158 | 159 | def generic_smoothing_element(F, Q, m, P): 160 | Pp = F @ mm(P, F, transpose_b=True) + Q 161 | chol = tf.linalg.cholesky(Pp) 162 | E = tf.transpose(tf.linalg.cholesky_solve(chol, F @ P), [0, 2, 1]) 163 | g = m - mv(E @ F, m) 164 | L = P - E @ mm(Pp, E, transpose_b=True) 165 | L = 0.5 * (L + tf.transpose(L, [0, 2, 1])) 166 | return E, g, L 167 | 168 | 169 | def make_associative_smoothing_elements(Fs, Qs, filtering_means, filtering_covariances): 170 | last_elems = last_smoothing_element(filtering_means[-1], filtering_covariances[-1]) 171 | generic_elems = generic_smoothing_element(Fs[1:], Qs[1:], filtering_means[:-1], filtering_covariances[:-1]) 172 | return tuple(tf.concat([gen_es, tf.expand_dims(last_e, 0)], axis=0) 173 | for gen_es, last_e in zip(generic_elems, last_elems)) 174 | 175 | 176 | def smoothing_operator(elem1, elem2): 177 | E1, g1, L1 = elem1 178 | E2, g2, L2 = elem2 179 | 180 | E = E2 @ E1 181 | g = mv(E2, g1) + g2 182 | L = E2 @ mm(L1, E2, transpose_b=True) + L2 183 | 184 | return E, g, L 185 | 186 | 187 | def pks(lgssm, ms, Ps, max_parallel=10000): 188 | max_num_levels = math.ceil(math.log2(max_parallel)) 189 | _, Fs, Qs, *_ = lgssm 190 | initial_elements = make_associative_smoothing_elements(Fs, Qs, ms, Ps) 191 | reversed_elements = tuple(tf.reverse(elem, axis=[0]) for elem in initial_elements) 192 | 193 | final_elements = scan_associative(smoothing_operator, 194 | reversed_elements, 195 | max_num_levels=max_num_levels) 196 | return tf.reverse(final_elements[1], axis=[0]), tf.reverse(final_elements[2], axis=[0]) 197 | 198 | 199 | def pkfs(model, observations, max_parallel=10000): 200 | fms, fPs = pkf(model, observations, False, max_parallel) 201 | return pks(model, fms, fPs, max_parallel) 202 | -------------------------------------------------------------------------------- /pssgp/kalman/sequential.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | __all__ = ["kf", "ks", "kfs"] 4 | 5 | from tensorflow_probability.python.distributions import MultivariateNormalTriL 6 | 7 | mv = tf.linalg.matvec 8 | mm = tf.linalg.matmul 9 | 10 | 11 | def kf(lgssm, observations, return_loglikelihood=False, return_predicted=False): 12 | P0, Fs, Qs, H, R = lgssm 13 | dtype = P0.dtype 14 | m0 = tf.zeros(tf.shape(P0)[0], dtype=dtype) 15 | 16 | def body(carry, inp): 17 | ell, m, P, *_ = carry 18 | y, F, Q = inp 19 | mp = mv(F, m) 20 | Pp = F @ mm(P, F, transpose_b=True) + Q 21 | Pp = 0.5 * (Pp + tf.transpose(Pp)) 22 | 23 | def update(m, P, ell): 24 | S = H @ mm(P, H, transpose_b=True) + R 25 | yp = mv(H, m) 26 | chol = tf.linalg.cholesky(S) 27 | predicted_dist = MultivariateNormalTriL(yp, chol) 28 | ell_t = predicted_dist.log_prob(y) 29 | Kt = tf.linalg.cholesky_solve(chol, H @ P) 30 | 31 | m = m + mv(Kt, y - yp, transpose_a=True) 32 | P = P - mm(Kt, S, transpose_a=True) @ Kt 33 | ell = ell + ell_t 34 | return ell, m, P 35 | 36 | nan_y = ~tf.math.is_nan(y) 37 | nan_res = (ell, mp, Pp) 38 | ell, m, P = tf.cond(nan_y, lambda: update(mp, Pp, ell), lambda: nan_res) 39 | P = 0.5 * (P + tf.transpose(P)) 40 | return ell, m, P, mp, Pp 41 | 42 | ells, fms, fPs, mps, Pps = tf.scan(body, 43 | (observations, Fs, Qs), 44 | 
(tf.constant(0., dtype), m0, P0, m0, P0)) 45 | returned_values = (fms, fPs) + ((ells[-1],) if return_loglikelihood else ()) + ( 46 | (mps, Pps) if return_predicted else ()) 47 | return returned_values 48 | 49 | 50 | def ks(lgssm, ms, Ps, mps, Pps): 51 | _, Fs, Qs, *_ = lgssm 52 | 53 | def body(carry, inp): 54 | F, Q, m, P, mp, Pp = inp 55 | sm, sP = carry 56 | 57 | chol = tf.linalg.cholesky(Pp) 58 | Ct = tf.linalg.cholesky_solve(chol, F @ P) 59 | sm = m + mv(Ct, sm - mp, transpose_a=True) 60 | sP = P + mm(Ct, sP - Pp, transpose_a=True) @ Ct 61 | return sm, 0.5 * (sP + tf.transpose(sP)) 62 | 63 | (sms, sPs) = tf.scan(body, 64 | (Fs[1:], Qs[1:], ms[:-1], Ps[:-1], mps[1:], Pps[1:]), 65 | (ms[-1], Ps[-1]), reverse=True) 66 | sms = tf.concat([sms, tf.expand_dims(ms[-1], 0)], 0) 67 | sPs = tf.concat([sPs, tf.expand_dims(Ps[-1], 0)], 0) 68 | return sms, sPs 69 | 70 | 71 | def kfs(model, observations): 72 | fms, fPs, mps, Pps = kf(model, observations, return_predicted=True) 73 | return ks(model, fms, fPs, mps, Pps) 74 | -------------------------------------------------------------------------------- /pssgp/kernels/__init__.py: -------------------------------------------------------------------------------- 1 | from .matern import Matern12, Matern32, Matern52 2 | from .periodic import Periodic 3 | from .rbf import RBF 4 | -------------------------------------------------------------------------------- /pssgp/kernels/base.py: -------------------------------------------------------------------------------- 1 | import abc 2 | from collections import namedtuple 3 | from functools import reduce 4 | from typing import List, Optional 5 | 6 | import gpflow 7 | import tensorflow as tf 8 | from gpflow import config 9 | from gpflow.kernels import Kernel 10 | 11 | from pssgp.kalman.base import LGSSM 12 | import pssgp.config as pssgp_config 13 | from pssgp.kernels.math_utils import balance_ss, solve_lyap_vec 14 | 15 | ContinuousDiscreteModel = namedtuple("ContinuousDiscreteModel", ["P0", "F", "L", "H", "Q"]) 16 | 17 | 18 | def get_lssm_spec(dim, T): 19 | dtype = config.default_float() 20 | P0_spec = tf.TensorSpec((dim, dim), dtype=dtype) 21 | Fs_spec = tf.TensorSpec((T, dim, dim), dtype=dtype) 22 | Qs_spec = tf.TensorSpec((T, dim, dim), dtype=dtype) 23 | H_spec = tf.TensorSpec((1, dim), dtype=dtype) 24 | R_spec = tf.TensorSpec((1, 1), dtype=dtype) 25 | 26 | return LGSSM(P0_spec, Fs_spec, Qs_spec, H_spec, R_spec) 27 | 28 | 29 | def _get_ssm(sde, ts, R, t0=0.): 30 | dtype = config.default_float() 31 | n = tf.shape(sde.F)[0] 32 | t0 = tf.reshape(tf.convert_to_tensor(t0, dtype=dtype), (1, 1)) 33 | 34 | ts = tf.concat([t0, ts], axis=0) 35 | dts = tf.reshape(ts[1:] - ts[:-1], (-1, 1, 1)) 36 | Fs = tf.linalg.expm(dts * tf.expand_dims(sde.F, 0)) 37 | zeros = tf.zeros_like(sde.F) 38 | 39 | Phi = tf.concat( 40 | [tf.concat([sde.F, sde.L @ tf.matmul(sde.Q, sde.L, transpose_b=True)], axis=1), 41 | tf.concat([zeros, -tf.transpose(sde.F)], axis=1)], 42 | axis=0) 43 | 44 | AB = tf.linalg.expm(dts * tf.expand_dims(Phi, 0)) 45 | AB = AB @ tf.concat([zeros, tf.eye(n, dtype=dtype)], axis=0) 46 | Qs = tf.matmul(AB[:, :n, :], Fs, transpose_b=True) 47 | return LGSSM(sde.P0, Fs, Qs, sde.H, R) 48 | 49 | 50 | class SDEKernelMixin(metaclass=abc.ABCMeta): 51 | def __init__(self, t0: float = 0., **_kwargs): 52 | """ 53 | 54 | Parameters: 55 | ----------- 56 | t0: float, optional 57 | rbf_order : int, default=6 58 | The order of Taylor expansion for RBF covariance function in state-space 59 | """ 60 | self.t0 = t0 61 | 62 | 
@abc.abstractmethod 63 | def get_sde(self) -> ContinuousDiscreteModel: 64 | """ 65 | Creates the linear time invariant continuous discrete system associated to the stationary kernel at hand 66 | 67 | Returns 68 | ------- 69 | sde: ContinuousDiscreteModel 70 | The associated LTI model 71 | """ 72 | 73 | def get_ssm(self, ts, R, t0=0.): 74 | """ 75 | Creates the linear Gaussian state space model associated to the stationary kernel at hand 76 | 77 | Parameters 78 | ---------- 79 | ts: tf.Tensor 80 | The times at which we have observations 81 | R: tf.Tensor 82 | The observation covariance 83 | t0: float 84 | Starting point of the model 85 | 86 | Returns 87 | ------- 88 | lgssm: ContinuousDiscreteModel 89 | The associated state space model 90 | """ 91 | sde = self.get_sde() 92 | ssm = _get_ssm(sde, ts, R, t0) 93 | return ssm 94 | 95 | def __add__(self, other): 96 | return SDESum([self, other]) # noqa: don't complain Pycharm, I know what's good for you. 97 | 98 | def __mul__(self, other): 99 | return SDEProduct([self, other]) # noqa: don't complain Pycharm, I know what's good for you. 100 | 101 | @abc.abstractmethod 102 | def get_spec(self, T): 103 | return None 104 | 105 | 106 | def _sde_combination_init(self, kernels: List[Kernel], name: Optional[str] = None, **kargs): 107 | if not all(isinstance(k, SDEKernelMixin) for k in kernels): 108 | raise TypeError("can only combine SDE Kernel instances") # pragma: no cover 109 | gpflow.kernels.Sum.__init__(self, kernels, name) 110 | SDEKernelMixin.__init__(self, **kargs) 111 | 112 | 113 | def block_diag(arrs): 114 | xdims = [tf.shape(a)[0] for a in arrs] 115 | ydims = [tf.shape(a)[1] for a in arrs] 116 | out_dtype = arrs[0].dtype 117 | out = tf.zeros((0, sum(ydims)), dtype=out_dtype) 118 | ydim = sum(ydims) 119 | r, c = 0, 0 120 | for i, (rr, cc) in enumerate(zip(xdims, ydims)): 121 | paddings = [[0, 0], 122 | [c, ydim - c - cc]] 123 | 124 | out = tf.concat([out, tf.pad(arrs[i], paddings)], 0) 125 | r = r + rr 126 | c = c + cc 127 | return out 128 | 129 | 130 | class SDESum(SDEKernelMixin, gpflow.kernels.Sum): 131 | __init__ = _sde_combination_init 132 | 133 | def get_spec(self, T): 134 | dim = 0 135 | for kernel in self.kernels: 136 | ker_spec = kernel.get_spec(T) 137 | if ker_spec is None: 138 | return None 139 | ker_P0_spec = ker_spec.P0 140 | dim += ker_P0_spec.shape[-1] 141 | return get_lssm_spec(dim, T) 142 | 143 | @staticmethod 144 | def _block_diagonal(matrices, is_positive_definite=False, square=True): 145 | if square: 146 | operators = [tf.linalg.LinearOperatorFullMatrix(matrix, is_positive_definite) for matrix in matrices] 147 | block_op = tf.linalg.LinearOperatorBlockDiag(operators) 148 | return block_op.to_dense() 149 | return block_diag(matrices) 150 | 151 | def get_sde(self) -> ContinuousDiscreteModel: 152 | """ 153 | Creates the linear time invariant continuous discrete system associated to the stationary kernel at hand 154 | 155 | Returns 156 | ------- 157 | sde: ContinuousDiscreteModel 158 | The associated LTI model 159 | """ 160 | kernels = self.kernels # type: List[SDEKernelMixin] 161 | P0s = [] 162 | Fs = [] 163 | Ls = [] 164 | Hs = [] 165 | Qs = [] 166 | 167 | for kernel in kernels: 168 | P0, F, L, H, Q = kernel.get_sde() 169 | P0s.append(P0) 170 | Fs.append(F) 171 | Ls.append(L) 172 | Hs.append(H) 173 | Qs.append(Q) 174 | 175 | Fsum = self._block_diagonal(Fs) 176 | Lsum = self._block_diagonal(Ls, square=False) 177 | Hsum = tf.concat(Hs, axis=1) 178 | Qsum = self._block_diagonal(Qs, is_positive_definite=True) 179 | 180 | Fb, Lb, 
Hb, Qb = balance_ss(Fsum, Lsum, Hsum, Qsum, pssgp_config.NUMBER_OF_BALANCING_STEPS) 181 | 182 | Pinf = solve_lyap_vec(Fb, Lb, Qb) 183 | return ContinuousDiscreteModel(Pinf, Fb, Lb, Hb, Qb) 184 | 185 | 186 | class SDEProduct(SDEKernelMixin, gpflow.kernels.Product): 187 | __init__ = _sde_combination_init 188 | 189 | def get_spec(self, T): 190 | dim = 1 191 | for kernel in self.kernels: 192 | ker_spec = kernel.get_spec(T) 193 | if ker_spec is None: 194 | return None 195 | ker_P0_spec = ker_spec.P0 196 | dim *= ker_P0_spec.shape[-1] 197 | return get_lssm_spec(dim, T) 198 | 199 | @staticmethod 200 | def _combine_F(op1, op2): 201 | I1 = tf.linalg.LinearOperatorIdentity(tf.shape(op1)[0], dtype=op1.dtype) 202 | I2 = tf.linalg.LinearOperatorIdentity(tf.shape(op2)[0], dtype=op2.dtype) 203 | op1 = tf.linalg.LinearOperatorFullMatrix(op1) 204 | op2 = tf.linalg.LinearOperatorFullMatrix(op2) 205 | kron_1 = tf.linalg.LinearOperatorKronecker([op1, I2]) 206 | kron_2 = tf.linalg.LinearOperatorKronecker([I1, op2]) 207 | return kron_1.to_dense() + kron_2.to_dense() 208 | 209 | @staticmethod 210 | def _combine_Q(sde1, sde2): 211 | gamma1 = tf.linalg.LinearOperatorFullMatrix(sde1.L @ sde1.Q @ tf.transpose(sde1.L), is_positive_definite=True, 212 | is_self_adjoint=True) 213 | gamma2 = tf.linalg.LinearOperatorFullMatrix(sde2.L @ sde2.Q @ tf.transpose(sde2.L), is_positive_definite=True, 214 | is_self_adjoint=True) 215 | Pinf1 = tf.linalg.LinearOperatorFullMatrix(sde1.P0, is_positive_definite=True, is_self_adjoint=True) 216 | Pinf2 = tf.linalg.LinearOperatorFullMatrix(sde2.P0, is_positive_definite=True, is_self_adjoint=True) 217 | 218 | kron_1 = tf.linalg.LinearOperatorKronecker([gamma1, Pinf2]) 219 | kron_2 = tf.linalg.LinearOperatorKronecker([Pinf1, gamma2]) 220 | return kron_1.to_dense() + kron_2.to_dense() 221 | 222 | def get_sde(self) -> ContinuousDiscreteModel: 223 | """ 224 | Creates the linear time invariant continuous discrete system associated to the stationary kernel at hand 225 | 226 | Returns 227 | ------- 228 | sde: ContinuousDiscreteModel 229 | The associated LTI model 230 | """ 231 | dtype = gpflow.config.default_float() 232 | kernels = self.kernels # type: List[SDEKernelMixin] 233 | 234 | sdes = [kernel.get_sde() for kernel in kernels] 235 | 236 | F = reduce(self._combine_F, [sde.F for sde in sdes]) 237 | Q: tf.Tensor = reduce(self._combine_Q, [sde for sde in sdes]) # noqa: this really returns a Tensor. 
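# Editorial note on the construction above: for a product of kernels the state dimensions multiply.
# _combine_F builds the Kronecker sum F1 (x) I + I (x) F2 of the factor drifts, and _combine_Q combines
# Gamma_i = L_i @ Q_i @ L_i^T with the stationary covariances as Gamma_1 (x) Pinf_2 + Pinf_1 (x) Gamma_2.
# As written, the reduce over _combine_Q supports exactly two factor kernels, since it returns a plain
# Tensor rather than another ContinuousDiscreteModel.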
238 | H = tf.linalg.LinearOperatorKronecker([tf.linalg.LinearOperatorFullMatrix(sde.H) 239 | for sde in sdes]).to_dense() 240 | L = tf.eye(tf.shape(Q)[0], dtype=dtype) 241 | 242 | Fb, Lb, Hb, Qb = balance_ss(F, L, H, Q, pssgp_config.NUMBER_OF_BALANCING_STEPS) 243 | Pinf = solve_lyap_vec(Fb, Lb, Qb) 244 | return ContinuousDiscreteModel(Pinf, Fb, Lb, Hb, Qb) 245 | -------------------------------------------------------------------------------- /pssgp/kernels/matern/__init__.py: -------------------------------------------------------------------------------- 1 | from .matern12 import Matern12 2 | from .matern32 import Matern32 3 | from .matern52 import Matern52 4 | -------------------------------------------------------------------------------- /pssgp/kernels/matern/common.py: -------------------------------------------------------------------------------- 1 | import math 2 | from typing import Tuple 3 | 4 | import gpflow.config as config 5 | import numpy as np 6 | import tensorflow as tf 7 | from scipy.special import binom 8 | 9 | 10 | def _get_transition_matrix(lamda: tf.Tensor, d: int, dtype: tf.DType) -> tf.Tensor: 11 | with tf.name_scope("get_transition_matrix"): 12 | F = tf.linalg.diag(tf.ones((d - 1,), dtype=dtype), k=1, num_cols=d, num_rows=d) 13 | binomial_coeffs = binom(d, np.arange(0, d, dtype=int)).astype(dtype) 14 | binomial_coeffs = tf.convert_to_tensor(binomial_coeffs, dtype=dtype) 15 | lambda_powers = lamda ** np.arange(d, 0, -1, dtype=dtype) 16 | update_indices = [[d - 1, k] for k in range(d)] 17 | F = tf.tensor_scatter_nd_sub(F, update_indices, lambda_powers * binomial_coeffs) 18 | return F 19 | 20 | 21 | def _get_brownian_cov(variance, lamda, d, dtype) -> tf.Tensor: 22 | q = (2 * lamda) ** (2 * d - 1) * variance * math.factorial(d - 1) ** 2 / math.factorial(2 * d - 2) 23 | return q * tf.eye(1, dtype=dtype) 24 | 25 | 26 | def get_matern_sde(variance, lengthscales, d: int) -> Tuple[tf.Tensor, ...]: 27 | """ 28 | Computes the LTI SDE coefficients associated to a Matern covariance function of smoothness nu = d - 1/2 29 | 30 | Parameters 31 | ---------- 32 | variance: float 33 | magnitude (variance) of the Matern kernel 34 | lengthscales: tf.Tensor 35 | tensor with kernel lengthscale 36 | d: int 37 | the exponent of the Matern kernel plus one half 38 | for instance Matern32 -> 2, this will be used as the dimension of the latent SSM 39 | 40 | Returns 41 | ------- 42 | F, L, H, Q: tuple of tf.Tensor 43 | Parameters of the LTI SDE 44 | """ 45 | dtype = config.default_float() 46 | lamda = math.sqrt(2 * d - 1) / lengthscales 47 | F = _get_transition_matrix(lamda, d, dtype) 48 | one = tf.ones((1,), dtype) 49 | L = tf.linalg.diag(one, k=-d + 1, num_rows=d, num_cols=1) # type: tf.Tensor 50 | H = tf.linalg.diag(one, num_rows=1, num_cols=d) # type: tf.Tensor 51 | Q = _get_brownian_cov(variance, lamda, d, dtype) 52 | return F, L, H, Q 53 | -------------------------------------------------------------------------------- /pssgp/kernels/matern/matern12.py: -------------------------------------------------------------------------------- 1 | import gpflow 2 | import tensorflow as tf 3 | 4 | from pssgp.kernels.base import ContinuousDiscreteModel, SDEKernelMixin, get_lssm_spec 5 | from pssgp.kernels.matern.common import get_matern_sde 6 | 7 | 8 | class Matern12(SDEKernelMixin, gpflow.kernels.Matern12): 9 | __doc__ = gpflow.kernels.Matern12.__doc__ 10 | 11 | def __init__(self, variance=1.0, lengthscales=1.0, **kwargs): 12 | gpflow.kernels.Matern12.__init__(self, variance, lengthscales, **kwargs) 13 | SDEKernelMixin.__init__(self, **kwargs) 14 | 15 | def get_spec(self, T): 16 | return 
get_lssm_spec(1, T) 17 | 18 | def get_sde(self) -> ContinuousDiscreteModel: 19 | F, L, H, Q = get_matern_sde(self.variance, self.lengthscales, 1) 20 | variance = tf.reduce_sum(self.variance) 21 | 22 | P_infty = tf.linalg.diag([variance]) 23 | return ContinuousDiscreteModel(P_infty, F, L, H, Q) 24 | -------------------------------------------------------------------------------- /pssgp/kernels/matern/matern32.py: -------------------------------------------------------------------------------- 1 | import math 2 | 3 | import gpflow 4 | import tensorflow as tf 5 | 6 | from pssgp.kernels.base import ContinuousDiscreteModel, SDEKernelMixin, get_lssm_spec 7 | from pssgp.kernels.matern.common import get_matern_sde 8 | 9 | 10 | class Matern32(SDEKernelMixin, gpflow.kernels.Matern32): 11 | __doc__ = gpflow.kernels.Matern32.__doc__ 12 | 13 | def __init__(self, variance=1.0, lengthscales=1.0, **kwargs): 14 | gpflow.kernels.Matern32.__init__(self, variance, lengthscales, **kwargs) 15 | SDEKernelMixin.__init__(self, **kwargs) 16 | 17 | def get_spec(self, T): 18 | return get_lssm_spec(2, T) 19 | 20 | def get_sde(self) -> ContinuousDiscreteModel: 21 | F, L, H, Q = get_matern_sde(self.variance, self.lengthscales, 2) 22 | 23 | lengthscales = tf.reduce_sum(self.lengthscales) 24 | lamda = math.sqrt(3) / lengthscales 25 | variance = tf.reduce_sum(self.variance) 26 | 27 | P_infty = tf.linalg.diag(tf.stack([variance, lamda ** 2 * variance], axis=0)) 28 | return ContinuousDiscreteModel(P_infty, F, L, H, Q) 29 | -------------------------------------------------------------------------------- /pssgp/kernels/matern/matern52.py: -------------------------------------------------------------------------------- 1 | import gpflow 2 | import tensorflow as tf 3 | 4 | from pssgp.kernels.base import ContinuousDiscreteModel, SDEKernelMixin, get_lssm_spec 5 | import pssgp.config as pssgp_config 6 | from pssgp.kernels.matern.common import get_matern_sde 7 | from pssgp.kernels.math_utils import balance_ss, solve_lyap_vec 8 | 9 | 10 | class Matern52(SDEKernelMixin, gpflow.kernels.Matern52): 11 | __doc__ = gpflow.kernels.Matern52.__doc__ 12 | 13 | def __init__(self, variance=1.0, lengthscales=1.0, **kwargs): 14 | self._balancing_iter = kwargs.pop('balancing_iter', pssgp_config.NUMBER_OF_BALANCING_STEPS) 15 | gpflow.kernels.Matern52.__init__(self, variance, lengthscales, **kwargs) 16 | SDEKernelMixin.__init__(self, **kwargs) 17 | 18 | def get_spec(self, T): 19 | return get_lssm_spec(3, T) 20 | 21 | def get_sde(self) -> ContinuousDiscreteModel: 22 | F, L, H, q = get_matern_sde(self.variance, self.lengthscales, 3) 23 | Fb, Lb, Hb, Qb = balance_ss(F, L, H, tf.reshape(q, (1, 1)), n_iter=self._balancing_iter) 24 | Pinf = solve_lyap_vec(Fb, Lb, Qb) 25 | return ContinuousDiscreteModel(Pinf, Fb, Lb, Hb, Qb) 26 | -------------------------------------------------------------------------------- /pssgp/kernels/math_utils.py: -------------------------------------------------------------------------------- 1 | from functools import partial 2 | from typing import Tuple 3 | 4 | import numba as nb 5 | import numpy as np 6 | import tensorflow as tf 7 | from gpflow import config 8 | 9 | 10 | @partial(nb.jit, nopython=True) 11 | def _numba_balance_ss(F: np.ndarray, 12 | iter: int) -> np.ndarray: 13 | dim = F.shape[0] 14 | dtype = F.dtype 15 | d = np.ones((dim,), dtype=dtype) 16 | for k in range(iter): 17 | for i in range(dim): 18 | tmp = np.copy(F[:, i]) 19 | tmp[i] = 0. 20 | c = np.linalg.norm(tmp, 2) 21 | tmp2 = np.copy(F[i, :]) 22 | tmp2[i] = 0. 
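# Editorial comment: c (computed above) and r (computed just below) are the 2-norms of column i and
# row i of F with the diagonal entry removed; scaling column i by f = sqrt(r / c) and row i by 1 / f
# equalises the two norms. Each pass over the rows is one sweep of an Osborne-type balancing iteration,
# see the reference cited in balance_ss below.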
23 | 24 | r = np.linalg.norm(tmp2, 2) 25 | f = np.sqrt(r / c) 26 | d[i] *= f 27 | F[:, i] *= f 28 | F[i, :] /= f 29 | return d 30 | 31 | 32 | def balance_ss(F: tf.Tensor, 33 | L: tf.Tensor, 34 | H: tf.Tensor, 35 | q: tf.Tensor, 36 | n_iter: int = 5) -> Tuple[tf.Tensor, ...]: 37 | """Balance the state-space model for better numerical stability 38 | 39 | Parameters 40 | ---------- 41 | F : tf.Tensor 42 | Drift matrix 43 | L : tf.Tensor 44 | Dispersion matrix 45 | H : tf.Tensor 46 | Measurement matrix 47 | q : tf.Tensor 48 | Spectral density 49 | n_iter : int 50 | Number of balancing iterations 51 | 52 | Returns 53 | ------- 54 | F : tf.Tensor 55 | Balanced drift matrix 56 | L : tf.Tensor 57 | Balanced dispersion matrix 58 | H : tf.Tensor 59 | Balanced measurement matrix 60 | q : tf.Tensor 61 | Correspondingly rescaled spectral density 62 | 63 | References 64 | ---------- 65 | https://arxiv.org/pdf/1401.5766.pdf 66 | """ 67 | dtype = config.default_float() 68 | d = tf.numpy_function(partial(_numba_balance_ss, iter=n_iter), (F,), dtype) 69 | d = tf.reshape(d, (tf.shape(F)[0],)) # This is to make sure that the shape of d is known at compilation time. 70 | F = F * d[None, :] / d[:, None] 71 | L = L / d[:, None] 72 | H = H * d[None, :] 73 | 74 | tmp3 = tf.reduce_max(tf.abs(L)) 75 | L = L / tmp3 76 | q = (tmp3 ** 2) * q 77 | 78 | tmp4 = tf.reduce_max(tf.abs(H)) 79 | H = H / tmp4 80 | q = (tmp4 ** 2) * q 81 | return F, L, H, q 82 | 83 | 84 | def solve_lyap_vec(F: tf.Tensor, 85 | L: tf.Tensor, 86 | Q: tf.Tensor) -> tf.Tensor: 87 | """Vectorized Lyapunov equation solver 88 | 89 | F P + P F' + L Q L' = 0 90 | 91 | Parameters 92 | ---------- 93 | F : tf.Tensor 94 | Drift matrix 95 | L : tf.Tensor 96 | Dispersion matrix 97 | Q : tf.Tensor 98 | Spectral density 99 | 100 | Returns 101 | ------- 102 | Pinf : tf.Tensor 103 | Steady state covariance 104 | 105 | """ 106 | dtype = config.default_float() 107 | 108 | dim = tf.shape(F)[0] 109 | 110 | op1 = tf.linalg.LinearOperatorFullMatrix(F) 111 | op2 = tf.linalg.LinearOperatorIdentity(dim, dtype=dtype) 112 | 113 | F1 = tf.linalg.LinearOperatorKronecker([op2, op1]).to_dense() 114 | F2 = tf.linalg.LinearOperatorKronecker([op1, op2]).to_dense() 115 | 116 | F = F1 + F2 117 | Q = tf.matmul(L, tf.matmul(Q, L, transpose_b=True)) 118 | Pinf = tf.reshape(tf.linalg.solve(F, tf.reshape(Q, (-1, 1))), (dim, dim)) 119 | Pinf = -0.5 * (Pinf + tf.transpose(Pinf)) 120 | return Pinf 121 | -------------------------------------------------------------------------------- /pssgp/kernels/periodic.py: -------------------------------------------------------------------------------- 1 | from functools import partial 2 | from typing import Tuple, Union, List 3 | 4 | import math 5 | import gpflow 6 | import gpflow.config as config 7 | import numba as nb 8 | import numpy as np 9 | import tensorflow as tf 10 | 11 | from scipy.special import factorial, comb 12 | from gpflow.kernels import SquaredExponential 13 | from pssgp.kernels.base import ContinuousDiscreteModel, SDEKernelMixin, get_lssm_spec 14 | 15 | tf_kron = tf.linalg.LinearOperatorKronecker 16 | 17 | 18 | def _get_offline_coeffs(N) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: 19 | """ 20 | Get the coefficients that are independent of the kernel parameters (ell, sigma, and period), i.e., fixed. 21 | 22 | Parameters 23 | ---------- 24 | N : Approximation order of the periodic state-space model. 
25 | 26 | Returns 27 | ------- 28 | b: np.ndarray Coefficients b(K, J) of the expansion 29 | K: np.ndarray Meshgrid of the expansion orders 0..N 30 | div_facto_K: np.ndarray Elementwise reciprocal factorials 1 / K! 31 | """ 32 | r = np.arange(0, N + 1) 33 | J, K = np.meshgrid(r, r) 34 | div_facto_K = 1 / factorial(K) 35 | # Get b(K, J) 36 | b = 2 * comb(K, np.floor((K - J) / 2) * (J <= K)) / \ 37 | (1 + (J == 0)) * (J <= K) * (np.mod(K - J, 2) == 0) 38 | return b, K, div_facto_K 39 | 40 | 41 | class Periodic(SDEKernelMixin, gpflow.kernels.Periodic): 42 | __doc__ = gpflow.kernels.Periodic.__doc__ 43 | 44 | def __init__(self, base_kernel: SquaredExponential, period: Union[float, List[float]] = 1.0, **kwargs): 45 | assert isinstance(base_kernel, SquaredExponential), "Only SquaredExponential is supported at the moment" 46 | self._order = kwargs.pop('order', 6) 47 | gpflow.kernels.Periodic.__init__(self, base_kernel, period) 48 | SDEKernelMixin.__init__(self, **kwargs) 49 | 50 | def get_spec(self, T): 51 | return get_lssm_spec(2 * (self._order + 1), T) 52 | 53 | def get_sde(self) -> ContinuousDiscreteModel: 54 | dtype = config.default_float() 55 | N = self._order 56 | w0 = 2 * math.pi / self.period 57 | lengthscales = self.base_kernel.lengthscales * 2. 58 | 59 | # Prepare offline fixed coefficients 60 | b, K, div_facto_K = _get_offline_coeffs(N) 61 | b = tf.convert_to_tensor(b, dtype=dtype) 62 | K = tf.convert_to_tensor(K, dtype=dtype) 63 | div_facto_K = tf.convert_to_tensor(div_facto_K, dtype=dtype) 64 | 65 | op_F = tf.linalg.LinearOperatorFullMatrix([[0, -w0], [w0, 0]]) 66 | op_diag = tf.linalg.LinearOperatorDiag(np.arange(0, N + 1, dtype=dtype)) 67 | F = tf_kron([op_diag, op_F]).to_dense() 68 | 69 | L = tf.eye(2 * (N + 1), dtype=dtype) 70 | 71 | Q = tf.zeros((2 * (N + 1), 2 * (N + 1)), dtype=dtype) 72 | 73 | q2 = b * lengthscales ** (-2 * K) * div_facto_K * tf.math.exp(-lengthscales ** (-2)) * \ 74 | 2 ** (-K) * self.base_kernel.variance 75 | q2 = tf.linalg.LinearOperatorDiag(tf.reduce_sum(q2, axis=0)) 76 | 77 | Pinf = tf_kron([q2, tf.linalg.LinearOperatorIdentity(2, dtype=dtype)]).to_dense() 78 | 79 | H = tf_kron([tf.linalg.LinearOperatorFullMatrix(tf.ones((1, N + 1), dtype=dtype)), 80 | tf.linalg.LinearOperatorFullMatrix(tf.constant([[1, 0]], dtype=dtype))]).to_dense() 81 | return ContinuousDiscreteModel(Pinf, F, L, H, Q) 82 | -------------------------------------------------------------------------------- /pssgp/kernels/rbf.py: -------------------------------------------------------------------------------- 1 | import math 2 | from typing import Tuple 3 | 4 | import gpflow 5 | import gpflow.config as config 6 | import numpy as np 7 | import tensorflow as tf 8 | 9 | from pssgp.kernels.base import ContinuousDiscreteModel, SDEKernelMixin, get_lssm_spec 10 | import pssgp.config as pssgp_config 11 | from pssgp.kernels.math_utils import balance_ss, solve_lyap_vec 12 | 13 | 14 | def _get_unscaled_rbf_sde(order: int = 6) -> Tuple[np.ndarray, ...]: 15 | """Get the unscaled RBF SDE coefficients. 16 | These are precomputed in NumPy before being converted to TensorFlow tensors. 17 | 18 | Parameters 19 | ---------- 20 | order : int, default=6 21 | Order of Taylor expansion 22 | 23 | Returns 24 | ------- 25 | F, L, H, Q : np.ndarray 26 | SDE coefficients. 
27 | """ 28 | dtype = config.default_float() 29 | B = math.sqrt(2 * math.pi) 30 | A = np.zeros((2 * order + 1,), dtype=dtype) 31 | 32 | i = 0 33 | for k in range(order, -1, -1): 34 | A[i] = 0.5 ** k / math.factorial(k) 35 | i = i + 2 36 | 37 | q = B / np.polyval(A, 0) 38 | 39 | LA = np.real(A / (1j ** np.arange(A.size - 1, -1, -1, dtype=dtype))) 40 | 41 | AR = np.roots(LA) 42 | 43 | GB = 1 44 | GA = np.poly(AR[np.real(AR) < 0]) 45 | 46 | GA = GA / GA[-1] 47 | 48 | GB = GB / GA[0] 49 | GA = GA / GA[0] 50 | 51 | F = np.zeros((GA.size - 1, GA.size - 1), dtype=dtype) 52 | F[-1, :] = -GA[:0:-1] 53 | F[:-1, 1:] = np.eye(GA.size - 2, dtype=dtype) 54 | 55 | L = np.zeros((GA.size - 1, 1), dtype=dtype) 56 | L[-1, 0] = 1 57 | 58 | H = np.zeros((1, GA.size - 1), dtype=dtype) 59 | H[0, 0] = GB 60 | 61 | return F, L, H, q 62 | 63 | 64 | class RBF(SDEKernelMixin, gpflow.kernels.RBF): 65 | __doc__ = gpflow.kernels.RBF.__doc__ 66 | 67 | def __init__(self, variance=1.0, lengthscales=1.0, **kwargs): 68 | self._order = kwargs.pop('order', 3) 69 | self._balancing_iter = kwargs.pop('balancing_iter', pssgp_config.NUMBER_OF_BALANCING_STEPS) 70 | gpflow.kernels.RBF.__init__(self, variance, lengthscales) 71 | SDEKernelMixin.__init__(self, **kwargs) 72 | 73 | __init__.__doc__ = r"""TODO: talk about order params \n\n""" + gpflow.kernels.RBF.__init__.__doc__ 74 | 75 | def get_spec(self, T): 76 | return get_lssm_spec(self._order, T) 77 | 78 | def get_sde(self) -> ContinuousDiscreteModel: 79 | F_, L_, H_, q_ = _get_unscaled_rbf_sde(self._order) 80 | 81 | dtype = config.default_float() 82 | F = tf.convert_to_tensor(F_, dtype=dtype) 83 | L = tf.convert_to_tensor(L_, dtype=dtype) 84 | H = tf.convert_to_tensor(H_, dtype=dtype) 85 | q = tf.convert_to_tensor(q_, dtype=dtype) 86 | 87 | dim = F.shape[0] 88 | 89 | ell_vec = self.lengthscales ** tf.range(dim, 0, -1, dtype=dtype) 90 | update_indices = [[dim - 1, k] for k in range(dim)] 91 | F = tf.tensor_scatter_nd_update(F, update_indices, F[-1, :] / ell_vec) 92 | 93 | H = H / (self.lengthscales ** dim) 94 | Q = self.variance * self.lengthscales * tf.reshape(q, (1, 1)) 95 | 96 | Fb, Lb, Hb, Qb = balance_ss(F, L, H, Q, n_iter=self._balancing_iter) 97 | 98 | Pinf = solve_lyap_vec(Fb, Lb, Qb) 99 | 100 | Q = tf.reshape(Qb, (1, 1)) 101 | return ContinuousDiscreteModel(Pinf, Fb, Lb, Hb, Q) 102 | -------------------------------------------------------------------------------- /pssgp/misc_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | import matplotlib.pyplot as plt 4 | 5 | from typing import Union 6 | 7 | ACC_TYPES = Union[tf.Tensor, np.ndarray] 8 | 9 | 10 | def rmse(x1: ACC_TYPES, x2: ACC_TYPES) -> tf.Tensor: 11 | """Root mean square error 12 | """ 13 | x1 = tf.reshape(x1, (-1, )) 14 | x2 = tf.reshape(x2, (-1, )) 15 | return tf.math.sqrt(tf.reduce_mean(tf.math.square(x1 - x2))) 16 | 17 | 18 | def error_shade(t: ACC_TYPES, m: ACC_TYPES, cov: ACC_TYPES, **kwargs): 19 | """Plot .95 confidence interval 20 | """ 21 | t = tf.reshape(t, (-1, )) 22 | m = tf.reshape(m, (-1,)) 23 | cov = tf.reshape(cov, (-1,)) 24 | plt.fill_between(t, 25 | m - 1.96 * np.sqrt(cov), 26 | m + 1.96 * np.sqrt(cov), 27 | **kwargs) -------------------------------------------------------------------------------- /pssgp/model.py: -------------------------------------------------------------------------------- 1 | from functools import partial 2 | 3 | import tensorflow as tf 4 | from gpflow import Parameter, config 5 | from 
gpflow.models import GPModel 6 | from gpflow.models.model import MeanAndVariance 7 | from gpflow.models.training_mixins import InputData, RegressionData, InternalDataTrainingLossMixin 8 | from gpflow.models.util import data_input_to_tensor 9 | from gpflow.utilities import positive 10 | 11 | from pssgp.kalman.parallel import pkf, pkfs 12 | from pssgp.kalman.sequential import kf, kfs 13 | 14 | 15 | def _merge_sorted(a, b, *args): 16 | """ 17 | Merge sorted arrays efficiently, inspired by https://stackoverflow.com/a/54131815 18 | 19 | Parameters 20 | ---------- 21 | a: tf.Tensor 22 | Sorted tensor for ordering 23 | b: tf.Tensor 24 | Sorted tensor for ordering 25 | args: list of tuple of tf.Tensor 26 | Pairs of tensors, ordered according to a and b respectively, that need to be merged whilst keeping the order. 27 | 28 | 29 | Returns 30 | ------- 31 | cs: list of tf.Tensor 32 | The merge of a and b, followed by the merge of each pair in args, all in the resulting sorted order. 33 | 34 | """ 35 | with tf.name_scope("merge_sorted"): 36 | assert len(a.shape) == len(b.shape) == 1 37 | a_shape, b_shape = tf.shape(a)[0], tf.shape(b)[0] 38 | c_len = tf.shape(a)[0] + tf.shape(b)[0] 39 | if a_shape < b_shape: 40 | a, b = b, a 41 | a_shape, b_shape = tf.shape(a)[0], tf.shape(b)[0] 42 | args = tuple((j, i) for i, j in args) 43 | b_indices = tf.range(b_shape, dtype=tf.int32) + tf.searchsorted(a, b) 44 | a_indices = tf.ones((c_len,), dtype=tf.bool) 45 | a_indices = tf.tensor_scatter_nd_update(a_indices, b_indices[:, None], tf.zeros_like(b_indices, tf.bool)) 46 | c_range = tf.range(c_len, dtype=tf.int32) 47 | a_mask = tf.boolean_mask(c_range, a_indices)[:, None] 48 | 49 | def _inner_merge(u, v): 50 | c = tf.concat([u, v], 0) 51 | c = tf.tensor_scatter_nd_update(c, b_indices[:, None], v) 52 | c = tf.tensor_scatter_nd_update(c, a_mask, u) 53 | return c 54 | 55 | return (_inner_merge(a, b),) + tuple(_inner_merge(i, j) for i, j in args) 56 | 57 | 58 | class StateSpaceGP(GPModel, InternalDataTrainingLossMixin): 59 | def __init__(self, 60 | data: RegressionData, 61 | kernel, 62 | noise_variance: float = 1.0, 63 | parallel=False, 64 | max_parallel=10000 65 | ): 66 | 67 | self.noise_variance = Parameter(noise_variance, transform=positive()) 68 | ts, ys = data_input_to_tensor(data) 69 | super().__init__(kernel, None, None, num_latent_gps=ys.shape[-1]) 70 | self.data = ts, ys 71 | filter_spec = kernel.get_spec(ts.shape[0]) 72 | filter_ys_spec = tf.TensorSpec((ts.shape[0], 1), config.default_float()) 73 | smoother_spec = kernel.get_spec(None) 74 | smoother_ys_spec = tf.TensorSpec((None, 1), config.default_float()) 75 | 76 | if not parallel: 77 | self._kf = tf.function(partial(kf, return_loglikelihood=True, return_predicted=False), 78 | input_signature=[filter_spec, filter_ys_spec]) 79 | self._kfs = tf.function(kfs, input_signature=[smoother_spec, smoother_ys_spec]) 80 | else: 81 | self._kf = tf.function(partial(pkf, return_loglikelihood=True, max_parallel=ts.shape[0]), 82 | input_signature=[filter_spec, filter_ys_spec]) 83 | self._kfs = tf.function(partial(pkfs, max_parallel=max_parallel), 84 | input_signature=[smoother_spec, smoother_ys_spec]) 85 | 86 | def _make_model(self, ts): 87 | with tf.name_scope("make_model"): 88 | R = tf.reshape(self.noise_variance, (1, 1)) 89 | ssm = self.kernel.get_ssm(ts, R) 90 | return ssm 91 | 92 | def predict_f( 93 | self, Xnew: InputData, full_cov: bool = False, full_output_cov: bool = False 94 | ) -> MeanAndVariance: 95 | ts, ys = self.data 96 | Xnew = tf.convert_to_tensor(Xnew, dtype=config.default_float()) 97 | squeezed_ts = tf.squeeze(ts) 98 | squeezed_Xnew = 
tf.squeeze(Xnew) 99 | float_ys = float("nan") * tf.ones((Xnew.shape[0], ys.shape[1]), dtype=ys.dtype) 100 | all_ts, all_ys, all_flags = _merge_sorted(squeezed_ts, squeezed_Xnew, 101 | (ys, float_ys), 102 | (tf.zeros_like(squeezed_ts, dtype=tf.bool), 103 | tf.ones_like(squeezed_Xnew, dtype=tf.bool))) 104 | # this merging is equivalent to using argsort, but it exploits the fact that both inputs are already sorted and is therefore cheaper. 105 | ssm = self._make_model(all_ts[:, None]) 106 | sms, sPs = self._kfs(ssm, all_ys) 107 | res = tf.boolean_mask(sms, all_flags, 0), tf.boolean_mask(sPs, all_flags, 0) 108 | return tf.linalg.matvec(ssm.H, res[0]), tf.linalg.diag_part(tf.linalg.matmul(ssm.H, 109 | tf.linalg.matmul(res[1], 110 | ssm.H, 111 | transpose_b=True))) 112 | 113 | def maximum_log_likelihood_objective(self) -> tf.Tensor: 114 | ts, Y = self.data 115 | ssm = self._make_model(ts) 116 | fms, fPs, ll = self._kf(ssm, Y) 117 | return ll 118 | -------------------------------------------------------------------------------- /pssgp/toymodels/README.md: -------------------------------------------------------------------------------- 1 | # Containing various toy models 2 | 3 | `data_funcs.py` has some test models based on deterministic functions. 4 | 5 | It also provides `obs_noise` to generate noisy observations of them. -------------------------------------------------------------------------------- /pssgp/toymodels/__init__.py: -------------------------------------------------------------------------------- 1 | from .data_funcs import * 2 | -------------------------------------------------------------------------------- /pssgp/toymodels/data_funcs.py: -------------------------------------------------------------------------------- 1 | """ 2 | Some toy models based on deterministic test functions 3 | """ 4 | import math 5 | from typing import Optional 6 | 7 | import numpy as np 8 | 9 | 10 | def sinu(t: np.ndarray) -> np.ndarray: 11 | """ 12 | A sinusoidal test function. 13 | 14 | y = sin(pi t) + sin(2 pi t) + cos(3 pi t) 15 | 16 | Args: 17 | t: (n, ) Input (time) 18 | Return: 19 | y: (n, ) y(t) 20 | """ 21 | return np.sin(np.pi * t) \ 22 | + np.sin(2 * np.pi * t) \ 23 | + np.cos(3 * np.pi * t) 24 | 25 | 26 | def comp_sinu(t: np.ndarray) -> np.ndarray: 27 | """ 28 | A composite sinusoidal test function. It is very 29 | challenging for a stationary GP to model. 30 | 31 | y = sin^2(7 pi cos(2 pi t^2)) / (cos(5 pi t) + 2) 32 | 33 | Reference: 34 | Deep State-space Gaussian processes. 2020 35 | 36 | Args: 37 | t: (n, ) Input (time) 38 | Return: 39 | y: (n, ) y(t) 40 | """ 41 | return np.sin(7 * np.pi * np.cos(2 * np.pi * (t ** 2))) ** 2 / \ 42 | (np.cos(5 * np.pi * t) + 2) 43 | 44 | 45 | def rect(t: np.ndarray) -> np.ndarray: 46 | """ 47 | A magnitude-varying rectangle signal. Very challenging 48 | for a conventional GP to model. 49 | 50 | Reference: 51 | Deep State-space Gaussian processes. 2020 52 | 53 | Args: 54 | t: (n, ) Input (time) 55 | Return: 56 | y: (n, ) y(t) 57 | """ 58 | # Scale to [0, 1] 59 | tau = (t - np.min(t)) / (np.max(t) - np.min(t)) 60 | 61 | # Jumping points 62 | p = np.linspace(1 / 6, 5 / 6, 5) 63 | 64 | y = np.zeros(t.shape) 65 | y[(tau >= 0) & (tau < p[0])] = 0 66 | y[(tau >= p[0]) & (tau < p[1])] = 1 67 | y[(tau >= p[1]) & (tau < p[2])] = 0 68 | y[(tau >= p[2]) & (tau < p[3])] = 0.6 69 | y[(tau >= p[3]) & (tau < p[4])] = 0 70 | y[tau >= p[4]] = 0.4 71 | 72 | return y 73 | 74 | 75 | def obs_noise(x: np.ndarray, 76 | r: float, 77 | seed: Optional[int] = None) -> np.ndarray: 78 | """ 79 | Observe data x under additive Gaussian noise. 
80 | y = x + e,  e ~ N(0, r) 81 | 82 | Parameters 83 | ---------- 84 | x: np.ndarray (n, ) 85 | The input 86 | r: float 87 | The noise variance 88 | seed: int, optional; random seed for the noise generator 89 | 90 | Returns 91 | ------- 92 | out: np.ndarray 93 | Noisy observations 94 | """ 95 | rng = np.random.RandomState(seed) 96 | dtype = x.dtype 97 | return x + rng.normal(0., math.sqrt(r), (x.shape[0],)).astype(dtype) 98 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | absl-py==0.11.0 2 | argon2-cffi==20.1.0 3 | astunparse==1.6.3 4 | async-generator==1.10 5 | attrs==20.3.0 6 | backcall==0.2.0 7 | bleach==3.3.0 8 | cachetools==4.2.0 9 | certifi==2020.12.5 10 | cffi==1.14.4 11 | chardet==4.0.0 12 | clang==5.0 13 | cloudpickle==1.6.0 14 | cycler==0.10.0 15 | dataclasses==0.6 16 | decorator==4.4.2 17 | defusedxml==0.6.0 18 | Deprecated==1.2.12 19 | dm-tree==0.1.5 20 | entrypoints==0.3 21 | flatbuffers==1.12 22 | gast==0.4.0 23 | google-auth==1.24.0 24 | google-auth-oauthlib==0.4.2 25 | google-pasta==0.2.0 26 | gpflow==2.2.1 27 | grpcio==1.39.0 28 | h5py==3.1.0 29 | idna==2.10 30 | iniconfig==1.1.1 31 | ipykernel==5.4.3 32 | ipython==7.19.0 33 | ipython-genutils==0.2.0 34 | ipywidgets==7.6.3 35 | jedi==0.18.0 36 | Jinja2==2.11.3 37 | jsonschema==3.2.0 38 | jupyter==1.0.0 39 | jupyter-client==6.1.11 40 | jupyter-console==6.2.0 41 | jupyter-core==4.7.0 42 | jupyterlab-pygments==0.1.2 43 | jupyterlab-widgets==1.0.0 44 | keras==2.6.0 45 | Keras-Preprocessing==1.1.2 46 | kiwisolver==1.3.1 47 | llvmlite==0.36.0 48 | Markdown==3.3.3 49 | MarkupSafe==1.1.1 50 | matplotlib==3.3.3 51 | mistune==0.8.4 52 | multipledispatch==0.6.0 53 | nbclient==0.5.1 54 | nbconvert==6.0.7 55 | nbformat==5.1.2 56 | nest-asyncio==1.5.1 57 | notebook 58 | numba==0.53.1 59 | numpy==1.19.5 60 | oauthlib==3.1.0 61 | opt-einsum==3.3.0 62 | packaging==20.8 63 | pandocfilters==1.4.3 64 | parso==0.8.1 65 | pexpect==4.8.0 66 | pickleshare==0.7.5 67 | Pillow 68 | pluggy==0.13.1 69 | prometheus-client==0.9.0 70 | prompt-toolkit==3.0.14 71 | protobuf==3.14.0 72 | ptyprocess==0.7.0 73 | py==1.10.0 74 | pyasn1==0.4.8 75 | pyasn1-modules==0.2.8 76 | pycparser==2.20 77 | Pygments==2.7.4 78 | pykalman==0.9.5 79 | pyparsing==2.4.7 80 | pyrsistent==0.17.3 81 | pytest==6.2.4 82 | python-dateutil==2.8.1 83 | pyzmq==22.0.0 84 | qtconsole==5.0.2 85 | QtPy==1.9.0 86 | requests==2.25.1 87 | requests-oauthlib==1.3.0 88 | rsa==4.7 89 | scipy==1.6.0 90 | Send2Trash==1.5.0 91 | six==1.15.0 92 | tabulate==0.8.7 93 | tensorboard==2.6.0 94 | tensorboard-data-server==0.6.1 95 | tensorboard-plugin-wit==1.7.0 96 | tensorflow==2.6.0 97 | tensorflow-estimator==2.6.0 98 | tensorflow-probability==0.13.0 99 | termcolor==1.1.0 100 | terminado==0.9.2 101 | testpath==0.4.4 102 | toml==0.10.2 103 | tornado==6.1 104 | tqdm==4.62.0 105 | traitlets==5.0.5 106 | typing-extensions==3.7.4.3 107 | urllib3==1.26.5 108 | wcwidth==0.2.5 109 | webencodings==0.5.1 110 | Werkzeug==1.0.1 111 | widgetsnbextension==3.5.1 112 | wrapt==1.12.1 113 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | with open('requirements.txt') as f: 4 | requirements = f.read().splitlines() 5 | 6 | setup( 7 | install_requires=requirements, 8 | author="Adrien Corenflos, Zheng Zhao", 9 | version="1.0.0" 10 | ) 11 | 
-------------------------------------------------------------------------------- /tests/test_gp_vs_kfs.py: -------------------------------------------------------------------------------- 1 | """ 2 | Numerically test that GP regression gives the same results as the Kalman filter and smoother 3 | """ 4 | import unittest 5 | 6 | import gpflow as gpf 7 | import numpy as np 8 | import numpy.testing as npt 9 | import tensorflow as tf 10 | from gpflow.kernels import SquaredExponential 11 | 12 | from pssgp.kernels import RBF, Periodic 13 | from pssgp.kernels.matern import Matern12, Matern32, Matern52 14 | from pssgp.model import StateSpaceGP 15 | from pssgp.toymodels import sinu, obs_noise 16 | 17 | 18 | def setUpModule(): # noqa: unittest syntax. 19 | # goal is to test the logic, not the runtime. 20 | np.random.seed(31415926) 21 | tf.config.set_visible_devices([], 'GPU') 22 | 23 | 24 | class GPEquivalenceTest(unittest.TestCase): 25 | 26 | def setUp(self): 27 | self.T = 200 28 | self.K = 50 29 | self.t = np.sort(np.random.rand(self.T)) 30 | self.ft = sinu(self.t) 31 | self.y = obs_noise(self.ft, 0.1, None) 32 | periodic_base = SquaredExponential(variance=1., lengthscales=0.5) 33 | self.covs = ( 34 | (Matern12(variance=1., lengthscales=0.5), 1e-6, 1e-2), 35 | (Matern32(variance=1., lengthscales=0.5), 1e-6, 1e-2), 36 | (Matern52(variance=1., lengthscales=0.5), 1e-6, 1e-2), 37 | (RBF(variance=1., lengthscales=0.5, order=15, balancing_iter=10), 1e-2, 1e-2), 38 | (Periodic(periodic_base, period=0.5, order=10), 1e-3, 1e-3) 39 | ) 40 | self.covs += ((self.covs[1][0] + self.covs[2][0], 1e-6, 1e-2),) # whatever that means, just testing the sum 41 | self.covs += ((self.covs[1][0] * self.covs[2][0], 1e-6, 1e-1),) # whatever that means, just testing the prod 42 | 43 | self.data = (tf.constant(self.t[:, None]), tf.constant(self.y[:, None])) 44 | 45 | def test_loglikelihood(self): 46 | for cov, val_tol, grad_tol in self.covs: 47 | check_grad_vars = cov.trainable_variables 48 | 49 | gp_model = gpf.models.GPR(data=self.data, 50 | kernel=cov, 51 | noise_variance=0.1, 52 | mean_function=None) 53 | with tf.GradientTape() as tape: 54 | tape.watch(check_grad_vars) 55 | gp_model_ll = gp_model.maximum_log_likelihood_objective() 56 | gp_model_grad = tape.gradient(gp_model_ll, check_grad_vars) 57 | 58 | for parallel in [False, True]: 59 | ss_model = StateSpaceGP(data=self.data, 60 | kernel=cov, 61 | noise_variance=0.1, 62 | parallel=parallel, 63 | max_parallel=self.T + self.K) 64 | with tf.GradientTape() as tape: 65 | tape.watch(check_grad_vars) 66 | ss_model_ll = ss_model.maximum_log_likelihood_objective() 67 | ss_model_grad = tape.gradient(ss_model_ll, check_grad_vars) 68 | 69 | 70 | npt.assert_allclose(gp_model_ll, 71 | ss_model_ll, 72 | atol=val_tol, 73 | rtol=val_tol) 74 | for gp_grad, ss_grad in zip(gp_model_grad, ss_model_grad): 75 | npt.assert_allclose(gp_grad, 76 | ss_grad, 77 | atol=grad_tol, 78 | rtol=grad_tol) 79 | 80 | def test_posterior(self): 81 | query = tf.constant(np.sort(np.random.rand(self.K, 1), 0)) 82 | for cov, val_tol, _ in self.covs: 83 | gp_model = gpf.models.GPR(data=self.data, 84 | kernel=cov, 85 | noise_variance=0.1, 86 | mean_function=None) 87 | mean_gp, var_gp = gp_model.predict_f(query) 88 | for parallel in [False, True]: 89 | 90 | ss_model = StateSpaceGP(data=self.data, 91 | kernel=cov, 92 | noise_variance=0.1, 93 | parallel=parallel, 94 | max_parallel=self.T + self.K) 95 | mean_ss, var_ss = ss_model.predict_f(query) 96 | npt.assert_allclose(mean_gp, 
mean_ss, 97 | atol=val_tol, rtol=val_tol) 98 | npt.assert_allclose(var_gp, var_ss, 99 | atol=val_tol, rtol=val_tol) 100 | -------------------------------------------------------------------------------- /tests/test_periodic.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import numpy as np 4 | import numpy.testing as npt 5 | import tensorflow as tf 6 | from gpflow.kernels import SquaredExponential 7 | 8 | from pssgp.kernels import Periodic 9 | from pssgp.kernels.periodic import _get_offline_coeffs 10 | from pssgp.toymodels import sinu, obs_noise 11 | 12 | 13 | class PeriodicTest(unittest.TestCase): 14 | 15 | def setUp(self): 16 | seed = 31415926 17 | self.T = 2000 18 | self.K = 800 19 | self.t = np.sort(np.random.rand(self.T)) 20 | self.ft = sinu(self.t) 21 | self.y = obs_noise(self.ft, 0.01, seed) 22 | 23 | self.data = (tf.constant(self.t[:, None]), tf.constant(self.y[:, None])) 24 | 25 | periodic_order = 2 26 | periodic_base_kernel = SquaredExponential(variance=1., lengthscales=0.1) 27 | self.cov = Periodic(periodic_base_kernel, period=1., order=periodic_order) 28 | 29 | def test_offline_coeffs(self): 30 | b, K, div_facto_K = _get_offline_coeffs(2) 31 | 32 | npt.assert_almost_equal(b, np.array([[1, 0, 0], 33 | [0, 2, 0], 34 | [2, 0, 2]]), decimal=8) 35 | npt.assert_almost_equal(K, np.array([[0, 0, 0], 36 | [1, 1, 1], 37 | [2, 2, 2]]), decimal=8) 38 | npt.assert_almost_equal(div_facto_K, np.array([[1, 1, 1], 39 | [1, 1, 1], 40 | [0.5, 0.5, 0.5]]), decimal=8) 41 | 42 | def test_sde_coeff(self): 43 | F_expected = np.zeros((6, 6)) 44 | F_expected[2, 3] = -6.283185307179586 45 | F_expected[4, 5] = -12.5663706143592 46 | F_expected = F_expected - F_expected.T 47 | 48 | H_expected = np.array([[1, 0, 1, 0, 1, 0]]) 49 | L_expected = np.eye(6) 50 | Q_expected = np.zeros((6, 6)) 51 | 52 | Pinf_expected = np.diag([1.20739740482544e-19, 1.20739740482544e-19, 9.64374923981979e-21, 53 | 9.64374923981979e-21, 1.20546865497747e-19, 1.20546865497747e-19]) 54 | 55 | Pinf, F, L, H, Q = self.cov.get_sde() 56 | 57 | npt.assert_almost_equal(F, F_expected) 58 | npt.assert_almost_equal(L, L_expected) 59 | npt.assert_almost_equal(H, H_expected) 60 | npt.assert_almost_equal(Q, Q_expected) 61 | npt.assert_almost_equal(Pinf, Pinf_expected) 62 | -------------------------------------------------------------------------------- /tests/test_rbf.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import numpy as np 4 | import numpy.testing as npt 5 | import tensorflow as tf 6 | 7 | from pssgp.kernels import RBF 8 | from pssgp.toymodels import sinu, obs_noise 9 | 10 | 11 | class RBFTest(unittest.TestCase): 12 | 13 | def setUp(self): 14 | seed = 31415926 15 | self.T = 2000 16 | self.K = 800 17 | self.t = np.sort(np.random.rand(self.T)) 18 | self.ft = sinu(self.t) 19 | self.y = obs_noise(self.ft, 0.01, seed) 20 | 21 | self.data = (tf.constant(self.t[:, None]), tf.constant(self.y[:, None])) 22 | 23 | self.rbf_order = 3 24 | self.cov = RBF(variance=1., lengthscales=0.1, order=self.rbf_order, balancing_iter=5) 25 | 26 | def test_sde_coefficients(self): 27 | F_expected = np.array([[0, 14.520676967550859, 0], 28 | [0, 0, 32.857489440296360], 29 | [-14.5210953665873, -29.4746060478111, -50.3678777987092]]) 30 | 31 | L_expected = np.array([0., 0., 1.]).reshape(3, 1) 32 | 33 | H_expected = np.array([1., 0., 0.]).reshape(1, 3) 34 | 35 | Q_expected = 52.8553179255264 36 | 37 | Pinf_expected = 
np.array([[1.04502531824891, -1.41636387123970e-17, -0.301281550265743], 38 | [-1.41636387123970e-17, 0.681741999944955, -1.70331397804495e-17], 39 | [-0.301281550265743, -1.70331397804495e-17, 0.611552410634913]]) 40 | 41 | Pinf, F, L, H, Q = self.cov.get_sde() 42 | 43 | npt.assert_array_almost_equal(F, F_expected, decimal=8) 44 | npt.assert_array_almost_equal(L, L_expected, decimal=8) 45 | npt.assert_array_almost_equal(H, H_expected, decimal=8) 46 | npt.assert_array_almost_equal(Q, Q_expected, decimal=8) 47 | npt.assert_array_almost_equal(Pinf, Pinf_expected, decimal=8) 48 | 49 | def test_coefficients_convergence(self): 50 | Pinf, F, L, H, Q = self.cov.get_sde() 51 | Pinf2, F2, L2, H2, Q2 = RBF(variance=1., lengthscales=0.1, order=self.rbf_order, balancing_iter=15).get_sde() 52 | 53 | npt.assert_array_almost_equal(Pinf, Pinf2, decimal=3) 54 | npt.assert_array_almost_equal(F, F2, decimal=3) 55 | npt.assert_array_almost_equal(L, L2, decimal=3) 56 | npt.assert_array_almost_equal(H, H2, decimal=3) 57 | npt.assert_array_almost_equal(Q, Q2, decimal=3) 58 | --------------------------------------------------------------------------------