├── .coveragerc ├── .flake8 ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── pull_request_template.md └── workflows │ ├── deploy.yaml │ ├── quality-check.yaml │ └── upload-pypi.yaml ├── .gitignore ├── .isort.cfg ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── CONTRIBUTORS.md ├── LICENSE ├── Makefile ├── README.md ├── benchmarking ├── benchmarking_requirements.txt ├── create_commands.py ├── main.py ├── runs │ ├── Jan28_102218_Power_0_dgp-1.json │ ├── Jan28_102515_Power_1_dgp-1.json │ ├── Jan28_102824_Power_2_dgp-1.json │ ├── Jan28_103252_Power_3_dgp-1.json │ ├── Jan28_103730_Power_4_dgp-1.json │ ├── Jan28_104542_Energy_0_dgp-1.json │ ├── Jan28_104651_Energy_1_dgp-1.json │ ├── Jan28_104748_Energy_2_dgp-1.json │ ├── Jan28_104831_Energy_3_dgp-1.json │ ├── Jan28_104924_Energy_4_dgp-1.json │ ├── Jan28_105034_Concrete_0_dgp-1.json │ ├── Jan28_105120_Concrete_1_dgp-1.json │ ├── Jan28_105202_Concrete_2_dgp-1.json │ ├── Jan28_105257_Concrete_3_dgp-1.json │ ├── Jan28_105340_Concrete_4_dgp-1.json │ ├── Jan28_105432_Kin8mn_0_dgp-1.json │ ├── Jan28_105655_Kin8mn_1_dgp-1.json │ ├── Jan28_110213_Kin8mn_2_dgp-1.json │ ├── Jan28_110525_Kin8mn_3_dgp-1.json │ ├── Jan28_110959_Kin8mn_4_dgp-1.json │ ├── Jan28_111344_Yacht_0_dgp-1.json │ ├── Jan28_111538_Yacht_1_dgp-1.json │ ├── Jan28_111627_Yacht_2_dgp-1.json │ ├── Jan28_111738_Yacht_3_dgp-1.json │ ├── Jan28_111831_Yacht_4_dgp-1.json │ ├── Jan28_170702_Power_0_dgp-2.json │ ├── Jan28_170702_Power_1_dgp-2.json │ ├── Jan28_172305_Power_2_dgp-2.json │ ├── Jan28_172305_Power_3_dgp-2.json │ ├── Jan28_173910_Energy_0_dgp-2.json │ ├── Jan28_173910_Power_4_dgp-2.json │ ├── Jan28_174113_Energy_1_dgp-2.json │ ├── Jan28_174324_Energy_2_dgp-2.json │ ├── Jan28_174536_Energy_3_dgp-2.json │ ├── Jan28_174747_Energy_4_dgp-2.json │ ├── Jan28_174957_Concrete_0_dgp-2.json │ ├── Jan28_175241_Concrete_1_dgp-2.json │ ├── Jan28_175443_Concrete_2_dgp-2.json │ ├── Jan28_175517_Concrete_3_dgp-2.json │ ├── 
Jan28_175720_Concrete_4_dgp-2.json │ ├── Jan28_175755_Kin8mn_0_dgp-2.json │ ├── Jan28_180000_Kin8mn_1_dgp-2.json │ ├── Jan28_181824_Kin8mn_2_dgp-2.json │ ├── Jan28_182029_Kin8mn_3_dgp-2.json │ ├── Jan28_183900_Kin8mn_4_dgp-2.json │ ├── Jan28_184058_Yacht_0_dgp-2.json │ ├── Jan28_184207_Yacht_1_dgp-2.json │ ├── Jan28_184315_Yacht_2_dgp-2.json │ ├── Jan28_184423_Yacht_3_dgp-2.json │ ├── Jan28_184530_Yacht_4_dgp-2.json │ ├── Jan28_195729_Power_0_dgp-3.json │ ├── Jan28_195729_Power_1_dgp-3.json │ ├── Jan28_202422_Power_2_dgp-3.json │ ├── Jan28_202426_Power_3_dgp-3.json │ ├── Jan28_205119_Power_4_dgp-3.json │ ├── Jan28_205122_Energy_0_dgp-3.json │ ├── Jan28_205500_Energy_1_dgp-3.json │ ├── Jan28_205843_Energy_2_dgp-3.json │ ├── Jan28_210226_Energy_3_dgp-3.json │ ├── Jan28_210608_Energy_4_dgp-3.json │ ├── Jan28_210951_Concrete_0_dgp-3.json │ ├── Jan28_211435_Concrete_1_dgp-3.json │ ├── Jan28_211755_Concrete_2_dgp-3.json │ ├── Jan28_211907_Concrete_3_dgp-3.json │ ├── Jan28_212232_Concrete_4_dgp-3.json │ ├── Jan28_212344_Kin8mn_0_dgp-3.json │ ├── Jan28_212712_Kin8mn_1_dgp-3.json │ ├── Jan28_220013_Kin8mn_2_dgp-3.json │ ├── Jan28_220359_Kin8mn_3_dgp-3.json │ ├── Jan28_223700_Kin8mn_4_dgp-3.json │ ├── Jan28_224033_Yacht_0_dgp-3.json │ ├── Jan28_224224_Yacht_1_dgp-3.json │ ├── Jan28_224414_Yacht_2_dgp-3.json │ ├── Jan28_224605_Yacht_3_dgp-3.json │ └── Jan28_224755_Yacht_4_dgp-3.json └── utils.py ├── docs ├── Makefile ├── README.md ├── _static │ ├── logo.png │ ├── pydata-custom.css │ ├── readthedocs-custom.css │ ├── single_layer_fit.png │ └── two_layer_fit.png ├── api.rst ├── conf.py ├── docs_requirements.txt ├── favicon.ico ├── index.rst ├── make.bat ├── notebooks │ ├── README.md │ ├── benchmarks.py │ ├── data │ │ └── motor.csv │ ├── deep_cde.ipynb │ ├── deep_gp_samples.py │ ├── efficient_posterior_sampling.py │ ├── efficient_sampling.py │ ├── gpflux_features.py │ ├── gpflux_with_keras_layers.py │ ├── intro.py │ ├── keras_integration.py │ └── weight_space_approximation.py ├── 
refs.bib └── tutorials.rst ├── gpflux ├── __init__.py ├── architectures │ ├── __init__.py │ └── constant_input_dim_deep_gp.py ├── callbacks.py ├── encoders │ ├── __init__.py │ └── directly_parameterized_encoder.py ├── exceptions.py ├── experiment_support │ ├── __init__.py │ ├── ci_utils.py │ ├── plotting.py │ └── tensorboard.py ├── helpers.py ├── layers │ ├── __init__.py │ ├── basis_functions │ │ ├── __init__.py │ │ └── fourier_features │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ ├── quadrature │ │ │ ├── __init__.py │ │ │ └── gaussian.py │ │ │ ├── random │ │ │ ├── __init__.py │ │ │ ├── base.py │ │ │ └── orthogonal.py │ │ │ └── utils.py │ ├── bayesian_dense_layer.py │ ├── gp_layer.py │ ├── latent_variable_layer.py │ ├── likelihood_layer.py │ └── trackable_layer.py ├── losses.py ├── math.py ├── models │ ├── __init__.py │ └── deep_gp.py ├── optimization │ ├── __init__.py │ └── keras_natgrad.py ├── py.typed ├── runtime_checks.py ├── sampling │ ├── __init__.py │ ├── kernel_with_feature_decomposition.py │ ├── sample.py │ └── utils.py ├── types.py └── version.py ├── mypy.ini ├── notebook_requirements.txt ├── pytest.ini ├── setup.py ├── tests ├── __init__.py ├── conftest.py ├── gpflux │ ├── __init__.py │ ├── architectures │ │ └── test_constant_input_dim_deep_gp.py │ ├── encoders │ │ └── test_directly_parameterized_encoder.py │ ├── layers │ │ ├── basis_functions │ │ │ └── fourier_features │ │ │ │ ├── test_quadrature.py │ │ │ │ └── test_random.py │ │ ├── test_bayesian_dense_layer.py │ │ ├── test_dedup_trackable_layer.py │ │ ├── test_gp_layer.py │ │ ├── test_latent_variable_layer.py │ │ ├── test_likelihood_layer.py │ │ └── test_trackable_layer.py │ ├── models │ │ ├── test_bayesian_model.py │ │ └── test_deep_gp.py │ ├── sampling │ │ └── test_sample.py │ ├── test_callbacks.py │ ├── test_ci_utils.py │ ├── test_helpers.py │ ├── test_losses.py │ ├── test_math.py │ └── test_runtime_checks.py ├── integration │ ├── test_compilation.py │ ├── test_latent_variable_integration.py │ └── 
test_svgp_equivalence.py ├── snelson1d.npz └── test_notebooks.py ├── tests_requirements.txt ├── tests_requirements_37.txt └── tests_requirements_38_39.txt /.coveragerc: -------------------------------------------------------------------------------- 1 | [report] 2 | omit = *tests*, setup.py 3 | exclude_lines = 4 | pragma: no cover 5 | raise NotImplementedError 6 | if __name__ == .__main__.: 7 | print 8 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | # ignore: 3 | # E226 missing whitespace around arithmetic operator 4 | # W503 Line break occurred before a binary operator 5 | # W504 line break after binary operator 6 | # F811 redefinition because of multiple dispatch 7 | # E203 whitespace before ':' 8 | ignore = E226,W503,W504,F811,E203 9 | max-line-length = 100 10 | max-complexity = 10 11 | exclude = .git,__pycache__,.mypy_cache,.pytest_cache 12 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To reproduce** 14 | Steps to reproduce the behaviour: 15 | 1. ... 16 | 2. ... 17 | 3. ... 18 | 4. ... 19 | 20 | Include a minimal reproducible code example if relevant. 21 | 22 | **Expected behaviour** 23 | A clear and concise description of what you expected to happen. 24 | 25 | **System information** 26 | - OS: ... 27 | - Python version: ... 28 | - GPflux version: ... (the pip version, release tag or commit hash) 29 | - TensorFlow version: ... 30 | - GPflow version: ... 31 | 32 | **Additional context** 33 | Add any other context about the problem here. 
-------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the feature you'd like** 11 | A clear and concise description of what you want to happen. 12 | 13 | **Is your feature request related to a problem? Please describe.** 14 | A clear and concise description of what the problem is. 15 | 16 | **Describe alternatives you've considered** 17 | A clear and concise description of any alternative solutions or features you've considered. 18 | 19 | **Additional context** 20 | Add any other context about the feature request here. 21 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/secondmind-labs/GPflux/b3ad682ab95bdfbee999b945b6135f01b94f0eb6/.github/pull_request_template.md -------------------------------------------------------------------------------- /.github/workflows/deploy.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2021 The GPflux Contributors. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | name: Deploy 16 | 17 | on: 18 | push: 19 | branches: 20 | - develop 21 | 22 | jobs: 23 | docs: 24 | runs-on: ubuntu-20.04 25 | steps: 26 | - uses: actions/checkout@v3 27 | - uses: actions/setup-python@v4 28 | with: 29 | python-version: 3.8 30 | - run: | 31 | pip install --upgrade pip 32 | make install 33 | make docs 34 | - run: | 35 | TMP_DIR=$(mktemp -d -p $(pwd)) 36 | mv docs/_build/html/* $TMP_DIR 37 | rm -rf docs 38 | mv $TMP_DIR docs 39 | touch docs/.nojekyll 40 | - run: | 41 | git add . 42 | git config --global user.email "none" 43 | git config --global user.name "github-actions-bot" 44 | git commit -m "build documentation" 45 | git push -f origin HEAD:gh-pages 46 | -------------------------------------------------------------------------------- /.github/workflows/quality-check.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2021 The GPflux Contributors. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | name: Tests 16 | 17 | on: [push] 18 | 19 | jobs: 20 | check-and-test: 21 | runs-on: ubuntu-20.04 22 | strategy: 23 | fail-fast: false 24 | matrix: 25 | python-version: ["3.7", "3.8", "3.9", "3.10"] 26 | tensorflow: ["~=2.5.0", "~=2.6.0", "~=2.7.0", "~=2.8.0", "~=2.9.0", "~=2.10.0", "~=2.11.0", "~=2.12.0", "~=2.13.0", "~=2.14.0", "~=2.15.0", "~=2.16.0"] 27 | include: 28 | - tensorflow: "~=2.5.0" 29 | keras: "~=2.6.0" 30 | tensorflow-probability: "~=0.13.0" 31 | - tensorflow: "~=2.6.0" 32 | keras: "~=2.6.0" 33 | tensorflow-probability: "~=0.14.0" 34 | - tensorflow: "~=2.7.0" 35 | keras: "~=2.7.0" 36 | tensorflow-probability: "~=0.15.0" 37 | - tensorflow: "~=2.8.0" 38 | keras: "~=2.8.0" 39 | tensorflow-probability: "~=0.16.0" 40 | - tensorflow: "~=2.9.0" 41 | keras: "~=2.9.0" 42 | tensorflow-probability: "~=0.17.0" 43 | - tensorflow: "~=2.10.0" 44 | keras: "~=2.10.0" 45 | tensorflow-probability: "~=0.18.0" 46 | - tensorflow: "~=2.11.0" 47 | keras: "~=2.11.0" 48 | tensorflow-probability: "~=0.19.0" 49 | - tensorflow: "~=2.12.0" 50 | keras: "~=2.12.0" 51 | tensorflow-probability: "~=0.20.0" 52 | - tensorflow: "~=2.13.0" 53 | keras: "~=2.13.0" 54 | tensorflow-probability: "~=0.21.0" 55 | - tensorflow: "~=2.14.0" 56 | keras: "~=2.14.0" 57 | tensorflow-probability: "~=0.22.0" 58 | - tensorflow: "~=2.15.0" 59 | keras: "~=2.15.0" 60 | tensorflow-probability: "~=0.23.0" 61 | - tensorflow: "~=2.16.0" 62 | tensorflow-probability: "~=0.24.0" 63 | exclude: 64 | # These older versions of TensorFlow don't work with Python 3.10: 65 | - python-version: "3.10" 66 | tensorflow: "~=2.5.0" 67 | - python-version: "3.10" 68 | tensorflow: "~=2.6.0" 69 | - python-version: "3.10" 70 | tensorflow: "~=2.7.0" 71 | # These newer versions of TensorFlow don't work with Python 3.7: 72 | - python-version: "3.7" 73 | tensorflow: "~=2.12.0" 74 | - python-version: "3.7" 75 | tensorflow: "~=2.13.0" 76 | - python-version: "3.7" 77 | tensorflow: "~=2.14.0" 78 | - python-version: "3.7" 79 | 
tensorflow: "~=2.15.0" 80 | - python-version: "3.7" 81 | tensorflow: "~=2.16.0" 82 | # These newer versions of TensorFlow don't work with Python 3.8: 83 | - python-version: "3.8" 84 | tensorflow: "~=2.14.0" 85 | - python-version: "3.8" 86 | tensorflow: "~=2.15.0" 87 | - python-version: "3.8" 88 | tensorflow: "~=2.16.0" 89 | 90 | name: Python-${{ matrix.python-version }} tensorflow${{ matrix.tensorflow }} 91 | env: 92 | VERSION_TF: ${{ matrix.tensorflow }} 93 | VERSION_KERAS: ${{ matrix.keras }} 94 | VERSION_TFP: ${{ matrix.tensorflow-probability }} 95 | VERSION_PYTHON: ${{ matrix.python-version }} 96 | steps: 97 | - uses: actions/checkout@v3 98 | - uses: actions/setup-python@v4 99 | with: 100 | python-version: ${{ matrix.python-version }} 101 | - uses: actions/cache@v3 102 | with: 103 | path: ${{ env.pythonLocation }} 104 | key: ${{ env.pythonLocation }}-${{ hashFiles('setup.py') }}-${{ hashFiles('notebook_requirements.txt') }}-${{ hashFiles('tests_requirements.txt') }} 105 | - name: Install dependencies 106 | run: make install 107 | - name: Run checks 108 | run: make check 109 | - name: Run tests 110 | run: make test 111 | -------------------------------------------------------------------------------- /.github/workflows/upload-pypi.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2021 The GPflux Contributors. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | name: Upload-PyPI 16 | 17 | on: 18 | workflow_dispatch: 19 | push: 20 | # https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions#filter-pattern-cheat-sheet 21 | tags: v[0-9]+.[0-9]+.[0-9]+* 22 | 23 | jobs: 24 | upload-pypi: 25 | runs-on: ubuntu-20.04 26 | steps: 27 | - uses: actions/checkout@v3 28 | - uses: actions/setup-python@v4 29 | with: 30 | python-version: 3.8 31 | - name: Install twine and wheel 32 | run: | 33 | pip install twine wheel 34 | - name: Create pip package 35 | run: | 36 | python setup.py bdist_wheel sdist 37 | - name: Publish to PyPI 38 | uses: pypa/gh-action-pypi-publish@release/v1 39 | with: 40 | user: __token__ 41 | password: ${{ secrets.PYPI_PASSWORD }} 42 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.py[cod] 6 | *$py.class 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | downloads/ 17 | eggs/ 18 | .eggs/ 19 | lib/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | .hypothesis/ 50 | .pytest_cache/ 51 | 52 | # Translations 53 | *.mo 54 | *.pot 55 | 56 | # Django stuff: 57 | *.log 58 | local_settings.py 59 | db.sqlite3 60 | 61 | # Flask stuff: 62 | instance/ 63 | .webassets-cache 64 | 65 | # Scrapy stuff: 66 | .scrapy 67 | 68 | # Sphinx documentation 69 | docs/_build/ 70 | 71 | # PyBuilder 72 | target/ 73 | 74 | # Jupyter Notebook 75 | .ipynb_checkpoints 76 | 77 | # pyenv 78 | .python-version 79 | 80 | # celery beat schedule file 81 | celerybeat-schedule 82 | 83 | # SageMath parsed files 84 | *.sage.py 85 | 86 | # Environments 87 | .env 88 | .venv 89 | env/ 90 | venv/ 91 | ENV/ 92 | env.bak/ 93 | venv.bak/ 94 | 95 | # Spyder project settings 96 | .spyderproject 97 | .spyproject 98 | 99 | # Rope project settings 100 | .ropeproject 101 | 102 | # mkdocs documentation 103 | /site 104 | 105 | # mypy 106 | .mypy_cache/ 107 | 108 | # PyCharm 109 | .idea 110 | -------------------------------------------------------------------------------- /.isort.cfg: -------------------------------------------------------------------------------- 1 | [settings] 2 | known_first_party = gpflux 3 | known_gpflow = gpflow 4 | sections = FUTURE,STDLIB,THIRDPARTY,GPFLOW,FIRSTPARTY,LOCALFOLDER 5 | default_section = THIRDPARTY 6 | skip_glob = **/notebooks/**,*_jupytext.py 7 | src_paths = gpflux,tests,automl_implementations 8 | atomic = true 9 | ; Remaining settings for compatibility with black 10 | ; See https://github.com/psf/black/blob/master/docs/compatible_configs.md#isort 11 | multi_line_output = 3 12 | include_trailing_comma = true 13 | force_grid_wrap = 0 14 | use_parentheses = true 15 | ensure_newline_before_comments = true 16 | line_length = 100 17 | 
-------------------------------------------------------------------------------- /CONTRIBUTORS.md: -------------------------------------------------------------------------------- 1 | # List of contributors to GPflux 2 | 3 | Because GitHub's [graph of contributors](http://github.com/secondmind-labs/GPflux/graphs/contributors) does not include all contributors, this file contains a list of all contributors (in alphabetical order): 4 | 5 | [Artem Artemev](https://github.com/awav/), 6 | [Marc P. Deisenroth](https://deisenroth.cc/), 7 | [Vincent Dutordoir](https://vdutor.github.io/), 8 | [Eric Hambro](https://erichambro.com), 9 | [Alexandra Hayes](https://github.com/akhayes) 10 | [ST John](http://www.infinitecuriosity.org/about/), 11 | [James A. Leedham](https://github.com/JamesALeedham) 12 | [Felix Leibfried](https://github.com/fleibfried), 13 | [John A. McLeod](https://github.com/johnamcleod), 14 | [Hugh Salimbeni](https://github.com/hughsalimbeni), 15 | [Marcin B. Tomczak](https://github.com/marctom) 16 | [Hrvoje Stojic](https://github.com/hstojic) 17 | [Uri Granta](https://github.com/uri-granta) 18 | [Jesper Nielsen](https://github.com/jesnie) 19 | [Sebastian Ober](https://github.com/sebastianober) 20 | [Simon Chiu](https://github.com/sc336) 21 | [Khurram Ghani](https://github.com/khurram-ghani) 22 | 23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: help install docs format check test check-and-test 2 | 3 | 4 | LIB_NAME = gpflux 5 | TESTS_NAME = tests 6 | ARCHS_NAME = experiments 7 | LINT_NAMES = $(LIB_NAME) $(TESTS_NAME) docs/notebooks 8 | TYPE_NAMES = $(LIB_NAME) 9 | SUCCESS='\033[0;32m' 10 | UNAME_S = $(shell uname -s) 11 | 12 | PANDOC_DEB = https://github.com/jgm/pandoc/releases/download/2.10.1/pandoc-2.10.1-1-amd64.deb 13 | 14 | # the --per-file-ignores are to ignore "unused import" warnings in __init__.py files (F401) 15 
| # the F403 ignore in gpflux/__init__.py allows the `from . import *` 16 | LINT_FILE_IGNORES = "$(LIB_NAME)/__init__.py:F401,F403 \ 17 | $(LIB_NAME)/architectures/__init__.py:F401 \ 18 | $(LIB_NAME)/encoders/__init__.py:F401 \ 19 | $(LIB_NAME)/experiment_support/__init__.py:F401 \ 20 | $(LIB_NAME)/initializers/__init__.py:F401 \ 21 | $(LIB_NAME)/layers/__init__.py:F401 \ 22 | $(LIB_NAME)/layers/basis_functions/__init__.py:F401 \ 23 | $(LIB_NAME)/models/__init__.py:F401 \ 24 | $(LIB_NAME)/optimization/__init__.py:F401 \ 25 | $(LIB_NAME)/sampling/__init__.py:F401 \ 26 | $(LIB_NAME)/utils/__init__.py:F401" 27 | 28 | # Older Python versions use separate test requirements files 29 | ifeq ("$(VERSION_PYTHON)", "3.7") 30 | TEST_REQUIREMENTS = "tests_requirements_37.txt" 31 | else ifeq ($(filter $(VERSION_PYTHON),3.8 3.9),$(VERSION_PYTHON)) 32 | TEST_REQUIREMENTS = "tests_requirements_38_39.txt" 33 | else 34 | TEST_REQUIREMENTS = "tests_requirements.txt" 35 | endif 36 | 37 | 38 | help: ## Shows this help message 39 | # $(MAKEFILE_LIST) is set by make itself; the following parses the `target: ## help line` format and adds color highlighting 40 | @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-24s\033[0m %s\n", $$1, $$2}' 41 | 42 | 43 | install: ## Install repo for developement 44 | @echo "\n=== pip install package with dev requirements ==============" 45 | pip install --upgrade --upgrade-strategy eager \ 46 | -r notebook_requirements.txt \ 47 | -r $(TEST_REQUIREMENTS) \ 48 | tensorflow${VERSION_TF} \ 49 | keras${VERSION_KERAS} \ 50 | tensorflow-probability${VERSION_TFP} \ 51 | -e . 52 | 53 | docs: ## Build the documentation 54 | @echo "\n=== pip install doc requirements ==============" 55 | pip install -r docs/docs_requirements.txt 56 | @echo "\n=== install pandoc ==============" 57 | ifeq ("$(UNAME_S)", "Linux") 58 | $(eval TEMP_DEB=$(shell mktemp)) 59 | @echo "Checking for pandoc installation..." 
60 | @(which pandoc) || ( echo "\nPandoc not found." \ 61 | && echo "Trying to install automatically...\n" \ 62 | && wget -O "$(TEMP_DEB)" $(PANDOC_DEB) \ 63 | && echo "\nInstalling pandoc using dpkg -i from $(PANDOC_DEB)" \ 64 | && echo "(If this step does not work, manually install pandoc, see http://pandoc.org/)\n" \ 65 | && sudo dpkg -i "$(TEMP_DEB)" \ 66 | ) 67 | @rm -f "$(TEMP_DEB)" 68 | endif 69 | ifeq ($(UNAME_S),Darwin) 70 | brew install pandoc 71 | endif 72 | @echo "\n=== build docs ==============" 73 | (cd docs ; make html) 74 | @echo "\n${SUCCESS}=== Docs are available at docs/_build/html/index.html ============== ${SUCCESS}" 75 | 76 | format: ## Formats code with `black` and `isort` 77 | @echo "\n=== isort ==============================================" 78 | isort . 79 | @echo "\n=== black ==============================================" 80 | black --line-length=100 $(LINT_NAMES) 81 | 82 | 83 | check: ## Runs all static checks such as code formatting checks, linting, mypy 84 | @echo "\n=== black (formatting) =================================" 85 | black --check --line-length=100 $(LINT_NAMES) 86 | @echo "\n=== flake8 (linting)====================================" 87 | flake8 --statistics \ 88 | --per-file-ignores=$(LINT_FILE_IGNORES) \ 89 | --exclude=.ipynb_checkpoints ./gpflux 90 | @echo "\n=== mypy (static type checking) ========================" 91 | mypy $(TYPE_NAMES) 92 | 93 | test: ## Run unit and integration tests with pytest 94 | pytest --cov=$(LIB_NAME) \ 95 | --cov-report html:cover_html \ 96 | --cov-config .coveragerc \ 97 | --cov-report term \ 98 | --cov-report xml \ 99 | --cov-fail-under=94 \ 100 | --junitxml=reports/junit.xml \ 101 | -v --tb=short --durations=10 \ 102 | $(TESTS_NAME) 103 | 104 | quicktest: ## Run the tests, start with the failing ones and break on first fail. 
105 | pytest -vv -x --ff -rN -Wignore -s 106 | 107 | check-and-test: check test ## Run pytest and static tests 108 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # GPflux 2 | 3 | 4 | 5 | 6 | [![Quality checks and Tests](https://github.com/secondmind-labs/GPflux/actions/workflows/quality-check.yaml/badge.svg)](https://github.com/secondmind-labs/GPflux/actions/workflows/quality-check.yaml) 7 | [![Docs build](https://github.com/secondmind-labs/GPflux/actions/workflows/deploy.yaml/badge.svg)](https://github.com/secondmind-labs/GPflux/actions/workflows/deploy.yaml) 8 | 9 | [Documentation](https://secondmind-labs.github.io/GPflux/) | 10 | [Tutorials](https://secondmind-labs.github.io/GPflux/tutorials.html) | 11 | [API reference](https://secondmind-labs.github.io/GPflux/autoapi/gpflux/index.html) | 12 | [Slack](https://join.slack.com/t/secondmind-labs/shared_invite/zt-ph07nuie-gMlkle__tjvXBay4FNSLkw) 13 | 14 | ## What does GPflux do? 15 | 16 | GPflux is a toolbox dedicated to Deep Gaussian processes (DGP), the hierarchical extension of Gaussian processes (GP). 17 | 18 | GPflux uses the mathematical building blocks from [GPflow](http://www.gpflow.org/) and marries these with the powerful layered deep learning API provided by [Keras](https://www.tensorflow.org/api_docs/python/tf/keras). 19 | This combination leads to a framework that can be used for: 20 | 21 | - researching new (deep) Gaussian process models, and 22 | - building, training, evaluating and deploying (deep) Gaussian processes in a modern way — making use of the tools developed by the deep learning community. 
23 | 24 | 25 | ## Getting started 26 | 27 | In the [Documentation](https://secondmind-labs.github.io/GPflux/), we have multiple [Tutorials](https://secondmind-labs.github.io/GPflux/tutorials.html) showing the basic functionality of the toolbox, a [benchmark implementation](https://secondmind-labs.github.io/GPflux/notebooks/benchmarks.html) and a comprehensive [API reference](https://secondmind-labs.github.io/GPflux/autoapi/gpflux/index.html). 28 | 29 | 30 | ## Install GPflux 31 | 32 | This project is assuming you are using `python3`. 33 | 34 | #### For users 35 | 36 | To install the latest (stable) release of the toolbox from [PyPI](https://pypi.org/), use `pip`: 37 | ```bash 38 | $ pip install gpflux 39 | ``` 40 | #### For contributors 41 | 42 | To install this project in editable mode, run the commands below from the root directory of the `GPflux` repository. 43 | ```bash 44 | make install 45 | ``` 46 | Check that the installation was successful by running the tests: 47 | ```bash 48 | make test 49 | ``` 50 | You can have a peek at the [Makefile](Makefile) for the commands. 51 | 52 | 53 | ## The Secondmind Labs Community 54 | 55 | ### Getting help 56 | 57 | **Bugs, feature requests, pain points, annoying design quirks, etc:** 58 | Please use [GitHub issues](https://github.com/secondmind-labs/GPflux/issues/) to flag up bugs/issues/pain points, suggest new features, and discuss anything else related to the use of GPflux that in some sense involves changing the GPflux code itself. We positively welcome comments or concerns about usability, and suggestions for changes at any level of design. We aim to respond to issues promptly, but if you believe we may have forgotten about an issue, please feel free to add another comment to remind us. 59 | 60 | ### Slack workspace 61 | 62 | We have a public [Secondmind Labs slack workspace](https://secondmind-labs.slack.com/). 
Please use this [invite link](https://join.slack.com/t/secondmind-labs/shared_invite/zt-ph07nuie-gMlkle__tjvXBay4FNSLkw) and join the #gpflux channel, whether you'd just like to ask short informal questions or want to be involved in the discussion and future development of GPflux. 63 | 64 | 65 | ### Contributing 66 | 67 | All constructive input is very much welcome. For detailed information, see the [guidelines for contributors](CONTRIBUTING.md). 68 | 69 | 70 | ### Maintainers 71 | 72 | GPflux was originally created at [Secondmind Labs](https://www.secondmind.ai/labs/) and is now actively maintained by (in alphabetical order) 73 | [Vincent Dutordoir](https://vdutor.github.io/) and 74 | [ST John](https://github.com/st--/). 75 | **We are grateful to [all contributors](CONTRIBUTORS.md) who have helped shape GPflux.** 76 | 77 | GPflux is an open source project. If you have relevant skills and are interested in contributing then please do contact us (see ["The Secondmind Labs Community" section](#the-secondmind-labs-community) above). 78 | 79 | We are very grateful to our Secondmind Labs colleagues, maintainers of [GPflow](https://github.com/GPflow/GPflow), [Trieste](https://github.com/secondmind-labs/trieste) and [Bellman](https://github.com/Bellman-devs/bellman), for their help with creating contributing guidelines, instructions for users and open-sourcing in general. 80 | 81 | 82 | ## Citing GPflux 83 | 84 | To cite GPflux, please reference our [arXiv paper](https://arxiv.org/abs/2104.05674) where we review the framework and describe the design. Sample Bibtex is given below: 85 | 86 | ``` 87 | @article{dutordoir2021gpflux, 88 | author = {Dutordoir, Vincent and Salimbeni, Hugh and Hambro, Eric and McLeod, John and 89 | Leibfried, Felix and Artemev, Artem and van der Wilk, Mark and Deisenroth, Marc P. 
#
# Copyright (c) 2021 The GPflux Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from itertools import product


class CommandsBuilder:
    r"""
    Creates the outer-product of configurations to be executed.
    Returns a list with all the combinations.
    Here's an example:
    ```
    commands = (
        CommandsBuilder()
        .add("dataset", ["Power", "Kin8mn"])
        .add("split", [0, 1])
        .build()
    )
    ```
    Returns
    ```
    commands = [
        "python main.py with dataset=Power split=0;",
        "python main.py with dataset=Power split=1;",
        "python main.py with dataset=Kin8mn split=0;",
        "python main.py with dataset=Kin8mn split=1;",
    ]
    ```
    """

    command_template = "python main.py -p with{config};"
    single_config_template = " {key}={value}"

    def __init__(self):
        # BUGFIX: these used to be *class* attributes, which are shared by all
        # instances (the classic mutable-class-attribute pitfall) — a second
        # CommandsBuilder would silently inherit the keys/values accumulated
        # by the first one. Instance attributes keep each builder independent.
        self.keys = []
        self.values = []

    def add(self, key, values):
        """Register config ``key`` with the sequence of ``values`` to sweep over.

        Returns ``self`` so calls can be chained fluently.
        """
        self.keys.append(key)
        self.values.append(values)
        return self

    def build(self):
        """Return one command string per element of the Cartesian product of values."""
        commands = []
        for args in product(*self.values):
            config = "".join(
                self.single_config_template.format(key=key, value=value)
                for key, value in zip(self.keys, args)
            )
            commands.append(self.command_template.format(config=config))
        return commands


DATASETS_UCI = ["Power", "Energy", "Concrete", "Kin8mn", "Yacht"]


if __name__ == "__main__":
    NAME = "commands_uci.txt"

    if os.path.exists(NAME):
        print("File to store script already exists", NAME)
        exit(-1)

    commands = (
        CommandsBuilder()
        .add("dataset", DATASETS_UCI)
        .add("split", range(5))
        .add("num_layers", range(1, 4))
        .build()
    )

    with open(NAME, "w") as file:
        file.write("\n".join(commands))
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | import datetime 17 | import json 18 | import pprint 19 | from pathlib import Path 20 | from typing import Any, Optional, Type 21 | 22 | import numpy as np 23 | import tensorflow as tf 24 | from bayesian_benchmarks import data as uci_datasets 25 | from bayesian_benchmarks.data import Dataset 26 | from sacred import Experiment 27 | from scipy.stats import norm 28 | from utils import ExperimentName, git_version 29 | 30 | from gpflow.keras import tf_keras 31 | 32 | from gpflux.architectures import Config, build_constant_input_dim_deep_gp 33 | 34 | THIS_DIR = Path(__file__).parent 35 | LOGS = THIS_DIR / "tmp" 36 | EXPERIMENT = Experiment("UCI") 37 | 38 | 39 | @EXPERIMENT.config 40 | def config(): 41 | # Timestamp (None) 42 | date = datetime.datetime.now().strftime("%b%d_%H%M%S") 43 | # Dataset (None) 44 | dataset = "Yacht" 45 | # Dataset split (None) 46 | split = 0 47 | # Number of layers (Ignore) 48 | num_layers = 1 49 | # Model name (None) 50 | model_name = f"dgp-{num_layers}" 51 | # number of inducing points (Ignore) 52 | num_inducing = 256 53 | # batch size (Ignore) 54 | batch_size = 1024 55 | # Number of times the complete training dataset is iterated over (Ignore) 56 | num_epochs = 1000 57 | 58 | 59 | @EXPERIMENT.capture 60 | def experiment_name(_config: Any) -> Optional[str]: 61 | config = _config.copy() 62 | del config["seed"] 63 | return ExperimentName(EXPERIMENT, config).get() 64 | 65 | 66 | def get_dataset_class(dataset) -> Type[Dataset]: 67 | return getattr(uci_datasets, dataset) 68 | 69 | 70 | 
@EXPERIMENT.capture
def get_data(split, dataset):
    """Instantiate the configured UCI dataset for the given train/test split."""
    return get_dataset_class(dataset)(split=split)


@EXPERIMENT.capture
def build_model(X, num_inducing, num_layers):
    """Build a constant-input-dim deep GP with `num_layers` layers for data X.

    Noise variances and the inner-layer q_sqrt factor are kept small so the
    model starts close to a deterministic fit.
    """
    config = Config(
        num_inducing=num_inducing,
        inner_layer_qsqrt_factor=1e-5,
        between_layer_noise_variance=1e-2,
        likelihood_noise_variance=1e-2,
        white=True,
    )
    return build_constant_input_dim_deep_gp(X, num_layers, config=config)


@EXPERIMENT.capture
def train_model(model: tf_keras.models.Model, data_train, batch_size, num_epochs):
    """Fit `model` on `(X_train, Y_train)`, shrinking the learning rate by 5%
    whenever the training loss plateaus for 3 epochs (floored at 1e-6)."""
    X_train, Y_train = data_train
    callbacks = [
        tf_keras.callbacks.ReduceLROnPlateau(
            "loss", factor=0.95, patience=3, min_lr=1e-6, verbose=1
        ),
    ]
    model.fit(
        X_train,
        Y_train,
        batch_size=batch_size,
        epochs=num_epochs,
        callbacks=callbacks,
        verbose=1,
    )


def evaluate_model(model, data_test):
    """Return test-set metrics: RMSE, MSE and negative log predictive density.

    Assumes the model output exposes Gaussian predictive moments via
    `.y_mean` / `.y_var`; the NLPD is computed from the matching normal
    density (scale = predictive standard deviation).
    """
    X_test, Y_test = data_test
    out = model(X_test)
    y_mean, y_var = out.y_mean, out.y_var
    residuals = Y_test - y_mean
    log_density = norm.logpdf(Y_test, loc=y_mean, scale=y_var ** 0.5)
    mse = np.average(residuals ** 2)
    rmse = mse ** 0.5
    nlpd = -np.average(log_density)
    return dict(rmse=rmse, mse=mse, nlpd=nlpd)


@EXPERIMENT.automain
def main(_config):
    """Train and evaluate one configuration, then dump config+metrics as JSON."""
    data = get_data()
    model = build_model(data.X_train)

    model.compile(optimizer=tf_keras.optimizers.Adam(0.01))
    train_model(model, (data.X_train, data.Y_train))

    metrics = evaluate_model(model, (data.X_test, data.Y_test))

    results = {**_config, **metrics, "commit": git_version()}
    name = experiment_name()  # computed once; reused for file name and banner
    # Fix: create the output directory if missing — previously the json dump
    # raised FileNotFoundError on a fresh checkout without ./tmp.
    LOGS.mkdir(parents=True, exist_ok=True)
    with open(LOGS / f"{name}.json", "w") as fp:
        json.dump(results, fp, indent=2)

    print("=" * 60)
    print(name)
    pprint.pprint(results)
    print("=" * 60)
-------------------------------------------------------------------------------- /benchmarking/runs/Jan28_102218_Power_0_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_102218", 3 | "dataset": "Power", 4 | "split": 0, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 767489119, 11 | "rmse": 0.24750071773506993, 12 | "mse": 0.061256605279374765, 13 | "nlpd": 0.04335509437165361, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_102515_Power_1_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_102515", 3 | "dataset": "Power", 4 | "split": 1, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 162059835, 11 | "rmse": 0.23783148168865034, 12 | "mse": 0.05656381368221882, 13 | "nlpd": -0.007695087723076693, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_102824_Power_2_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_102824", 3 | "dataset": "Power", 4 | "split": 2, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 290271505, 11 | "rmse": 0.24149302283178145, 12 | "mse": 0.058318880076431316, 13 | "nlpd": 0.008342006286812365, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_103252_Power_3_dgp-1.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_103252", 3 | "dataset": "Power", 4 | "split": 3, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 805362504, 11 | "rmse": 0.23717778077043672, 12 | "mse": 0.05625329969118934, 13 | "nlpd": -0.008828085516022826, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_103730_Power_4_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_103730", 3 | "dataset": "Power", 4 | "split": 4, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 50202638, 11 | "rmse": 0.2228036632127681, 12 | "mse": 0.04964147234102859, 13 | "nlpd": -0.08068639643651333, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_104542_Energy_0_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_104542", 3 | "dataset": "Energy", 4 | "split": 0, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 761476455, 11 | "rmse": 0.05133209509279212, 12 | "mse": 0.0026349839866154534, 13 | "nlpd": -1.0466984665269004, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_104651_Energy_1_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_104651", 3 | "dataset": "Energy", 4 | "split": 1, 5 | "num_layers": 1, 6 | "model_name": 
"dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 730385361, 11 | "rmse": 0.048300052002665794, 12 | "mse": 0.00233289502346022, 13 | "nlpd": -1.011826794002902, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_104748_Energy_2_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_104748", 3 | "dataset": "Energy", 4 | "split": 2, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 823177825, 11 | "rmse": 0.07064892289907365, 12 | "mse": 0.0049912703067992534, 13 | "nlpd": -0.9066679934920236, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_104831_Energy_3_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_104831", 3 | "dataset": "Energy", 4 | "split": 3, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 87560031, 11 | "rmse": 0.07869695960898822, 12 | "mse": 0.006193211451698722, 13 | "nlpd": -0.9393866950991742, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_104924_Energy_4_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_104924", 3 | "dataset": "Energy", 4 | "split": 4, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 888760030, 11 | "rmse": 0.05635044645107126, 12 | "mse": 0.0031753728152350497, 13 | "nlpd": 
-1.05468123284972, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_105034_Concrete_0_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_105034", 3 | "dataset": "Concrete", 4 | "split": 0, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 131988523, 11 | "rmse": 0.3509270640468073, 12 | "mse": 0.12314980428051198, 13 | "nlpd": 0.5667195868323248, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_105120_Concrete_1_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_105120", 3 | "dataset": "Concrete", 4 | "split": 1, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 320109169, 11 | "rmse": 0.3111479652406896, 12 | "mse": 0.09681305627342139, 13 | "nlpd": 0.4115049302669282, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_105202_Concrete_2_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_105202", 3 | "dataset": "Concrete", 4 | "split": 2, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 764047740, 11 | "rmse": 0.30061334618265445, 12 | "mse": 0.09036838390313244, 13 | "nlpd": 0.3466335901527876, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- 
/benchmarking/runs/Jan28_105257_Concrete_3_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_105257", 3 | "dataset": "Concrete", 4 | "split": 3, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 16171765, 11 | "rmse": 0.30526305936190523, 12 | "mse": 0.09318553541099008, 13 | "nlpd": 0.3955580841379402, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_105340_Concrete_4_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_105340", 3 | "dataset": "Concrete", 4 | "split": 4, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 45121351, 11 | "rmse": 0.33971798474007797, 12 | "mse": 0.11540830915585984, 13 | "nlpd": 0.9139496655450027, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_105432_Kin8mn_0_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_105432", 3 | "dataset": "Kin8mn", 4 | "split": 0, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 875322006, 11 | "rmse": 0.32198451913815945, 12 | "mse": 0.10367403056463176, 13 | "nlpd": 0.2739430769467973, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_105655_Kin8mn_1_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_105655", 3 | "dataset": "Kin8mn", 
4 | "split": 1, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 560314095, 11 | "rmse": 0.3101629434171318, 12 | "mse": 0.09620105146917889, 13 | "nlpd": 0.2616240307740899, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_110213_Kin8mn_2_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_110213", 3 | "dataset": "Kin8mn", 4 | "split": 2, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 912553259, 11 | "rmse": 0.3091525738910605, 12 | "mse": 0.0955753139434676, 13 | "nlpd": 0.261475821944983, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_110525_Kin8mn_3_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_110525", 3 | "dataset": "Kin8mn", 4 | "split": 3, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 266670156, 11 | "rmse": 0.30056931254993724, 12 | "mse": 0.09034191164674187, 13 | "nlpd": 0.23442959645659886, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_110959_Kin8mn_4_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_110959", 3 | "dataset": "Kin8mn", 4 | "split": 4, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 809724666, 11 | "rmse": 0.3272809690328025, 12 | 
"mse": 0.10711283269105024, 13 | "nlpd": 0.28740301490031017, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_111344_Yacht_0_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_111344", 3 | "dataset": "Yacht", 4 | "split": 0, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 217794835, 11 | "rmse": 0.12109479261133703, 12 | "mse": 0.014663948797582727, 13 | "nlpd": -0.7481162787894489, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_111538_Yacht_1_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_111538", 3 | "dataset": "Yacht", 4 | "split": 1, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 768727755, 11 | "rmse": 0.05693725464170446, 12 | "mse": 0.003241850966134296, 13 | "nlpd": -0.9806749034471265, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_111627_Yacht_2_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_111627", 3 | "dataset": "Yacht", 4 | "split": 2, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 747766432, 11 | "rmse": 0.057892377934925535, 12 | "mse": 0.0033515274229602527, 13 | "nlpd": -0.9593446925368703, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } 
-------------------------------------------------------------------------------- /benchmarking/runs/Jan28_111738_Yacht_3_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_111738", 3 | "dataset": "Yacht", 4 | "split": 3, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 16807513, 11 | "rmse": 0.08336516771235851, 12 | "mse": 0.006949751187709662, 13 | "nlpd": -0.8940203438186789, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_111831_Yacht_4_dgp-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_111831", 3 | "dataset": "Yacht", 4 | "split": 4, 5 | "num_layers": 1, 6 | "model_name": "dgp-1", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 512948864, 11 | "rmse": 0.03585803581265448, 12 | "mse": 0.0012857987323416113, 13 | "nlpd": -0.9606592834257929, 14 | "commit": "f6592411902e35d5141664eb9de061dcb16d2cb8" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_170702_Power_0_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_170702", 3 | "dataset": "Power", 4 | "split": 0, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 745863986, 11 | "rmse": 0.2156754619068517, 12 | "mse": 0.04651590486873384, 13 | "nlpd": -0.07808799236850988, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_170702_Power_1_dgp-2.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_170702", 3 | "dataset": "Power", 4 | "split": 1, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 999220291, 11 | "rmse": 0.20342114833041183, 12 | "mse": 0.041380163588063415, 13 | "nlpd": -0.16182778978960163, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_172305_Power_2_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_172305", 3 | "dataset": "Power", 4 | "split": 2, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 578273594, 11 | "rmse": 0.2066991633758079, 12 | "mse": 0.04272454414025893, 13 | "nlpd": -0.14096722497198255, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_172305_Power_3_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_172305", 3 | "dataset": "Power", 4 | "split": 3, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 5149783, 11 | "rmse": 0.23380281474968131, 12 | "mse": 0.0546637561848738, 13 | "nlpd": -0.031335542603260665, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_173910_Energy_0_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_173910", 3 | "dataset": "Energy", 4 | "split": 0, 5 | "num_layers": 2, 6 | "model_name": 
"dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 540702253, 11 | "rmse": 0.057098690194860253, 12 | "mse": 0.0032602604219686307, 13 | "nlpd": -1.0898352500693913, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_173910_Power_4_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_173910", 3 | "dataset": "Power", 4 | "split": 4, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 949787354, 11 | "rmse": 0.1913464600775094, 12 | "mse": 0.03661346778419391, 13 | "nlpd": -0.23471041121776237, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_174113_Energy_1_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_174113", 3 | "dataset": "Energy", 4 | "split": 1, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 156249301, 11 | "rmse": 0.04734681081617334, 12 | "mse": 0.002241720494462509, 13 | "nlpd": -1.144192416936825, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_174324_Energy_2_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_174324", 3 | "dataset": "Energy", 4 | "split": 2, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 152581785, 11 | "rmse": 0.07136565535972424, 12 | "mse": 0.005093056764922936, 13 | "nlpd": 
-1.0391231484053247, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_174536_Energy_3_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_174536", 3 | "dataset": "Energy", 4 | "split": 3, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 337924288, 11 | "rmse": 0.07833345361500996, 12 | "mse": 0.006136129955254916, 13 | "nlpd": -1.0705934000195723, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_174747_Energy_4_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_174747", 3 | "dataset": "Energy", 4 | "split": 4, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 714986057, 11 | "rmse": 0.06018207198852004, 12 | "mse": 0.0036218817888314086, 13 | "nlpd": -1.1046149572363289, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_174957_Concrete_0_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_174957", 3 | "dataset": "Concrete", 4 | "split": 0, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 281121708, 11 | "rmse": 0.305290665144591, 12 | "mse": 0.0932023902244268, 13 | "nlpd": 0.27725971214507256, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- 
/benchmarking/runs/Jan28_175241_Concrete_1_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_175241", 3 | "dataset": "Concrete", 4 | "split": 1, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 973798586, 11 | "rmse": 0.30595467580588775, 12 | "mse": 0.09360826364748588, 13 | "nlpd": 0.32537592803576354, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_175443_Concrete_2_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_175443", 3 | "dataset": "Concrete", 4 | "split": 2, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 331830828, 11 | "rmse": 0.2971189504753251, 12 | "mse": 0.08827967073155868, 13 | "nlpd": 0.3415015193873915, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_175517_Concrete_3_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_175517", 3 | "dataset": "Concrete", 4 | "split": 3, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 605758679, 11 | "rmse": 0.3059937599008655, 12 | "mse": 0.09363218109826855, 13 | "nlpd": 0.2566377411912229, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_175720_Concrete_4_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_175720", 3 | "dataset": 
"Concrete", 4 | "split": 4, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 729144932, 11 | "rmse": 0.3151767839705729, 12 | "mse": 0.09933640515403318, 13 | "nlpd": 0.7415779860927585, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_175755_Kin8mn_0_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_175755", 3 | "dataset": "Kin8mn", 4 | "split": 0, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 799545010, 11 | "rmse": 0.2538170475041603, 12 | "mse": 0.06442309360372915, 13 | "nlpd": 0.07314441353257033, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_180000_Kin8mn_1_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_180000", 3 | "dataset": "Kin8mn", 4 | "split": 1, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 376438512, 11 | "rmse": 0.2512412103802809, 12 | "mse": 0.06312214579334859, 13 | "nlpd": 0.05551312320231531, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_181824_Kin8mn_2_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_181824", 3 | "dataset": "Kin8mn", 4 | "split": 2, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 348169015, 11 | "rmse": 
0.24986134425166162, 12 | "mse": 0.062430691351247354, 13 | "nlpd": 0.048029103416397365, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_182029_Kin8mn_3_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_182029", 3 | "dataset": "Kin8mn", 4 | "split": 3, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 83966341, 11 | "rmse": 0.2436610984043974, 12 | "mse": 0.05937073087563743, 13 | "nlpd": 0.01578328569702008, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_183900_Kin8mn_4_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_183900", 3 | "dataset": "Kin8mn", 4 | "split": 4, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 513432086, 11 | "rmse": 0.24335475894609207, 12 | "mse": 0.059221538701710576, 13 | "nlpd": 0.00998402469784206, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_184058_Yacht_0_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_184058", 3 | "dataset": "Yacht", 4 | "split": 0, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 946974909, 11 | "rmse": 0.08662448387054805, 12 | "mse": 0.00750380120583884, 13 | "nlpd": -0.9595048083581038, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } 
-------------------------------------------------------------------------------- /benchmarking/runs/Jan28_184207_Yacht_1_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_184207", 3 | "dataset": "Yacht", 4 | "split": 1, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 889336570, 11 | "rmse": 0.0494192131156223, 12 | "mse": 0.002442258624967295, 13 | "nlpd": -1.0950139392981566, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_184315_Yacht_2_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_184315", 3 | "dataset": "Yacht", 4 | "split": 2, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 708970864, 11 | "rmse": 0.023553281487294266, 12 | "mse": 0.0005547570688197188, 13 | "nlpd": -1.122764775205993, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_184423_Yacht_3_dgp-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_184423", 3 | "dataset": "Yacht", 4 | "split": 3, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 251432489, 11 | "rmse": 0.029300165451908042, 12 | "mse": 0.0008584996955091855, 13 | "nlpd": -1.1351675993275707, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_184530_Yacht_4_dgp-2.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_184530", 3 | "dataset": "Yacht", 4 | "split": 4, 5 | "num_layers": 2, 6 | "model_name": "dgp-2", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 61179245, 11 | "rmse": 0.024226519394612928, 12 | "mse": 0.0005869242419775563, 13 | "nlpd": -1.1080140572437511, 14 | "commit": "1251726e88719b7970fa7c4e515c9c62d1f090a9" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_195729_Power_0_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_195729", 3 | "dataset": "Power", 4 | "split": 0, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 813807554, 11 | "rmse": 0.20509944394401528, 12 | "mse": 0.04206578190614426, 13 | "nlpd": -0.1005500203620659, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_195729_Power_1_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_195729", 3 | "dataset": "Power", 4 | "split": 1, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 62552153, 11 | "rmse": 0.1971548008312628, 12 | "mse": 0.0388700154908149, 13 | "nlpd": -0.14072775922656405, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_202422_Power_2_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_202422", 3 | "dataset": "Power", 4 | "split": 2, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 
7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 167253180, 11 | "rmse": 0.19754575387694456, 12 | "mse": 0.03902432487481036, 13 | "nlpd": -0.15362864948173058, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_202426_Power_3_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_202426", 3 | "dataset": "Power", 4 | "split": 3, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 476351706, 11 | "rmse": 0.22919007726437152, 12 | "mse": 0.05252809151644859, 13 | "nlpd": -0.04998274645517528, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_205119_Power_4_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_205119", 3 | "dataset": "Power", 4 | "split": 4, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 632624105, 11 | "rmse": 0.19958144225145685, 12 | "mse": 0.039832752091171604, 13 | "nlpd": -0.12381545588393578, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_205122_Energy_0_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_205122", 3 | "dataset": "Energy", 4 | "split": 0, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 862069668, 11 | "rmse": 0.05693064234229028, 12 | "mse": 0.003241098037505775, 13 | "nlpd": 
-1.0874393226508732, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_205500_Energy_1_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_205500", 3 | "dataset": "Energy", 4 | "split": 1, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 560537114, 11 | "rmse": 0.047423584622685304, 12 | "mse": 0.002248996378464994, 13 | "nlpd": -1.1500523140522045, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_205843_Energy_2_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_205843", 3 | "dataset": "Energy", 4 | "split": 2, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 718380073, 11 | "rmse": 0.07111086355426452, 12 | "mse": 0.005056754915433227, 13 | "nlpd": -1.0443196811150834, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_210226_Energy_3_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_210226", 3 | "dataset": "Energy", 4 | "split": 3, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 103360922, 11 | "rmse": 0.07809392494424727, 12 | "mse": 0.006098661113197726, 13 | "nlpd": -1.0721039291185526, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- 
/benchmarking/runs/Jan28_210608_Energy_4_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_210608", 3 | "dataset": "Energy", 4 | "split": 4, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 717442829, 11 | "rmse": 0.060571587295038136, 12 | "mse": 0.0036689171874404254, 13 | "nlpd": -1.1043374739664185, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_210951_Concrete_0_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_210951", 3 | "dataset": "Concrete", 4 | "split": 0, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 554044787, 11 | "rmse": 0.308537376257315, 12 | "mse": 0.09519531254774795, 13 | "nlpd": 0.4029205372153874, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_211435_Concrete_1_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_211435", 3 | "dataset": "Concrete", 4 | "split": 1, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 590769994, 11 | "rmse": 0.3362979818850177, 12 | "mse": 0.1130963326199357, 13 | "nlpd": 0.6425922338359445, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_211755_Concrete_2_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_211755", 3 | "dataset": 
"Concrete", 4 | "split": 2, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 748198532, 11 | "rmse": 0.2853733737252865, 12 | "mse": 0.08143796243135204, 13 | "nlpd": 0.35200771300303874, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_211907_Concrete_3_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_211907", 3 | "dataset": "Concrete", 4 | "split": 3, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 831412688, 11 | "rmse": 0.30846513420474386, 12 | "mse": 0.09515073901995064, 13 | "nlpd": 0.3968812079058037, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_212232_Concrete_4_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_212232", 3 | "dataset": "Concrete", 4 | "split": 4, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 365907662, 11 | "rmse": 0.3621942359171927, 12 | "mse": 0.13118466453163902, 13 | "nlpd": 1.3272718430335344, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_212344_Kin8mn_0_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_212344", 3 | "dataset": "Kin8mn", 4 | "split": 0, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 950432145, 11 | "rmse": 
0.26032462973986986, 12 | "mse": 0.06776891284920035, 13 | "nlpd": 0.19761482083930781, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_212712_Kin8mn_1_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_212712", 3 | "dataset": "Kin8mn", 4 | "split": 1, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 42191275, 11 | "rmse": 0.25436498318565537, 12 | "mse": 0.06470154467103874, 13 | "nlpd": 0.14372772191533023, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_220013_Kin8mn_2_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_220013", 3 | "dataset": "Kin8mn", 4 | "split": 2, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 394371951, 11 | "rmse": 0.2569444225724082, 12 | "mse": 0.06602043629106825, 13 | "nlpd": 0.18116152266269325, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_220359_Kin8mn_3_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_220359", 3 | "dataset": "Kin8mn", 4 | "split": 3, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 431453042, 11 | "rmse": 0.2483735583982879, 12 | "mse": 0.06168942451142773, 13 | "nlpd": 0.1032167477400957, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } 
-------------------------------------------------------------------------------- /benchmarking/runs/Jan28_223700_Kin8mn_4_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_223700", 3 | "dataset": "Kin8mn", 4 | "split": 4, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 876232645, 11 | "rmse": 0.24617189169183454, 12 | "mse": 0.06060060025913632, 13 | "nlpd": 0.0958342697921327, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_224033_Yacht_0_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_224033", 3 | "dataset": "Yacht", 4 | "split": 0, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 285174166, 11 | "rmse": 0.08594023265557742, 12 | "mse": 0.007385723588894776, 13 | "nlpd": -0.9634382863448366, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_224224_Yacht_1_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_224224", 3 | "dataset": "Yacht", 4 | "split": 1, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 671425253, 11 | "rmse": 0.04989895452840384, 12 | "mse": 0.002489905663027714, 13 | "nlpd": -1.095838168681383, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_224414_Yacht_2_dgp-3.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_224414", 3 | "dataset": "Yacht", 4 | "split": 2, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 49326495, 11 | "rmse": 0.02810806208911593, 12 | "mse": 0.0007900631544055961, 13 | "nlpd": -1.1196616932577568, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_224605_Yacht_3_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_224605", 3 | "dataset": "Yacht", 4 | "split": 3, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 664179460, 11 | "rmse": 0.02916783054539639, 12 | "mse": 0.0008507623387249586, 13 | "nlpd": -1.1358876243857225, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/runs/Jan28_224755_Yacht_4_dgp-3.json: -------------------------------------------------------------------------------- 1 | { 2 | "date": "Jan28_224755", 3 | "dataset": "Yacht", 4 | "split": 4, 5 | "num_layers": 3, 6 | "model_name": "dgp-3", 7 | "num_inducing": 256, 8 | "batch_size": 1024, 9 | "num_epochs": 1000, 10 | "seed": 830797725, 11 | "rmse": 0.024127279713929622, 12 | "mse": 0.0005821256263941998, 13 | "nlpd": -1.1134654838315847, 14 | "commit": "7d4d681bf04aeb0db80d99be43604c1a64b26f00" 15 | } -------------------------------------------------------------------------------- /benchmarking/utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import subprocess
from typing import TYPE_CHECKING, Mapping, Optional

if TYPE_CHECKING:
    # Only needed for type annotations; keeping it out of the runtime path
    # means this module can be imported without sacred installed.
    from sacred import Experiment


class ExperimentName:
    """
    Uses the comments of an argument to infer the name.
    Will read the arguments from top to bottom.
    An abbreviated name can be specified between brackets.
    - Setting (Ignore) will not include the parameter in the name
    - Setting (None) will only include the value of the parameter, without its name.
    - Not specifying any name within brackets will cause the variable name to be used.
    Example:
    ```
    @ex.config
    def config():
        # Timestamp (None)
        date = datetime.datetime.now().strftime("%b%d_%H%M%S")
        # number of inducing points (M)
        num_inducing = 400
        # batch size (Nb)
        batch_size = 2048
        # Number of times the complete training dataset is iterated over (E)
        num_epochs = 200
        # Index for the output
        output = 0
        assert output in [0, 1]
        # Learning rate starting value
        lr = 1e-2
    ```
    returns:
    Sep07_170739_M-400_Nb-2048_E-200_output-0_lr-0.01
    """

    def __init__(
        self, experiment: "Experiment", config: Mapping, prefix: Optional[str] = None
    ):
        """
        :param experiment: sacred experiment holding exactly one configuration,
            whose argument comments determine the name components.
        :param config: mapping of argument name to value; its iteration order
            fixes the order of the name components.
        :param prefix: optional string to prepend to the generated name.
        """
        assert len(experiment.configurations) == 1
        self.config = config
        # NOTE(review): relies on sacred's private ``_var_docs`` attribute,
        # which maps argument names to their source-code comments.
        self.docs = experiment.configurations[0]._var_docs
        self.prefix = prefix
        self._experiment_name: Optional[str] = None  # lazily built cache

    def _build(self) -> str:
        """Assemble the experiment name from the config values and comments."""
        experiment_name = "" if self.prefix is None else self.prefix

        def _has_comment(argument_name: str) -> bool:
            return argument_name in self.docs.keys()

        def _get_abbreviated_name(argument_name: str) -> Optional[str]:
            # The abbreviation is the text between the first "(" and ")" of
            # the comment; ``None`` when no complete bracket pair exists.
            comment = self.docs[argument_name]
            left_bracket = comment.find("(")
            right_bracket = comment.find(")")
            if left_bracket < 0 or right_bracket < 0:
                return None
            else:
                return comment[left_bracket + 1 : right_bracket]

        for k, v in self.config.items():
            prefix = "" if len(experiment_name) == 0 else "_"

            if _has_comment(k):
                abbreviated_name = _get_abbreviated_name(k)
                if abbreviated_name is None:
                    experiment_name += f"{prefix}{k}-{v}"
                elif abbreviated_name.lower() == "none":
                    experiment_name += f"{prefix}{v}"
                elif abbreviated_name.lower() == "ignore":
                    continue
                else:
                    experiment_name += f"{prefix}{abbreviated_name}-{v}"
            else:
                experiment_name += f"{prefix}{k}-{v}"

        return experiment_name

    def get(self) -> Optional[str]:
        """Return the experiment name, building it on first access."""
        if self._experiment_name is None:
            self._experiment_name = self._build()
        return self._experiment_name


def git_version() -> str:
    """
    Return the git revision (HEAD commit hash) as a string, or "Unknown"
    if git cannot be executed.
    """

    def _minimal_ext_cmd(cmd: list) -> bytes:
        # Construct a minimal environment so git output is not affected by
        # the user's locale or other settings.
        env = {}
        for k in ["SYSTEMROOT", "PATH"]:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env["LANGUAGE"] = "C"
        env["LANG"] = "C"
        env["LC_ALL"] = "C"
        # subprocess.run waits for completion and closes the stdout pipe,
        # unlike the original bare Popen which left the pipe to the GC.
        return subprocess.run(cmd, stdout=subprocess.PIPE, env=env).stdout

    try:
        out = _minimal_ext_cmd(["git", "rev-parse", "HEAD"])
        git_revision = out.strip().decode("ascii")
    except OSError:
        # git executable not found / not runnable
        git_revision = "Unknown"

    return git_revision
perhaps sphinx-build does it...), then build html: 24 | %: Makefile 25 | rm -rf _build 26 | rm -rf _autosummary 27 | jupytext --to notebook notebooks/*.py 28 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 29 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # GPflux documentation 2 | 3 | The API reference (generated from docstrings) and Jupyter Notebook tutorials are automatically built by a GitHub action (see .github/workflows/deploy.yaml) on commit to `develop` and published to [https://secondmind-labs.github.io/GPflux/](https://secondmind-labs.github.io/GPflux/). 4 | 5 | ## Jupyter Notebooks 6 | 7 | If you want to run the Jupyter Notebook tutorials interactively, install additional dependencies in the `docs` directory: 8 | 9 | `pip install -r docs_requirements.txt` 10 | 11 | ...and then run the appropriate Notebook: 12 | 13 | `jupyter-notebook notebooks/` 14 | 15 | If you want to create a new Notebook tutorial for inclusion in the doc set, see `notebooks/README.md`. 16 | 17 | ## API reference 18 | 19 | If you want to build the documentation locally: 20 | 21 | 1) Make sure you have a Python 3.7 virtualenv and `gpflux` is installed as per the instructions in `../README.md` 22 | 23 | 2) In the `docs` directory, install dependencies: 24 | 25 | `pip install -r docs_requirements.txt` 26 | 27 | If pandoc does not install via pip, or step 2) fails with a 'Pandoc' error, download and install Pandoc separately from `https://github.com/jgm/pandoc/releases/` (e.g. `pandoc-<version>-amd64.deb` for Ubuntu), and try running step 2) again.
28 | 29 | 3) Compile the documentation: 30 | 31 | `make html` 32 | 33 | 4) Run a web server: 34 | 35 | `python -m http.server` 36 | 37 | 5) Check documentation locally by opening (in a browser): 38 | 39 | [`http://localhost:8000/_build/html/`](http://localhost:8000/_build/html/) 40 | 41 | 42 | 43 | ## Intersphinx and TensorFlow/Probability 44 | 45 | TensorFlow and TensorFlow Probability have their own custom API docs system. We have manually produced (web-scraped) intersphinx inventories to be able to cross-reference tf and tfp classes and functions. 46 | They are now hosted on [the GPflow/tensorflow-intersphinx GitHub](https://github.com/GPflow/tensorflow-intersphinx/). 47 | 48 | TF/P provides a lot of aliases for accessing objects. However, the intersphinx inventories only contain some of them. You can find the correct way of referencing by looking through the list generated by 49 | `python -m sphinx.ext.intersphinx tf2_py_objects.inv` 50 | or 51 | `python -m sphinx.ext.intersphinx tfp_py_objects.inv` 52 | Note that this requires you to have local copies, e.g. downloaded from GitHub ([TF](https://github.com/GPflow/tensorflow-intersphinx/raw/master/tf2_py_objects.inv)/[TFP](https://github.com/GPflow/tensorflow-intersphinx/raw/master/tfp_py_objects.inv)).
53 | -------------------------------------------------------------------------------- /docs/_static/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/secondmind-labs/GPflux/b3ad682ab95bdfbee999b945b6135f01b94f0eb6/docs/_static/logo.png -------------------------------------------------------------------------------- /docs/_static/pydata-custom.css: -------------------------------------------------------------------------------- 1 | /*Tweaks to the Pydata default CSS */ 2 | 3 | /*No yellow background highlight when targeted by summary tables */ 4 | /*dt:target { background-color: #f8f8f8; border: 1px solid black, }*/ 5 | dt:target { background: transparent;} 6 | /*More space between H1s and signatures in API reference*/ 7 | h1 { margin-bottom: 40px; } 8 | 9 | /*No line underneath summary table headings (clashes with line above first member)*/ 10 | p.rubric { border-bottom: 0px; } 11 | -------------------------------------------------------------------------------- /docs/_static/readthedocs-custom.css: -------------------------------------------------------------------------------- 1 | /* Override nav bar color */ 2 | /*.wy-side-nav-search { 3 | background-color: #fbfbb6; 4 | } 5 | .wy-side-nav-search > a { 6 | color: #b2355c 7 | }*/ 8 | 9 | /* Override text bar color */ 10 | /*.caption-text { 11 | color: #b2355c; 12 | }*/ 13 | 14 | /* Override code signature colour */ 15 | /*.rst-content dl:not(.docutils) dt { 16 | background: #fbfbb6; 17 | color: #b2355c; 18 | border-top: solid 3px #b2355c; 19 | }*/ 20 | 21 | /* Override hyperlink colour */ 22 | /* a { 23 | color: #b2355c; 24 | }*/ 25 | 26 | /* Make content width wider*/ 27 | .wy-nav-content { 28 | max-width: 60% !important; 29 | } 30 | 31 | -------------------------------------------------------------------------------- /docs/_static/single_layer_fit.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/secondmind-labs/GPflux/b3ad682ab95bdfbee999b945b6135f01b94f0eb6/docs/_static/single_layer_fit.png -------------------------------------------------------------------------------- /docs/_static/two_layer_fit.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/secondmind-labs/GPflux/b3ad682ab95bdfbee999b945b6135f01b94f0eb6/docs/_static/two_layer_fit.png -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | .. 2 | DO NOT REMOVE! This file contains the `autosummary` directive, without which API documentation won't 3 | get extracted from docstrings by the `sphinx.ext.autosummary` engine. It is hidden (not declared in 4 | any toctree) to remove an unnecessary intermediate page; index.rst instead points directly to the 5 | package page. DO NOT REMOVE! 6 | 7 | API reference 8 | ============= 9 | 10 | .. autosummary:: 11 | :toctree: _autosummary 12 | :template: custom-module-template.rst 13 | :recursive: 14 | 15 | gpflux 16 | 17 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 
#
import os
import sys
import warnings  # NOTE(review): appears unused in this file — confirm before removing

# Point to root source dir for API doc, relative to this file:
sys.path.insert(0, os.path.abspath(".."))

# -- Project information -----------------------------------------------------

project = "GPflux"
copyright = (
    "Copyright 2021 The GPflux Contributors\n"
    "\n"
    "Licensed under the Apache License, Version 2.0\n"
)
author = "The GPflux Contributors"

# The full version, including alpha/beta/rc tags
release = "0.1.0"

# -- General configuration ---------------------------------------------------

default_role = "any"  # try and turn all `` into links
add_module_names = False  # Remove namespaces from class/method signatures


# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.viewcode",  # Add a link to the Python source code for classes, functions etc.
    "sphinx.ext.mathjax",  # Render math via Javascript
    "IPython.sphinxext.ipython_console_highlighting",  # syntax-highlighting ipython interactive sessions
]


### Automatic API doc generation
extensions.append("autoapi.extension")
autoapi_dirs = ["../gpflux"]
autoapi_add_toctree_entry = False
autoapi_python_class_content = "both"
autoapi_options = [
    "members",
    "private-members",
    "special-members",
    "imported-members",
    "show-inheritance",
]
# autoapi_member_order = "bysource"  # default
# autoapi_member_order = "groupwise"  # by type then alphabetically


### intersphinx: Link to other project's documentation (see mapping below)
extensions.append("sphinx.ext.intersphinx")
intersphinx_mapping = {
    "numpy": ("https://numpy.org/doc/stable/", None),
    "python": ("https://docs.python.org/3/", None),
    # TF/TFP have no native intersphinx inventories; these are web-scraped
    # ones maintained in the GPflow/tensorflow-intersphinx repository.
    "tensorflow": (
        "https://www.tensorflow.org/api_docs/python",
        "https://github.com/GPflow/tensorflow-intersphinx/raw/master/tf2_py_objects.inv"
    ),
    "tensorflow_probability": (
        "https://www.tensorflow.org/probability/api_docs/python",
        "https://github.com/GPflow/tensorflow-intersphinx/raw/master/tfp_py_objects.inv"
    ),
    "gpflow": ("https://gpflow.readthedocs.io/en/master/", None),
}

### todo: to-do notes
extensions.append("sphinx.ext.todo")
todo_include_todos = True  # pre-1.0, it's worth actually including todos in the docs

### nbsphinx: Integrate Jupyter Notebooks and Sphinx
extensions.append("nbsphinx")
nbsphinx_allow_errors = True  # Continue through Jupyter errors

### sphinxcontrib-bibtex
extensions.append("sphinxcontrib.bibtex")
bibtex_bibfiles = ["refs.bib"]


# Add any paths that contain Jinja2 templates here, relative to this directory.
templates_path = ["_templates"]

# https://sphinxguide.readthedocs.io/en/latest/sphinx_basics/settings.html
# -- Options for LaTeX -----------------------------------------------------
latex_elements = {
    "preamble": r"""
\usepackage{amsmath,amsfonts,amssymb,amsthm}
""",
}

# -- Options for HTML output -------------------------------------------------

# Pydata theme
html_theme = "pydata_sphinx_theme"
html_logo = "_static/logo.png"
html_css_files = ["pydata-custom.css"]

# theme-specific options. see theme docs for more info
html_theme_options = {
    "show_prev_next": False,
    "github_url": "https://github.com/secondmind-labs/gpflux",
}

# If True, show link to rst source on rendered HTML pages
html_show_sourcelink = False  # Remove 'view source code' from top of page (for html, not python)

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
124 | html_static_path = ["_static"] 125 | -------------------------------------------------------------------------------- /docs/docs_requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | ipython 3 | pandoc 4 | nbsphinx 5 | jupytext 6 | pydata-sphinx-theme 7 | sphinxcontrib-bibtex 8 | sphinx-autoapi 9 | -------------------------------------------------------------------------------- /docs/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/secondmind-labs/GPflux/b3ad682ab95bdfbee999b945b6135f01b94f0eb6/docs/favicon.ico -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. Copyright 2021 The GPflux Contributors 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | 15 | .. 16 | Note: Items in the toctree form the top-level navigation. 17 | 18 | .. toctree:: 19 | :hidden: 20 | 21 | GPflux 22 | Benchmarks 23 | Tutorials 24 | API Reference 25 | 26 | 27 | Welcome to GPflux 28 | ================== 29 | 30 | GPflux is a research toolbox dedicated to Deep Gaussian processes (DGP) :cite:p:`damianou2013deep`, the hierarchical extension of Gaussian processes (GP) created by feeding the output of one GP into the next. 
31 | 32 | GPflux uses the mathematical building blocks from `GPflow `_ :cite:p:`gpflow2020` and marries these with the powerful layered deep learning API provided by `Keras `_. 33 | This combination leads to a framework that can be used for: 34 | 35 | - researching (deep) Gaussian process models (e.g., :cite:p:`salimbeni2017doubly, dutordoir2018cde, salimbeni2019iwvi`), and 36 | - building, training, evaluating and deploying (deep) Gaussian processes in a modern way, making use of the tools developed by the deep learning community. 37 | 38 | 39 | Getting started 40 | --------------- 41 | 42 | We have provided multiple `Tutorials ` showing the basic functionality of the toolbox, and have a comprehensive `API Reference `. 43 | 44 | As a quick teaser, here's a snippet from the `intro notebook ` that demonstrates how a two-layer DGP is built and trained with GPflux for a simple one-dimensional dataset: 45 | 46 | 47 | .. code-block:: python 48 | 49 | # Layer 1 50 | Z = np.linspace(X.min(), X.max(), X.shape[0] // 2).reshape(-1, 1) 51 | kernel1 = gpflow.kernels.SquaredExponential() 52 | inducing_variable1 = gpflow.inducing_variables.InducingPoints(Z.copy()) 53 | gp_layer1 = gpflux.layers.GPLayer( 54 | kernel1, inducing_variable1, num_data=X.shape[0], num_latent_gps=X.shape[1] 55 | ) 56 | 57 | # Layer 2 58 | kernel2 = gpflow.kernels.SquaredExponential() 59 | inducing_variable2 = gpflow.inducing_variables.InducingPoints(Z.copy()) 60 | gp_layer2 = gpflux.layers.GPLayer( 61 | kernel2, 62 | inducing_variable2, 63 | num_data=X.shape[0], 64 | num_latent_gps=X.shape[1], 65 | mean_function=gpflow.mean_functions.Zero(), 66 | ) 67 | 68 | # Initialise likelihood and build model 69 | likelihood_layer = gpflux.layers.LikelihoodLayer(gpflow.likelihoods.Gaussian(0.1)) 70 | two_layer_dgp = gpflux.models.DeepGP([gp_layer1, gp_layer2], likelihood_layer) 71 | 72 | # Compile and fit 73 | model = two_layer_dgp.as_training_model() 74 | model.compile(gpflow.keras.tf_keras.optimizers.Adam(0.01)) 
75 | history = model.fit({"inputs": X, "targets": Y}, epochs=int(1e3), verbose=0) 76 | 77 | The model described above produces the fit shown in Fig 1. For comparison, in Fig. 2 we show the fit on the same dataset by a vanilla single-layer GP model. 78 | 79 | .. list-table:: 80 | 81 | * - .. figure:: ./_static/two_layer_fit.png 82 | :alt: Fit on the Motorcycle dataset of a two-layer deep Gaussian process. 83 | :width: 90% 84 | 85 | Fig 1. Two-Layer Deep GP 86 | 87 | - .. figure:: ./_static/single_layer_fit.png 88 | :alt: Fit on the Motorcycle dataset of a single-layer Gaussian process. 89 | :width: 90% 90 | 91 | Fig 2. Single-Layer GP 92 | 93 | 94 | Installation 95 | ------------ 96 | 97 | Latest release from PyPI 98 | ^^^^^^^^^^^^^^^^^^^^^^^^ 99 | 100 | To install GPflux using the latest release from PyPI, run 101 | 102 | .. code:: 103 | 104 | $ pip install gpflux 105 | 106 | The library supports Python 3.7 onwards, and uses `semantic versioning `_. 107 | 108 | Latest development release from GitHub 109 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ 110 | 111 | In a check-out of the develop branch of the `GPflux GitHub repository `_, run 112 | 113 | .. code:: 114 | 115 | $ pip install -e . 116 | 117 | 118 | Join the community 119 | ------------------ 120 | 121 | GPflux is an open source project. We welcome contributions. To submit a pull request, file a bug report, or make a feature request, see the `contribution guidelines `_. 122 | 123 | We have a public `Slack workspace `_. Please use this invite link if you'd like to join, whether to ask short informal questions or to be involved in the discussion and future development of GPflux. 124 | 125 | 126 | Bibliography 127 | ------------ 128 | 129 | .. 
bibliography:: 130 | :all: 131 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/notebooks/README.md: -------------------------------------------------------------------------------- 1 | # Creating a new Jupyter Notebook 2 | 3 | To make Notebooks easier to work with, they are stored as `.py` files in the 4 | `docs/notebooks` directory within the GPflux repository, and only converted to 5 | actual Notebook files (`.ipynb`) when needed (for example, by Sphinx when 6 | building Readthedocs HTML). Please do not commit `.ipynb` files to the 7 | repository. 
8 | 9 | ## Creating a new Notebook as a .py file 10 | 11 | Insert the following at the top of the file: 12 | 13 | # -*- coding: utf-8 -*- 14 | # --- 15 | # jupyter: 16 | # jupytext: 17 | # cell_markers: '"""' 18 | # formats: ipynb,py:percent 19 | # text_representation: 20 | # extension: .py 21 | # format_name: percent 22 | # format_version: '1.3' 23 | # jupytext_version: 1.3.3 24 | # kernelspec: 25 | # display_name: Python 3 26 | # language: python 27 | # name: python3 28 | # --- 29 | 30 | Make sure a text (i.e., comment) cell is encapsulated as follows: 31 | 32 | # %% [markdown] 33 | """ 34 | 35 | """ 36 | 37 | Make sure a code cell is encapsulated as follows: 38 | 39 | # %% 40 | # Optional code comment 41 | def YourCodeGoesHere(): 42 | pass 43 | 44 | To test it as a Notebook, make sure Jupytext is installed (`pip install jupytext`) and run: 45 | 46 | jupytext --to notebook your_notebook.py 47 | 48 | This creates `your_notebook.ipynb`. Run `jupyter-notebook your_notebook.ipynb` 49 | in the normal way to make sure it formats and executes correctly in the IPython environment. 50 | 51 | When ready, commit `your_notebook.py`. You can delete `your_notebook.ipynb`. 52 | 53 | ## Creating a new Notebook as a regular Notebook 54 | 55 | You can run `jupyter-notebook` and press the **New** button to create a 56 | Notebook using the Jupyter UI. When ready, save and exit, and run the following 57 | command to convert the `your_notebook.ipynb` file to `your_notebook.py` (with 58 | text cells formatted as multi-line comments): 59 | 60 | jupytext --update-metadata '{"jupytext": {"cell_markers": "\"\"\""}}' --to py:percent your_notebook.ipynb 61 | 62 | When ready, commit `your_notebook.py`. You can delete `your_notebook.ipynb`. 63 | 64 | ## Including your Notebook in GPflux's Sphinx-built documentation 65 | 66 | Within the GPflux repository, open [docs/tutorials.rst](../tutorials.rst) and 67 | insert a TOC entry for the Notebook, for example: 68 | 69 | .. toctree:: 70 | :maxdepth: 1 71 | 72 | notebooks/keras_integration 73 | notebooks/your_notebook 74 | 75 | Then, build the docs as detailed in [docs/README.md](../README.md). 
76 | -------------------------------------------------------------------------------- /docs/notebooks/benchmarks.py: -------------------------------------------------------------------------------- 1 | # --- 2 | # jupyter: 3 | # jupytext: 4 | # formats: ipynb,py:percent 5 | # text_representation: 6 | # extension: .py 7 | # format_name: percent 8 | # format_version: '1.3' 9 | # jupytext_version: 1.3.2 10 | # kernelspec: 11 | # display_name: Python 3 12 | # language: python 13 | # name: python3 14 | # --- 15 | 16 | # %% [markdown] 17 | """ 18 | # Benchmarks 19 | 20 | We benchmark GPflux' Deep GP on several UCI datasets. 21 | The code to run the experiments can be found in `benchmarking/main.py`. The results are stored in `benchmarking/runs/*.json`. In this script we aggregate and plot the outcomes. 22 | """ 23 | 24 | # %% {"nbsphinx": "hidden"} 25 | import glob 26 | import json 27 | 28 | import numpy as np 29 | import pandas as pd 30 | 31 | # %% {"nbsphinx": "hidden"} 32 | LOGS = "../../benchmarking/runs/*.json" 33 | 34 | data = [] 35 | for path in glob.glob(LOGS): 36 | with open(path) as json_file: 37 | data.append(json.load(json_file)) 38 | 39 | df = pd.DataFrame.from_records(data) 40 | df = df.rename(columns={"model_name": "model"}) 41 | 42 | # %% {"nbsphinx": "hidden"} 43 | table = df.groupby(["dataset", "model"]).agg( 44 | { 45 | "split": "count", 46 | **{metric: ["mean", "std"] for metric in ["mse", "nlpd"]}, 47 | } 48 | ) 49 | # %% [markdown] 50 | """ 51 | We report the mean and std. dev. of the MSE and Negative Log Predictive Density (NLPD) measured by running the experiment on 5 different splits. We use 90% of the data for training and the remaining 10% for testing. The output is normalised to have zero mean and unit variance. 
52 | """ 53 | # %% 54 | table 55 | -------------------------------------------------------------------------------- /docs/notebooks/data/motor.csv: -------------------------------------------------------------------------------- 1 | "","times","accel","strata","v" 2 | "1",2.4,0,1,3.7 3 | "2",2.6,-1.3,1,3.7 4 | "3",3.2,-2.7,1,3.7 5 | "4",3.6,0,1,3.7 6 | "5",4,-2.7,1,3.7 7 | "6",6.2,-2.7,1,3.7 8 | "7",6.6,-2.7,1,3.7 9 | "8",6.8,-1.3,1,3.7 10 | "9",7.8,-2.7,1,3.7 11 | "10",8.2,-2.7,1,3.7 12 | "11",8.8,-1.3,1,3.7 13 | "13",9.6,-2.7,1,3.7 14 | "14",10,-2.7,1,3.7 15 | "15",10.2,-5.4,1,3.7 16 | "16",10.6,-2.7,1,3.7 17 | "17",11,-5.4,1,3.7 18 | "18",11.4,0,1,3.7 19 | "19",13.2,-2.7,2,607 20 | "20",13.6,-2.7,2,607 21 | "21",13.8,0,2,607 22 | "22",14.6,-13.3,2,607 23 | "28",14.8,-2.7,2,607 24 | "29",15.4,-22.8,2,607 25 | "33",15.6,-40.2,2,607 26 | "35",15.8,-21.5,2,607 27 | "37",16,-42.9,2,607 28 | "39",16.2,-21.5,2,607 29 | "42",16.4,-5.4,2,607 30 | "44",16.6,-59,2,607 31 | "45",16.8,-71,2,607 32 | "48",17.6,-37.5,2,607 33 | "52",17.8,-99.1,2,607 34 | "54",18.6,-112.5,2,607 35 | "56",19.2,-123.1,2,607 36 | "57",19.4,-85.6,2,607 37 | "59",19.6,-127.2,2,607 38 | "60",20.2,-123.1,2,607 39 | "61",20.4,-117.9,2,607 40 | "62",21.2,-134,2,607 41 | "63",21.4,-101.9,2,607 42 | "64",21.8,-108.4,2,607 43 | "65",22,-123.1,2,607 44 | "66",23.2,-123.1,2,607 45 | "67",23.4,-128.5,2,607 46 | "68",24,-112.5,2,607 47 | "69",24.2,-95.1,2,607 48 | "71",24.6,-53.5,2,607 49 | "72",25,-64.4,2,607 50 | "74",25.4,-72.3,2,607 51 | "76",25.6,-26.8,2,607 52 | "77",26,-5.4,2,607 53 | "78",26.2,-107.1,2,607 54 | "80",26.4,-65.6,2,607 55 | "81",27,-16,2,607 56 | "82",27.2,-45.6,2,607 57 | "85",27.6,4,2,607 58 | "86",28.2,12,2,607 59 | "87",28.4,-21.5,2,607 60 | "89",28.6,46.9,2,607 61 | "90",29.4,-17.4,2,607 62 | "91",30.2,36.2,2,607 63 | "92",31,75,2,607 64 | "93",31.2,8.1,2,607 65 | "94",32,54.9,2,607 66 | "96",32.8,46.9,2,607 67 | "97",33.4,16,2,607 68 | "98",33.8,45.6,2,607 69 | "99",34.4,1.3,2,607 
70 | "100",34.8,75,2,607 71 | "101",35.2,-16,2,607 72 | "103",35.4,69.6,2,607 73 | "104",35.6,34.8,2,607 74 | "106",36.2,-37.5,2,607 75 | "108",38,46.9,2,607 76 | "110",39.2,5.4,2,607 77 | "111",39.4,-1.3,2,607 78 | "112",40,-21.5,2,607 79 | "113",40.4,-13.3,2,607 80 | "114",41.6,30.8,3,138 81 | "116",42.4,29.4,3,138 82 | "117",42.8,0,3,138 83 | "119",43,14.7,3,138 84 | "120",44,-1.3,3,138 85 | "121",44.4,0,3,138 86 | "122",45,10.7,3,138 87 | "123",46.6,10.7,3,138 88 | "124",47.8,-26.8,3,138 89 | "126",48.8,-13.3,3,138 90 | "127",50.6,0,3,138 91 | "128",52,10.7,3,138 92 | "129",53.2,-14.7,3,138 93 | "130",55,-2.7,3,138 94 | "132",55.4,-2.7,3,138 95 | "133",57.6,10.7,3,138 96 | -------------------------------------------------------------------------------- /docs/notebooks/deep_gp_samples.py: -------------------------------------------------------------------------------- 1 | # --- 2 | # jupyter: 3 | # jupytext: 4 | # formats: ipynb,py:percent 5 | # text_representation: 6 | # extension: .py 7 | # format_name: percent 8 | # format_version: '1.3' 9 | # jupytext_version: 1.10.3 10 | # kernelspec: 11 | # display_name: Python 3 12 | # language: python 13 | # name: python3 14 | # --- 15 | 16 | # %% [markdown] 17 | """ 18 | # Deep GP samples 19 | 20 | To help develop a more intuitive understanding of deep Gaussian processes, in this notebook we show how to generate a sample from the full deep GP, by propagating a sample through the layers. 
21 | """ 22 | # %% 23 | import matplotlib.pyplot as plt 24 | import numpy as np 25 | import tensorflow as tf 26 | 27 | import gpflow 28 | from gpflux.helpers import construct_basic_kernel, construct_basic_inducing_variables 29 | from gpflux.layers import GPLayer 30 | from gpflux.experiment_support.plotting import plot_layer 31 | 32 | tf.random.set_seed(42) 33 | 34 | # %% 35 | num_data = 200 36 | D = 1 37 | a, b = 0, 1 38 | X = np.linspace(a, b, num_data).reshape(-1, 1) 39 | 40 | # %% [markdown] 41 | """ 42 | ## Constructing the layers 43 | 44 | Note that we give the `full_cov=True` argument to `GPLayer` so that we obtain correlated samples. 45 | We give the last layer a `gpflow.mean_functions.Zero` mean function (the GPflux default is an Identity mean function). 46 | """ 47 | 48 | # %% 49 | num_samples = 5 50 | 51 | # %% 52 | Z = X.copy() 53 | M = Z.shape[0] 54 | 55 | # Layer 1 56 | inducing_var1 = construct_basic_inducing_variables(M, D, D, share_variables=True, z_init=Z.copy()) 57 | kernel1 = construct_basic_kernel( 58 | gpflow.kernels.SquaredExponential(lengthscales=0.15), 59 | output_dim=D, 60 | share_hyperparams=True, 61 | ) 62 | layer1 = GPLayer(kernel1, inducing_var1, num_data, full_cov=True, num_samples=num_samples) 63 | 64 | # Layer 2 65 | inducing_var2 = construct_basic_inducing_variables(M, D, D, share_variables=True, z_init=Z.copy()) 66 | kernel2 = construct_basic_kernel( 67 | gpflow.kernels.SquaredExponential(lengthscales=0.8, variance=0.1), 68 | output_dim=D, 69 | share_hyperparams=True, 70 | ) 71 | layer2 = GPLayer(kernel2, inducing_var2, num_data, full_cov=True, num_samples=num_samples) 72 | 73 | # Layer 3 74 | inducing_var3 = construct_basic_inducing_variables(M, D, D, share_variables=True, z_init=Z.copy()) 75 | kernel3 = construct_basic_kernel( 76 | gpflow.kernels.SquaredExponential(lengthscales=0.3, variance=0.1), 77 | output_dim=D, 78 | share_hyperparams=True, 79 | ) 80 | layer3 = GPLayer( 81 | kernel3, 82 | inducing_var3, 83 | num_data, 84 | 
full_cov=True, 85 | num_samples=num_samples, 86 | mean_function=gpflow.mean_functions.Zero(), 87 | ) 88 | 89 | gp_layers = [layer1, layer2, layer3] 90 | 91 | # %% [markdown] 92 | """ 93 | ## Propagating samples through the layers 94 | """ 95 | 96 | # %% 97 | layer_input = X 98 | 99 | # %% 100 | means, covs, samples = [], [], [] 101 | 102 | for layer in gp_layers: 103 | layer_output = layer(layer_input) 104 | 105 | mean = layer_output.mean() 106 | cov = layer_output.covariance() 107 | sample = tf.convert_to_tensor(layer_output) # generates num_samples samples... 108 | 109 | layer_input = sample[0] # for the next layer 110 | 111 | means.append(mean.numpy().T) # transpose to go from [1, N] to [N, 1] 112 | covs.append(cov.numpy()) 113 | samples.append(sample.numpy()) 114 | 115 | # %% [markdown] 116 | """ 117 | ## Visualising samples 118 | 119 | From top to bottom we plot the input to a layer, the covariance of outputs of that layer, and samples from the layer's output. 120 | """ 121 | 122 | # %% 123 | num_layers = len(gp_layers) 124 | fig, axes = plt.subplots(3, num_layers, figsize=(num_layers * 3.33, 10)) 125 | 126 | for i in range(num_layers): 127 | layer_input = X if i == 0 else samples[i - 1][0] 128 | plot_layer(X, layer_input, means[i], covs[i], samples[i], i, axes[:, i]) 129 | -------------------------------------------------------------------------------- /docs/notebooks/gpflux_with_keras_layers.py: -------------------------------------------------------------------------------- 1 | # --- 2 | # jupyter: 3 | # jupytext: 4 | # cell_markers: '"""' 5 | # text_representation: 6 | # extension: .py 7 | # format_name: percent 8 | # format_version: '1.3' 9 | # jupytext_version: 1.4.2 10 | # kernelspec: 11 | # display_name: Python 3 12 | # language: python 13 | # name: python3 14 | # --- 15 | 16 | # %% [markdown] 17 | """ 18 | # Hybrid Deep GP models: combining GP and Neural Network layers 19 | 20 | In this notebook we show how to combine `gpflux.layers.GPLayer` layers 
with plain Keras neural network layers. This allows one to build hybrid deep GP models. Compared to the other tutorials, we are also going to use Keras's `Sequential` model to build our hierarchical model and use a `gpflux.losses.LikelihoodLoss` instead of a `gpflux.layers.LikelihoodLayer`. 21 | """ 22 | 23 | # %% 24 | import numpy as np 25 | import tensorflow as tf 26 | import matplotlib.pyplot as plt 27 | 28 | import gpflow 29 | import gpflux 30 | 31 | from gpflow.config import default_float 32 | from gpflow.keras import tf_keras 33 | 34 | 35 | # %% [markdown] 36 | """ 37 | ## Load Snelson dataset 38 | 39 | We use a simple one-dimensional dataset to allow for easy plotting. To help training we normalize the input features. 40 | """ 41 | 42 | # %% 43 | d = np.load("../../tests/snelson1d.npz") 44 | X, Y = data = d["X"], d["Y"] 45 | X = (X - X.mean()) / X.std() 46 | num_data, input_dim = X.shape 47 | 48 | # %% [markdown] 49 | """ 50 | ## Initialize the GP Layer 51 | 52 | As per usual we create a one-dimensional `gpflux.layers.GPLayer` with a simple `SquaredExponential` kernel and `InducingPoints` inducing variable: 53 | """ 54 | 55 | # %% 56 | num_data = len(X) 57 | num_inducing = 10 58 | output_dim = Y.shape[1] 59 | 60 | kernel = gpflow.kernels.SquaredExponential() 61 | inducing_variable = gpflow.inducing_variables.InducingPoints( 62 | np.linspace(X.min(), X.max(), num_inducing).reshape(-1, 1) 63 | ) 64 | gp_layer = gpflux.layers.GPLayer( 65 | kernel, inducing_variable, num_data=num_data, num_latent_gps=output_dim 66 | ) 67 | 68 | # %% [markdown] 69 | """ 70 | ## Sequential Keras model with GP and Neural net layers 71 | 72 | We construct a model that consists of three `tf.keras.layers.Dense` layers and a GP. The first two Dense layers are configured to have 100 units and use a ReLU non-linearity. The last neural network layer reduces the dimension to one and does not utilise a non-linearity. 
We can interpret these three neural network layers as performing non-linear feature warping. The final layer in the model is the GP we defined above. 73 | """ 74 | 75 | # %% 76 | likelihood = gpflow.likelihoods.Gaussian(0.1) 77 | 78 | # So that Keras can track the likelihood variance, we need to provide the likelihood as part of a "dummy" layer: 79 | likelihood_container = gpflux.layers.TrackableLayer() 80 | likelihood_container.likelihood = likelihood 81 | 82 | model = tf_keras.Sequential( 83 | [ 84 | tf_keras.layers.Dense(100, activation="relu"), 85 | tf_keras.layers.Dense(100, activation="relu"), 86 | tf_keras.layers.Dense(1, activation="linear"), 87 | gp_layer, 88 | likelihood_container, # no-op, for discovering trainable likelihood parameters 89 | ] 90 | ) 91 | loss = gpflux.losses.LikelihoodLoss(likelihood) 92 | 93 | # %% [markdown] 94 | """ 95 | We compile our model by specifying the loss and the optimizer to use. After this is done, we fit the data and plot the trajectory of the loss: 96 | """ 97 | 98 | # %% 99 | model.compile(loss=loss, optimizer="adam") 100 | hist = model.fit(X, Y, epochs=500, verbose=0) 101 | plt.plot(hist.history["loss"]) 102 | 103 | # %% [markdown] 104 | """ 105 | We can now inspect the final model by plotting its predictions. Note that `model(X_test)` now returns the output of the final `GPLayer` and *not* a `LikelihoodLayer`. The output of a `GPLayer` is a TFP distribution with a `mean()` and `variance()`. 
106 | """ 107 | 108 | 109 | # %% 110 | def plot(model, X, Y, ax=None): 111 | if ax is None: 112 | fig, ax = plt.subplots() 113 | 114 | x_margin = 2.0 115 | N_test = 100 116 | X_test = np.linspace(X.min() - x_margin, X.max() + x_margin, N_test).reshape(-1, 1) 117 | f_distribution = model(X_test) 118 | 119 | mean = f_distribution.mean().numpy().squeeze() 120 | var = f_distribution.variance().numpy().squeeze() + model.layers[-1].likelihood.variance.numpy() 121 | X_test = X_test.squeeze() 122 | lower = mean - 2 * np.sqrt(var) 123 | upper = mean + 2 * np.sqrt(var) 124 | 125 | ax.set_ylim(Y.min() - 0.5, Y.max() + 0.5) 126 | ax.plot(X, Y, "kx", alpha=0.5) 127 | ax.plot(X_test, mean, "C1") 128 | 129 | ax.fill_between(X_test, lower, upper, color="C1", alpha=0.3) 130 | 131 | 132 | plot(model, X, Y) 133 | 134 | # %% 135 | gpflow.utilities.print_summary(model, fmt="notebook") 136 | -------------------------------------------------------------------------------- /docs/notebooks/keras_integration.py: -------------------------------------------------------------------------------- 1 | # --- 2 | # jupyter: 3 | # jupytext: 4 | # formats: ipynb,py:percent 5 | # text_representation: 6 | # extension: .py 7 | # format_name: percent 8 | # format_version: '1.3' 9 | # jupytext_version: 1.10.0 10 | # kernelspec: 11 | # display_name: Python 3 12 | # language: python 13 | # name: python3 14 | # --- 15 | 16 | # %% [markdown] 17 | """ 18 | # Keras integration 19 | 20 | TODO: Some explanation... 
21 | """ 22 | # %% 23 | import numpy as np 24 | import tensorflow as tf 25 | import gpflow 26 | import gpflux 27 | from gpflow.ci_utils import reduce_in_tests 28 | from gpflow.keras import tf_keras 29 | 30 | import matplotlib.pyplot as plt 31 | 32 | 33 | # %% 34 | # %matplotlib inline 35 | 36 | # %% 37 | d = np.load("../../tests/snelson1d.npz") 38 | X, Y = d["X"], d["Y"] 39 | num_data, input_dim = X.shape 40 | _, output_dim = Y.shape 41 | 42 | # %% 43 | plt.figure() 44 | plt.plot(X, Y, ".") 45 | plt.show() 46 | 47 | 48 | # %% 49 | def create_layers(): 50 | num_inducing = 13 51 | hidden_dim = 1 52 | Z = np.linspace(X.min() - 0.1, X.max() + 0.1, num_inducing).reshape(-1, 1) 53 | 54 | layer1 = gpflux.helpers.construct_gp_layer( 55 | num_data, num_inducing, input_dim, hidden_dim, z_init=Z.copy() 56 | ) 57 | layer1.mean_function = gpflow.mean_functions.Identity() # TODO: pass layer_type instead 58 | layer1.q_sqrt.assign(layer1.q_sqrt * 0.01) 59 | 60 | layer2 = gpflux.helpers.construct_gp_layer( 61 | num_data, num_inducing, hidden_dim, output_dim, z_init=Z.copy() 62 | ) 63 | 64 | likelihood_layer = gpflux.layers.LikelihoodLayer(gpflow.likelihoods.Gaussian(0.01)) 65 | 66 | return layer1, layer2, likelihood_layer 67 | 68 | 69 | # %% 70 | def create_model(model_class): 71 | """ 72 | We pass in `model_class` to make it easy to use 73 | `gpflux.optimization.NatGradModel`, which is required for combining 74 | GPflow's `NaturalGradient` optimizer with Keras. `model_class` needs to be 75 | a constructor that has the same semantics as `tf.keras.Model.__init__`. 
76 | """ 77 | layer1, layer2, likelihood_layer = create_layers() 78 | dgp = gpflux.models.DeepGP([layer1, layer2], likelihood_layer, default_model_class=model_class) 79 | return dgp 80 | 81 | 82 | # %% 83 | batch_size = 2 84 | num_epochs = reduce_in_tests(200) 85 | 86 | # %% 87 | dgp = create_model(tf_keras.Model) 88 | 89 | callbacks = [ 90 | tf_keras.callbacks.ReduceLROnPlateau( 91 | monitor="loss", 92 | patience=5, 93 | factor=0.95, 94 | verbose=1, 95 | min_lr=1e-6, 96 | ) 97 | ] 98 | 99 | dgp_train = dgp.as_training_model() 100 | dgp_train.compile(tf_keras.optimizers.Adam(learning_rate=0.1)) 101 | 102 | history = dgp_train.fit( 103 | {"inputs": X, "targets": Y}, batch_size=batch_size, epochs=num_epochs, callbacks=callbacks 104 | ) 105 | 106 | # %% 107 | dgp_natgrad = create_model(gpflux.optimization.NatGradModel) 108 | 109 | callbacks = [ 110 | tf_keras.callbacks.ReduceLROnPlateau( 111 | monitor="loss", 112 | patience=5, 113 | factor=0.95, 114 | verbose=1, 115 | min_lr=1e-6, 116 | ) 117 | ] 118 | 119 | dgp_natgrad_train = dgp_natgrad.as_training_model() 120 | dgp_natgrad_train.natgrad_layers = ( 121 | True # we want all (here two) GPLayer instances trained by NaturalGradient 122 | ) 123 | # alternatively, we could set `natgrad_layers` explicitly to the GPLayer instances inside the model 124 | dgp_natgrad_train.compile( 125 | [ 126 | gpflow.optimizers.NaturalGradient(gamma=0.05), 127 | gpflow.optimizers.NaturalGradient(gamma=0.05), 128 | tf_keras.optimizers.Adam(learning_rate=0.1), 129 | ] 130 | ) 131 | 132 | history_natgrad = dgp_natgrad_train.fit( 133 | {"inputs": X, "targets": Y}, batch_size=batch_size, epochs=num_epochs, callbacks=callbacks 134 | ) 135 | 136 | # %% 137 | dgp_test = dgp.as_prediction_model() 138 | res = dgp_test(X) 139 | 140 | # %% 141 | plt.plot(X, Y, "x") 142 | plt.errorbar(X.squeeze(), np.squeeze(res.y_mean), np.sqrt(np.squeeze(res.y_var)), ls="") 143 | plt.show() 144 | 145 | # %% 146 | plt.plot(history.history["loss"], label="Adam") 147 | 
plt.plot(history_natgrad.history["loss"], label="NatGrad") 148 | plt.show() 149 | -------------------------------------------------------------------------------- /docs/refs.bib: -------------------------------------------------------------------------------- 1 | @inproceedings{damianou2013deep, 2 | title={Deep {G}aussian processes}, 3 | author={Damianou, Andreas and Lawrence, Neil D}, 4 | booktitle={Artificial intelligence and statistics}, 5 | year={2013} 6 | } 7 | 8 | @inproceedings{salimbeni2017doubly, 9 | title = {Doubly stochastic variational inference for deep {G}aussian processes}, 10 | author = {Salimbeni, Hugh and Deisenroth, Marc}, 11 | booktitle = {Advances in Neural Information Processing Systems}, 12 | year = {2017} 13 | } 14 | 15 | @inproceedings{dutordoir2018cde, 16 | title = {Gaussian process conditional density estimation}, 17 | author = {Dutordoir, Vincent and Salimbeni, Hugh and Hensman, James and Deisenroth, Marc}, 18 | booktitle = {Advances in Neural Information Processing Systems}, 19 | year = {2018} 20 | } 21 | 22 | @inproceedings{salimbeni2019iwvi, 23 | title = {Deep {G}aussian Processes with Importance-Weighted Variational Inference}, 24 | author = {Salimbeni, Hugh and Dutordoir, Vincent and Hensman, James and Deisenroth, Marc}, 25 | booktitle = {International Conference on Machine Learning}, 26 | year = {2019} 27 | } 28 | 29 | @article{gpflow2020, 30 | title={A framework for interdomain and multioutput {G}aussian processes}, 31 | author={van der Wilk, Mark and Dutordoir, Vincent and John, ST and Artemev, Artem and Adam, Vincent and Hensman, James}, 32 | journal={arXiv preprint arXiv:2003.01115}, 33 | year={2020} 34 | } 35 | 36 | @book{rasmussen, 37 | title = {Gaussian Processes for Machine Learning}, 38 | author = {Rasmussen, CE. 
and Williams, CKI.}, 39 | series = {Adaptive Computation and Machine Learning}, 40 | publisher = {MIT Press}, 41 | organization = {Max-Planck-Gesellschaft}, 42 | school = {Biologische Kybernetik}, 43 | address = {Cambridge, MA, USA}, 44 | month = jan, 45 | year = {2006}, 46 | month_numeric = {1} 47 | } 48 | 49 | @article{leibfried2020tutorial, 50 | title={A Tutorial on Sparse Gaussian Processes and Variational Inference}, 51 | author={Leibfried, Felix and Dutordoir, Vincent and John, ST and Durrande, Nicolas}, 52 | journal={arXiv preprint arXiv:2012.13962}, 53 | year={2020} 54 | } 55 | 56 | @inproceedings{rahimi2007random, 57 | title={Random Features for Large-Scale Kernel Machines}, 58 | author={Rahimi, Ali and Recht, Benjamin}, 59 | booktitle = {Advances in Neural Information Processing Systems}, 60 | year={2007} 61 | } 62 | 63 | @inproceedings{wilson2020efficiently, 64 | title={Efficiently sampling functions from {Gaussian} process posteriors}, 65 | author={Wilson, James and Borovitskiy, Viacheslav and Terenin, Alexander and Mostowsky, Peter and Deisenroth, Marc}, 66 | booktitle={International Conference on Machine Learning}, 67 | year={2020} 68 | } 69 | 70 | @inproceedings{sutherland2015error, 71 | title={On the error of random {F}ourier features}, 72 | author={Sutherland, Dougal J and Schneider, Jeff}, 73 | booktitle={Proceedings of the Thirty-First Conference on Uncertainty in Artificial Intelligence}, 74 | pages={862--871}, 75 | year={2015} 76 | } 77 | 78 | @article{yu2016orthogonal, 79 | title={Orthogonal random features}, 80 | author={Yu, Felix Xinnan X and Suresh, Ananda Theertha and Choromanski, Krzysztof M and Holtmann-Rice, Daniel N and Kumar, Sanjiv}, 81 | journal={Advances in Neural Information Processing Systems}, 82 | volume={29}, 83 | pages={1975--1983}, 84 | year={2016} 85 | } 86 | -------------------------------------------------------------------------------- /docs/tutorials.rst: 
-------------------------------------------------------------------------------- 1 | Tutorials 2 | ========= 3 | 4 | .. toctree:: 5 | :caption: Introductory 6 | :maxdepth: 1 7 | 8 | notebooks/intro 9 | notebooks/gpflux_features 10 | notebooks/deep_cde 11 | 12 | .. toctree:: 13 | :caption: Advanced 14 | :maxdepth: 1 15 | 16 | notebooks/deep_gp_samples 17 | notebooks/gpflux_with_keras_layers 18 | 19 | .. toctree:: 20 | :caption: Sampling 21 | :maxdepth: 1 22 | 23 | notebooks/efficient_sampling 24 | notebooks/weight_space_approximation 25 | notebooks/efficient_posterior_sampling 26 | -------------------------------------------------------------------------------- /gpflux/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | """ 17 | The library root. See :mod:`~gpflux.models.deep_gp.DeepGP` for the core Deep GP model, 18 | which is built out of different GP :mod:`~gpflux.layers`. 
19 | """ 20 | from gpflux import callbacks, encoders, helpers, layers, losses, models, optimization, sampling 21 | from gpflux.version import __version__ 22 | -------------------------------------------------------------------------------- /gpflux/architectures/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Pre-specified architectures 3 | """ 4 | from gpflux.architectures.constant_input_dim_deep_gp import Config, build_constant_input_dim_deep_gp 5 | -------------------------------------------------------------------------------- /gpflux/encoders/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | r""" 17 | Encoders are used by :class:`~gpflux.layers.LatentVariableLayer`\ s to 18 | parametrize the approximate posterior distribution over latent variables. 19 | """ 20 | from gpflux.encoders.directly_parameterized_encoder import DirectlyParameterizedNormalDiag 21 | -------------------------------------------------------------------------------- /gpflux/encoders/directly_parameterized_encoder.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 
3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | """ 17 | An "encoder" for parametrizing latent variables. Does not work with mini-batching. 18 | """ 19 | 20 | from typing import Any, Optional, Tuple 21 | 22 | import numpy as np 23 | import tensorflow as tf 24 | 25 | from gpflow import Parameter, default_float 26 | from gpflow.base import TensorType 27 | from gpflow.utilities.bijectors import positive 28 | 29 | from gpflux.exceptions import EncoderInitializationError 30 | from gpflux.layers import TrackableLayer 31 | 32 | 33 | class DirectlyParameterizedNormalDiag(TrackableLayer): 34 | """ 35 | This class implements direct parameterisation of the Normally-distributed 36 | posterior of the latent variables. A mean and standard deviation to parameterise a 37 | mean-field Normal distribution for each latent variable is created and learned 38 | during training. This type of encoder is not computationally very efficient for 39 | larger datasets, but can greatly simplify training, because no neural network is 40 | required to learn an amortized mapping. 41 | 42 | .. note:: 43 | No amortization is used; each datapoint element has an associated mean and 44 | standard deviation. This is not compatible with mini-batching. 45 | 46 | See :cite:t:`dutordoir2018cde` for a more thorough explanation of latent variable models 47 | and encoders. 
48 | """ 49 | 50 | means: Parameter 51 | """ 52 | Each row contains the value of the mean for a latent variable in the model. 53 | ``means`` is a tensor of rank two with the shape ``[N, W]`` because we have the same number 54 | of latent variables as datapoints, and each latent variable is ``W``-dimensional. 55 | Consequently, the mean for each latent variable is also ``W``-dimensional. 56 | """ 57 | 58 | stds: Parameter 59 | """ 60 | Each row contains the value of the diagonal covariances for a latent variable. 61 | ``stds`` is a tensor of rank two with the shape ``[N, W]`` because we have the same number 62 | of latent variables as datapoints, and each latent variable is ``W``-dimensional. 63 | Consequently, the diagonal elements of the square covariance matrix for each latent variable 64 | is also ``W``-dimensional. 65 | 66 | Initialised to ``1e-5 * np.ones((N, W))``. 67 | """ 68 | 69 | def __init__(self, num_data: int, latent_dim: int, means: Optional[np.ndarray] = None): 70 | """ 71 | Directly parameterise the posterior of the latent variables associated with 72 | each datapoint with a diagonal multivariate Normal distribution. Note that across 73 | latent variables we assume a mean-field approximation. 74 | 75 | See :cite:t:`dutordoir2018cde` for a more thorough explanation of 76 | latent variable models and encoders. 77 | 78 | :param num_data: The number of datapoints, ``N``. 79 | :param latent_dim: The dimensionality of the latent variable, ``W``. 80 | :param means: The initialisation of the mean of the latent variable posterior 81 | distribution. (see :attr:`means`). If `None` (the default setting), set to 82 | ``np.random.randn(N, W) * 0.01``; otherwise, ``means`` should be an array of 83 | rank two with the shape ``[N, W]``. 
84 | """ 85 | super().__init__() 86 | if means is None: 87 | # break the symmetry in the means: 88 | means = 0.01 * np.random.randn(num_data, latent_dim) 89 | else: 90 | if np.any(means.shape != (num_data, latent_dim)): 91 | raise EncoderInitializationError( 92 | f"means must have shape [num_data, latent_dim] = [{num_data}, {latent_dim}]; " 93 | f"got {means.shape} instead." 94 | ) 95 | 96 | # initialise distribution with a small standard deviation, as this has 97 | # been observed to help fitting: 98 | stds = 1e-5 * np.ones_like(means) 99 | 100 | # TODO: Rename to `scale` and `loc` to match tfp.distributions 101 | self.means = Parameter(means, dtype=default_float(), name="w_means") 102 | self.stds = Parameter(stds, transform=positive(), dtype=default_float(), name="w_stds") 103 | 104 | def call( 105 | self, inputs: Optional[TensorType] = None, *args: Any, **kwargs: Any 106 | ) -> Tuple[tf.Tensor, tf.Tensor]: 107 | """ 108 | Return the posterior's mean (see :attr:`means`) and standard deviation (see :attr:`stds`). 109 | """ 110 | if inputs is not None: 111 | tf.debugging.assert_shapes( 112 | [(self.means, ["N", "W"]), (self.stds, ["N", "W"]), (inputs, ["N", "D"])] 113 | ) 114 | return self.means, self.stds 115 | -------------------------------------------------------------------------------- /gpflux/exceptions.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
class GPLayerIncompatibilityException(Exception):
    r"""
    Signals a misconfigured :class:`~gpflux.layers.GPLayer`. Common triggers are:

    - An incompatible or wrongly-typed :class:`~gpflow.kernels.Kernel`,
      :class:`~gpflow.inducing_variables.InducingVariables` and/or
      :class:`~gpflow.mean_functions.MeanFunction`.
    - An incompatible number of latent GPs.
    """


class EncoderInitializationError(Exception):
    r"""
    Signals that an encoder (e.g. :class:`DirectlyParameterizedNormalDiag`) was
    given incorrectly initialised parameters.
    """
def is_continuous_integration() -> bool:
    """
    Return `True` if the code is running on continuous integration (CI) machines,
    otherwise `False`.

    .. note:: This check is based on the ``CI`` environment variable, which is set to ``True``
        by GitHub actions, CircleCI, and Jenkins. This function may not work as expected
        under other CI frameworks.
    """
    # Membership test replaces the chained `(ci == "true") or (ci == "1")`;
    # same accepted values, one comparison expression.
    return os.environ.get("CI", "").lower() in ("true", "1")


def notebook_niter(n: int, test_n: int = 2) -> int:
    """
    Return a typically smaller number of iterations ``test_n`` if
    code is running on CI machines (see :func:`is_continuous_integration`),
    otherwise return ``n``.
    """
    return test_n if is_continuous_integration() else n


def notebook_range(n: int, test_n: int = 2) -> range:
    """
    Return a typically shorter `range` (of the length specified by ``test_n``) if
    code is running on CI machines (see :func:`is_continuous_integration`),
    otherwise return a `range` of the length specified by ``n``.
    """
    return range(notebook_niter(n, test_n))


def notebook_list(lst: list, test_n: int = 2) -> list:
    """
    Return a subset of the length specified by ``test_n`` from a list ``lst`` if the code
    is running on CI machines (see :func:`is_continuous_integration`),
    otherwise return the complete list ``lst``.
    """
    return lst[:test_n] if is_continuous_integration() else lst
def plot_layer(
    X: TensorType,
    layer_input: TensorType,
    mean: List[TensorType],
    cov: List[TensorType],
    sample: List[TensorType],
    idx: Optional[int],
    axes: Optional[Sequence[plt.Axes]] = None,
) -> None:
    """
    Plot a single DGP layer on three axes: the layer's input, its output
    covariance matrix, and samples from its output.

    :param X: original inputs to the DGP, shape [N, 1]
    :param layer_input: inputs to this layer, shape [N, 1]
    :param mean: mean of this layer's output, shape [N, 1]
    :param cov: covariance of this layer's output, shape [1, N, N]
    :param sample: samples from this layer's output, shape [S, N, 1]
    :param idx: the index of this layer (for labels); `None` omits the layer label
    :param axes: the sequence of 3 axes on which to plot; if `None`, a new figure
        with three stacked axes is created
    """
    if axes is None:
        fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(3, 10))
    else:
        assert len(axes) == 3
        ax1, ax2, ax3 = axes

    # Input
    title = "Input"
    if idx is not None:
        # idx is 0-based; display layers as 1-based for readability.
        title = f"Layer {idx + 1}\n{title}"
    ax1.set_title(title)
    ax1.plot(X, layer_input)

    # covariance: drop the leading singleton output dimension ([1, N, N] -> [N, N])
    ax2.matshow(np.squeeze(cov, axis=0), aspect="auto")
    ax2.set_yticklabels([])
    ax2.set_xticklabels([])

    # samples: squeeze to [S, N], transpose so each sample is one plotted line
    ax3.set_title("Samples")
    ax3.plot(X, np.squeeze(sample, axis=-1).T)


def plot_layers(
    X: TensorType, means: List[TensorType], covs: List[TensorType], samples: List[TensorType]
) -> None:  # pragma: no cover
    """
    Plot all layers of a DGP in a 3-row grid (input / covariance / samples),
    one column per layer, via :func:`plot_layer`.

    :param X: original inputs to the DGP, shape [N, 1]
    :param means: per-layer output means
    :param covs: per-layer output covariances
    :param samples: per-layer output samples; the first sample of layer ``i``
        is used as the input to layer ``i + 1``
    """
    L = len(means)
    fig, axes = plt.subplots(3, L, figsize=(L * 3.33, 10))
    for i in range(L):
        # The first layer receives the original inputs; subsequent layers
        # receive the first sample drawn from the previous layer.
        layer_input = X if i == 0 else samples[i - 1][0]
        plot_layer(X, layer_input, means[i], covs[i], samples[i], i, axes[:, i])
@dataclass
class Event:
    """Minimal container to hold TensorBoard event data"""

    tag: str  # summary name, e.g. "loss" or "lengthscales"
    step: int  # global step at which the summary was written
    value: Any  # scalar payload extracted from the summary
    dtype: Type  # Python type of ``value``


def tensorboard_event_iterator(file_pattern: Union[str, List[str], tf.Tensor]) -> Iterator[Event]:
    """
    Iterator yielding preprocessed tensorboard Events.

    :param file_pattern: A string, a list of strings, or a `tf.Tensor` of string type
        (scalar or vector), representing the filename glob (i.e. shell wildcard)
        pattern(s) that will be matched.

    :raises ValueError: if a summary value can be read neither as a tensor nor as
        a simple (scalar) value.
    """

    def get_scalar_value(value: Any) -> Any:
        # Using `value.simple_value` returns 0.0 for np.ndarray values, so we
        # must try `MakeNdarray` first, which in turn breaks for non-tensors.
        # Flattened from a nested try/except for readability; the original
        # also raised ValueError without chaining, which lost the traceback.
        try:
            return tensor_util.MakeNdarray(value.tensor).item()
        except Exception:
            pass
        try:
            return value.simple_value
        except Exception as exc:
            # Chain the underlying error so debugging information is preserved.
            raise ValueError("Unable to read value from tensor") from exc

    event_files = tf.data.Dataset.list_files(file_pattern)
    serialized_examples = tf.data.TFRecordDataset(event_files)
    for serialized_example in serialized_examples:
        event = event_pb2.Event.FromString(serialized_example.numpy())
        for value in event.summary.value:
            v = get_scalar_value(value)
            yield Event(tag=value.tag, step=event.step, value=v, dtype=type(v))
15 | # 16 | """ 17 | Layers 18 | """ 19 | from gpflux.layers import basis_functions 20 | from gpflux.layers.bayesian_dense_layer import BayesianDenseLayer 21 | from gpflux.layers.gp_layer import GPLayer 22 | from gpflux.layers.latent_variable_layer import LatentVariableLayer, LayerWithObservations 23 | from gpflux.layers.likelihood_layer import LikelihoodLayer 24 | from gpflux.layers.trackable_layer import TrackableLayer 25 | -------------------------------------------------------------------------------- /gpflux/layers/basis_functions/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | """ 17 | Basis functions. 18 | """ 19 | from gpflux.layers.basis_functions import fourier_features 20 | -------------------------------------------------------------------------------- /gpflux/layers/basis_functions/fourier_features/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | """ 17 | A kernel's features for efficient sampling, used by 18 | :class:`gpflux.sampling.KernelWithFeatureDecomposition` 19 | """ 20 | 21 | from gpflux.layers.basis_functions.fourier_features.quadrature import QuadratureFourierFeatures 22 | from gpflux.layers.basis_functions.fourier_features.random import ( 23 | OrthogonalRandomFeatures, 24 | RandomFourierFeatures, 25 | RandomFourierFeaturesCosine, 26 | ) 27 | 28 | __all__ = [ 29 | "QuadratureFourierFeatures", 30 | "OrthogonalRandomFeatures", 31 | "RandomFourierFeatures", 32 | "RandomFourierFeaturesCosine", 33 | ] 34 | -------------------------------------------------------------------------------- /gpflux/layers/basis_functions/fourier_features/base.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | """ Shared functionality for stationary kernel basis functions. 
""" 17 | 18 | from abc import ABC, abstractmethod 19 | from typing import Mapping 20 | 21 | import tensorflow as tf 22 | 23 | import gpflow 24 | from gpflow.base import TensorType 25 | from gpflow.keras import tf_keras 26 | 27 | from gpflux.types import ShapeType 28 | 29 | 30 | class FourierFeaturesBase(ABC, tf_keras.layers.Layer): 31 | r""" 32 | The base class for all Fourier feature layers, used for both random Fourier feature layers and 33 | quadrature layers. We subclass :class:`tf.keras.layers.Layer`, so we must provide 34 | :method:`build` and :method:`call` methods. 35 | """ 36 | 37 | def __init__(self, kernel: gpflow.kernels.Kernel, n_components: int, **kwargs: Mapping): 38 | """ 39 | :param kernel: kernel to approximate using a set of Fourier bases. 40 | :param n_components: number of components (e.g. Monte Carlo samples, 41 | quadrature nodes, etc.) used to numerically approximate the kernel. 42 | """ 43 | super(FourierFeaturesBase, self).__init__(**kwargs) 44 | self.kernel = kernel 45 | self.n_components = n_components 46 | if isinstance(kernel, gpflow.kernels.MultioutputKernel): 47 | self.is_multioutput = True 48 | self.num_latent_gps = kernel.num_latent_gps 49 | else: 50 | self.is_multioutput = False 51 | self.num_latent_gps = 1 52 | 53 | if kwargs.get("input_dim", None): 54 | self._input_dim = kwargs["input_dim"] 55 | self.build(tf.TensorShape([self._input_dim])) 56 | else: 57 | self._input_dim = None 58 | 59 | def call(self, inputs: TensorType) -> tf.Tensor: 60 | """ 61 | Evaluate the basis functions at ``inputs``. 62 | 63 | :param inputs: The evaluation points, a tensor with the shape ``[N, D]``. 64 | 65 | :return: A tensor with the shape ``[N, M]``, or shape ``[P, N, M]'' in the multioutput case. 
66 | """ 67 | if self.is_multioutput: 68 | X = [tf.divide(inputs, k.lengthscales) for k in self.kernel.latent_kernels] 69 | X = tf.stack(X, 0) # [1, N, D] or [P, N, D] 70 | else: 71 | X = tf.divide(inputs, self.kernel.lengthscales) # [N, D] 72 | const = self._compute_constant() # [] or [P, 1, 1] 73 | bases = self._compute_bases(X) # [N, M] or [P, N, M] 74 | output = const * bases 75 | tf.ensure_shape(output, self.compute_output_shape(inputs.shape)) 76 | return output 77 | 78 | def compute_output_shape(self, input_shape: ShapeType) -> tf.TensorShape: 79 | """ 80 | Computes the output shape of the layer. 81 | See `tf.keras.layers.Layer.compute_output_shape() 82 | `_. 83 | """ 84 | # TODO: Keras docs say "If the layer has not been built, this method 85 | # will call `build` on the layer." -- do we need to do so? 86 | tensor_shape = tf.TensorShape(input_shape).with_rank(2) 87 | output_dim = self._compute_output_dim(input_shape) 88 | trailing_shape = tensor_shape[:-1].concatenate(output_dim) 89 | if self.is_multioutput: 90 | return tf.TensorShape([self.num_latent_gps]).concatenate(trailing_shape) # [P, N, M] 91 | else: 92 | return trailing_shape # [N, M] 93 | 94 | def get_config(self) -> Mapping: 95 | """ 96 | Returns the config of the layer. 97 | See `tf.keras.layers.Layer.get_config() 98 | `_. 99 | """ 100 | config = super(FourierFeaturesBase, self).get_config() 101 | config.update( 102 | { 103 | "kernel": self.kernel, 104 | "n_components": self.n_components, 105 | "input_dim": self._input_dim, 106 | } 107 | ) 108 | 109 | return config 110 | 111 | @abstractmethod 112 | def _compute_output_dim(self, input_shape: ShapeType) -> int: 113 | pass 114 | 115 | @abstractmethod 116 | def _compute_constant(self) -> tf.Tensor: 117 | """ 118 | Compute normalizing constant for basis functions. 119 | """ 120 | pass 121 | 122 | @abstractmethod 123 | def _compute_bases(self, inputs: TensorType) -> tf.Tensor: 124 | """ 125 | Compute basis functions. 
126 | """ 127 | pass 128 | -------------------------------------------------------------------------------- /gpflux/layers/basis_functions/fourier_features/quadrature/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | """ A kernel's features and coefficients using quadrature Fourier features (QFF). """ 17 | from gpflux.layers.basis_functions.fourier_features.quadrature.gaussian import ( 18 | QuadratureFourierFeatures, 19 | ) 20 | 21 | __all__ = ["QuadratureFourierFeatures"] 22 | -------------------------------------------------------------------------------- /gpflux/layers/basis_functions/fourier_features/quadrature/gaussian.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
class QuadratureFourierFeatures(FourierFeaturesBase):
    """
    Deterministic Fourier-feature decomposition of a kernel based on
    Gauss-Hermite quadrature. Only kernels listed in
    :data:`QFF_SUPPORTED_KERNELS` (currently
    :class:`gpflow.kernels.SquaredExponential`) are supported.
    """

    def __init__(self, kernel: gpflow.kernels.Kernel, n_components: int, **kwargs: Mapping):
        """
        :param kernel: kernel to approximate; must be an instance of one of
            :data:`QFF_SUPPORTED_KERNELS`.
        :param n_components: number of quadrature nodes per input dimension
            (the total number of nodes is ``n_components ** input_dim``).
        """
        # NOTE(review): `assert` is stripped under `python -O`; an explicit
        # raise would make this validation unconditional — confirm intent.
        assert isinstance(kernel, QFF_SUPPORTED_KERNELS), "Unsupported Kernel"
        # Small lengthscales need many quadrature nodes to resolve; warn the
        # user that the approximation may degrade.
        if tf.reduce_any(tf.less(kernel.lengthscales, 1e-1)):
            warnings.warn(
                "Quadrature Fourier feature approximation of kernels "
                "with small lengthscale lead to unexpected behaviors!"
            )
        super(QuadratureFourierFeatures, self).__init__(kernel, n_components, **kwargs)

    def build(self, input_shape: ShapeType) -> None:
        """
        Creates the variables of the layer.
        See `tf.keras.layers.Layer.build()
        <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer#build>`_.
        """
        input_dim = input_shape[-1]
        # Gauss-Hermite nodes and weights on a D-dimensional tensor-product
        # grid: M nodes per dimension gives M^D nodes in total.
        abscissa_value, omegas_value = ndgh_points_and_weights(
            dim=input_dim, n_gh=self.n_components
        )
        omegas_value = tf.squeeze(omegas_value, axis=-1)

        # Quadrature node points
        self.abscissa = tf.Variable(initial_value=abscissa_value, trainable=False)  # (M^D, D)
        # Gauss-Hermite weights
        self.factors = tf.Variable(initial_value=omegas_value, trainable=False)  # (M^D,)
        super(QuadratureFourierFeatures, self).build(input_shape)

    def _compute_output_dim(self, input_shape: ShapeType) -> int:
        # Both a sin and a cos feature for each of the M^D quadrature nodes.
        input_dim = input_shape[-1]
        return 2 * self.n_components ** input_dim

    def _compute_bases(self, inputs: TensorType) -> tf.Tensor:
        """
        Compute basis functions.

        :return: A tensor with the shape ``[N, 2M^D]``.
        """
        return _bases_concat(inputs, self.abscissa)

    def _compute_constant(self) -> tf.Tensor:
        """
        Compute normalizing constant for basis functions.

        :return: A tensor with the shape ``[2M^D,]``
        """
        # Weights are tiled twice because _bases_concat stacks the sin block
        # and the cos block along the feature axis.
        return tf.tile(tf.sqrt(self.kernel.variance * self.factors), multiples=[2])
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | """ A kernel's features and coefficients using Random Fourier Features (RFF). """ 17 | 18 | from gpflux.layers.basis_functions.fourier_features.random.base import ( 19 | RandomFourierFeatures, 20 | RandomFourierFeaturesCosine, 21 | ) 22 | from gpflux.layers.basis_functions.fourier_features.random.orthogonal import ( 23 | OrthogonalRandomFeatures, 24 | ) 25 | 26 | __all__ = [ 27 | "OrthogonalRandomFeatures", 28 | "RandomFourierFeatures", 29 | "RandomFourierFeaturesCosine", 30 | ] 31 | -------------------------------------------------------------------------------- /gpflux/layers/basis_functions/fourier_features/random/orthogonal.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | 17 | from typing import Mapping, Optional, Tuple, Type 18 | 19 | import numpy as np 20 | import tensorflow as tf 21 | 22 | import gpflow 23 | from gpflow.base import DType, TensorType 24 | 25 | from gpflux.layers.basis_functions.fourier_features.random.base import RandomFourierFeatures 26 | from gpflux.types import ShapeType 27 | 28 | """ 29 | Kernels supported by :class:`OrthogonalRandomFeatures`. 
def _sample_chi_squared(nu: float, shape: ShapeType, dtype: DType) -> TensorType:
    """
    Draw samples from Chi-squared distribution with `nu` degrees of freedom.

    See https://mathworld.wolfram.com/Chi-SquaredDistribution.html for further
    details regarding relationship to Gamma distribution.
    """
    # Chi-squared(nu) is exactly Gamma(alpha=nu/2, beta=1/2).
    return tf.random.gamma(shape=shape, alpha=0.5 * nu, beta=0.5, dtype=dtype)


def _sample_chi(nu: float, shape: ShapeType, dtype: DType) -> TensorType:
    """
    Draw samples from Chi-distribution with `nu` degrees of freedom.
    """
    # A Chi-distributed variable is the square root of a Chi-squared one.
    s = _sample_chi_squared(nu, shape, dtype)
    return tf.sqrt(s)


def _ceil_divide(a: float, b: float) -> int:
    """
    Ceiling division. Returns the smallest integer `m` s.t. `m*b >= a`.
    """
    # ceil(a/b) == -floor(-a/b); integer arithmetic, no float rounding issues.
    return -np.floor_divide(-a, b)


class OrthogonalRandomFeatures(RandomFourierFeatures):
    r"""
    Orthogonal random Fourier features (ORF) :cite:p:`yu2016orthogonal` for more
    efficient and accurate kernel approximations than :class:`RandomFourierFeatures`.
    """

    def __init__(self, kernel: gpflow.kernels.Kernel, n_components: int, **kwargs: Mapping):
        """
        :param kernel: kernel to approximate; must be an instance of one of
            :data:`ORF_SUPPORTED_KERNELS`.
        :param n_components: number of Monte Carlo samples ``M``.
        """
        assert isinstance(kernel, ORF_SUPPORTED_KERNELS), "Unsupported Kernel"
        super(OrthogonalRandomFeatures, self).__init__(kernel, n_components, **kwargs)

    def _weights_init(self, shape: TensorType, dtype: Optional[DType] = None) -> TensorType:
        """
        Sample the random frequency matrix: within each block of ``D`` rows the
        directions are orthogonal (via QR), and each row is rescaled by a
        Chi-distributed norm to match the row-norm distribution of an i.i.d.
        Gaussian matrix.
        """
        n_components, input_dim = shape  # M, D
        n_reps = _ceil_divide(n_components, input_dim)  # K, smallest integer s.t. K*D >= M

        W = tf.random.normal(shape=(n_reps, input_dim, input_dim), dtype=dtype)
        Q, _ = tf.linalg.qr(W)  # throw away R; shape [K, D, D]

        s = _sample_chi(nu=input_dim, shape=(n_reps, input_dim), dtype=dtype)  # shape [K, D]
        U = tf.expand_dims(s, axis=-1) * Q  # equiv: S @ Q where S = diag(s); shape [K, D, D]
        V = tf.reshape(U, shape=(-1, input_dim))  # shape [K*D, D]

        return V[: self.n_components]  # shape [M, D] (throw away K*D - M rows)
18 | """ 19 | import tensorflow as tf 20 | 21 | import gpflow 22 | from gpflow.base import TensorType 23 | 24 | 25 | def _matern_number(kernel: gpflow.kernels.Kernel) -> int: 26 | if isinstance(kernel, gpflow.kernels.Matern52): 27 | p = 2 28 | elif isinstance(kernel, gpflow.kernels.Matern32): 29 | p = 1 30 | elif isinstance(kernel, gpflow.kernels.Matern12): 31 | p = 0 32 | else: 33 | raise NotImplementedError("Not a recognized Matern kernel") 34 | return p 35 | 36 | 37 | def _bases_cosine(X: TensorType, W: TensorType, b: TensorType) -> TensorType: 38 | """ 39 | Feature map for random Fourier features (RFF) as originally prescribed 40 | by Rahimi & Recht, 2007 :cite:p:`rahimi2007random`. 41 | See also :cite:p:`sutherland2015error` for additional details. 42 | """ 43 | proj = tf.matmul(X, W, transpose_b=True) + b # [N, M] or [P, N, M] 44 | return tf.cos(proj) # [N, M] or [P, N, M] 45 | 46 | 47 | def _bases_concat(X: TensorType, W: TensorType) -> TensorType: 48 | """ 49 | Feature map for random Fourier features (RFF) as originally prescribed 50 | by Rahimi & Recht, 2007 :cite:p:`rahimi2007random`. 51 | See also :cite:p:`sutherland2015error` for additional details. 52 | """ 53 | proj = tf.matmul(X, W, transpose_b=True) # [N, M] or [P, N, M] 54 | return tf.concat([tf.sin(proj), tf.cos(proj)], axis=-1) # [N, 2M] or [P, N, M] 55 | -------------------------------------------------------------------------------- /gpflux/layers/trackable_layer.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Utility layer that tracks variables in :class:`tf.Module`."""

from deprecated import deprecated

from gpflow.keras import tf_keras


@deprecated(
    reason=(
        "GPflux's `TrackableLayer` was prior to TF2.5 used to collect GPflow "
        "variables in subclassed layers. As of TF 2.5, `tf.Module` supports "
        "this natively and there is no need for `TrackableLayer` anymore. It will "
        "be removed in GPflux version `1.0.0`."
    )
)
class TrackableLayer(tf_keras.layers.Layer):
    """
    Deprecated no-op subclass of the Keras ``Layer``.

    With the release of TensorFlow 2.5, our TrackableLayer workaround is no
    longer needed. See https://github.com/Prowler-io/gpflux/issues/189.
    Will be removed in GPflux version 1.0.0
    """

    pass
# ---------------------------------------------------------------------------
# /gpflux/losses.py:
# ---------------------------------------------------------------------------
#
# Copyright (c) 2021 The GPflux Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module provides the `LikelihoodLoss` adapter to use GPflow's
:class:`~gpflow.likelihoods.Likelihood` implementations as a
`tf.keras.losses.Loss`.
"""
from typing import Union

import tensorflow as tf
import tensorflow_probability as tfp

import gpflow
from gpflow.base import TensorType
from gpflow.keras import tf_keras

from gpflux.types import unwrap_dist


class LikelihoodLoss(tf_keras.losses.Loss):
    r"""
    A `tf.keras.losses.Loss` implementation that wraps a GPflow
    :class:`~gpflow.likelihoods.Likelihood` instance.

    When the prediction (last-layer output) is a
    :class:`~tfp.distributions.Distribution` ``q(f)``, calling this loss
    returns the negative variational expectation :math:`-\mathbb{E}_{q(f)}[\log
    p(y|f)]`. When the prediction is a `tf.Tensor`, calling this loss returns
    the negative log-probability :math:`-\log p(y|f)`.

    When you use this loss function in training a Keras model, the value of
    this loss is not logged explicitly (in contrast, the layer-specific losses
    are logged, as is the overall model loss).
    To output this loss value explicitly, wrap this class in a
    `tf.keras.metrics.Metric` and add it to the model metrics.

    .. note::

        Use **either** this `LikelihoodLoss` (e.g. together with a
        `tf.keras.Sequential` model) **or**
        :class:`~gpflux.layers.LikelihoodLayer` (together with
        `gpflux.models.DeepGP`). Do **not** use both at once because this would
        add the loss twice.
    """

    def __init__(self, likelihood: gpflow.likelihoods.Likelihood):
        """
        :param likelihood: the GPflow likelihood object to use.

        .. note:: If you want to train any parameters of the likelihood
            (e.g. likelihood variance), you must include the likelihood as
            an attribute on a :class:`~gpflux.layers.TrackableLayer`
            instance that is part of your model. (This is not required when
            instead you use a :class:`gpflux.layers.LikelihoodLayer`
            together with :class:`gpflux.models.DeepGP`.)
        """
        super().__init__()

        self.likelihood = likelihood

    def call(
        self,
        y_true: TensorType,
        f_prediction: Union[TensorType, tfp.distributions.MultivariateNormalDiag],
    ) -> tf.Tensor:
        """
        Compute the negative likelihood objective of ``y_true`` under the
        prediction ``f_prediction``.

        Note that we deviate from the Keras Loss interface by calling the
        second argument *f_prediction* rather than *y_pred*.
        """
        X_unused = None  # the likelihood methods take X as their first argument; not needed here
        if not isinstance(unwrap_dist(f_prediction), tfp.distributions.MultivariateNormalDiag):
            # Plain tensor of function samples: negative log-density.
            return -self.likelihood.log_prob(X_unused, f_prediction, y_true)

        # Distribution q(f): negative variational expectation E_{q(f)}[log p(y|f)].
        F_mu = f_prediction.loc
        F_var = f_prediction.scale.diag ** 2
        return -self.likelihood.variational_expectations(X_unused, F_mu, F_var, y_true)
# ---------------------------------------------------------------------------
# /gpflux/math.py:
# ---------------------------------------------------------------------------
#
# Copyright (c) 2021 The GPflux Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Math utilities
"""
import tensorflow as tf

from gpflow import default_jitter
from gpflow.base import TensorType


def _cholesky_with_jitter(cov: TensorType) -> tf.Tensor:
    """
    Compute the Cholesky of the covariance, adding jitter (determined by
    :func:`gpflow.default_jitter`) to the diagonal to improve stability.

    :param cov: full covariance with shape ``[..., N, D, D]``.
    """
    # cov [..., N, D, D]
    cov_shape = tf.shape(cov)
    batch_shape = cov_shape[:-2]
    D = cov_shape[-2]
    jittermat = default_jitter() * tf.eye(
        D, batch_shape=batch_shape, dtype=cov.dtype
    )  # [..., N, D, D]
    return tf.linalg.cholesky(cov + jittermat)  # [..., N, D, D]


def compute_A_inv_b(A: TensorType, b: TensorType) -> tf.Tensor:
    r"""
    Computes :math:`A^{-1} b` using the Cholesky of ``A`` instead of the explicit inverse,
    as this is often numerically more stable.

    :param A: A positive-definite matrix with shape ``[..., M, M]``.
        Can contain any leading dimensions (``...``) as long as they correspond
        to the leading dimensions in ``b``.
    :param b: Tensor with shape ``[..., M, D]``.
        Can contain any leading dimensions (``...``) as long as they correspond
        to the leading dimensions in ``A``.

    :returns: Tensor with shape ``[..., M, D]``.
        Leading dimensions originate from ``A`` and ``b``.
    """
    # A = L Lᵀ, so A⁻¹ b = L⁻ᵀ (L⁻¹ b). `cholesky_solve` performs both
    # triangular solves in a single call.
    L = tf.linalg.cholesky(A)
    return tf.linalg.cholesky_solve(L, b)
# ---------------------------------------------------------------------------
# /gpflux/models/__init__.py:
# ---------------------------------------------------------------------------
#
# Copyright (c) 2021 The GPflux Contributors.
3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | """ 17 | Base model classes implemented in GPflux 18 | """ 19 | from gpflux.models.deep_gp import DeepGP 20 | -------------------------------------------------------------------------------- /gpflux/optimization/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | """ 17 | Optimization-related modules, currently just contains the `NatGradModel` 18 | and `NatGradWrapper` classes to integrate 19 | `gpflow.optimizers.NaturalGradient` with Keras. 
20 | """ 21 | from gpflux.optimization.keras_natgrad import NatGradModel, NatGradWrapper 22 | -------------------------------------------------------------------------------- /gpflux/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/secondmind-labs/GPflux/b3ad682ab95bdfbee999b945b6135f01b94f0eb6/gpflux/py.typed -------------------------------------------------------------------------------- /gpflux/runtime_checks.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | """ Runtime checks """ 17 | from typing import Optional, Tuple 18 | 19 | from gpflow.inducing_variables import ( 20 | FallbackSeparateIndependentInducingVariables, 21 | MultioutputInducingVariables, 22 | ) 23 | from gpflow.kernels import MultioutputKernel 24 | from gpflow.mean_functions import MeanFunction 25 | 26 | from gpflux.exceptions import GPLayerIncompatibilityException 27 | 28 | 29 | def verify_compatibility( 30 | kernel: MultioutputKernel, 31 | mean_function: MeanFunction, 32 | inducing_variable: MultioutputInducingVariables, 33 | ) -> Tuple[int, int]: 34 | """ 35 | Checks that the arguments are all compatible with each other for use in a `GPLayer`. 36 | 37 | :param kernel: The multioutput kernel for the layer. 
38 | :param inducing_variable: The inducing features for the layer. 39 | :param mean_function: The mean function applied to the inputs. 40 | :raises GPLayerIncompatibilityException: If an incompatibility is detected. 41 | :returns: number of inducing variables and number of latent GPs 42 | """ 43 | # TODO: This function could be simplified by upstream enhancements to 44 | # GPflow: e.g. by adding an `output_dim` attribute to 45 | # MultioutputInducingVariable subclasses 46 | 47 | if not isinstance(inducing_variable, MultioutputInducingVariables): 48 | raise GPLayerIncompatibilityException( 49 | "`inducing_variable` must be a `gpflow.inducing_variables.MultioutputInducingVariables`" 50 | ) 51 | if not isinstance(kernel, MultioutputKernel): 52 | raise GPLayerIncompatibilityException( 53 | "`kernel` must be a `gpflow.kernels.MultioutputKernel`" 54 | ) 55 | if not isinstance(mean_function, MeanFunction): 56 | raise GPLayerIncompatibilityException( 57 | "`kernel` must be a `gpflow.mean_functions.MeanFunction`" 58 | ) 59 | 60 | latent_inducing_points: Optional[int] = None 61 | if isinstance(inducing_variable, FallbackSeparateIndependentInducingVariables): 62 | latent_inducing_points = len(inducing_variable.inducing_variable_list) 63 | 64 | num_latent_gps = kernel.num_latent_gps 65 | 66 | if latent_inducing_points is not None: 67 | if latent_inducing_points != num_latent_gps: 68 | raise GPLayerIncompatibilityException( 69 | f"The number of latent GPs ({num_latent_gps}) does not match " 70 | f"the number of separate independent inducing_variables ({latent_inducing_points})" 71 | ) 72 | 73 | num_inducing_points = inducing_variable.num_inducing # currently the same for each dim 74 | return num_inducing_points, num_latent_gps 75 | -------------------------------------------------------------------------------- /gpflux/sampling/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 
3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | """ 17 | This module enables you to sample from (Deep) GPs efficiently and consistently. 18 | """ 19 | from gpflux.sampling.kernel_with_feature_decomposition import KernelWithFeatureDecomposition 20 | from gpflux.sampling.sample import efficient_sample 21 | -------------------------------------------------------------------------------- /gpflux/sampling/utils.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | """ 17 | This module contains utilities for sampling from multivariate Gaussian distributions. 
18 | """ 19 | import tensorflow as tf 20 | 21 | from gpflow.base import TensorType 22 | from gpflow.conditionals.util import sample_mvn 23 | 24 | from gpflux.math import _cholesky_with_jitter 25 | 26 | 27 | def draw_conditional_sample(mean: TensorType, cov: TensorType, f_old: TensorType) -> tf.Tensor: 28 | r""" 29 | Draw a sample :math:`\tilde{f}_\text{new}` from the conditional 30 | multivariate Gaussian :math:`p(f_\text{new} | f_\text{old})`, where the 31 | parameters ``mean`` and ``cov`` are the mean and covariance matrix of the 32 | joint multivariate Gaussian over :math:`[f_\text{old}, f_\text{new}]`. 33 | 34 | :param mean: A tensor with the shape ``[..., D, N+M]`` with the mean of 35 | ``[f_old, f_new]``. For each ``[..., D]`` this is a stacked vector of the 36 | form: 37 | 38 | .. math:: 39 | 40 | \begin{pmatrix} 41 | \operatorname{mean}(f_\text{old}) \;[N] \\ 42 | \operatorname{mean}(f_\text{new}) \;[M] 43 | \end{pmatrix} 44 | 45 | :param cov: A tensor with the shape ``[..., D, N+M, N+M]`` with the covariance of 46 | ``[f_old, f_new]``. For each ``[..., D]``, there is a 2x2 block matrix of the form: 47 | 48 | .. math:: 49 | 50 | \begin{pmatrix} 51 | \operatorname{cov}(f_\text{old}, f_\text{old}) \;[N, N] 52 | & \operatorname{cov}(f_\text{old}, f_\text{new}) \;[N, M] \\ 53 | \operatorname{cov}(f_\text{new}, f_\text{old}) \;[M, N] 54 | & \operatorname{cov}(f_\text{new}, f_\text{new}) \;[M, M] 55 | \end{pmatrix} 56 | 57 | :param f_old: A tensor of observations with the shape ``[..., D, N]``, 58 | drawn from Normal distribution with mean 59 | :math:`\operatorname{mean}(f_\text{old}) \;[N]`, and covariance 60 | :math:`\operatorname{cov}(f_\text{old}, f_\text{old}) \;[N, N]` 61 | 62 | :return: A sample :math:`\tilde{f}_\text{new}` from the conditional normal 63 | :math:`p(f_\text{new} | f_\text{old})` with the shape ``[..., D, M]``. 
64 | """ 65 | N, D = tf.shape(f_old)[-1], tf.shape(f_old)[-2] # noqa: F841 66 | M = tf.shape(mean)[-1] - N 67 | cov_old = cov[..., :N, :N] # [..., D, N, N] 68 | cov_new = cov[..., -M:, -M:] # [..., D, M, M] 69 | cov_cross = cov[..., :N, -M:] # [..., D, N, M] 70 | L_old = _cholesky_with_jitter(cov_old) # [..., D, N, N] 71 | A = tf.linalg.triangular_solve(L_old, cov_cross, lower=True) # [..., D, N, M] 72 | var_new = cov_new - tf.matmul(A, A, transpose_a=True) # [..., D, M, M] 73 | mean_new = mean[..., -M:] # [..., D, M] 74 | mean_old = mean[..., :N] # [..., D, N] 75 | mean_old_diff = (f_old - mean_old)[..., None] # [..., D, N, 1] 76 | AM = tf.linalg.triangular_solve(L_old, mean_old_diff) # [..., D, N, 1] 77 | mean_new = mean_new + (tf.matmul(A, AM, transpose_a=True)[..., 0]) # [..., D, M] 78 | return sample_mvn(mean_new, var_new, full_cov=True) 79 | -------------------------------------------------------------------------------- /gpflux/types.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | # 16 | """ 17 | Types used within GPflux (for static type-checking). 
18 | """ 19 | from typing import List, Tuple, Union 20 | 21 | import tensorflow as tf 22 | import tensorflow_probability as tfp 23 | 24 | from gpflow.base import TensorType 25 | 26 | 27 | def unwrap_dist(dist: tfp.distributions.Distribution) -> tfp.distributions.Distribution: 28 | """ 29 | Unwrap the given distribution, if it is wrapped in a ``_TensorCoercible``. 30 | """ 31 | while True: 32 | inner = getattr(dist, "tensor_distribution", None) 33 | if inner is None: 34 | return dist 35 | dist = inner 36 | 37 | 38 | ShapeType = Union[tf.TensorShape, List[int], Tuple[int, ...]] 39 | r""" Union of valid types for describing the shape of a `tf.Tensor`\ (-like) object """ 40 | 41 | ObservationType = List[TensorType] 42 | """ 43 | Type for the ``[inputs, targets]`` list used by :class:`~gpflux.layers.LayerWithObservations` 44 | """ 45 | -------------------------------------------------------------------------------- /gpflux/version.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | # 16 | """Adds __version__""" 17 | 18 | __version__ = "0.4.4" 19 | -------------------------------------------------------------------------------- /mypy.ini: -------------------------------------------------------------------------------- 1 | [mypy] 2 | # allow redefinition because of multiple dispatch 3 | ignore_missing_imports = True 4 | strict_optional = False 5 | allow_untyped_defs = False 6 | -------------------------------------------------------------------------------- /notebook_requirements.txt: -------------------------------------------------------------------------------- 1 | jupyter 2 | matplotlib<3.6 3 | scikit-learn 4 | pandas 5 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | markers = 3 | slow: marks slow tests that we may want to exclude from a local run -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | from pathlib import Path 4 | 5 | from setuptools import find_namespace_packages, setup 6 | 7 | requirements = [ 8 | "deprecated", 9 | "gpflow>=2.9.2", 10 | "numpy<2", 11 | "scipy", 12 | "tensorflow>=2.5.0,<2.17; platform_system!='Darwin' or platform_machine!='arm64'", 13 | # NOTE: Support of Apple Silicon MacOS platforms is in an experimental mode 14 | "tensorflow-macos>=2.5.0,<2.17; platform_system=='Darwin' and platform_machine=='arm64'", 15 | "tensorflow-probability>=0.13.0,<0.25", 16 | ] 17 | 18 | with open("README.md", "r") as file: 19 | long_description = file.read() 20 | 21 | with open(Path(__file__).parent / "gpflux" / "version.py", "r") as version_file: 22 | exec(version_file.read()) 23 | 24 | setup( 25 | name="gpflux", 26 | version=__version__, 27 | author="Secondmind Labs", 28 | 
author_email="gpflux@secondmind.ai", 29 | long_description=long_description, 30 | long_description_content_type="text/markdown", 31 | description="GPflux: Deep GP library", 32 | license="Apache License 2.0", 33 | keywords="Deep-Gaussian-processes", 34 | install_requires=requirements, 35 | packages=find_namespace_packages(include=["gpflux*"]), 36 | package_data={"gpflux": ["py.typed"]}, 37 | project_urls={ 38 | "Source on GitHub": "https://github.com/secondmind-labs/GPflux", 39 | "Documentation": "https://secondmind-labs.github.io/GPflux/", 40 | }, 41 | classifiers=[ 42 | "License :: OSI Approved :: Apache Software License", 43 | "Programming Language :: Python :: 3.7", 44 | "Programming Language :: Python :: 3.8", 45 | "Programming Language :: Python :: 3.9", 46 | "Programming Language :: Python :: 3.10", 47 | "Operating System :: OS Independent", 48 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 49 | ], 50 | ) 51 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright (c) 2021 The GPflux Contributors. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
#
# ---------------------------------------------------------------------------
# /tests/conftest.py:
# ---------------------------------------------------------------------------
import numpy as np
import pytest
import tensorflow as tf
from packaging.version import Version

from gpflow.kernels import SquaredExponential

# TODO: It would be great to make serialisation work in general. See:
# https://github.com/GPflow/GPflow/issues/1658
skip_serialization_tests = pytest.mark.skipif(
    Version(tf.__version__) >= Version("2.6"),
    reason="GPflow Parameter cannot be serialized in newer version of TensorFlow.",
)


@pytest.fixture
def test_data():
    """Random regression dataset: returns ``(x_data, y_data)`` arrays."""
    x_dim, y_dim, w_dim = 2, 1, 2
    num_data = 31
    x_data = np.random.random((num_data, x_dim)) * 5
    w_data = np.random.random((num_data, w_dim))
    # Shift/scale the first half of w so the inputs form two separated groups.
    w_data[: (num_data // 2), :] = 0.2 * w_data[: (num_data // 2), :] + 5

    input_data = np.concatenate([x_data, w_data], axis=1)
    assert input_data.shape == (num_data, x_dim + w_dim)
    # Draw targets from a GP prior (SquaredExponential kernel) over the inputs.
    y_data = np.random.multivariate_normal(
        mean=np.zeros(num_data), cov=SquaredExponential(variance=0.1)(input_data), size=y_dim
    ).T
    assert y_data.shape == (num_data, y_dim)
    return x_data, y_data
# ---------------------------------------------------------------------------
# /tests/gpflux/__init__.py:
# ---------------------------------------------------------------------------
#
# Copyright (c) 2021 The GPflux Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ---------------------------------------------------------------------------
# /tests/gpflux/architectures/test_constant_input_dim_deep_gp.py:
# ---------------------------------------------------------------------------
import numpy as np
import pytest
import tensorflow as tf

from gpflux.architectures import Config, build_constant_input_dim_deep_gp
from gpflux.helpers import make_dataclass_from_class


class DemoConfig:
    # Plain attribute holder mirroring the fields of `Config`; converted via
    # `make_dataclass_from_class` in the tests below.
    num_inducing = 7
    inner_layer_qsqrt_factor = 1e-3
    between_layer_noise_variance = 1e-3
    likelihood_noise_variance = 1e-2
    whiten = True


@pytest.mark.parametrize("input_dim", [7])
@pytest.mark.parametrize("num_layers", [3])
def test_smoke_build_constant_input_dim_deep_gp(input_dim, num_layers):
    # Smoke test: build a deep GP, train for one epoch, and predict.
    config = make_dataclass_from_class(Config, DemoConfig)
    inputs = np.random.randn(13, input_dim)
    targets = np.random.randn(13, 1)
    deep_gp = build_constant_input_dim_deep_gp(inputs, num_layers, config)

    training_model = deep_gp.as_training_model()
    training_model.compile("Adam")
    training_model.fit((inputs, targets), epochs=1)

    prediction_model = deep_gp.as_prediction_model()
    _ = prediction_model(inputs)


@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.int32])
def test_build_constant_input_dim_deep_gp_raises_on_incorrect_dtype(dtype):
    # The builder rejects inputs with these dtypes with a ValueError.
    config = make_dataclass_from_class(Config, DemoConfig)
    inputs = np.random.randn(13, 2).astype(dtype)

    with pytest.raises(ValueError):
        build_constant_input_dim_deep_gp(inputs, 2, config)
# ---------------------------------------------------------------------------
# /tests/gpflux/encoders/test_directly_parameterized_encoder.py:
# ---------------------------------------------------------------------------
#
# Copyright (c) 2021 The GPflux Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import numpy as np
import pytest
import tensorflow as tf

from gpflux.encoders import DirectlyParameterizedNormalDiag
from gpflux.exceptions import EncoderInitializationError

num_data = 200
latent_dim = 3


def test_shapes():
    # Fix the seed so we can reproduce the expected initial values below.
    seed = 300
    np.random.seed(seed)
    encoder = DirectlyParameterizedNormalDiag(num_data, latent_dim)

    # Shapes of the variational parameters
    assert np.all(tf.shape(encoder.means) == (num_data, latent_dim))
    assert np.all(tf.shape(encoder.stds) == (num_data, latent_dim))

    # Values: re-draw with the same seed to recover the expected initialisation
    np.random.seed(seed)
    expected_means = 0.01 * np.random.randn(num_data, latent_dim)
    expected_stds = 1e-5 * np.ones_like(expected_means)
    np.testing.assert_equal(encoder.means.numpy(), expected_means)
    np.testing.assert_allclose(encoder.stds.numpy(), expected_stds, rtol=1e-11)


@pytest.mark.parametrize("means", [None, np.random.randn(num_data, latent_dim)])
def test_call(means):
    encoder = DirectlyParameterizedNormalDiag(num_data, latent_dim, means)
    encoder_means, encoder_stds = encoder(inputs=None)

    # Calling the encoder returns its own parameters, unchanged.
    assert encoder_means is encoder.means
    assert encoder_stds is encoder.stds

    assert np.all(tf.shape(encoder_means) == (num_data, latent_dim))
    if means is not None:
        np.testing.assert_array_equal(encoder_means.numpy(), means)


def test_bad_shapes():
    # Wrong latent dimensionality
    bad_means = np.random.randn(num_data, latent_dim + 4)
    with pytest.raises(EncoderInitializationError):
        _ = DirectlyParameterizedNormalDiag(num_data, latent_dim, bad_means)

    # Wrong number of data points
    bad_means = np.random.randn(num_data + 1, latent_dim)
    with pytest.raises(EncoderInitializationError):
        _ = DirectlyParameterizedNormalDiag(num_data, latent_dim, bad_means)
# ---------------------------------------------------------------------------
# /tests/gpflux/layers/test_likelihood_layer.py:
# ---------------------------------------------------------------------------
#
# Copyright (c) 2021 The GPflux Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def make_data(input_dim: int, output_dim: int, num_data: int):
    """Draw a toy regression set: X uniform on [0, 20], each Y column a draw from
    a Matern-5/2 GP over X with observation noise sigma = 0.1.

    TODO: improve this test; for most of the likelihoods, Y won't actually be valid.
    """
    upper_lim = 20
    sigma = 0.1

    X = np.random.random(size=(num_data, input_dim)) * upper_lim
    cov = Matern52()(X) + np.eye(num_data) * sigma ** 2

    columns = []
    for _ in range(output_dim):
        sample = np.random.multivariate_normal(np.zeros(num_data), cov)
        columns.append(sample[:, None])
    return X, np.hstack(columns)
@pytest.mark.parametrize("GPflowLikelihood", TEST_GPFLOW_LIKELIHOODS)
def test_likelihood_loss(GPflowLikelihood):
    """LikelihoodLoss must compute variational expectations for distribution-valued
    predictions and the plain log-probability for tensor (sample) predictions."""
    gp_layer, (X, Y) = setup_gp_layer_and_data(num_inducing=5)
    likelihood = GPflowLikelihood()
    likelihood_loss = LikelihoodLoss(likelihood)

    # 1. Run tests with gp layer output as distribution
    f_distribution = gp_layer(X)
    f_mean = f_distribution.loc
    # Bug fix: the likelihood operates on the marginal *variance*; `scale.diag`
    # is the standard deviation, so it must be squared — consistent with
    # test_likelihood_layer_losses in this module.
    f_var = f_distribution.scale.diag ** 2

    expected_loss = np.mean(-likelihood.variational_expectations(X, f_mean, f_var, Y))
    np.testing.assert_almost_equal(likelihood_loss(Y, f_distribution), expected_loss, decimal=5)

    # 2. Run tests with gp_layer output coerced to sample
    f_sample = tf.convert_to_tensor(gp_layer(X))

    expected_loss = np.mean(-likelihood.log_prob(X, f_sample, Y))
    np.testing.assert_almost_equal(likelihood_loss(Y, f_sample), expected_loss, decimal=5)
def build_LVGPGP_model(x_dim, w_dim, y_dim, num_data):
    """Build a latent-variable deep GP: LatentVariableLayer -> two GPLayers ->
    Gaussian likelihood.

    The latent variable (dimension w_dim) is concatenated onto the x_dim inputs,
    which is why the first two layer widths are x_dim + w_dim.
    """
    lv_layer = build_latent_layer(w_dim, x_dim, y_dim)
    layer_dims = [x_dim + w_dim, x_dim + w_dim, y_dim]
    gp_layers = build_gp_layers(layer_dims, num_data)
    # Gaussian observation noise with initial variance 0.1
    likelihood_layer = LikelihoodLayer(Gaussian(0.1))
    # NOTE(review): `DeepGP` does not appear among this module's visible imports —
    # confirm `from gpflux.models import DeepGP` is present, otherwise this raises NameError.
    return DeepGP([lv_layer] + gp_layers, likelihood_layer, num_data=num_data)
@pytest.fixture(name="inducing_variable")
def _inducing_variable_fixture():
    """Ten evenly spaced inducing points on [-1, 1], shaped [10, 1]."""
    locations = np.linspace(-1, 1, 10)[:, None]
    return gpflow.inducing_variables.InducingPoints(locations)
def test_conditional_sample(kernel, inducing_variable, whiten):
    """Smoke and consistency test for efficient sampling using MVN Conditioning"""
    q_mu, q_sqrt = _get_qmu_qsqrt(kernel, inducing_variable)
    scaled_q_sqrt = 1e-3 * tf.convert_to_tensor(q_sqrt[np.newaxis])

    sample_func = efficient_sample(
        inducing_variable,
        kernel,
        q_mu,
        q_sqrt=scaled_q_sqrt,
        whiten=whiten,
    )

    X = np.linspace(-1, 1, 100).reshape(-1, 1)
    first_eval = sample_func(X)
    second_eval = sample_func(X)
    # A sample must be consistent: evaluating it twice at the same locations
    # returns the same values. MVN conditioning is numerically unstable, hence
    # the loose tolerance; Wilson sampling below manages the default decimal=7.
    np.testing.assert_array_almost_equal(first_eval, second_eval, decimal=2)
def mock_os_environ(os_env: dict):
    """Return a patcher that *replaces* ``os.environ`` with exactly ``os_env``.

    ``clear=True`` is essential: without it, patching with ``os_env={}`` would
    leave the real environment untouched.
    """
    patcher = mock.patch.dict(os.environ, os_env, clear=True)
    return patcher
@pytest.mark.parametrize(
    "is_ci, args, expected_result",
    [
        (True, (13,), 2),
        (True, (13, 5), 5),
        (False, (13,), 13),
    ],
)
def test_notebook_niter(is_ci, args, expected_result):
    """On CI, notebook_niter caps the iteration count (default cap 2);
    off CI it passes the requested count through unchanged."""
    with mock_ci_state(is_ci):
        actual = ci_utils.notebook_niter(*args)
    assert actual == expected_result
def _get_psd_matrix(N):
    """Returns P.S.D matrix with shape [N, N]"""
    from gpflow.kernels import SquaredExponential

    grid = np.linspace(-1, 1, N).reshape(-1, 1)
    gram = SquaredExponential()(grid, full_cov=True).numpy()  # [N, N]
    # Add jitter so downstream Cholesky/inverse operations are well conditioned.
    return gram + 1e-6 * np.eye(N, dtype=gram.dtype)
def make_inducing_variables(num_latent_iv):
    """Two equivalent constructions of multi-output inducing variables:
    one set per latent GP, and one set shared across all latent GPs."""
    separate = construct_basic_inducing_variables(
        num_inducing=[num_inducing] * num_latent_iv,
        input_dim=input_dim,
    )
    shared = construct_basic_inducing_variables(
        num_inducing=num_inducing, input_dim=input_dim, output_dim=num_latent_iv
    )
    return [separate, shared]
def test_verify_compatibility_type_errors():
    """verify_compatibility must reject non-multioutput kernels and inducing variables."""
    valid_inducing_variable = construct_basic_inducing_variables([35], input_dim=40)
    valid_kernel = construct_basic_kernel([Matern52()])
    valid_mean_function = Zero()  # all gpflow mean functions are currently valid

    # A plain single-output kernel is rejected: gpflow kernels must be MultioutputKernels.
    with pytest.raises(GPLayerIncompatibilityException):
        verify_compatibility(Matern52(), valid_mean_function, valid_inducing_variable)

    # Likewise plain InducingPoints are rejected:
    # gpflow inducing_variables must be MultioutputInducingVariables.
    Z = valid_inducing_variable.inducing_variable_list[0].Z
    single_output_iv = InducingPoints(Z)
    with pytest.raises(GPLayerIncompatibilityException):
        verify_compatibility(valid_kernel, valid_mean_function, single_output_iv)
def build_gp_layers(layer_sizes, num_data):
    """Create one GPLayer per consecutive (input_dim, output_dim) pair in
    `layer_sizes`, each with an RBF kernel and 25 inducing points; the final
    layer's mean function is set to Zero."""
    gp_layers = []
    for in_dim, out_dim in zip(layer_sizes[:-1], layer_sizes[1:]):
        kernel = construct_basic_kernel(kernels=RBF(), output_dim=out_dim)
        inducing_vars = construct_basic_inducing_variables(
            num_inducing=25, input_dim=in_dim, output_dim=out_dim
        )
        gp_layers.append(GPLayer(kernel, inducing_vars, num_data))

    gp_layers[-1].mean_function = Zero()
    return gp_layers
@pytest.mark.parametrize("w_dim", [1, 2])
@pytest.mark.parametrize("use_keras_compile", [True, False])
def test_cde_direct_parametrization(test_data, w_dim, use_keras_compile):
    """Test a directly parameterized CDE, using functional API, both eager or compiled.
    Test that the losses decrease."""

    # Fix both TF and NumPy seeds so the training trajectory is deterministic.
    tf.random.set_seed(0)
    np.random.seed(0)

    # 1. Set up data
    x_data, y_data = test_data
    num_data, x_dim = x_data.shape

    # 2. Set up layers: a standard-normal prior over the w_dim latent variable,
    # with one variational (mean, std) pair per data point in the encoder.
    prior_means = np.zeros(w_dim)
    prior_std = np.ones(w_dim)
    encoder = DirectlyParameterizedNormalDiag(num_data, w_dim)
    prior = tfp.distributions.MultivariateNormalDiag(prior_means, prior_std)

    lv = LatentVariableLayer(prior, encoder)
    # The GP layer consumes the inputs concatenated with the latent variable.
    [gp] = build_gp_layers([x_dim + w_dim, 1], num_data)
    likelihood_layer = LikelihoodLayer(Gaussian())

    # 3. Build the model
    dgp = DeepGP([lv, gp], likelihood_layer)
    model = dgp.as_training_model()

    # 4. Train the model and check 2nd half of loss is lower than first
    # (elementwise comparison of the two halves of the loss history).
    loss_history = train_model(x_data, y_data, model, use_keras_compile)
    epochs = len(loss_history)
    assert np.all(loss_history[: (epochs // 2)] > loss_history[(epochs // 2) :])
If there are several notebooks in 31 | # different directories with the same base name, they will all get blacklisted 32 | # (change the blacklisting check to something else in that case, if need be!) 33 | BLACKLISTED_NOTEBOOKS = [ 34 | "conditional_deep_gp.py", 35 | "deep_nonstationary_gp_samples.py", 36 | ] 37 | 38 | 39 | def _nbpath(): 40 | this_dir = os.path.dirname(__file__) 41 | return os.path.join(this_dir, "../docs/notebooks/") 42 | 43 | 44 | def test_notebook_dir_exists(): 45 | assert os.path.isdir(_nbpath()) 46 | 47 | 48 | def get_notebooks(): 49 | """ 50 | Returns all notebooks in `_nbpath` that are not blacklisted. 51 | """ 52 | 53 | def notebook_blacklisted(nb): 54 | blacklisted_notebooks_basename = map(os.path.basename, BLACKLISTED_NOTEBOOKS) 55 | return os.path.basename(nb) in blacklisted_notebooks_basename 56 | 57 | # recursively traverse the notebook directory in search for ipython notebooks 58 | all_notebooks = glob.iglob(os.path.join(_nbpath(), "**", "*.py"), recursive=True) 59 | notebooks_to_test = [nb for nb in all_notebooks if not notebook_blacklisted(nb)] 60 | return notebooks_to_test 61 | 62 | 63 | def _preproc(): 64 | pythonkernel = "python" + str(sys.version_info[0]) 65 | return ExecutePreprocessor(timeout=300, kernel_name=pythonkernel, interrupt_on_timeout=True) 66 | 67 | 68 | def _exec_notebook(notebook_filename): 69 | with open(notebook_filename) as notebook_file: 70 | nb = jupytext.read(notebook_file, as_version=nbformat.current_nbformat) 71 | try: 72 | meta_data = {"path": os.path.dirname(notebook_filename)} 73 | _preproc().preprocess(nb, {"metadata": meta_data}) 74 | except CellExecutionError as cell_error: 75 | traceback.print_exc(file=sys.stdout) 76 | msg = "Error executing the notebook {0}. 
def test_has_notebooks():
    """Guard against silently running zero notebook tests after a directory move."""
    discovered = get_notebooks()
    assert len(discovered) >= 2, "there are probably some notebooks that were not discovered"
18 | 19 | tqdm 20 | 21 | # Notebook tests: 22 | jupytext 23 | nbformat 24 | nbconvert 25 | jupyter_client 26 | ipykernel 27 | tornado 28 | -------------------------------------------------------------------------------- /tests_requirements_38_39.txt: -------------------------------------------------------------------------------- 1 | # Test requirements specific to Python 3.8 and 3.9 2 | 3 | # Code quality tools: 4 | black==21.7b0 5 | codecov 6 | click==8.0.4 7 | flake8==4.0.1 8 | isort==5.10.1 9 | mypy<1.4.0 # Newer mypy versions cause issues with older Python ('Self' from 'typing_extensions'). 10 | pytest 11 | pytest-cov 12 | pytest-random-order 13 | pytest-mock 14 | 15 | # For mypy stubs: 16 | types-Deprecated 17 | numpy 18 | 19 | tqdm 20 | 21 | # Notebook tests: 22 | jupytext 23 | nbformat 24 | nbconvert 25 | jupyter_client 26 | ipykernel 27 | tornado 28 | --------------------------------------------------------------------------------