├── .flake8 ├── .gitattributes ├── .github ├── ISSUE_TEMPLATE │ ├── bugs-performance-issues.md │ ├── doc-issue.md │ ├── feature-request.md │ └── other-issue.md └── workflows │ ├── docs.yaml │ ├── publish-pypi.yaml │ └── quality-checks.yaml ├── .gitignore ├── CHANGELOG.md ├── CITATION.bib ├── CITATION.cff ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── Makefile ├── README.md ├── docs ├── Makefile ├── README.md ├── _static │ ├── css │ │ ├── bootstrap.css │ │ ├── bootstrap_namespaced.css │ │ └── bootstrap_namespaced.less │ └── scripts │ │ └── bootstrap.min.js ├── bibliography.rst ├── conf.py ├── examples │ ├── Graph.nblink │ ├── GraphEdges.nblink │ ├── Hyperbolic.nblink │ ├── HypercubeGraph.nblink │ ├── Hypersphere.nblink │ ├── Mesh.nblink │ ├── SPD.nblink │ ├── SpecialOrthogonal.nblink │ ├── SpecialUnitary.nblink │ ├── Torus.nblink │ ├── backends │ │ ├── JAX_Graph.nblink │ │ ├── PyTorch_Graph.nblink │ │ └── TensorFlow_Graph.nblink │ ├── examples.rst │ ├── frontends │ │ ├── GPJax.nblink │ │ ├── GPflow.nblink │ │ └── GPyTorch.nblink │ ├── index.rst │ ├── introduction.rst │ └── other │ │ ├── Bayesian Optimization.nblink │ │ ├── Hyperbolic Approximations.nblink │ │ └── SPD Approximations.nblink ├── index.rst ├── make.bat ├── references.bib ├── requirements.txt └── theory │ ├── addition_theorem.rst │ ├── compact.rst │ ├── feature_maps.rst │ ├── graphs.rst │ ├── hypercube_graph.rst │ ├── index.rst │ ├── meshes.rst │ ├── product_kernels.rst │ ├── product_spaces.rst │ └── symmetric.rst ├── geometric_kernels ├── __init__.py ├── _logging.py ├── feature_maps │ ├── __init__.py │ ├── base.py │ ├── deterministic.py │ ├── probability_densities.py │ ├── random_phase.py │ └── rejection_sampling.py ├── frontends │ ├── __init__.py │ ├── gpflow.py │ ├── gpjax.py │ └── gpytorch.py ├── jax.py ├── kernels │ ├── __init__.py │ ├── base.py │ ├── feature_map.py │ ├── hodge_compositional.py │ ├── karhunen_loeve.py │ ├── matern_kernel.py │ └── product.py ├── lab_extras │ ├── 
__init__.py │ ├── extras.py │ ├── jax │ │ ├── __init__.py │ │ └── extras.py │ ├── numpy │ │ ├── __init__.py │ │ ├── extras.py │ │ └── sparse_extras.py │ ├── tensorflow │ │ ├── __init__.py │ │ └── extras.py │ └── torch │ │ ├── __init__.py │ │ └── extras.py ├── resources │ ├── __init__.py │ └── precomputed_characters.json ├── sampling │ ├── __init__.py │ └── samplers.py ├── spaces │ ├── __init__.py │ ├── base.py │ ├── circle.py │ ├── eigenfunctions.py │ ├── graph.py │ ├── graph_edges.py │ ├── hyperbolic.py │ ├── hypercube_graph.py │ ├── hypersphere.py │ ├── lie_groups.py │ ├── mesh.py │ ├── product.py │ ├── so.py │ ├── spd.py │ └── su.py ├── tensorflow.py ├── torch.py ├── utils │ ├── __init__.py │ ├── kernel_formulas │ │ ├── __init__.py │ │ ├── euclidean.py │ │ ├── hyperbolic.py │ │ ├── hypercube_graph.py │ │ └── spd.py │ ├── manifold_utils.py │ ├── product.py │ ├── special_functions.py │ └── utils.py └── version.py ├── notebooks ├── Graph.ipynb ├── GraphEdges.ipynb ├── Hyperbolic.ipynb ├── HypercubeGraph.ipynb ├── Hypersphere.ipynb ├── Mesh.ipynb ├── SPD.ipynb ├── SpecialOrthogonal.ipynb ├── SpecialUnitary.ipynb ├── Torus.ipynb ├── backends │ ├── JAX_Graph.ipynb │ ├── PyTorch_Graph.ipynb │ └── TensorFlow_Graph.ipynb ├── data │ ├── bunny.obj │ ├── icosphere.obj │ └── teddy.obj ├── frontends │ ├── GPJax.ipynb │ ├── GPflow.ipynb │ └── GPyTorch.ipynb └── other │ ├── Bayesian Optimization.ipynb │ ├── Hyperbolic Approximations.ipynb │ └── SPD Approximations.ipynb ├── pyproject.toml ├── scripts ├── add_toc.py ├── compute_characters.py ├── increment_header_levels.py └── nblinks_for_ipynbs.py ├── test_requirements-3.10.txt ├── test_requirements-3.11.txt ├── test_requirements-3.8.txt ├── test_requirements-3.9.txt ├── test_requirements.txt └── tests ├── __init__.py ├── data.py ├── feature_maps ├── __init__.py ├── test_feature_maps.py └── test_student_t_sample.py ├── helper.py ├── kernels ├── __init__.py ├── test_feature_map_kernel.py ├── test_matern_karhunenloeve_kernel.py └── 
test_product_kernel.py ├── sampling ├── __init__.py └── test_samplers.py ├── spaces ├── __init__.py ├── test_basics.py ├── test_circle.py ├── test_eigenfunctions.py ├── test_eigenvalues.py ├── test_graph.py ├── test_graph_edges.py ├── test_hyperbolic.py ├── test_hypercube_graph.py ├── test_hypersphere.py ├── test_lie_groups.py ├── test_mesh.py ├── test_product_discrete_spectrum_space.py └── test_spd.py ├── teddy.obj └── utils ├── __init__.py ├── test_kernel_formulas.py ├── test_manifold_utils.py ├── test_special_functions.py └── test_utils.py /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length = 88 3 | select = C,E,F,W,B,B9 4 | # ignore: 5 | # E226 missing whitespace around arithmetic operator 6 | # W503 Line break occurred before a binary operator 7 | # W504 line break after binary operator 8 | # F811 redefinition because of multiple dispatch 9 | ignore = E203, E501, W503, E226, W503, W504, F811, F722 10 | max-complexity = 10 11 | exclude = __init__.py,.git,__pycache__,.mypy_cache,.pytest_cache 12 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | *.ipynb linguist-vendored -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bugs-performance-issues.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report (including performance and build issues) 3 | about: Help us find mistakes in the GeometricKernels code base 4 | labels: bug 5 | --- 6 | 7 | 8 | 9 | # Bug / performance issue / build issue 10 | 11 | 12 | 13 | ## To reproduce 14 | 15 | **Minimal, reproducible example** 16 | 17 | ```python 18 | # This is the place for your code that reproduces the bug. 
19 | # Please make sure it does not depend on external libraries (beyond GeometricKernels's own requirements) or specific datasets, and the smaller, the better :) 20 | # For help on how to write a good bug report, see https://stackoverflow.com/help/minimal-reproducible-example 21 | ``` 22 | 23 | **Stack trace, or error message** 24 | ``` 25 | // Paste the full stack trace/error message here 26 | ``` 27 | 28 | ## Expected behavior 29 | 30 | 31 | 32 | ## System information 33 | 34 | * GeometricKernels version 35 | * TensorFlow/PyTorch/Jax version 36 | * Python version 37 | * Operating system 38 | 39 | ## Additional context 40 | 41 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/doc-issue.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Documentation or notebooks 3 | about: Let us know what would make the GeometricKernels documentation better 4 | --- 5 | 6 | # Documentation/tutorial notebooks 7 | 8 | *Is there anything missing in the docs?* 9 | 10 | *Are there any mistakes in the docs?* 11 | 12 | *Is there a feature that needs some example code in a notebook?* 13 | 14 | *Do you know how to fix the docs?* If so, it'd be amazing if you'd be willing to directly contribute a pull request :) 15 | 16 | 17 | Links: 18 | * [latest GeometricKernels documentation](https://geometric-kernels.github.io/GeometricKernels/index.html) 19 | * [Landing page](https://geometric-kernels.github.io/) -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature-request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for GeometricKernels 4 | labels: enhancement 5 | --- 6 | 7 | 8 | 9 | # Feature request 10 | 11 | 12 | 13 | ## Motivation 14 | 15 | **Is your feature request related to a problem?** 16 | 17 | 18 | 19 | 20 | ## Proposal 21 | 
22 | **Describe the solution you would like** 23 | 24 | 25 | **What alternatives have you considered?** 26 | 27 | 28 | **Are you willing to open a pull request?** (We really appreciate contributions!) 29 | 30 | ## Additional context 31 | 32 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/other-issue.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: '"How do I do ..." and other issues' 3 | about: How-To Questions 4 | --- -------------------------------------------------------------------------------- /.github/workflows/docs.yaml: -------------------------------------------------------------------------------- 1 | name: Docs 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | 8 | jobs: 9 | docs: 10 | runs-on: ubuntu-latest 11 | steps: 12 | #---------------------------------------------- 13 | # check-out repo and set-up python 14 | #---------------------------------------------- 15 | - uses: actions/checkout@v4 16 | - uses: actions/setup-python@v4 17 | with: 18 | python-version: "3.10" 19 | #---------------------------------------------- 20 | # install 21 | #---------------------------------------------- 22 | - name: Install dependencies 23 | run: | 24 | make install 25 | pip install -r docs/requirements.txt 26 | - name: Build documentation 27 | run: | 28 | make docs 29 | ls -all docs/_build/html 30 | - name: Clean 31 | run: | 32 | TMP_DIR=$(mktemp -d -p $(pwd)) 33 | mv docs/_build/html/* $TMP_DIR 34 | rm -rf docs 35 | mv $TMP_DIR docs 36 | ls -all docs 37 | touch docs/.nojekyll 38 | - name: Push to GitHub 39 | run: | 40 | git add . 
41 | git add -f docs/autoapi/* 42 | git config --global user.email "none" 43 | git config --global user.name "github-actions-bot" 44 | git commit -m "build documentation [ci skip]" 45 | git push -f origin HEAD:gh-pages 46 | -------------------------------------------------------------------------------- /.github/workflows/publish-pypi.yaml: -------------------------------------------------------------------------------- 1 | name: Publish-PyPI 2 | 3 | on: 4 | push: 5 | tags: v[0-9]+.[0-9]+.[0-9]+* 6 | 7 | jobs: 8 | upload-pypi: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | - uses: actions/setup-python@v4 13 | with: 14 | python-version: "3.10" 15 | - name: Install flit 16 | run: | 17 | pip install flit 18 | - name: Create pip package 19 | run: | 20 | flit build 21 | - name: Publish to PyPI 22 | run: | 23 | flit publish 24 | env: 25 | FLIT_INDEX_URL: ${{ secrets.PYPI_INDEX_URL }} 26 | FLIT_PASSWORD: ${{ secrets.PYPI_TOKEN }} 27 | FLIT_USERNAME: __token__ 28 | -------------------------------------------------------------------------------- /.github/workflows/quality-checks.yaml: -------------------------------------------------------------------------------- 1 | name: QualityChecks 2 | 3 | on: 4 | push: 5 | pull_request: 6 | branches: 7 | - main 8 | 9 | jobs: 10 | check-and-test: 11 | runs-on: ubuntu-latest 12 | strategy: 13 | matrix: 14 | python-version: ["3.8", "3.9", "3.10", "3.11"] 15 | fail-fast: false 16 | 17 | name: Python-${{ matrix.python-version }} 18 | steps: 19 | #---------------------------------------------- 20 | # check-out repo and set-up python 21 | #---------------------------------------------- 22 | - uses: actions/checkout@v4 23 | - uses: actions/setup-python@v4 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | #---------------------------------------------- 27 | # install 28 | #---------------------------------------------- 29 | - name: Install dependencies 30 | run: 
GK_REQUIREMENTS=test_requirements-${{ matrix.python-version }}.txt make install 31 | #---------------------------------------------- 32 | # Lint and test 33 | #---------------------------------------------- 34 | - name: Run lint 35 | run: make lint 36 | - name: Run tests 37 | run: make test 38 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | docs/generated/ 74 | docs/autoapi/ 75 | docs/bin 76 | 77 | # PyBuilder 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | 134 | # Emacs backup files 135 | *~ 136 | 137 | # Vim swp files 138 | *.swp 139 | 140 | # Mac OS 141 | .DS_Store 142 | 143 | # Idea 144 | /.idea/ 145 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # CHANGELOG 2 | 3 | ## v0.3 - 29.01.2025 4 | * Implementation of kernels on the edge space of graphs or simplicial complexes by @cookbook-ms in https://github.com/geometric-kernels/GeometricKernels/pull/139 5 | 6 | ## v0.2.3 - 16.01.2025 7 | * Constraint version of plum-dispatch because of wesselb/lab#23 8 | 9 | ## v0.2.2 - 29.11.2024 10 | * Replace opt_einsum's contract with lab's einsum for better backend-independence by @vabor112 in https://github.com/geometric-kernels/GeometricKernels/pull/145 11 | * Hypersphere space small improvements by @vabor112 in https://github.com/geometric-kernels/GeometricKernels/pull/142 12 | * The Hypercube space for binary vectors and labeled unweighted graphs by @vabor112 in https://github.com/geometric-kernels/GeometricKernels/pull/141 13 | * Fix algorithm selecting signatures and add precomputed characters for SO(9), SU(7), SU(8), SU(9) by @imbirik in https://github.com/geometric-kernels/GeometricKernels/pull/151 14 | * Revise tests and numerous fixes by @vabor112 in 
https://github.com/geometric-kernels/GeometricKernels/pull/149 15 | 16 | ## v0.2.1 - 08.08.2024 17 | Minor release with mostly cosmetic changes: 18 | * Add "If you have a question" section to README.md by @vabor112 in https://github.com/geometric-kernels/GeometricKernels/pull/131 19 | * Github cosmetics by @stoprightthere in https://github.com/geometric-kernels/GeometricKernels/pull/133 20 | * Replace all references to "gpflow" organization with "geometric-kernels" organization by @vabor112 in https://github.com/geometric-kernels/GeometricKernels/pull/134 21 | * Use fit_gpytorch_model or fit.fit_gpytorch_mll depening on the botorсh version by @vabor112 in https://github.com/geometric-kernels/GeometricKernels/pull/137 22 | * Add a missing type cast and fix a typo in kernels/karhunen_loeve.py by @vabor112 in https://github.com/geometric-kernels/GeometricKernels/pull/136 23 | * Minor documentation improvements by @vabor112 in https://github.com/geometric-kernels/GeometricKernels/pull/135 24 | * Add citation to the preprint of the GeometricKernels paper by @vabor112 in https://github.com/geometric-kernels/GeometricKernels/pull/138 25 | * Add citation file by @aterenin in https://github.com/geometric-kernels/GeometricKernels/pull/140 26 | * Fix dependencies (Version 0.2.1) by @stoprightthere in https://github.com/geometric-kernels/GeometricKernels/pull/143 27 | 28 | ## v0.2 - 21.04.2024 29 | New geometric kernel that *just works*, `kernels.MaternGeometricKernel`. Relies on *(hopefully)* sensible defaults we defined. Mostly by @stoprightthere. 30 | 31 | New spaces, based on Azangulov et al. 
([2022](https://arxiv.org/abs/2208.14960), [2023](https://arxiv.org/abs/2301.13088)), mostly by @imbirik and @stoprightthere: 32 | - hyperbolic spaces $\mathbb{H}_n$ in `spaces.Hyperbolic`, 33 | - manifolds of symmetric positive definite matrices $\mathrm{SPD}(n)$ endowed with the affine-invariant Riemannian metric in `spaces.SymmetricPositiveDefiniteMatrices`, 34 | - special orthogonal groups $\mathrm{SO}(n)$ in `spaces.SpecialOrthogonal`. 35 | - special unitary groups $\mathrm{SU}(n)$ in `spaces.SpecialUnitary`. 36 | 37 | New package `geometric_kernels.feature_maps` for (approximate) finite-dimensional feature maps. Mostly by @stoprightthere. 38 | 39 | New small package `geometric_kernels.sampling` for efficient sampling from geometric Gaussian process priors. Based on the (approximate) finite-dimensional feature maps. Mostly by @stoprightthere. 40 | 41 | Examples/Tutorials improvements, mostly by @vabor112: 42 | - new Jupyter notebooks `Graph.ipynb`, `Hyperbolic.ipynb`, `Hypersphere.ipynb`, `Mesh.ipynb`, `SPD.ipynb`, `SpecialOrthogonal.ipynb`, `SpecialUnitary.ipynb`, `Torus.ipynb` featuring tutorials on all the spaces in the library, 43 | - new Jupyter notebooks `backends/JAX_Graph.ipynb`, `backends/PyTorch_Graph.ipynb`, `backends/TensorFlow_Graph.ipynb` showcasing how to use all the backends supported by the library, 44 | - new Jupyter notebooks `frontends/GPflow.ipynb`, `frontends/GPJax.ipynb`, `frontends/GPyTorch.ipynb` showcasing how to use all the frontends supported by the library, 45 | - other notebooks updated and grouped together in `other/` folder. 
46 | 47 | 48 | Documentation improvements, mostly by @vabor112: 49 | - all docstrings throughout the library revised, 50 | - added new documentation pages describing the basic theoretical concepts, in `docs/theory`, 51 | - notebooks are now rendered as part of the documentation, you can refer to them from the docstrings and other documentation pages, 52 | - introduced a more or less unified style for docstrings. 53 | 54 | Other: 55 | - refactoring and bug fixes, 56 | - added type hints throughout the library and enabled `mypy`, 57 | - updated frontends (with limited suppot for GPJax due to conflicting dependencies), 58 | - improved `spaces.ProductDiscreteSpectrumSpace` and `kernels.ProductGeometricKernel`, 59 | - filtered out or fixed some annoying external warnings, 60 | - added a new banner for `README.md` and for our [landing page](https://geometric-kernels.github.io/), courtesy of @aterenin, 61 | - example notebooks are now run as tests, 62 | - we now support Python 3.8, 3.9, 3.10, 3.11 and have test workflows for all the supported Python versions, 63 | - we now provide a PyPI package, 64 | - [LAB](https://github.com/wesselb/lab) is now a lightweight dependency, thanks to @wesselb, 65 | - kernels are now normalized to have unit outputscale by default. 66 | 67 | ## v0.1-alpha - 20.10.2022 68 | Alpha release. 
69 | -------------------------------------------------------------------------------- /CITATION.bib: -------------------------------------------------------------------------------- 1 | @article{mostowsky2024, 2 | title = {The GeometricKernels Package: Heat and Matérn Kernels for Geometric Learning on Manifolds, Meshes, and Graphs}, 3 | author = {Peter Mostowsky and Vincent Dutordoir and Iskander Azangulov and Noémie Jaquier and Michael John Hutchinson and Aditya Ravuri and Leonel Rozo and Alexander Terenin and Viacheslav Borovitskiy}, 4 | year = {2024}, 5 | journal = {arXiv:2407.08086}, 6 | } -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | message: "If you use this software, please cite it as below." 3 | title: "GeometricKernels" 4 | authors: 5 | - name: "GeometricKernels Contributors" 6 | preferred-citation: 7 | type: "article" 8 | title: "The GeometricKernels Package: Heat and Matérn Kernels for Geometric Learning on Manifolds, Meshes, and Graphs" 9 | authors: 10 | - family-names: "Mostowsky" 11 | given-names: "Peter" 12 | - family-names: "Dutordoir" 13 | given-names: "Vincent" 14 | - family-names: "Azangulov" 15 | given-names: "Iskander" 16 | - family-names: "Jaquier" 17 | given-names: "Noémie" 18 | - family-names: "Hutchinson" 19 | given-names: "Michael John" 20 | - family-names: "Ravuri" 21 | given-names: "Aditya" 22 | - family-names: "Rozo" 23 | given-names: "Leonel" 24 | - family-names: "Terenin" 25 | given-names: "Alexander" 26 | - family-names: "Borovitskiy" 27 | given-names: "Viacheslav" 28 | year: "2024" 29 | journal: "arXiv:2407.08086" -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as 
members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, religion, or sexual identity 10 | and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the 26 | overall community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or 31 | advances of any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email 35 | address, without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 
45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement (the [project maintainers](CONTRIBUTING.md#who-are-we?)). 63 | All complaints will be reviewed and investigated promptly and fairly. 64 | 65 | All community leaders are obligated to respect the privacy and security of the 66 | reporter of any incident. 67 | 68 | ## Enforcement Guidelines 69 | 70 | Community leaders will follow these Community Impact Guidelines in determining 71 | the consequences for any action they deem in violation of this Code of Conduct: 72 | 73 | ### 1. Correction 74 | 75 | **Community Impact**: Use of inappropriate language or other behavior deemed 76 | unprofessional or unwelcome in the community. 77 | 78 | **Consequence**: A private, written warning from community leaders, providing 79 | clarity around the nature of the violation and an explanation of why the 80 | behavior was inappropriate. A public apology may be requested. 81 | 82 | ### 2. Warning 83 | 84 | **Community Impact**: A violation through a single incident or series 85 | of actions. 86 | 87 | **Consequence**: A warning with consequences for continued behavior. 
No 88 | interaction with the people involved, including unsolicited interaction with 89 | those enforcing the Code of Conduct, for a specified period of time. This 90 | includes avoiding interactions in community spaces as well as external channels 91 | like social media. Violating these terms may lead to a temporary or 92 | permanent ban. 93 | 94 | ### 3. Temporary Ban 95 | 96 | **Community Impact**: A serious violation of community standards, including 97 | sustained inappropriate behavior. 98 | 99 | **Consequence**: A temporary ban from any sort of interaction or public 100 | communication with the community for a specified period of time. No public or 101 | private interaction with the people involved, including unsolicited interaction 102 | with those enforcing the Code of Conduct, is allowed during this period. 103 | Violating these terms may lead to a permanent ban. 104 | 105 | ### 4. Permanent Ban 106 | 107 | **Community Impact**: Demonstrating a pattern of violation of community 108 | standards, including sustained inappropriate behavior, harassment of an 109 | individual, or aggression toward or disparagement of classes of individuals. 110 | 111 | **Consequence**: A permanent ban from any sort of public interaction within 112 | the community. 113 | 114 | ## Attribution 115 | 116 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 117 | version 2.0, available at 118 | https://www.contributor-covenant.org/version/2/0/code_of_conduct.html. 119 | 120 | Community Impact Guidelines were inspired by [Mozilla's code of conduct 121 | enforcement ladder](https://github.com/mozilla/diversity). 122 | 123 | [homepage]: https://www.contributor-covenant.org 124 | 125 | For answers to common questions about this code of conduct, see the FAQ at 126 | https://www.contributor-covenant.org/faq. Translations are available at 127 | https://www.contributor-covenant.org/translations. 
128 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: help docs install format lint test 2 | 3 | SUCCESS='\033[0;32m' 4 | 5 | SHELL=/bin/bash 6 | PYVERSION:=$(shell python -c "import sys;t='{v[0]}.{v[1]}'.format(v=list(sys.version_info[:2]));sys.stdout.write(t)") 7 | GK_REQUIREMENTS?=test_requirements-$(PYVERSION).txt 8 | 9 | help: ## Shows this help message 10 | # $(MAKEFILE_LIST) is set by make itself; the following parses the `target: ## help line` format and adds color highlighting 11 | @grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-24s\033[0m %s\n", $$1, $$2}' 12 | 13 | docs: 14 | (cd docs ; make clean; make doctest; make html) 15 | @echo "${SUCCESS}============== Docs are available at docs/_build/html/index.html ============== ${SUCCESS}" 16 | 17 | 18 | install: ## Install repo for developement (Only for Linux) 19 | @echo "=== pip install package with dev requirements (using $(GK_REQUIREMENTS)) ==============" 20 | pip install --upgrade pip 21 | pip install --upgrade --upgrade-strategy eager --no-cache-dir -r $(GK_REQUIREMENTS) | cat 22 | pip install -e . 23 | 24 | format: ## Formats code with `autoflake`, `black` and `isort` 25 | autoflake --remove-all-unused-imports --recursive --remove-unused-variables --in-place geometric_kernels tests --exclude=__init__.py 26 | black geometric_kernels tests 27 | isort geometric_kernels tests 28 | 29 | lint: 30 | flake8 geometric_kernels tests 31 | black geometric_kernels tests --check --diff 32 | isort geometric_kernels tests --check-only --diff 33 | mypy --namespace-packages geometric_kernels 34 | 35 | 36 | test: ## Run the tests, start with the failing ones and break on first fail. 
37 | pytest -v -x --ff -rN -Wignore -s --tb=short --durations=0 --cov --cov-report=xml tests 38 | pytest --nbmake --nbmake-kernel=python3 --durations=0 --nbmake-timeout=1000 --ignore=notebooks/frontends/GPJax.ipynb notebooks/ 39 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Compiling documentation 2 | 3 | First, execute doctests by running the following command in this dirrectory 4 | ``` 5 | make doctest 6 | ``` 7 | 8 | If all tests are passed, run 9 | ``` 10 | make html 11 | ``` 12 | 13 | To view the compiled documentation open the file `_build/html/index.html`. 
14 | -------------------------------------------------------------------------------- /docs/_static/css/bootstrap_namespaced.less: -------------------------------------------------------------------------------- 1 | .bootstrap { 2 | @import (less) url("bootstrap.css"); 3 | } 4 | -------------------------------------------------------------------------------- /docs/bibliography.rst: -------------------------------------------------------------------------------- 1 | ############ 2 | Bibliography 3 | ############ 4 | 5 | .. bibliography:: 6 | :style: gkunsrt -------------------------------------------------------------------------------- /docs/examples/Graph.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../notebooks/Graph.ipynb"} -------------------------------------------------------------------------------- /docs/examples/GraphEdges.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../notebooks/GraphEdges.ipynb"} -------------------------------------------------------------------------------- /docs/examples/Hyperbolic.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../notebooks/Hyperbolic.ipynb"} -------------------------------------------------------------------------------- /docs/examples/HypercubeGraph.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../notebooks/HypercubeGraph.ipynb"} -------------------------------------------------------------------------------- /docs/examples/Hypersphere.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../notebooks/Hypersphere.ipynb"} -------------------------------------------------------------------------------- /docs/examples/Mesh.nblink: -------------------------------------------------------------------------------- 1 | {"path": 
"../../notebooks/Mesh.ipynb"} -------------------------------------------------------------------------------- /docs/examples/SPD.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../notebooks/SPD.ipynb"} -------------------------------------------------------------------------------- /docs/examples/SpecialOrthogonal.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../notebooks/SpecialOrthogonal.ipynb"} -------------------------------------------------------------------------------- /docs/examples/SpecialUnitary.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../notebooks/SpecialUnitary.ipynb"} -------------------------------------------------------------------------------- /docs/examples/Torus.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../notebooks/Torus.ipynb"} -------------------------------------------------------------------------------- /docs/examples/backends/JAX_Graph.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../../notebooks/backends/JAX_Graph.ipynb"} -------------------------------------------------------------------------------- /docs/examples/backends/PyTorch_Graph.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../../notebooks/backends/PyTorch_Graph.ipynb"} -------------------------------------------------------------------------------- /docs/examples/backends/TensorFlow_Graph.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../../notebooks/backends/TensorFlow_Graph.ipynb"} -------------------------------------------------------------------------------- /docs/examples/examples.rst: 
-------------------------------------------------------------------------------- 1 | .. include:: introduction.rst 2 | 3 | Spaces 4 | ############## 5 | 6 | .. toctree:: 7 | :titlesonly: 8 | 9 | Graph 10 | GraphEdges 11 | Hyperbolic 12 | HypercubeGraph 13 | Hypersphere 14 | Mesh 15 | SPD 16 | SpecialOrthogonal 17 | SpecialUnitary 18 | Torus 19 | 20 | Backends 21 | ############## 22 | 23 | .. toctree:: 24 | :titlesonly: 25 | 26 | JAX (Graph space) 27 | PyTorch (Graph space) 28 | TensorFlow (Graph space) 29 | 30 | Frontends 31 | ############## 32 | 33 | .. toctree:: 34 | :titlesonly: 35 | 36 | GPFlow (Mesh space) 37 | GPJax (Mesh space) 38 | GPyTorch (Mesh space) 39 | 40 | Other 41 | ############## 42 | 43 | .. toctree:: 44 | :titlesonly: 45 | 46 | Bayesian optimization on a sphere 47 | Comparing approximations on Hyperbolic 48 | Comparing approximations on SPD 49 | -------------------------------------------------------------------------------- /docs/examples/frontends/GPJax.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../../notebooks/frontends/GPJax.ipynb"} -------------------------------------------------------------------------------- /docs/examples/frontends/GPflow.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../../notebooks/frontends/GPflow.ipynb"} -------------------------------------------------------------------------------- /docs/examples/frontends/GPyTorch.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../../notebooks/frontends/GPyTorch.ipynb"} -------------------------------------------------------------------------------- /docs/examples/index.rst: -------------------------------------------------------------------------------- 1 | #################### 2 | Examples 3 | #################### 4 | 5 | .. include:: introduction.rst 6 | 7 | .. 
toctree:: 8 | :maxdepth: 2 9 | 10 | examples 11 | -------------------------------------------------------------------------------- /docs/examples/introduction.rst: -------------------------------------------------------------------------------- 1 | Here you can find numerous example notebooks providing tutorials into GeometricKernels. 2 | 3 | Each of the *Spaces* notebooks explains the basic functionality of the library in relation to a specific space. 4 | 5 | If you are interested in using GeometricKernels with a backend other than NumPy, checkout the *Backends* notebooks. 6 | 7 | If you want to use GeometricKernels with one of the popular Gaussian process libraries, checkout the *Frontends* notebooks. -------------------------------------------------------------------------------- /docs/examples/other/Bayesian Optimization.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../../notebooks/other/Bayesian Optimization.ipynb"} -------------------------------------------------------------------------------- /docs/examples/other/Hyperbolic Approximations.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../../notebooks/other/Hyperbolic Approximations.ipynb"} -------------------------------------------------------------------------------- /docs/examples/other/SPD Approximations.nblink: -------------------------------------------------------------------------------- 1 | {"path": "../../../notebooks/other/SPD Approximations.ipynb"} -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 
11 | set BUILDDIR=_build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.https://www.sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/references.bib: -------------------------------------------------------------------------------- 1 | @inproceedings{borovitskiy2020, 2 | title={Matérn Gaussian processes on Riemannian manifolds}, 3 | author={Borovitskiy, Viacheslav and Terenin, Alexander and Mostowsky, Peter and Deisenroth, Marc Peter}, 4 | booktitle={Advances in Neural Information Processing Systems}, 5 | year={2020} 6 | } 7 | 8 | @inproceedings{borovitskiy2021, 9 | title={Matérn Gaussian Processes on Graphs}, 10 | author={Borovitskiy, Viacheslav and Azangulov, Iskander and Terenin, Alexander and Mostowsky, Peter and Deisenroth, Marc and Durrande, Nicolas}, 11 | booktitle={International Conference on Artificial Intelligence and Statistics}, 12 | year={2021}, 13 | } 14 | 15 | @inproceedings{kondor2002, 16 | title={Diffusion kernels on graphs and other discrete structures}, 17 | author={Kondor, Risi Imre and Lafferty, John}, 18 | booktitle={International Conference on Machine Learning}, 19 | year={2002} 20 | } 21 | 22 | @article{solin2020, 23 | title={Hilbert space methods for reduced-rank Gaussian process regression}, 24 | author={Solin, Arno and Särkkä, Simo}, 25 | journal={Statistics 
and Computing}, 26 | volume={30}, 27 | number={2}, 28 | pages={419--446}, 29 | year={2020}, 30 | } 31 | 32 | @article{coveney2020, 33 | title={Gaussian process manifold interpolation for probabilistic atrial activation maps and uncertain conduction velocity}, 34 | author={Coveney, Sam and Corrado, Cesare and Roney, Caroline H and O’Hare, Daniel and Williams, Steven E and O’Neill, Mark D and Niederer, Steven A and Clayton, Richard H and Oakley, Jeremy E and Wilkinson, Richard D}, 35 | journal={Philosophical Transactions of the Royal Society A}, 36 | volume={378}, 37 | number={2173}, 38 | pages={20190345}, 39 | year={2020}, 40 | } 41 | 42 | @inproceedings{jaquier2021, 43 | title={Geometry-aware Bayesian Optimization in Robotics using Riemannian Matérn Kernels}, 44 | author={Jaquier, Noémie and Borovitskiy, Viacheslav and Smolensky, Andrei and Terenin, Alexander and Asfour, Tamim and Rozo, Leonel}, 45 | booktitle={Annual Conference on Robot Learning}, 46 | year={2021} 47 | } 48 | 49 | @article{azangulov2024a, 50 | title={Stationary Kernels and Gaussian Processes on Lie Groups and their Homogeneous Spaces I: the compact case}, 51 | author={Azangulov, Iskander and Smolensky, Andrei and Terenin, Alexander and Borovitskiy, Viacheslav}, 52 | journal={Journal of Machine Learning Research}, 53 | year={2024}, 54 | volume={25}, 55 | number={280}, 56 | pages={1--52}, 57 | } 58 | 59 | @article{azangulov2024b, 60 | title={Stationary Kernels and Gaussian Processes on Lie Groups and their Homogeneous Spaces II: non-compact symmetric spaces}, 61 | author={Azangulov, Iskander and Smolensky, Andrei and Terenin, Alexander and Borovitskiy, Viacheslav}, 62 | journal={Journal of Machine Learning Research}, 63 | year={2024}, 64 | volume={25}, 65 | number={281}, 66 | pages={1--51}, 67 | } 68 | 69 | @inproceedings{yang2024, 70 | title={Hodge-Compositional Edge Gaussian Processes}, 71 | author={Yang, Maosheng and Borovitskiy, Viacheslav and Isufi, Elvin}, 72 | booktitle={International 
Conference on Artificial Intelligence and Statistics}, 73 | year={2024} 74 | } 75 | 76 | @article{alain2023, 77 | title={Gaussian Processes on Cellular Complexes}, 78 | author={Alain, Mathieu and Takao, So and Paige, Brooks and Deisenroth, Marc Peter}, 79 | journal={arXiv preprint arXiv:2311.01198}, 80 | year={2023} 81 | } 82 | 83 | @inproceedings{sharp2020, 84 | title={A Laplacian for nonmanifold triangle meshes}, 85 | author={Sharp, Nicholas and Crane, Keenan}, 86 | booktitle={Computer Graphics Forum}, 87 | year={2020}, 88 | } 89 | 90 | @inproceedings{rahimi2007, 91 | title={Random features for large-scale kernel machines}, 92 | author={Rahimi, Ali and Recht, Benjamin}, 93 | booktitle={Advances in Neural Information Processing Systems}, 94 | year={2007}, 95 | } 96 | 97 | @inproceedings{sutherland2015, 98 | title={On the Error of Random Fourier Features}, 99 | author={Sutherland, Danica J and Schneider, Jeff}, 100 | booktitle={Uncertainty in Artificial Intelligence}, 101 | year={2015}, 102 | } 103 | 104 | @book{rasmussen2006, 105 | title={Gaussian Processes for Machine Learning}, 106 | author={Rasmussen, Carl Edward and Williams, Christopher K I}, 107 | publisher={MIT Press}, 108 | year={2006} 109 | } 110 | 111 | @book{absil2008, 112 | title={Optimization algorithms on matrix manifolds}, 113 | author={Absil, P-A and Mahony, Robert and Sepulchre, Rodolphe}, 114 | year={2008}, 115 | publisher={Princeton University Press} 116 | } 117 | 118 | @book{jost2011, 119 | title={Riemannian geometry and geometric analysis}, 120 | author={Jost, Jürgen}, 121 | year={2011}, 122 | publisher={Springer} 123 | } 124 | 125 | @article{canzani2013, 126 | title={Analysis on manifolds via the Laplacian}, 127 | author={Canzani, Yaiza}, 128 | journal={Lecture Notes available at: http://www.math.harvard.edu/canzani/docs/Laplacian.pdf}, 129 | year={2013} 130 | } 131 | 132 | @book{macwilliams1977, 133 | title={The theory of error-correcting codes}, 134 | author={MacWilliams, Florence Jessie 
and Sloane, Neil James Alexander}, 135 | year={1977}, 136 | publisher={Elsevier} 137 | } 138 | 139 | @inproceedings{borovitskiy2023, 140 | title={Isotropic Gaussian Processes on Finite Spaces of Graphs}, 141 | author={Borovitskiy, Viacheslav and Karimi, Mohammad Reza and Somnath, Vignesh Ram and Krause, Andreas}, 142 | booktitle={International Conference on Artificial Intelligence and Statistics}, 143 | year={2023}, 144 | } 145 | 146 | @article{sawyer1992, 147 | author = {Sawyer, Patrice}, 148 | journal = {Canadian Journal of Mathematics}, 149 | number = {3}, 150 | pages = {624--651}, 151 | publisher = {Cambridge University Press}, 152 | title = {The heat equation on the spaces of positive definite matrices}, 153 | volume = {44}, 154 | year = {1992}, 155 | } -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | sphinx-autoapi 3 | furo 4 | pygments>=2.13.0,<3.0 5 | sphinx-math-dollar 6 | nbsphinx 7 | docutils<0.21 8 | nbsphinx_link 9 | pypandoc 10 | sphinxcontrib.bibtex 11 | -------------------------------------------------------------------------------- /docs/theory/addition_theorem.rst: -------------------------------------------------------------------------------- 1 | ################# 2 | Addition Theorem 3 | ################# 4 | 5 | .. warning:: 6 | You can get by fine without reading this page for almost all use cases, just use the standard :class:`~.kernels.MaternGeometricKernel`, following the :doc:`example notebook on hypersheres `. 7 | 8 | This is optional material meant to explain the basic theory and based mainly on :cite:t:`borovitskiy2020`. 9 | 10 | ====================== 11 | Theory 12 | ====================== 13 | 14 | This builds on the general :doc:`theory on compact manifolds ` and uses the same notation. 15 | 16 | Consider a hypersphere: $M = \mathbb{S}_d$. 
17 | Then closed form expressions for $\lambda_j$ and $f_j$ are known (see, e.g., Appendix B of :cite:t:`borovitskiy2020`). 18 | The eigenfunctions $f_j$ in this case are the *(hyper)spherical harmonics*, restrictions of certain known polynomials in $\mathbb{R}^{d+1}$ on the unit sphere. 19 | 20 | However, although $\lambda_j, f_j$ are known for $M = \mathbb{S}_d$, using the general formula for $k(x, x')$ from the :doc:`compact manifolds page ` is suboptimal in this case. This is so because of the following theorem. 21 | 22 | **Addition theorem**. 23 | The spherical harmonics $f_j$ can be re-indexed as $f_{l s}$ with $l = 0, \ldots, \infty$ and $s = 1, \ldots, d_l$ with $d_l = (2l+d-1) \frac{\Gamma(l+d-1)}{\Gamma(d) \Gamma(l+1)}$ [#]_ such that 24 | 25 | * all eigenfunctions in the set $\{f_{l s}\}_{s=1}^{d_l}$ correspond to the same eigenvalue $\lambda_l = l(l+d-1)$, 26 | 27 | * the following equation holds 28 | $$ 29 | \sum_{s=1}^{d_l} f_{l s}(x) f_{l s}(x') 30 | = 31 | c_{l, d} \mathcal{C}^{(d-1)/2}_l(\cos(\mathrm{d}_{\mathbb{S}_d}(x, x'))) 32 | \qquad 33 | c_{l, d} 34 | = 35 | d_l \frac{\Gamma((d+1)/2)}{2 \pi^{(d+1)/2} \mathcal{C}_l^{(d-1)/2}(1)} 36 | , 37 | $$ 38 | where $\mathcal{C}^{(d-1)/2}_l$ are certain known polynomials called *Gegenbauer polynomials* and $\mathrm{d}_{\mathbb{S}_d}$ is the geodesic distance on the (hyper)sphere. 39 | 40 | Thanks to this, we have 41 | 42 | $$ 43 | k_{\nu, \kappa}(x,x') 44 | = 45 | \frac{1}{C_{\nu, \kappa}} \sum_{l=0}^{L-1} \Phi_{\nu, \kappa}(\lambda_l) c_{l, d} \mathcal{C}^{(d-1)/2}_l(\cos(\mathrm{d}_{\mathbb{S}_d}(x, x'))) 46 | \qquad 47 | \Phi_{\nu, \kappa}(\lambda) 48 | = 49 | \begin{cases} 50 | \left(\frac{2\nu}{\kappa^2} + \lambda\right)^{-\nu-\frac{d}{2}} 51 | & 52 | \nu < \infty \text{ — Matérn} 53 | \\ 54 | e^{-\frac{\kappa^2}{2} \lambda} 55 | & 56 | \nu = \infty \text{ — Heat (RBF)} 57 | \end{cases} 58 | $$ 59 | which is more efficient to use than the general formula above. 
The reason is simple: it is not harder to evaluate a Gegenbauer polynomial $\mathcal{C}^{(d-1)/2}_l$ than each single one of the respective (hyper)spherical harmonics. 60 | At the same time, you need much fewer Gegenbauer polynomials to achieve the same quality of approximation. 61 | For example, for $M = \mathbb{S}_2$ and $L = 20$ the corresponding $J$ is $400$. 62 | 63 | .. note:: 64 | The $l$ in the example above indexes what we call *levels* in the library. 65 | These are certain sets of eigenfunctions that correspond to the same eigenvalue (not necessarily a maximal set of those, i.e. not necessarily the full eigenspace), for which one can efficiently compute the outer product $\sum_{s} f_{l s}(x) f_{l s}(x')$ without having to compute the individual eigenfunctions. [#]_ 66 | 67 | .. note:: 68 | In the simplest special case of $\mathbb{S}_d$, the circle $\mathbb{S}_1$, the eigenfunctions are given by $\sin(l \theta), \cos(l \theta)$, where $l$ indexes levels. 69 | The outer product $\cos(l \theta) \cos(l \theta') + \sin(l \theta) \sin(l \theta')$ in this case can be simplified to $\cos(l (\theta-\theta')) = \cos(l d_{\mathbb{S}_1}(\theta, \theta'))$ thanks to an elementary trigonometric identity. 70 | 71 | Such addition theorems appear beyond hyperspheres, for example for Lie groups and other compact homogeneous spaces :cite:p:`azangulov2024a`. 72 | In the library, such spaces use the class :class:`~.EigenfunctionsWithAdditionTheorem` to represent the spectrum of $\Delta_{\mathcal{M}}$. 73 | For them, the *number of levels* parameter of the :class:`~.kernels.MaternKarhunenLoeveKernel` maps to $L$ in the above formula. 74 | 75 | .. rubric:: Footnotes 76 | 77 | .. [#] $\Gamma$ denotes the gamma function, $\Gamma(j) = (j-1)!$ for integer $j > 0$. 78 | 79 | .. [#] The notion of *levels* is discussed in the documentation of the :class:`~.kernels.MaternKarhunenLoeveKernel` and :class:`~.Eigenfunctions` classes. 
80 | -------------------------------------------------------------------------------- /docs/theory/compact.rst: -------------------------------------------------------------------------------- 1 | ############################### 2 | Kernels on Compact Manifolds 3 | ############################### 4 | 5 | .. warning:: 6 | You can get by fine without reading this page for almost all use cases, just use the standard :class:`~.kernels.MaternGeometricKernel`, following the :doc:`example notebook on hyperspheres `. 7 | 8 | This is optional material meant to explain the basic theory and based mainly on :cite:t:`borovitskiy2020`. [#]_ 9 | 10 | ======= 11 | Theory 12 | ======= 13 | 14 | For compact Riemannian manifolds, :class:`~.kernels.MaternGeometricKernel` is an alias to :class:`~.kernels.MaternKarhunenLoeveKernel`. 15 | For such a manifold $\mathcal{M}$ the latter is given by the formula 16 | $$ 17 | k_{\nu, \kappa}(x,x') 18 | \!=\! 19 | \frac{1}{C_{\nu, \kappa}} \sum_{j=0}^{J-1} \Phi_{\nu, \kappa}(\lambda_j) f_j(x) f_j(x') 20 | \quad 21 | \Phi_{\nu, \kappa}(\lambda) 22 | \!=\! 23 | \begin{cases} 24 | \left(\frac{2\nu}{\kappa^2} + \lambda\right)^{-\nu-\frac{d}{2}} 25 | & 26 | \nu < \infty \text{ — Matérn} 27 | \\ 28 | e^{-\frac{\kappa^2}{2} \lambda} 29 | & 30 | \nu = \infty \text{ — Heat (RBF)} 31 | \end{cases} 32 | $$ 33 | The notation here is as follows. 34 | 35 | * The values $\lambda_j \geq 0$ and the functions $f_j(\cdot)$ are *eigenvalues* and *eigenfunctions* of the minus *Laplace–Beltrami operator* $-\Delta_{\mathcal{M}}$ on $\mathcal{M}$ such that 36 | $$ 37 | \Delta_{\mathcal{M}} f_j = - \lambda_j f_j 38 | .
39 | $$ 40 | The functions $\left\{f_j\right\}_{j=0}^{\infty}$ constitute an orthonormal basis of the space $L^2(\mathcal{M})$ of square integrable functions on the manifold $\mathcal{M}$ with respect to the inner product $\langle f, g \rangle_{L^2(\mathcal{M})} = \frac{1}{\lvert\mathcal{M}\rvert} \int_{\mathcal{M}} f(x) g(x) \mathrm{d} x$, where $\lvert\mathcal{M}\rvert$ denotes the volume of the manifold $\mathcal{M}$. 41 | 42 | * $d$ is the dimension of the manifold. 43 | 44 | * The number of eigenpairs $1 \leq J < \infty$ controls the quality of approximation of the kernel. 45 | For some manifolds, e.g. manifolds represented by discrete :class:`meshes <.spaces.Mesh>`, this corresponds to the *number of levels* parameter of the :class:`~.kernels.MaternKarhunenLoeveKernel`. For others, for which the *addition theorem* holds (:doc:`see the respective page `), the *number of levels* parameter has a different meaning [#]_. 46 | 47 | * $C_{\nu, \kappa}$ is the constant which ensures that average variance is equal to $1$, i.e. $\frac{1}{\lvert\mathcal{M}\rvert}\int_{\mathcal{M}} k(x, x) \mathrm{d} x = 1$. 48 | It is easy to show that $C_{\nu, \kappa} = \sum_{j=0}^{J-1} \Phi_{\nu, \kappa}(\lambda_j)$. 49 | 50 | **Note:** For general manifolds, $k(x, x)$ can vary from point to point. 51 | You usually observe this for manifolds represented by meshes, the ones which do not have a lot of symmetries. 52 | On the other hand, for the hyperspheres $k(x, x)$ is a constant, as it is for all *homogeneous spaces* which hyperspheres are instances of, as well as for Lie groups (which are also instances of homogeneous spaces). 53 | 54 | .. rubric:: Footnotes 55 | 56 | .. [#] Similar ideas have also appeared in :cite:t:`solin2020` and :cite:t:`coveney2020`. 57 | 58 | .. [#] The notion of *levels* is discussed in the documentation of the :class:`~.kernels.MaternKarhunenLoeveKernel` and :class:`~.Eigenfunctions` classes. 
-------------------------------------------------------------------------------- /docs/theory/feature_maps.rst: -------------------------------------------------------------------------------- 1 | ########################################## 2 | Feature Maps and Sampling 3 | ########################################## 4 | 5 | .. warning:: 6 | You can get by fine without reading this page for almost all use cases, just use the standard :func:`~.kernels.default_feature_map`, following the example notebook on the specific space of interest. 7 | 8 | This is optional material meant to explain the basic theory. 9 | 10 | ======= 11 | Theory 12 | ======= 13 | 14 | Any kernel $k: X \times X \to \mathbb{R}$ satisfies $k(x, x') = \langle \phi(x), \phi(x') \rangle_{\mathcal{H}}$ for some Hilbert space $\mathcal{H}$ and some *feature map* $\phi: X \to \mathcal{H}$. 15 | 16 | Usually, the Hilbert space $\mathcal{H}$ is infinite-dimensional, rendering $\phi$ intractable. 17 | However, we can often find an *approximate* feature map $\widetilde{\phi}: X \to \mathbb{R}^M$, such that 18 | $$ 19 | k(x, x') = \langle \phi(x), \phi(x') \rangle_{\mathcal{H}} \approx \langle \widetilde{\phi}(x), \widetilde{\phi}(x') \rangle_{\mathbb{R}^M}. 20 | $$ 21 | 22 | .. note:: 23 | If the feature map is complex-valued $\widetilde{\phi}: X \to \mathbb{C}^M$, then 24 | 25 | .. math:: k(x, x') = \langle \phi(x), \phi(x') \rangle_{\mathcal{H}} \approx \mathrm{Re} \langle \widetilde{\phi}(x), \widetilde{\phi}(x') \rangle_{\mathbb{C}^M}. 26 | 27 | Such approximate finite-dimensional feature maps can be used to speed up computations, as in, for example, :cite:t:`rahimi2007`. 28 | Importantly, it can be used to efficiently sample (without incurring cubic costs) the Gaussian process $f \sim \mathrm{GP}(0, k)$. 
29 | The key idea is that 30 | $$ 31 | f(x) \approx \sum_{j=1}^M w_j \widetilde{\phi}_j(x) 32 | , 33 | \qquad 34 | w_j \sim \mathrm{N}(0, 1) 35 | , 36 | \qquad 37 | \widetilde{\phi}(x) = (\widetilde{\phi}_1(x), \ldots, \widetilde{\phi}_M(x)) 38 | . 39 | $$ 40 | 41 | Matérn kernels on various spaces usually possess natural approximate finite-dimensional feature maps. 42 | In some cases, these are deterministic, in others—random. 43 | For the specific constructions, we refer the reader to the theory on specific spaces and the respective papers. 44 | -------------------------------------------------------------------------------- /docs/theory/hypercube_graph.rst: -------------------------------------------------------------------------------- 1 | ################################ 2 | Kernels on the Hypercube Graph 3 | ################################ 4 | 5 | .. warning:: 6 | You can get by fine without reading this page for almost all use cases, just use the standard :class:`~.kernels.MaternGeometricKernel`, following the respective :doc:`example notebook `. 7 | 8 | This is optional material meant to explain the basic theory and based mainly on :cite:t:`borovitskiy2023`. 9 | 10 | ========================== 11 | Motivation 12 | ========================== 13 | 14 | The :class:`~.spaces.HypercubeGraph` space $C^d$ can be used to model $d$-dimensional *binary vector* inputs. 15 | 16 | There are many settings where inputs are binary vectors or can be represented as such. For instance, upon flattening, binary vectors represent adjacency matrices of *unweighted labeled graphs* [#]_. 17 | 18 | ========================== 19 | Structure of the Space 20 | ========================== 21 | 22 | The elements of this space—given its `dim` is $d \in \mathbb{Z}_{>0}$—are exactly the binary vectors of length $d$. 23 | 24 | The geometry of this space is simple: it is a graph such that $x, x' \in C^d$ are connected by an edge if and only if they differ in exactly one coordinate (i.e.
there is exactly *Hamming distance* $1$ between). 25 | 26 | Being a graph, $C^d$ could also be represented using the general :class:`~.spaces.Graph` space. 27 | However, the number of nodes in $C^d$ is $2^d$, which is exponential in $d$, rendering the general techniques infeasible. 28 | 29 | ========================== 30 | Eigenfunctions 31 | ========================== 32 | 33 | On graphs, kernels are computed using the eigenfunctions and eigenvalues of the Laplacian. 34 | 35 | The eigenfunctions of the Laplacian on the hypercube graph are the *Walsh functions* [#]_ given analytically by the simple formula 36 | $$ 37 | w_T(x_0, .., x_{d-1}) = (-1)^{\sum_{j \in T} x_j} 38 | $$ 39 | where $x = (x_0, .., x_{d-1}) \in C^d$ and the index $T$ is an arbitrary subset of the set $\{0, .., d-1\}$. 40 | 41 | The corresponding eigenvalues are $\lambda_T = \lambda_{\lvert T \rvert} = 2 \lvert T \rvert / d$, where $\lvert T \rvert$ is the cardinality of $T$. 42 | 43 | However, the problem is that the number of eigenfunctions is $2^d$. 44 | Hence naive truncation of the sum in the kernel formula to a few hundred terms leads to a poor approximation of the kernel for larger $d$. 45 | 46 | ========================== 47 | Addition Theorem 48 | ========================== 49 | 50 | Much like for the hyperspheres and unlike for the general graphs, there is an :doc:`addition theorem ` for the hypercube graph: 51 | 52 | $$ 53 | \sum_{T \subseteq \{0, .., d-1\}, \lvert T \rvert = j} w_T(x) w_T(x') 54 | = 55 | \sum_{T \subseteq \{0, .., d-1\}, \lvert T \rvert = j} w_T(x \oplus x') 56 | = 57 | \binom{d}{j} 58 | \widetilde{G}_{d, j, m} 59 | $$ 60 | where $\oplus$ is the elementwise XOR operation, $m$ is the Hamming distance between $x$ and $x'$, and $\widetilde{G}_{d, j, m}$ is the Kravchuk polynomial of degree $d$ and order $j$ normalized such that $\widetilde{G}_{d, j, 0} = 1$, evaluated at $m$. 
61 | 62 | Normalized Kravchuk polynomials $\widetilde{G}_{d, j, m}$ satisfy the following three-term recurrence relation 63 | $$ 64 | \widetilde{G}_{d, j, m} 65 | = 66 | \frac{d - 2 m}{d - j + 1} \widetilde{G}_{d, j - 1, m} 67 | -\frac{j-1}{d - j + 1} \widetilde{G}_{d, j - 2, m}, 68 | \quad 69 | \widetilde{G}_{d, 0, m} = 1, 70 | \quad 71 | \widetilde{G}_{d, 1, m} = 1 - \frac{2}{d} m, 72 | $$ 73 | which allows for their efficient computation without the need to compute large sums of the individual Walsh functions. 74 | 75 | With that, the kernels on the hypercube graph can be computed efficiently using the formula 76 | $$ 77 | k_{\nu, \kappa}(x, x') 78 | = 79 | \frac{1}{C_{\nu, \kappa}} 80 | \sum_{j=0}^{L-1} 81 | \Phi_{\nu, \kappa}(\lambda_j) 82 | \binom{d}{j} \widetilde{G}_{d, j, m} 83 | \qquad 84 | \Phi_{\nu, \kappa}(\lambda) 85 | = 86 | \begin{cases} 87 | \left(\frac{2\nu}{\kappa^2} + \lambda\right)^{-\nu-\frac{d}{2}} 88 | & 89 | \nu < \infty \text{ — Matérn} 90 | \\ 91 | e^{-\frac{\kappa^2}{2} \lambda} 92 | & 93 | \nu = \infty \text{ — Heat (RBF)} 94 | \end{cases} 95 | $$ 96 | where $m$ is the Hamming distance between $x$ and $x'$, and $L \leq d + 1$ is the user-controlled number of levels parameter. 97 | 98 | **Notes:** 99 | 100 | #. We define the dimension of the :class:`~.spaces.HypercubeGraph` space $C^d$ to be $d$, in contrast to the graphs represented by the :class:`~.spaces.Graph` space, whose dimension is defined to be $0$. 101 | 102 | Because of this, much like in the Euclidean or the manifold case, the $1/2, 3/2, 5/2$ *are* in fact reasonable values for the smoothness parameter $\nu$. 103 | 104 | .. rubric:: Footnotes 105 | 106 | .. [#] Every node of a labeled graph is associated with a unique label. Functions on labeled graphs do *not* have to be invariant to permutations of nodes. 107 | 108 | ..
[#] Since the hypercube graph $C^d$ is $d$-regular, the unnormalized Laplacian and the symmetric normalized Laplacian coincide up to a multiplication by $d$. Thus their eigenfunctions are the same and eigenvalues coincide up to a multiplication by $d$. For better numerical stability, we use symmetric normalized Laplacian in the implementation and assume its use throughout this page. -------------------------------------------------------------------------------- /docs/theory/index.rst: -------------------------------------------------------------------------------- 1 | ######### 2 | Theory 3 | ######### 4 | 5 | .. toctree:: 6 | :maxdepth: 1 7 | 8 | Kernels on compact manifolds 9 | Addition theorem 10 | Kernels on graphs 11 | Kernels on meshes 12 | Kernels on non-compact symmetric spaces 13 | Kernels on product spaces 14 | Product kernels 15 | Feature maps and sampling 16 | Hypercube graph space 17 | -------------------------------------------------------------------------------- /docs/theory/meshes.rst: -------------------------------------------------------------------------------- 1 | #################### 2 | Kernels on Meshes 3 | #################### 4 | 5 | .. warning:: 6 | You can get by fine without reading this page for almost all use cases, just use the standard :class:`~.kernels.MaternGeometricKernel`, following the :doc:`example notebook on meshes `. 7 | 8 | This is optional material meant to explain the basic theory and based mainly on :cite:t:`borovitskiy2020`. [#]_ 9 | 10 | On one hand, this is similar to the first section of the :doc:`theory on compact manifolds ` because meshes are discretizations of compact 2-dimensional manifolds. 11 | On the other hand, this is similar to the :doc:`theory on compact graphs ` because meshes are graphs with additional structure. 12 | 13 | ======= 14 | Theory 15 | ======= 16 | 17 | Consider a mesh $M$ with $N$ nodes.
18 | There are a few notions of *Laplacian* $\mathbf{\Delta}$ for $M$, which is always a positive semidefinite matrix of size $N \times N$. We use the *robust Laplacian* by :cite:t:`sharp2020` implemented in the `robust_laplacian `_ package. 19 | 20 | Since $\mathbf{\Delta}$ is positive semidefinite, there is an orthonormal basis $\{\boldsymbol f_l\}_{l=0}^{N-1}$ in $\mathbb{R}^N$ of eigenvectors such that $\mathbf{\Delta} \boldsymbol f_l = \lambda_l \boldsymbol f_l$ for $0 = \lambda_0 \leq \lambda_1 \leq \ldots \leq \lambda_{N-1}$. 21 | 22 | The eigenvectors $f_l$ can be regarded as functions on the mesh nodes: $f_l(j) = (f_l)_j$. 23 | For meshes, :class:`~.kernels.MaternGeometricKernel` is an alias to :class:`~.kernels.MaternKarhunenLoeveKernel`. 24 | The latter is given by the formula 25 | $$ 26 | k_{\nu, \kappa}(i,j) 27 | = 28 | \frac{1}{C_{\nu, \kappa}} \sum_{l=0}^{L-1} \Phi_{\nu, \kappa}(\lambda_l) f_l(i) f_l(j) 29 | \quad 30 | \Phi_{\nu, \kappa}(\lambda) 31 | = 32 | \begin{cases} 33 | \left(\frac{2\nu}{\kappa^2} + \lambda\right)^{-\nu - d/2} 34 | & 35 | \nu < \infty \text{ — Matérn} 36 | \\ 37 | e^{-\frac{\kappa^2}{2} \lambda} 38 | & 39 | \nu = \infty \text{ — Heat (RBF)} 40 | \end{cases} 41 | $$ 42 | The notation here is as follows. 43 | 44 | * $d$ is the dimension of the mesh (i.e. the dimension of the implied manifold the mesh approximates). In our implementation $d = 2$ as we only handle 2-dimensional meshes in $\mathbb{R}^3$. 45 | 46 | * $1 \leq L \leq N$ controls the quality of approximation of the kernel. 47 | 48 | * Setting $L = N$ gives you the exact kernel but usually requires $O(N^3)$ to compute the eigenpairs. 49 | 50 | * Setting $L \ll N$ can in principle allow much faster eigenpair computation because the Laplacian is usually sparse for meshes. 51 | Such techniques are, however, not (yet) implemented in GeometricKernels. 52 | 53 | * The constant $C_{\nu, \kappa}$ above ensures that the average variance is equal to $1$, i.e.
$\frac{1}{N} \sum_{n=1}^N k(n, n) = 1$. 54 | It is easy to show that $C_{\nu, \kappa} = \sum_{n=1}^L \Phi_{\nu, \kappa}(\lambda_n)$. 55 | 56 | .. note:: 57 | The "variance" $k(x, x)$ can vary from point to point. 58 | 59 | .. rubric:: Footnotes 60 | 61 | .. [#] Similar ideas have also appeared in :cite:t:`solin2020` and :cite:t:`coveney2020`. 62 | -------------------------------------------------------------------------------- /docs/theory/product_kernels.rst: -------------------------------------------------------------------------------- 1 | ################################################ 2 | Product Kernels 3 | ################################################ 4 | 5 | To work on a product space (i.e. a space which is itself a product of spaces), one can use either *product Matérn kernels* or *Matérn kernels on product spaces*. 6 | These are generally not the same. 7 | On this page we discuss the product Matérn kernels. 8 | For a discussion on Matérn kernels on product spaces, see :doc:`the respective page `. 9 | For a brief demonstration of the difference, see the example notebook :doc:`on the torus `. 10 | 11 | 12 | .. warning:: 13 | This is optional material meant to explain the basic theory and based mainly on :cite:t:`borovitskiy2020`. 14 | 15 | You can get by fine without reading this page for almost all use cases involving product spaces, either 16 | 17 | * by using the standard :class:`~.kernels.MaternGeometricKernel` with :class:`~.spaces.ProductDiscreteSpectrumSpace` (only works for discrete spectrum spaces), 18 | 19 | * or by using :class:`~.kernels.ProductGeometricKernel`, which can combine any types of kernels while also maintaining a separate lengthscale for each of them, much like ARD kernels in the Euclidean case. 20 | 21 | Both ways are described in the example notebook :doc:`on the torus `, which is a product of circles. 
22 | 23 | ======= 24 | Theory 25 | ======= 26 | 27 | In GeometricKernels, there is a concept of product Matérn kernel (:class:`~.kernels.ProductGeometricKernel`). 28 | It allows you to define a kernel $k$ on a product $\mathcal{M} = \mathcal{M}_1 \times \ldots \times \mathcal{M}_S$ of some other spaces $\mathcal{M}_s$ by taking a product of some kernels $k_s: \mathcal{M}_s \times \mathcal{M}_s \to \mathbb{R}$: 29 | $$ 30 | k((x_1, \ldots, x_S), (x_1', \ldots, x_S')) 31 | = 32 | k_1(x_1, x_1') \cdot \ldots \cdot k_S(x_S, x_S') 33 | . 34 | $$ 35 | Each $k_s$ would usually be :class:`~.kernels.MaternGeometricKernel` on spaces $\mathcal{M}_s$, which can be anything: compact manifolds, graphs, meshes, non-compact symmetric spaces, etc. 36 | 37 | **Importantly**, this allows you to have a separate length scale parameter for each of the factors, enabling, e.g. *automatic relevance determination* (ARD, cf. :cite:t:`rasmussen2006`). 38 | 39 | For Matérn kernels, even if $\nu$ and $\kappa$ are the same for all $k_s$, the product kernel turns out to be different from the Matérn kernel on the product space whenever $\nu < \infty$. 40 | If $\nu = \infty$, i.e. in the case of the heat kernel (a.k.a. diffusion kernel, or squared exponential kernel, or RBF kernel), the product of kernels with same values of $\kappa$ coincides with the kernel on the product space with this same $\kappa$. 41 | This mirrors the standard Euclidean case. 42 | -------------------------------------------------------------------------------- /docs/theory/product_spaces.rst: -------------------------------------------------------------------------------- 1 | ################################################ 2 | Kernels on Product Spaces 3 | ################################################ 4 | 5 | To work on a product space (i.e. a space which is itself a product of spaces), one can use either *product Matérn kernels* or *Matérn kernels on product spaces*. 6 | These are generally not the same. 
7 | On this page we discuss the Matérn kernels on product spaces. 8 | For a discussion on product Matérn kernels, see :doc:`the respective page `. 9 | For a brief demonstration of the difference, see the example notebook :doc:`on the torus `. 10 | 11 | .. warning:: 12 | This is optional material meant to explain the basic theory and based mainly on :cite:t:`borovitskiy2020`. 13 | 14 | You can get by fine without reading this page for almost all use cases involving product spaces, either 15 | 16 | * by using the standard :class:`~.kernels.MaternGeometricKernel` with :class:`~.spaces.ProductDiscreteSpectrumSpace` (only works for discrete spectrum spaces), 17 | 18 | * or by using :class:`~.kernels.ProductGeometricKernel`, which can combine any types of kernels while also maintaining a separate lengthscale for each of them, much like ARD kernels in the Euclidean case. 19 | 20 | Both ways are described in the example notebook :doc:`on the torus `, which is a product of circles. 21 | 22 | ======= 23 | Theory 24 | ======= 25 | 26 | This builds on the general :doc:`theory on compact manifolds `. 27 | 28 | Assume that $\mathcal{M}$ is a product of compact Riemannian manifolds $\mathcal{M}_s$, i.e. $\mathcal{M} = \mathcal{M}_1 \times \ldots \times \mathcal{M}_S$. 29 | You can consider other discrete spectrum spaces in place of the manifolds, like graphs or meshes, just as well. 30 | Here we concentrate on manifolds for simplicity of exposition. 31 | 32 | Matérn kernels on $\mathcal{M}$ are determined by the *eigenvalues* $\lambda_j \geq 0$ and *eigenfunctions* $f_j(\cdot)$ of the minus *Laplacian* $-\Delta_{\mathcal{M}}$ on $\mathcal{M}$. 33 | 34 | The **key idea** is that $\lambda_j, f_j$ can be obtained from the eigenvalues and eigenfunctions on $\mathcal{M}_s$ therefore allowing to build Matérn kernels on the product space $\mathcal{M}$ from the components you would use to build Matérn kernels on the separate factors $\mathcal{M}_s$. 
35 | 36 | In fact, all eigenfunctions on $\mathcal{M}$ have form 37 | $$ 38 | f_j(x_1, \ldots, x_S) 39 | = 40 | f^{(1)}_{j_1(j)}(x_1) \cdot \ldots \cdot f^{(S)}_{j_S(j)}(x_S) 41 | $$ 42 | where $f^{(s)}_{j}(\cdot)$ is the $j$-th eigenfunction on $\mathcal{M}_s$. 43 | What is more, 44 | $$ 45 | \Delta_{\mathcal{M}} f_j = \lambda_j f_j 46 | \qquad 47 | \text{for} 48 | \qquad 49 | \lambda_j = \lambda^{(1)}_{j_1(j)} + \ldots + \lambda^{(S)}_{j_S(j)} 50 | $$ 51 | where $\lambda^{(s)}_{j}$ is the $j$-th eigenvalue on $\mathcal{M}_s$. 52 | See, e.g., page 48 of the :cite:t:`canzani2013`. 53 | 54 | .. note:: 55 | The *levels* (see :class:`here <.kernels.MaternKarhunenLoeveKernel>` and :class:`here <.eigenfunctions.Eigenfunctions>`) on factors define levels on the product space, in the same fashion as individual eigenfunctions and eigenvalues on the factors define their counterparts on the product space. In practice we operate on levels rather than on individual eigenpairs. 56 | -------------------------------------------------------------------------------- /docs/theory/symmetric.rst: -------------------------------------------------------------------------------- 1 | ########################################## 2 | Kernels on Non-compact Symmetric Spaces 3 | ########################################## 4 | 5 | .. warning:: 6 | You can get by fine without reading this page for almost all use cases, just use the standard :class:`~.kernels.MaternGeometricKernel`, following the example notebooks :doc:`on hyperbolic spaces ` and :doc:`on the space of symmetric positive definite matrices (SPD) `. 7 | 8 | This is optional material meant to explain the basic theory and based mainly on :cite:t:`azangulov2024b`. 
9 | 10 | ======= 11 | Theory 12 | ======= 13 | 14 | The theory for *non-compact symmetric spaces*—like hyperbolic spaces or manifolds of symmetric positive definite matrices (endowed with the affine-invariant metric)—is quite different from the theory for *discrete spectrum spaces* such as compact manifolds, graphs or meshes. 15 | For the latter, kernels are given by a finite sum or an infinite series and are approximated using *truncation*. 16 | For the former, kernels are given by integrals and are approximated using *Monte Carlo*. 17 | 18 | More specifically, for non-compact symmetric spaces, there exists an analog of the *random Fourier features* technique of :cite:t:`rahimi2007`. 19 | In the Euclidean case, closed form expressions for kernels are available and random Fourier features are only used to speed up computations. 20 | No closed form expressions for kernels are usually available on other non-compact symmetric spaces. 21 | Because of that, random Fourier features are the basic means of computing the kernels in this case. 22 | 23 | A complete mathematical treatise can be found in :cite:t:`azangulov2024b`. 24 | Here we briefly present the main ideas. 25 | Recall that the usual Euclidean random Fourier features boil down to 26 | 27 | $$ 28 | k(x, x') = \int_{\mathbb{R}^d} S(\lambda) e^{2 \pi i \langle x - x', \lambda \rangle} \mathrm{d} \lambda \approx \frac{1}{L} \sum_{l=1}^L e^{2 \pi i \langle x - x', \lambda_l\rangle} 29 | \qquad 30 | \lambda_l \sim S(\lambda) 31 | $$ 32 | where $S(\cdot)$ is the spectral density of the kernel $k$. 33 | For Matérn kernels, $S(\cdot)$ coincides with the Gaussian density if $\nu = \infty$ and with the Student's $t$ density with $\nu$ degrees of freedom if $\nu < \infty$. 
34 | 35 | On a non-compact symmetric space, the following holds instead: 36 | $$ 37 | k(x, x') = \int_{\mathbb{R}^r} S(\lambda) \pi^{(\lambda)}(x, x') c(\lambda)^{-2} \mathrm{d} \lambda \approx \frac{1}{L} \sum_{l=1}^L \pi^{(\lambda_l)}(x, x') 38 | \qquad 39 | \lambda_l \sim c(\lambda)^{-2} S(\lambda) 40 | $$ 41 | Here, 42 | 43 | * $r$ is called the *rank* of the symmetric space, 44 | 45 | * $\pi^{(\lambda)}$ are called *zonal spherical functions*, 46 | 47 | * $c(\lambda)$ is called the *Harish-Chandra's $c$ function*. 48 | 49 | Both $r$ and $c$ can be computed exactly using algebraic-only considerations. 50 | On the other hand, $\pi^{(\lambda_l)}(x, x')$ are integrals that require numerical approximation. 51 | There are multiple ways to do this. 52 | The most important one is as follows: 53 | $$ 54 | \pi^{(\lambda_l)}(x, x') = \mathbb{E}_{h \sim \mu_H} 55 | e^{\langle i \lambda + \rho, \,a(h, x)\rangle} 56 | \overline{ 57 | e^{\langle i \lambda + \rho, \,a(h, x')\rangle}} 58 | \approx 59 | \frac{1}{P} \sum_{p=1}^P 60 | e^{\langle i \lambda + \rho, \,a(h_p, x)\rangle} 61 | \overline{ 62 | e^{\langle i \lambda + \rho, \,a(h_p, x')\rangle}} 63 | \qquad 64 | h_p \sim \mu_H 65 | $$ 66 | where, this time, 67 | 68 | * $\mu_H$ is some measure which is usually easy to sample from, 69 | 70 | * $i$ is the imaginary unit, 71 | 72 | * $a(\cdot, \cdot)$ is a function that can be computed exactly using algebraic-only considerations. 73 | 74 | The right-hand side here is an inner product. 75 | Same is true for the result of substituting this approximation of $\pi^{(\lambda_l)}(x, x')$ into the approximation of $k(x, x')$ above. 
76 | More specifically, defining 77 | $$ 78 | \phi(x) = 79 | \frac{1}{\sqrt{P L}} 80 | ( 81 | e^{\langle i \lambda_1 + \rho, \,a(h_1, x)\rangle}, 82 | \ldots, 83 | e^{\langle i \lambda_1 + \rho, \,a(h_P, x)\rangle}, 84 | \ldots, 85 | e^{\langle i \lambda_L + \rho, \,a(h_1, x)\rangle}, 86 | \ldots, 87 | e^{\langle i \lambda_L + \rho, \,a(h_P, x)\rangle}) 88 | $$ 89 | we have 90 | $$ 91 | k(x, x') \approx \langle \phi(x), \phi(x') \rangle_{\mathbb{C}^{P L}}. 92 | $$ 93 | 94 | .. note:: 95 | Typically, in practice we set $P = 1$. This is akin to how it is commonly done for the random phase Fourier feature approximation in the Euclidean case, as in Equation (2) of :cite:t:`sutherland2015`. 96 | 97 | For non-compact symmetric spaces, :class:`~.kernels.MaternGeometricKernel` is an alias to :class:`~.kernels.MaternFeatureMapKernel`. 98 | The latter is a kernel defined in terms of feature map just like in the equation above. 99 | The feature map is exactly the $\phi(\cdot)$ above, implemented as :class:`~.feature_maps.RejectionSamplingFeatureMapHyperbolic` for hyperbolic spaces and as :class:`~.feature_maps.RejectionSamplingFeatureMapSPD` for manifolds of symmetric positive definite matrices. 100 | -------------------------------------------------------------------------------- /geometric_kernels/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | The library root. The kernel classes are contained within the 3 | :py:mod:`kernels ` package. They need to be paired 4 | with one of the space classes from the 5 | :py:mod:`spaces ` package. 6 | 7 | The :py:mod:`frontends ` package contains kernel 8 | wrapper classes compatible with Gaussian process libraries like 9 | `GPFlow `_, `GPyTorch `_, 10 | and `GPJax `_. 11 | 12 | The :py:mod:`feature_maps ` package provides 13 | (approximate) finite-dimensional feature maps for various geometric kernels. 
14 | 15 | The :py:mod:`sampling ` package contains routines 16 | that allow efficient (approximate) sampling of functions from geometric Gaussian 17 | process priors. 18 | 19 | The :py:mod:`utils ` package provides (relatively) 20 | general purpose utility code used throughout the library. This is an internal 21 | part of the library. 22 | 23 | The :py:mod:`resources ` package contains static 24 | resources, such as results of symbolic algebra computations. This is an 25 | internal part of the library. 26 | 27 | The :py:mod:`lab_extras ` package contains our 28 | custom additions to `LAB `_, the framework that 29 | allows our library to be backend-independent. This is an internal part of the 30 | library. 31 | 32 | """ 33 | 34 | import logging 35 | 36 | import geometric_kernels._logging # noqa: F401 37 | 38 | logging.getLogger(__name__).info( 39 | "Numpy backend is enabled. To enable other backends, don't forget to `import geometric_kernels.*backend name*`." 40 | ) 41 | logging.getLogger(__name__).info( 42 | "We may be suppressing some logging of external libraries. To override the logging policy, call `logging.basicConfig`." 43 | ) 44 | -------------------------------------------------------------------------------- /geometric_kernels/_logging.py: -------------------------------------------------------------------------------- 1 | """ Setup logging """ 2 | 3 | import logging 4 | 5 | 6 | class DisableLogging: 7 | """ 8 | Temporarily disable logging (except for the `CRITICAL` level messages). 9 | Adapted from https://stackoverflow.com/a/20251235. Use as 10 | 11 | .. code-block:: python 12 | 13 | with DisableLogging(): 14 | do_your_stuff 15 | """ 16 | 17 | def __enter__(self): 18 | logging.disable(logging.CRITICAL) 19 | 20 | def __exit__(self, exit_type, exit_value, exit_traceback): 21 | logging.disable(logging.NOTSET) 22 | 23 | 24 | class FirstPartFilter(logging.Filter): 25 | """ 26 | A filter that provides the `name_first` variable for formatting. 
For a 27 | logger called "aaa.bbb.ccc", name_first="aaa". 28 | Adapted from https://stackoverflow.com/a/46961676. 29 | """ 30 | 31 | def filter(self, record): 32 | record.name_first = record.name.rsplit(".", 1)[0] 33 | return True 34 | 35 | 36 | class NoUsingBackendFilter(logging.Filter): 37 | """ 38 | A filter that removes the "Using ... backend" log record of geomstats. 39 | """ 40 | 41 | def filter(self, record): 42 | msg = record.getMessage() 43 | # TODO: when geomstats implements better logging, add 44 | # msg.name_first == "geomstats" 45 | # as the third condition for filtering. 46 | return not (msg.startswith("Using ") and msg.endswith(" backend")) 47 | 48 | 49 | root_handler = logging.StreamHandler() 50 | root_handler.addFilter(FirstPartFilter()) 51 | root_handler.addFilter(NoUsingBackendFilter()) 52 | formatter = logging.Formatter("%(levelname)s (%(name_first)s): %(message)s") 53 | root_handler.setFormatter(formatter) 54 | # Note: using basicConfig allows the "outermost" code to define the logging 55 | # policy: once one basicConfig has been called, each subsequent basicConfig 56 | # call is ignored. That is unless force=True parameter is set, which, 57 | # hopefully, is only done sparingly and with good reason. 58 | logging.basicConfig(handlers=[root_handler]) 59 | 60 | logger = logging.getLogger("geometric_kernels") 61 | logger.setLevel(logging.INFO)  # can be easily changed by downstream code 62 | -------------------------------------------------------------------------------- /geometric_kernels/feature_maps/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | (Approximate) finite-dimensional feature maps for geometric kernels. 3 | 4 | A brief introduction into the theory can be found on :doc:`this page 5 | `. 
6 | """ 7 | 8 | # noqa: F401 9 | from geometric_kernels.feature_maps.base import FeatureMap 10 | from geometric_kernels.feature_maps.deterministic import ( 11 | DeterministicFeatureMapCompact, 12 | HodgeDeterministicFeatureMapCompact, 13 | ) 14 | from geometric_kernels.feature_maps.random_phase import ( 15 | RandomPhaseFeatureMapCompact, 16 | RandomPhaseFeatureMapNoncompact, 17 | ) 18 | from geometric_kernels.feature_maps.rejection_sampling import ( 19 | RejectionSamplingFeatureMapHyperbolic, 20 | RejectionSamplingFeatureMapSPD, 21 | ) 22 | -------------------------------------------------------------------------------- /geometric_kernels/feature_maps/base.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module provides the abstract base class :class:`FeatureMap` that all 3 | feature maps inherit from. It can be used for type hinting. 4 | """ 5 | 6 | import abc 7 | 8 | 9 | class FeatureMap(abc.ABC): 10 | """ 11 | Abstract base class for all feature maps. 12 | """ 13 | 14 | @abc.abstractmethod 15 | def __call__(self, *args, **kwargs): 16 | r""" 17 | `FeatureMap`\ s are callable. 18 | """ 19 | raise NotImplementedError 20 | -------------------------------------------------------------------------------- /geometric_kernels/frontends/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Wrappers for geometric kernels for the following Gaussian processes packages 3 | (frontends): 4 | 5 | * `GPflow (TensorFlow) `_ 6 | * `GPyTorch (PyTorch) `_ 7 | * `GPJax (Jax) `_ 8 | """ 9 | -------------------------------------------------------------------------------- /geometric_kernels/frontends/gpflow.py: -------------------------------------------------------------------------------- 1 | """ 2 | GPflow kernel wrapper. 
3 | 4 | A tutorial on how to use this wrapper to run Gaussian process regression on 5 | a geometric space is available in the 6 | :doc:`frontends/GPflow.ipynb ` notebook. 7 | """ 8 | 9 | import gpflow 10 | import numpy as np 11 | import tensorflow as tf 12 | from beartype.typing import List, Optional, Union 13 | from gpflow.base import TensorType 14 | from gpflow.kernels.base import ActiveDims 15 | from gpflow.utilities import positive 16 | 17 | from geometric_kernels.kernels import BaseGeometricKernel 18 | from geometric_kernels.spaces import Space 19 | 20 | 21 | class GPflowGeometricKernel(gpflow.kernels.Kernel): 22 | r""" 23 | GPflow wrapper for :class:`~.kernels.BaseGeometricKernel`. 24 | 25 | A tutorial on how to use this wrapper to run Gaussian process regression on 26 | a geometric space is available in the 27 | :doc:`frontends/GPflow.ipynb ` notebook. 28 | 29 | .. note:: 30 | Remember that the `base_kernel` itself does not store any of its 31 | hyperparameters (like `lengthscale` and `nu`). If you do not set them 32 | manually—when initializing the object or after, by setting the 33 | properties—this wrapper will use the values provided by 34 | `base_kernel.init_params`. 35 | 36 | .. note:: 37 | As customary in GPflow, this wrapper calls the length scale 38 | parameter `lengthscales` (plural), as opposed to the convention used by 39 | GeometricKernels, where we call it `lengthscale` (singular). 40 | 41 | :param base_kernel: 42 | The kernel to wrap. 43 | :param active_dims: 44 | Active dimensions, either a slice or list of indices into the 45 | columns of X (inherited from `gpflow.kernels.base.Kernel`). 46 | :param name: 47 | Optional kernel name (inherited from `gpflow.kernels.base.Kernel`). 48 | :param lengthscales: 49 | Initial value of the length scale. Note **s** in lengthscale\ **s**\ . 50 | 51 | If not given or set to None, uses the default value of the 52 | `base_kernel`, as provided by its `init_params` method. 
53 | :param nu: 54 | Initial value of the smoothness parameter nu. 55 | 56 | If not given or set to None, uses the default value of the 57 | `base_kernel`, as provided by its `init_params` method. 58 | :param variance: 59 | Initial value of the variance (outputscale) parameter. 60 | 61 | Defaults to 1.0. 62 | :param trainable_nu: 63 | Whether or not the parameter nu is to be optimized over. 64 | 65 | Cannot be True if nu is equal to infinity. You cannot change 66 | this parameter after constructing the object. Defaults to False. 67 | 68 | :raises ValueError: 69 | If trying to set nu = infinity together with trainable_nu = True. 70 | """ 71 | 72 | def __init__( 73 | self, 74 | base_kernel: BaseGeometricKernel, 75 | active_dims: Optional[ActiveDims] = None, 76 | name: Optional[str] = None, 77 | lengthscales: Union[float, TensorType, np.ndarray] = None, 78 | nu: Union[float, TensorType, np.ndarray] = None, 79 | variance: Union[float, TensorType, np.ndarray] = 1.0, 80 | trainable_nu: bool = False, 81 | ): 82 | super().__init__(active_dims, name) 83 | self.base_kernel = base_kernel 84 | 85 | default_params = base_kernel.init_params() 86 | 87 | if nu is None: 88 | nu = default_params["nu"] 89 | if type(nu) is float: 90 | nu = np.array([nu]) 91 | 92 | if lengthscales is None: 93 | lengthscales = default_params["lengthscale"] 94 | if type(lengthscales) is float: 95 | lengthscales = np.array([lengthscales]) 96 | 97 | self.lengthscales = gpflow.Parameter(lengthscales, transform=positive()) 98 | self.variance = gpflow.Parameter(variance, transform=positive()) 99 | 100 | self.trainable_nu = trainable_nu 101 | if self.trainable_nu and tf.math.is_inf(nu): 102 | raise ValueError("Cannot have trainable `nu` parameter with infinite value") 103 | 104 | self.nu: Union[float, TensorType, np.ndarray, gpflow.Parameter] 105 | if self.trainable_nu: 106 | self.nu = gpflow.Parameter(nu, transform=positive()) 107 | else: 108 | self.nu = nu 109 | 110 | @property 111 | def space(self) -> 
Union[Space, List[Space]]: 112 | r"""Alias to the `base_kernel`\ s space property.""" 113 | return self.base_kernel.space 114 | 115 | def K(self, X: TensorType, X2: Optional[TensorType] = None) -> TensorType: 116 | """Evaluate the covariance matrix K(X, X2) (or K(X, X) if X2=None).""" 117 | lengthscale = tf.convert_to_tensor(self.lengthscales) 118 | nu = tf.cast(tf.convert_to_tensor(self.nu), lengthscale.dtype) 119 | variance = tf.convert_to_tensor(self.variance) 120 | params = dict(lengthscale=lengthscale, nu=nu) 121 | return variance * self.base_kernel.K(params, X, X2) 122 | 123 | def K_diag(self, X: TensorType) -> TensorType: 124 | """Evaluate the diagonal of the covariance matrix K(X, X).""" 125 | lengthscale = tf.convert_to_tensor(self.lengthscales) 126 | nu = tf.cast(tf.convert_to_tensor(self.nu), lengthscale.dtype) 127 | variance = tf.convert_to_tensor(self.variance) 128 | params = dict(lengthscale=lengthscale, nu=nu) 129 | return variance * self.base_kernel.K_diag(params, X) 130 | 131 | 132 | class DefaultFloatZeroMeanFunction(gpflow.mean_functions.Constant): 133 | """ 134 | Zero mean function. The default GPflow's `ZeroMeanFunction` uses the 135 | input's dtype as output type, this minor adaptation uses GPflow's 136 | `default_float` instead. This is to allow integer-valued inputs, like 137 | in the case of :class:`~.spaces.Graph` and :class:`~.spaces.Mesh`. 138 | """ 139 | 140 | def __init__(self, output_dim: int = 1): 141 | super().__init__() 142 | self.output_dim = output_dim 143 | del self.c 144 | 145 | def __call__(self, inputs: TensorType) -> TensorType: 146 | output_shape = tf.concat([tf.shape(inputs)[:-1], [self.output_dim]], axis=0) 147 | return tf.zeros(output_shape, dtype=gpflow.default_float()) 148 | -------------------------------------------------------------------------------- /geometric_kernels/frontends/gpjax.py: -------------------------------------------------------------------------------- 1 | """ 2 | GPJax kernel wrapper. 
3 | 4 | A tutorial on how to use this wrapper to run Gaussian process regression on 5 | a geometric space is available in the 6 | :doc:`frontends/GPJax.ipynb ` notebook. 7 | """ 8 | 9 | from dataclasses import dataclass 10 | 11 | import gpjax 12 | import jax.numpy as jnp 13 | import tensorflow_probability.substrates.jax.bijectors as tfb 14 | from beartype.typing import List, TypeVar, Union 15 | from gpjax.base import param_field, static_field 16 | from gpjax.kernels.computations.base import AbstractKernelComputation 17 | from gpjax.typing import Array, ScalarFloat 18 | from jaxtyping import Float, Num 19 | 20 | from geometric_kernels.kernels import BaseGeometricKernel 21 | from geometric_kernels.spaces import Space 22 | 23 | Kernel = TypeVar("Kernel", bound="gpjax.kernels.base.AbstractKernel") # noqa: F821 24 | 25 | 26 | class _GeometricKernelComputation(gpjax.kernels.computations.AbstractKernelComputation): 27 | """ 28 | A class for computing the covariance matrix of a geometric kernel. 29 | """ 30 | 31 | def cross_covariance( 32 | self, 33 | kernel: Kernel, 34 | x: Float[Array, "N #D1 D2"], # noqa: F821 35 | y: Float[Array, "M #D1 D2"], # noqa: F821 36 | ) -> Float[Array, "N M"]: 37 | """ 38 | Compute the cross covariance matrix between two batches of vectors (or 39 | batches of matrices) of inputs. 40 | 41 | :param x: 42 | A batch of N inputs, each of which is a matrix of size D1xD2, 43 | or a vector of size D2 if D1 is absent. 44 | :param y: 45 | A batch of M inputs, each of which is a matrix of size D1xD2, 46 | or a vector of size D2 if D1 is absent. 47 | 48 | :return: 49 | The N x M covariance matrix. 50 | """ 51 | return jnp.asarray(kernel(x, y)) 52 | 53 | 54 | @dataclass 55 | class GPJaxGeometricKernel(gpjax.kernels.AbstractKernel): 56 | r""" 57 | GPJax wrapper for :class:`~.kernels.BaseGeometricKernel`. 
58 | 59 | A tutorial on how to use this wrapper to run Gaussian process regression on 60 | a geometric space is available in the 61 | :doc:`frontends/GPJax.ipynb ` notebook. 62 | 63 | .. note:: 64 | Remember that the `base_kernel` itself does not store any of its 65 | hyperparameters (like `lengthscale` and `nu`). If you do not set them 66 | manually—when initializing the object or after, by setting the 67 | properties—this wrapper will use the values provided by 68 | `base_kernel.init_params`. 69 | 70 | .. note:: 71 | Unlike the frontends for GPflow and GPyTorch, GPJaxGeometricKernel 72 | does not have the `trainable_nu` parameter which determines whether or 73 | not the smoothness parameter nu is to be optimized over. By default, it 74 | is not trainable. If you want to make it trainable, do 75 | :code:`kernel = kernel.replace_trainable(nu=True)` on an instance of 76 | the `GPJaxGeometricKernel`. 77 | 78 | :param base_kernel: 79 | The kernel to wrap. 80 | :type base_kernel: geometric_kernels.kernels.BaseGeometricKernel 81 | :param name: 82 | Optional kernel name (inherited from `gpjax.kernels.AbstractKernel`). 83 | 84 | Defaults to "Geometric Kernel". 85 | :type name: str 86 | :param lengthscale: 87 | Initial value of the length scale. 88 | 89 | If not given or set to None, uses the default value of the 90 | `base_kernel`, as provided by its `init_params` method. 91 | :type lengthscale: Union[ScalarFloat, Float[Array, " D"]] 92 | :param nu: 93 | Initial value of the smoothness parameter nu. 94 | 95 | If not given or set to None, uses the default value of the 96 | `base_kernel`, as provided by its `init_params` method. 97 | :type nu: ScalarFloat 98 | :param variance: 99 | Initial value of the variance (outputscale) parameter. 100 | 101 | Defaults to 1.0. 
102 | :type variance: ScalarFloat 103 | """ 104 | 105 | nu: ScalarFloat = param_field(None, bijector=tfb.Softplus(), trainable=False) 106 | lengthscale: Union[ScalarFloat, Float[Array, " D"]] = param_field( 107 | None, bijector=tfb.Softplus() 108 | ) 109 | variance: ScalarFloat = param_field(jnp.array(1.0), bijector=tfb.Softplus()) 110 | base_kernel: BaseGeometricKernel = static_field(None) 111 | compute_engine: AbstractKernelComputation = static_field( 112 | _GeometricKernelComputation(), repr=False 113 | ) 114 | name: str = "Geometric Kernel" 115 | 116 | def __post_init__(self): 117 | if self.base_kernel is None: 118 | raise ValueError("base_kernel must be specified") 119 | 120 | default_params = self.base_kernel.init_params() 121 | 122 | if self.nu is None: 123 | self.nu = jnp.array(default_params["nu"]) 124 | if isinstance(self.nu, ScalarFloat): 125 | self.nu = jnp.array([self.nu]) 126 | 127 | if self.lengthscale is None: 128 | self.lengthscale = jnp.array(default_params["lengthscale"]) 129 | if isinstance(self.lengthscale, ScalarFloat): 130 | self.lengthscale = jnp.array([self.lengthscale]) 131 | 132 | @property 133 | def space(self) -> Union[Space, List[Space]]: 134 | r"""Alias to the `base_kernel`\ s space property.""" 135 | return self.base_kernel.space 136 | 137 | def __call__( 138 | self, x: Num[Array, "N #D1 D2"], y: Num[Array, "M #D1 D2"] # noqa: F821 139 | ) -> Float[Array, "N M"]: 140 | """ 141 | Compute the cross-covariance matrix between two batches of vectors (or 142 | batches of matrices) of inputs. 143 | 144 | :param x: 145 | A batch of N inputs, each of which is a matrix of size D1xD2, 146 | or a vector of size D2 if D1 is absent. 147 | :param y: 148 | A batch of M inputs, each of which is a matrix of size D1xD2, 149 | or a vector of size D2 if D1 is absent. 150 | 151 | :return: 152 | The N x M cross-covariance matrix. 
153 | """ 154 | return self.variance * self.base_kernel.K( 155 | {"lengthscale": self.lengthscale, "nu": self.nu}, x, y 156 | ) 157 | -------------------------------------------------------------------------------- /geometric_kernels/frontends/gpytorch.py: -------------------------------------------------------------------------------- 1 | """ 2 | GPyTorch kernel wrapper. 3 | 4 | A tutorial on how to use this wrapper to run Gaussian process regression on 5 | a geometric space is available in the 6 | :doc:`frontends/GPyTorch.ipynb ` notebook. 7 | """ 8 | 9 | import gpytorch 10 | import numpy as np 11 | import torch 12 | from beartype.typing import List, Union 13 | 14 | from geometric_kernels.kernels import BaseGeometricKernel 15 | from geometric_kernels.spaces import Space 16 | 17 | 18 | class GPyTorchGeometricKernel(gpytorch.kernels.Kernel): 19 | r""" 20 | GPyTorch wrapper for :class:`~.kernels.BaseGeometricKernel`. 21 | 22 | A tutorial on how to use this wrapper to run Gaussian process regression on 23 | a geometric space is available in the 24 | :doc:`frontends/GPyTorch.ipynb ` notebook. 25 | 26 | .. note:: 27 | Remember that the `base_kernel` itself does not store any of its 28 | hyperparameters (like `lengthscale` and `nu`). If you do not set them 29 | manually—when initializing the object or after, by setting the 30 | properties—this wrapper will use the values provided by 31 | `base_kernel.init_params`. 32 | 33 | .. note:: 34 | As customary in GPyTorch, this wrapper does not maintain a 35 | variance (outputscale) parameter. To add it, use 36 | :code:`gpytorch.kernels.ScaleKernel(GPyTorchGeometricKernel(...))`. 37 | 38 | :param base_kernel: 39 | The kernel to wrap. 40 | :param lengthscale: 41 | Initial value of the length scale. 42 | 43 | If not given or set to None, uses the default value of the 44 | `base_kernel`, as provided by its `init_params` method. 45 | :param nu: 46 | Initial value of the smoothness parameter nu. 
47 | 48 | If not given or set to None, uses the default value of the 49 | `base_kernel`, as provided by its `init_params` method. 50 | :param trainable_nu: 51 | Whether or not the parameter nu is to be optimized over. 52 | 53 | Cannot be True if nu is equal to infinity. You cannot change 54 | this parameter after constructing the object. Defaults to False. 55 | 56 | :raises ValueError: 57 | If trying to set nu = infinity together with trainable_nu = True. 58 | 59 | .. todo:: 60 | Handle `ard_num_dims` properly when base_kernel is a product kernel. 61 | """ 62 | 63 | has_lengthscale = True 64 | 65 | def __init__( 66 | self, 67 | base_kernel: BaseGeometricKernel, 68 | lengthscale: Union[float, torch.Tensor, np.ndarray] = None, 69 | nu: Union[float, torch.Tensor, np.ndarray] = None, 70 | trainable_nu: bool = False, 71 | **kwargs, 72 | ): 73 | super().__init__(**kwargs) 74 | 75 | self.base_kernel = base_kernel 76 | 77 | default_params = base_kernel.init_params() 78 | 79 | if nu is None: 80 | nu = default_params["nu"] 81 | if type(nu) is float: 82 | nu = np.array([nu]) 83 | 84 | if lengthscale is None: 85 | lengthscale = default_params["lengthscale"] 86 | if type(lengthscale) is float: 87 | lengthscale = np.array([lengthscale]) 88 | 89 | lengthscale = torch.as_tensor(lengthscale) 90 | nu = torch.as_tensor(nu) 91 | 92 | self._trainable_nu = trainable_nu 93 | if self._trainable_nu and torch.isinf(nu): 94 | raise ValueError("Cannot have trainable `nu` parameter with infinite value") 95 | 96 | self.lengthscale = lengthscale 97 | 98 | if self._trainable_nu: 99 | self.register_parameter( 100 | name="raw_nu", parameter=torch.nn.Parameter(torch.tensor(1.0)) 101 | ) 102 | self.register_constraint("raw_nu", gpytorch.constraints.Positive()) 103 | self.nu = nu 104 | else: 105 | self.register_buffer("raw_nu", nu) 106 | 107 | @property 108 | def space(self) -> Union[Space, List[Space]]: 109 | r"""Alias to the `base_kernel`\ s space property.""" 110 | return self.base_kernel.space 111 

    @property
    def nu(self) -> torch.Tensor:
        """The smoothness parameter"""
        if self._trainable_nu:
            # The trainable value is stored unconstrained in `raw_nu`; map
            # it through the positivity constraint to get the actual nu.
            return self.raw_nu_constraint.transform(self.raw_nu)
        else:
            return self.raw_nu

    @nu.setter
    def nu(self, value):
        if self._trainable_nu:
            if torch.isinf(value):
                raise ValueError(
                    "Cannot have infinite `nu` value when trainable_nu is True"
                )
            # Match the dtype/device of the stored raw parameter before
            # inverting the positivity constraint.
            value = torch.as_tensor(value).to(self.raw_nu)
            self.initialize(raw_nu=self.raw_nu_constraint.inverse_transform(value))
        else:
            self.raw_nu = torch.as_tensor(value)

    def forward(
        self,
        x1: torch.Tensor,
        x2: torch.Tensor,
        diag: bool = False,
        last_dim_is_batch: bool = False,
        **kwargs,
    ) -> torch.Tensor:
        """
        Evaluate the covariance matrix K(x1, x2).

        :param x1:
            First batch of inputs.
        :param x2:
            Second batch of inputs.
        :param diag:
            If set to True, ignores `x2` and returns the diagonal of K(x1, x1).
        :param last_dim_is_batch:
            Ignored.

        :return:
            The covariance matrix K(x1, x2) or, if diag=True, the diagonal
            of the covariance matrix K(x1, x1).

        .. todo::
            Support GPyTorch-style output batching.
        """
        # The backend-agnostic base kernel expects flat parameter arrays,
        # hence the flatten() of the gpytorch-managed tensors.
        params = dict(lengthscale=self.lengthscale.flatten(), nu=self.nu.flatten())
        if diag:
            return self.base_kernel.K_diag(params, x1)
        return self.base_kernel.K(params, x1, x2)
-------------------------------------------------------------------------------- /geometric_kernels/jax.py: --------------------------------------------------------------------------------
"""
Loads JAX backend in lab, spherical_harmonics and geometric_kernels.

.. note::
    A tutorial on the JAX backend is available in the
    :doc:`backends/JAX_Graph.ipynb ` notebook.
"""

import logging

import lab.jax  # noqa
import spherical_harmonics.jax  # noqa

import geometric_kernels.lab_extras.jax  # noqa

# Emitted once at import time so users can confirm which backend was loaded.
logging.getLogger(__name__).info("JAX backend enabled.")
-------------------------------------------------------------------------------- /geometric_kernels/kernels/__init__.py: --------------------------------------------------------------------------------
"""
This module provides the abstract base class for geometric kernels and
specialized classes for various types of spaces.

Unless you know exactly what you are doing, always use the
:class:`MaternGeometricKernel` that "just works".
"""

# noqa: F401
from geometric_kernels.kernels.base import BaseGeometricKernel
from geometric_kernels.kernels.feature_map import MaternFeatureMapKernel
from geometric_kernels.kernels.hodge_compositional import MaternHodgeCompositionalKernel
from geometric_kernels.kernels.karhunen_loeve import MaternKarhunenLoeveKernel
from geometric_kernels.kernels.matern_kernel import (
    MaternGeometricKernel,
    default_feature_map,
)
from geometric_kernels.kernels.product import ProductGeometricKernel
-------------------------------------------------------------------------------- /geometric_kernels/kernels/base.py: --------------------------------------------------------------------------------
"""
This module provides the :class:`BaseGeometricKernel` kernel, the base class
for all geometric kernels.
"""

import abc

import lab as B
from beartype.typing import Dict, List, Optional, Union

from geometric_kernels.spaces import Space


class BaseGeometricKernel(abc.ABC):
    """
    Abstract base class for geometric kernels.

    :param space:
        The space on which the kernel is defined.
    """

    def __init__(self, space: Space):
        self._space = space

    @property
    def space(self) -> Union[Space, List[Space]]:
        """
        The space on which the kernel is defined.
        """
        return self._space

    @abc.abstractmethod
    def init_params(self) -> Dict[str, B.NPNumeric]:
        """
        Initializes the dict of the trainable parameters of the kernel.

        It typically contains only two keys: `"nu"` and `"lengthscale"`.

        This dict can be modified and is passed around into such methods as
        :meth:`~.K` or :meth:`~.K_diag`, as the `params` argument.

        .. note::
            The values in the returned dict are always of the NumPy array type.
            Thus, if you want to use some other backend for internal
            computations when calling :meth:`~.K` or :meth:`~.K_diag`, you
            need to replace the values with the analogs typed as arrays of
            the desired backend.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def K(
        self,
        params: Dict[str, B.Numeric],
        X: B.Numeric,
        X2: Optional[B.Numeric] = None,
        **kwargs,
    ) -> B.Numeric:
        """
        Compute the cross-covariance matrix between two batches of vectors of
        inputs, or batches of matrices of inputs, depending on the space.

        :param params:
            A dict of kernel parameters, typically containing two keys:
            `"lengthscale"` for length scale and `"nu"` for smoothness.

            The types of values in the params dict determine the output type
            and the backend used for the internal computations, see the
            warning below for more details.

            .. note::
                The values `params["lengthscale"]` and `params["nu"]` are
                typically (1,)-shaped arrays of the suitable backend. This
                serves to point at the backend to be used for internal
                computations.

                In some cases, for example, when the kernel is
                :class:`~.kernels.ProductGeometricKernel`, the values of
                `params` may be (s,)-shaped arrays instead, where `s` is the
                number of factors.

            .. note::
                Finite values of `params["nu"]` typically correspond to the
                generalized (geometric) Matérn kernels.

                Infinite `params["nu"]` typically corresponds to the heat
                kernel (a.k.a. diffusion kernel, generalized squared
                exponential kernel, generalized Gaussian kernel,
                generalized RBF kernel). Although it is often considered to be
                a separate entity, we treat the heat kernel as a member of
                the Matérn family, with smoothness parameter equal to infinity.

        :param X:
            A batch of N inputs, each of which is a vector or a matrix,
            depending on how the elements of the `self.space` are represented.
        :param X2:
            A batch of M inputs, each of which is a vector or a matrix,
            depending on how the elements of the `self.space` are represented.

            `X2=None` sets `X2=X1`.

            Defaults to None.

        :return:
            The N x M cross-covariance matrix.

        .. warning::
            The types of values in the `params` dict determine the backend
            used for internal computations and the output type.

            Even if, say, `geometric_kernels.jax` is imported but the values in
            the `params` dict are NumPy arrays, the output type will be a NumPy
            array, and NumPy will be used for internal computations. To get a
            JAX array as an output and use JAX for internal computations, all
            the values in the `params` dict must be JAX arrays.
        """
        raise NotImplementedError

    @abc.abstractmethod
    def K_diag(self, params: Dict[str, B.Numeric], X: B.Numeric, **kwargs) -> B.Numeric:
        """
        Returns the diagonal of the covariance matrix `self.K(params, X, X)`,
        typically in a more efficient way than actually computing the full
        covariance matrix with `self.K(params, X, X)` and then extracting its
        diagonal.

        :param params:
            Same as for :meth:`~.K`.

        :param X:
            A batch of N inputs, each of which is a vector or a matrix,
            depending on how the elements of the `self.space` are represented.

        :return:
            The N-dimensional vector representing the diagonal of the
            covariance matrix `self.K(params, X, X)`.
        """
        raise NotImplementedError
-------------------------------------------------------------------------------- /geometric_kernels/kernels/feature_map.py: --------------------------------------------------------------------------------
"""
This module provides the :class:`MaternFeatureMapKernel` kernel, the basic
kernel for non-compact symmetric spaces, subclasses of
:class:`~.spaces.NoncompactSymmetricSpace`.
"""

import lab as B
import numpy as np
from beartype.typing import Dict, Optional

from geometric_kernels.feature_maps import FeatureMap
from geometric_kernels.kernels.base import BaseGeometricKernel
from geometric_kernels.spaces.base import Space
from geometric_kernels.utils.utils import make_deterministic


class MaternFeatureMapKernel(BaseGeometricKernel):
    r"""
    This class computes a (Matérn) kernel based on a feature map.

    .. math :: k_{\nu, \kappa}(x, y) = \langle \phi_{\nu, \kappa}(x), \phi_{\nu, \kappa}(y) \rangle_{\mathbb{R}^n}

    where $\langle \cdot , \cdot \rangle_{\mathbb{R}^n}$ is the standard inner
    product in $\mathbb{R}^n$ and $\phi_{\nu, \kappa}: X \to \mathbb{R}^n$ is
    an arbitrary function called *feature map*. We assume that it depends
    on the smoothness and length scale parameters $\nu$ and $\kappa$,
    respectively, which makes this kernel specifically Matérn.

    .. note::
        A brief introduction into feature maps and related kernels can be
        found on :doc:`this page `.

        Note that the finite-dimensional feature maps this kernel is meant to
        be used with are, in most cases, some approximations of the
        intractable infinite-dimensional feature maps.

    :param space:
        The space on which the kernel is defined.
    :param feature_map:
        A :class:`~.feature_maps.FeatureMap` object that represents an
        arbitrary function $\phi_{\nu, \kappa}: X \to \mathbb{R}^n$, where
        $X$ is the `space`, $n$ can be an arbitrary finite integer, and
        $\nu, \kappa$ are the smoothness and length scale parameters.
    :param key:
        Random state, either `np.random.RandomState`,
        `tf.random.Generator`, `torch.Generator` or `jax.tensor` (which
        represents a random state).

        Many feature maps used in the library are randomized, thus requiring a
        `key` to work. The :class:`MaternFeatureMapKernel` uses this `key` to
        make them (and thus the kernel) deterministic, applying the utility
        function :func:`~.make_deterministic` to the pair `feature_map, key`.

        .. note::
            Even if the `feature_map` is deterministic, you need to provide a
            valid key, although it will essentially be ignored. In the future,
            we should probably make the `key` parameter optional.

    :param normalize:
        This parameter is directly passed on to the `feature_map` as a keyword
        argument "normalize". If normalize=True, then either $k(x, x) = 1$ for
        all $x \in X$, or $\int_X k(x, x) d x = 1$, depending on the type of
        the feature map and on the space $X$.

        .. note::
            For many kernel methods, $k(\cdot, \cdot)$ and $a k(\cdot, \cdot)$
            are indistinguishable, whatever the positive constant $a$ is. For
            these, it makes sense to use normalize=False to save up some
            computational overhead. For others, like for the Gaussian process
            regression, the normalization of the kernel might be important. In
            these cases, you will typically want to set normalize=True.
    """

    def __init__(
        self,
        space: Space,
        feature_map: FeatureMap,
        key: B.RandomState,
        normalize: bool = True,
    ):
        super().__init__(space)
        # Fix the random state, so that repeated evaluations of a randomized
        # feature map produce the same features and the kernel is thus a
        # deterministic function of its inputs.
        self.feature_map = make_deterministic(feature_map, key)
        self.normalize = normalize

    def init_params(self) -> Dict[str, B.NPNumeric]:
        """
        Initializes the dict of the trainable parameters of the kernel.

        Returns `dict(nu=np.array([np.inf]), lengthscale=np.array([1.0]))`.

        This dict can be modified and is passed around into such methods as
        :meth:`~.K` or :meth:`~.K_diag`, as the `params` argument.

        .. note::
            The values in the returned dict are always of the NumPy array type.
            Thus, if you want to use some other backend for internal
            computations when calling :meth:`~.K` or :meth:`~.K_diag`, you
            need to replace the values with the analogs typed as arrays of
            the desired backend.
        """
        params = dict(nu=np.array([np.inf]), lengthscale=np.array([1.0]))
        return params

    def K(
        self,
        params: Dict[str, B.Numeric],
        X: B.Numeric,
        X2: Optional[B.Numeric] = None,
        **kwargs,
    ):
        assert "lengthscale" in params
        assert params["lengthscale"].shape == (1,)
        assert "nu" in params
        assert params["nu"].shape == (1,)

        _, features_X = self.feature_map(
            X, params, normalize=self.normalize, **kwargs
        )  # [N, O]
        if X2 is not None:
            _, features_X2 = self.feature_map(
                X2, params, normalize=self.normalize, **kwargs
            )  # [N2, O]
        else:
            # Reuse the features of X instead of recomputing them for X2=X.
            features_X2 = features_X

        # Inner product over the feature axis; the ellipsis keeps any
        # leading batch dimensions intact.
        feature_product = B.einsum("...no,...mo->...nm", features_X, features_X2)
        return feature_product

    def K_diag(self, params: Dict[str, B.Numeric], X: B.Numeric, **kwargs):
        assert "lengthscale" in params
        assert params["lengthscale"].shape == (1,)
        assert "nu" in params
        assert params["nu"].shape == (1,)

        _, features_X = self.feature_map(
            X, params, normalize=self.normalize, **kwargs
        )  # [N, O]
        # k(x, x) = ||phi(x)||^2, computed without forming the full matrix.
        return B.sum(features_X**2, axis=-1)  # [N, ]
-------------------------------------------------------------------------------- /geometric_kernels/kernels/product.py: --------------------------------------------------------------------------------
"""
This module provides the :class:`ProductGeometricKernel` kernel for
constructing product kernels from a sequence of kernels.

See :doc:`this page ` for a brief account on
theory behind product kernels and the :doc:`Torus.ipynb `
notebook for a tutorial on how to use them.
8 | """ 9 | 10 | import math 11 | 12 | import lab as B 13 | from beartype.typing import Dict, List, Optional 14 | 15 | from geometric_kernels.kernels.base import BaseGeometricKernel 16 | from geometric_kernels.spaces import Space 17 | from geometric_kernels.utils.product import params_to_params_list, project_product 18 | 19 | 20 | class ProductGeometricKernel(BaseGeometricKernel): 21 | r""" 22 | Product kernel, defined as the product of a sequence of kernels. 23 | 24 | See :doc:`this page ` for a brief account on 25 | theory behind product kernels and the :doc:`Torus.ipynb ` 26 | notebook for a tutorial on how to use them. 27 | 28 | :param ``*kernels``: 29 | A sequence of kernels to compute the product of. Cannot contain another 30 | instance of :class:`ProductGeometricKernel`. We denote the number of 31 | factors, i.e. the length of the "sequence", by s. 32 | :param dimension_indices: 33 | Determines how a product kernel input vector `x` is to be mapped into 34 | the inputs `xi` for the factor kernels. `xi` are assumed to be equal to 35 | `x[dimension_indices[i]]`, possibly up to a reshape. Such a reshape 36 | might be necessary to accommodate the spaces whose elements are matrices 37 | rather than vectors, as determined by `element_shapes`. The 38 | transformation of `x` into the list of `xi`\ s is performed 39 | by :func:`~.project_product`. 40 | 41 | If None, assumes the each input is layed-out flattened and concatenated, 42 | in the same order as the factor spaces. In this case, the inverse to 43 | :func:`~.project_product` is :func:`~.make_product`. 44 | 45 | Defaults to None. 46 | 47 | .. note:: 48 | `params` of a :class:`ProductGeometricKernel` are such that 49 | `params["lengthscale"]` and `params["nu"]` are (s,)-shaped arrays, where 50 | `s` is the number of factors. 51 | 52 | Basically, `params["lengthscale"][i]` stores the length scale parameter 53 | for the `i`-th factor kernel. Same goes for `params["nu"]`. 
Importantly, 54 | this enables *automatic relevance determination*-like behavior. 55 | """ 56 | 57 | def __init__( 58 | self, 59 | *kernels: BaseGeometricKernel, 60 | dimension_indices: Optional[List[List[int]]] = None, 61 | ): 62 | self.kernels = kernels 63 | self.spaces: List[Space] = [] 64 | for kernel in self.kernels: 65 | # Make sure there is no product kernel in the list of kernels. 66 | assert isinstance(kernel.space, Space) 67 | self.spaces.append(kernel.space) 68 | self.element_shapes = [space.element_shape for space in self.spaces] 69 | self.element_dtypes = [space.element_dtype for space in self.spaces] 70 | 71 | if dimension_indices is None: 72 | dimensions = [math.prod(shape) for shape in self.element_shapes] 73 | self.dimension_indices: List[List[int]] = [] 74 | i = 0 75 | inds = [*range(sum(dimensions))] 76 | for dim in dimensions: 77 | self.dimension_indices.append(inds[i : i + dim]) 78 | i += dim 79 | else: 80 | assert len(dimension_indices) == len(self.kernels) 81 | for idx_list in dimension_indices: 82 | assert all(idx >= 0 for idx in idx_list) 83 | 84 | self.dimension_indices = dimension_indices 85 | 86 | @property 87 | def space(self) -> List[Space]: 88 | """ 89 | The list of spaces upon which the factor kernels are defined. 90 | """ 91 | return self.spaces 92 | 93 | def init_params(self) -> Dict[str, B.NPNumeric]: 94 | r""" 95 | Returns a dict `params` where `params["lengthscale"]` is the 96 | concatenation of all `self.kernels[i].init_params()["lengthscale"]` and 97 | same for `params["nu"]`. 
98 | """ 99 | nu_list: List[B.NPNumeric] = [] 100 | lengthscale_list: List[B.NPNumeric] = [] 101 | 102 | for kernel in self.kernels: 103 | cur_params = kernel.init_params() 104 | assert cur_params["lengthscale"].shape == (1,) 105 | assert cur_params["nu"].shape == (1,) 106 | nu_list.append(cur_params["nu"]) 107 | lengthscale_list.append(cur_params["lengthscale"]) 108 | 109 | params: Dict[str, B.NPNumeric] = {} 110 | params["nu"] = B.concat(*nu_list) 111 | params["lengthscale"] = B.concat(*lengthscale_list) 112 | return params 113 | 114 | def K(self, params: Dict[str, B.Numeric], X, X2=None, **kwargs) -> B.Numeric: 115 | if X2 is None: 116 | X2 = X 117 | 118 | Xs = project_product( 119 | X, self.dimension_indices, self.element_shapes, self.element_dtypes 120 | ) 121 | X2s = project_product( 122 | X2, self.dimension_indices, self.element_shapes, self.element_dtypes 123 | ) 124 | params_list = params_to_params_list(len(self.kernels), params) 125 | 126 | return B.prod( 127 | B.stack( 128 | *[ 129 | kernel.K(p, X, X2) 130 | for kernel, X, X2, p in zip(self.kernels, Xs, X2s, params_list) 131 | ], 132 | axis=-1, 133 | ), 134 | axis=-1, 135 | ) 136 | 137 | def K_diag(self, params, X): 138 | Xs = project_product( 139 | X, self.dimension_indices, self.element_shapes, self.element_dtypes 140 | ) 141 | params_list = params_to_params_list(len(self.kernels), params) 142 | 143 | return B.prod( 144 | B.stack( 145 | *[ 146 | kernel.K_diag(p, X) 147 | for kernel, X, p in zip(self.kernels, Xs, params_list) 148 | ], 149 | axis=-1, 150 | ), 151 | axis=-1, 152 | ) 153 | -------------------------------------------------------------------------------- /geometric_kernels/lab_extras/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Custom extensions for `LAB `_. 
"""

from lab import dispatch

from geometric_kernels.lab_extras.extras import *

# Always load the numpy backend because we assume numpy is always installed.
from geometric_kernels.lab_extras.numpy import *
-------------------------------------------------------------------------------- /geometric_kernels/lab_extras/jax/__init__.py: --------------------------------------------------------------------------------
from geometric_kernels.lab_extras.jax.extras import *
-------------------------------------------------------------------------------- /geometric_kernels/lab_extras/numpy/__init__.py: --------------------------------------------------------------------------------
from geometric_kernels.lab_extras.numpy.extras import *
from geometric_kernels.lab_extras.numpy.sparse_extras import *
-------------------------------------------------------------------------------- /geometric_kernels/lab_extras/numpy/sparse_extras.py: --------------------------------------------------------------------------------
import sys

import lab as B
import scipy
import scipy.sparse as sp
from beartype.typing import Union
from lab import dispatch
from plum import Signature

from .extras import _Numeric

"""
SparseArray defines a lab data type that covers all possible sparse
scipy arrays, so that multiple dispatch works with such arrays.
"""
# On Python <= 3.8 (old scipy), the common sparse base classes are not
# available, so enumerate the concrete sparse matrix classes instead.
if sys.version_info[:2] <= (3, 8):
    SparseArray = Union[
        sp.bsr_matrix,
        sp.coo_matrix,
        sp.csc_matrix,
        sp.csr_matrix,
        sp.dia_matrix,
        sp.dok_matrix,
        sp.lil_matrix,
    ]
else:
    SparseArray = Union[
        sp.sparray,
        sp.spmatrix,
    ]


@dispatch
def degree(a: SparseArray):  # type: ignore
    """
    Given an adjacency matrix `a`, return a diagonal matrix
    with the col-sums of `a` as main diagonal - this is the
    degree matrix representing the number of nodes each node
    is connected to.
    """
    d = a.sum(axis=0)  # type: ignore
    return sp.spdiags(d, 0, d.size, d.size)


@dispatch
def eigenpairs(L: Union[SparseArray, _Numeric], k: int):
    """
    Obtain the eigenpairs that correspond to the `k` lowest eigenvalues
    of a symmetric positive semi-definite matrix `L`.
    """
    # eigsh cannot compute *all* k = L.shape[0] eigenpairs of a sparse
    # matrix, so densify and fall through to the dense solver in that case.
    if sp.issparse(L) and (k == L.shape[0]):
        L = L.toarray()
    if sp.issparse(L):
        # sigma=1e-8 enables shift-invert mode targeting the eigenvalues
        # closest to zero, i.e. the lowest ones for a PSD matrix.
        return sp.linalg.eigsh(L, k, sigma=1e-8)
    else:
        eigenvalues, eigenvectors = scipy.linalg.eigh(L)
        return (eigenvalues[:k], eigenvectors[:, :k])


@dispatch
def set_value(a: Union[SparseArray, _Numeric], index: int, value: float):
    """
    Set a[index] = value.
    This operation is not done in place and a new array is returned.
    """
    a = a.copy()
    a[index] = value
    return a


""" Register methods for simple ops for a sparse array. """


def pinv(a: Union[SparseArray]):
    # The pseudo-inverse is only supported for diagonal sparse arrays,
    # where it amounts to inverting the non-zero diagonal entries.
    i, j = a.nonzero()
    if not (i == j).all():
        raise NotImplementedError(
            "pinv is not supported for non-diagonal sparse arrays."
        )
    else:
        a = sp.csr_matrix(a.copy())
        a[i, i] = 1 / a[i, i]
        return a


# putting "ignore" here for now, seems like some plum/typing issue
_SparseArray = Signature(SparseArray)  # type: ignore

B.T.register(lambda a: a.T, _SparseArray)
B.shape.register(lambda a: a.shape, _SparseArray)
B.sqrt.register(lambda a: a.sqrt(), _SparseArray)
# `== True` (rather than truthiness) is needed for element-wise comparison
# on sparse matrices, hence the noqa for E712.
B.any.register(lambda a: bool((a == True).sum()), _SparseArray)  # noqa

B.linear_algebra.pinv.register(pinv, _SparseArray)
-------------------------------------------------------------------------------- /geometric_kernels/lab_extras/tensorflow/__init__.py: --------------------------------------------------------------------------------
from geometric_kernels.lab_extras.tensorflow.extras import *
-------------------------------------------------------------------------------- /geometric_kernels/lab_extras/torch/__init__.py: --------------------------------------------------------------------------------
from geometric_kernels.lab_extras.torch.extras import *
-------------------------------------------------------------------------------- /geometric_kernels/resources/__init__.py: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/geometric-kernels/GeometricKernels/fa53ebe14130c27530f4bda2c980b0189d1d388d/geometric_kernels/resources/__init__.py
-------------------------------------------------------------------------------- /geometric_kernels/sampling/__init__.py: --------------------------------------------------------------------------------
"""
Helper functions for efficiently sampling functions from Gaussian processes
with Matérn geometric kernels.
"""

# noqa: F401
from geometric_kernels.sampling.samplers import sample_at, sampler
-------------------------------------------------------------------------------- /geometric_kernels/sampling/samplers.py: --------------------------------------------------------------------------------
"""
Samplers.
"""

from __future__ import annotations  # By https://stackoverflow.com/a/62136491

from functools import partial

import lab as B
from beartype.typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Tuple

# By https://stackoverflow.com/a/62136491
if TYPE_CHECKING:
    from geometric_kernels.feature_maps import FeatureMap


def sample_at(
    feature_map: FeatureMap,
    s: int,
    X: B.Numeric,
    params: Dict[str, B.Numeric],
    key: B.RandomState = None,
    normalize: bool = None,
    hodge_type: Optional[str] = None,
) -> Tuple[B.RandomState, B.Numeric]:
    r"""
    Given a `feature_map` $\phi_{\nu, \kappa}: X \to \mathbb{R}^n$, where
    $\nu, \kappa$ are determined by `params["nu"]` and `params["lengthscale"]`,
    respectively, compute `s` samples of the Gaussian process with kernel

    .. math :: k_{\nu, \kappa}(x, y) = \langle \phi_{\nu, \kappa}(x), \phi_{\nu, \kappa}(y) \rangle_{\mathbb{R}^n}

    at input locations `X` and using the random state `key`.

    Generating a sample from $GP(0, k_{\nu, \kappa})$ is as simple as computing

    .. math :: \sum_{j=1}^n w_j \cdot (\phi_{\nu, \kappa}(x))_j \qquad w_j \stackrel{IID}{\sim} N(0, 1).

    .. note::
        Fixing $w_j$, and treating $x \to (\phi_{\nu, \kappa}(x))_j$ as basis
        functions, while letting $x$ vary, you get an actual *function* as a
        sample, meaning something that can be evaluated at any $x \in X$.
        The way to fix $w_j$ in code is to apply :func:`~.make_deterministic`
        utility function.

    :param feature_map:
        The feature map $\phi_{\nu, \kappa}$ that defines the Gaussian process
        $GP(0, k_{\nu, \kappa})$ to sample from.
    :param s:
        The number of samples to generate.
    :param X:
        An [N, <element shape>]-shaped array containing N elements of the
        space `feature_map` is defined on, where <element shape> is the shape
        of these elements. These are the points to evaluate the samples at.
    :param params:
        Parameters of the kernel (length scale and smoothness).
    :param key: random state, either `np.random.RandomState`,
        `tf.random.Generator`, `torch.Generator` or `jax.tensor` (which
        represents a random state).
    :param normalize:
        Passed down to `feature_map` directly. Controls whether to force the
        average variance of the Gaussian process to be around 1 or not. If None,
        follows the standard behavior, typically same as normalize=True.

        Defaults to None.
    :param hodge_type:
        The type of Hodge component to sample. Only used when using Hodge-compositional edge kernels

        Defaults to None.

    :return:
        [N, s]-shaped array containing s samples of the $GP(0, k_{\nu, \kappa})$
        evaluated at X.
    """

    if key is None:
        # Fall back to the global random state of the backend inferred
        # from the type of X.
        key = B.global_random_state(B.dtype(X))

    if hodge_type is None:
        _context, features = feature_map(
            X, params, key=key, normalize=normalize
        )  # [N, M]
    else:
        _context, features = feature_map(
            X, params, key=key, normalize=normalize, hodge_type=hodge_type
        )

    # Feature maps may return an updated random state as the first element;
    # if so, propagate it.
    if _context is not None:
        key = _context

    num_features = B.shape(features)[-1]

    # Infer the backend dtype from the parameters. NOTE(review): the
    # "gradient" fallback presumably matches the nested params structure of
    # Hodge-compositional kernels — confirm against those kernels' params.
    if "lengthscale" in params:
        dtype = B.dtype(params["lengthscale"])
    else:
        dtype = B.dtype(params["gradient"]["lengthscale"])

    key, random_weights = B.randn(key, dtype, num_features, s)  # [M, S]

    random_sample = B.matmul(features, random_weights)  # [N, S]

    return key, random_sample


def sampler(
    feature_map: FeatureMap, s: Optional[int] = 1, **kwargs
) -> Callable[[Any], Any]:
    """
    A helper wrapper around `sample_at` that fixes `feature_map`, `s` and the
    keyword arguments in ``**kwargs`` but leaves `X`, `params` and the other
    keyword arguments vary.

    :param feature_map:
        The feature map to fix.
    :param s:
        The number of samples parameter to fix.

        Defaults to 1.
    :param ``**kwargs``:
        Keyword arguments to fix.

    :return:
        The version of :func:`sample_at` with parameters `feature_map` and `s`
        fixed, together with the keyword arguments in ``**kwargs``.
    """

    sample_f = partial(sample_at, feature_map, s, **kwargs)
    # Carry over the name and build a docstring that records the fixed
    # arguments, so that the partial is self-describing in interactive use.
    new_docstring = f"""
    This is a version of the `{sample_at.__name__}` function with
    - feature_map={feature_map},
    - s={s},
    - and additional keyword arguments {kwargs}.

    The original docstring follows.

    {sample_at.__doc__}
    """
    sample_f.__name__ = sample_at.__name__  # type: ignore[attr-defined]
    sample_f.__doc__ = new_docstring

    return sample_f
-------------------------------------------------------------------------------- /geometric_kernels/spaces/__init__.py: --------------------------------------------------------------------------------
"""
Various spaces supported by the library as input domains for kernels.
"""

# noqa: F401
from geometric_kernels.spaces.base import (
    DiscreteSpectrumSpace,
    HodgeDiscreteSpectrumSpace,
    NoncompactSymmetricSpace,
    Space,
)
from geometric_kernels.spaces.circle import Circle
from geometric_kernels.spaces.graph import Graph
from geometric_kernels.spaces.graph_edges import GraphEdges
from geometric_kernels.spaces.hyperbolic import Hyperbolic
from geometric_kernels.spaces.hypercube_graph import HypercubeGraph
from geometric_kernels.spaces.hypersphere import Hypersphere
from geometric_kernels.spaces.lie_groups import CompactMatrixLieGroup
from geometric_kernels.spaces.mesh import Mesh
from geometric_kernels.spaces.product import ProductDiscreteSpectrumSpace
from geometric_kernels.spaces.so import SpecialOrthogonal
from geometric_kernels.spaces.spd import SymmetricPositiveDefiniteMatrices
from geometric_kernels.spaces.su import SpecialUnitary
-------------------------------------------------------------------------------- /geometric_kernels/spaces/spd.py: --------------------------------------------------------------------------------
"""
This module provides the :class:`SymmetricPositiveDefiniteMatrices` space.
3 | """ 4 | 5 | import geomstats as gs 6 | import lab as B 7 | 8 | from geometric_kernels.lab_extras import ( 9 | complex_like, 10 | create_complex, 11 | dtype_double, 12 | from_numpy, 13 | qr, 14 | slogdet, 15 | ) 16 | from geometric_kernels.spaces.base import NoncompactSymmetricSpace 17 | from geometric_kernels.utils.utils import ordered_pairwise_differences 18 | 19 | 20 | class SymmetricPositiveDefiniteMatrices( 21 | NoncompactSymmetricSpace, gs.geometry.spd_matrices.SPDMatrices 22 | ): 23 | r""" 24 | The GeometricKernels space representing the manifold of symmetric positive 25 | definite matrices $SPD(n)$ with the affine-invariant Riemannian metric. 26 | 27 | The elements of this space are represented by positive definite matrices of 28 | size n x n. Positive definite means _strictly_ positive definite here, not 29 | positive semi-definite. 30 | 31 | The class inherits the interface of geomstats's `SPDMatrices`. 32 | 33 | .. note:: 34 | A tutorial on how to use this space is available in the 35 | :doc:`SPD.ipynb ` notebook. 36 | 37 | :param n: 38 | Size of the matrices, the $n$ in $SPD(n)$. 39 | 40 | .. note:: 41 | As mentioned in :ref:`this note `, any symmetric space 42 | is a quotient G/H. For the manifold of symmetric positive definite 43 | matrices $SPD(n)$, the group of symmetries $G$ is the identity component 44 | $GL(n)_+$ of the general linear group $GL(n)$, while the isotropy 45 | subgroup $H$ is the special orthogonal group $SO(n)$. See the 46 | mathematical details in :cite:t:`azangulov2024b`. 47 | 48 | .. admonition:: Citation 49 | 50 | If you use this GeometricKernels space in your research, please consider 51 | citing :cite:t:`azangulov2024b`. 52 | """ 53 | 54 | def __init__(self, n): 55 | super().__init__(n) 56 | 57 | def __str__(self): 58 | return f"SymmetricPositiveDefiniteMatrices({self.n})" 59 | 60 | @property 61 | def dimension(self) -> int: 62 | """ 63 | Returns n(n+1)/2 where `n` was passed down to `__init__`. 
64 | """ 65 | dim = self.n * (self.n + 1) / 2 66 | return dim 67 | 68 | @property 69 | def degree(self) -> int: 70 | return self.n 71 | 72 | @property 73 | def rho(self): 74 | return (B.range(self.degree) + 1) - (self.degree + 1) / 2 75 | 76 | @property 77 | def num_axes(self): 78 | """ 79 | Number of axes in an array representing a point in the space. 80 | 81 | :return: 82 | 2. 83 | """ 84 | return 2 85 | 86 | def random_phases(self, key, num): 87 | if not isinstance(num, tuple): 88 | num = (num,) 89 | key, x = B.randn(key, dtype_double(key), *num, self.degree, self.degree) 90 | Q, R = qr(x) 91 | r_diag_sign = B.sign(B.diag_extract(R)) # [B, N] 92 | Q *= B.expand_dims(r_diag_sign, -1) # [B, D, D] 93 | sign_det, _ = slogdet(Q) # [B, ] 94 | 95 | # equivalent to Q[..., 0] *= B.expand_dims(sign_det, -1) 96 | Q0 = Q[..., 0] * B.expand_dims(sign_det, -1) # [B, D] 97 | Q = B.concat(B.expand_dims(Q0, -1), Q[..., 1:], axis=-1) # [B, D, D] 98 | return key, Q 99 | 100 | def inv_harish_chandra(self, lam): 101 | diffs = ordered_pairwise_differences(lam) 102 | diffs = B.abs(diffs) 103 | logprod = B.sum( 104 | B.log(B.pi * diffs) + B.log(B.tanh(B.pi * diffs)), axis=-1 105 | ) # [B, ] 106 | return B.exp(0.5 * logprod) 107 | 108 | def power_function(self, lam, g, h): 109 | g = B.cholesky(g) 110 | gh = B.matmul(g, h) 111 | Q, R = qr(gh) 112 | 113 | u = B.abs(B.diag_extract(R)) 114 | logu = B.cast(complex_like(R), B.log(u)) 115 | exponent = create_complex(from_numpy(lam, self.rho), lam) # [..., D] 116 | logpower = logu * exponent # [..., D] 117 | logproduct = B.sum(logpower, axis=-1) # [...,] 118 | logproduct = B.cast(complex_like(lam), logproduct) 119 | return B.exp(logproduct) 120 | 121 | def random(self, key, number): 122 | """ 123 | Non-uniform random sampling, reimplements the algorithm from geomstats. 124 | 125 | Always returns [N, n, n] float64 array of the `key`'s backend. 
126 | 127 | :param key: 128 | Either `np.random.RandomState`, `tf.random.Generator`, 129 | `torch.Generator` or `jax.tensor` (representing random state). 130 | :param number: 131 | Number of samples to draw. 132 | 133 | :return: 134 | An array of `number` random samples on the space. 135 | """ 136 | 137 | key, mat = B.rand(key, dtype_double(key), number, self.n, self.n) 138 | mat = 2 * mat - 1 139 | mat_symm = 0.5 * (mat + B.transpose(mat, (0, 2, 1))) 140 | 141 | return key, B.expm(mat_symm) 142 | 143 | @property 144 | def element_shape(self): 145 | """ 146 | :return: 147 | [n, n]. 148 | """ 149 | return [self.n, self.n] 150 | 151 | @property 152 | def element_dtype(self): 153 | """ 154 | :return: 155 | B.Float. 156 | """ 157 | return B.Float 158 | -------------------------------------------------------------------------------- /geometric_kernels/tensorflow.py: -------------------------------------------------------------------------------- 1 | """ 2 | Loads TensorFlow backend in lab, spherical_harmonics and geometric_kernels. 3 | 4 | .. note:: 5 | A tutorial on the TensorFlow backend is available in the 6 | :doc:`backends/TensorFlow_Graph.ipynb ` 7 | notebook. 8 | """ 9 | 10 | import logging 11 | 12 | import lab.tensorflow # noqa 13 | import spherical_harmonics.tensorflow # noqa 14 | 15 | import geometric_kernels.lab_extras.tensorflow # noqa 16 | 17 | logging.getLogger(__name__).info("Tensorflow backend enabled.") 18 | -------------------------------------------------------------------------------- /geometric_kernels/torch.py: -------------------------------------------------------------------------------- 1 | """ 2 | Loads PyTorch backend in lab, spherical_harmonics and geometric_kernels. 3 | 4 | .. note:: 5 | A tutorial on the PyTorch backend is available in the 6 | :doc:`backends/PyTorch_Graph.ipynb ` 7 | notebook. 
8 | """ 9 | 10 | import logging 11 | 12 | import lab.torch # noqa 13 | import spherical_harmonics.torch # noqa 14 | 15 | import geometric_kernels.lab_extras.torch # noqa 16 | 17 | logging.getLogger(__name__).info("Torch backend enabled.") 18 | -------------------------------------------------------------------------------- /geometric_kernels/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Contains utilities. 3 | """ 4 | -------------------------------------------------------------------------------- /geometric_kernels/utils/kernel_formulas/__init__.py: -------------------------------------------------------------------------------- 1 | from geometric_kernels.utils.kernel_formulas.euclidean import ( 2 | euclidean_matern_12_kernel, 3 | euclidean_matern_32_kernel, 4 | euclidean_matern_52_kernel, 5 | euclidean_rbf_kernel, 6 | ) 7 | from geometric_kernels.utils.kernel_formulas.hyperbolic import ( 8 | hyperbolic_heat_kernel_even, 9 | hyperbolic_heat_kernel_odd, 10 | ) 11 | from geometric_kernels.utils.kernel_formulas.hypercube_graph import ( 12 | hypercube_graph_heat_kernel, 13 | ) 14 | from geometric_kernels.utils.kernel_formulas.spd import spd_heat_kernel_2x2 15 | -------------------------------------------------------------------------------- /geometric_kernels/utils/kernel_formulas/euclidean.py: -------------------------------------------------------------------------------- 1 | """ 2 | Implements the standard formulas for the RBF kernel and some Matérn kernels. 3 | 4 | The implementation is provided mainly for testing purposes. 5 | """ 6 | 7 | from math import sqrt 8 | 9 | import lab as B 10 | from beartype.typing import Optional 11 | 12 | 13 | def euclidean_matern_12_kernel( 14 | r: B.Numeric, 15 | lengthscale: Optional[float] = 1.0, 16 | ): 17 | """ 18 | Analytic formula for the Matérn 1/2 kernel on R^d, as a function of 19 | distance `r` between inputs. 
20 | 21 | :param r: 22 | A batch of distances, an array of shape [...]. 23 | :param lengthscale: 24 | The length scale of the kernel, defaults to 1. 25 | 26 | :return: 27 | The kernel values evaluated at `r`, an array of shape [...]. 28 | """ 29 | 30 | assert B.all(r >= 0.0) 31 | 32 | return B.exp(-r / lengthscale) 33 | 34 | 35 | def euclidean_matern_32_kernel( 36 | r: B.Numeric, 37 | lengthscale: Optional[float] = 1.0, 38 | ): 39 | """ 40 | Analytic formula for the Matérn 3/2 kernel on R^d, as a function of 41 | distance `r` between inputs. 42 | 43 | :param r: 44 | A batch of distances, an array of shape [...]. 45 | :param lengthscale: 46 | The length scale of the kernel, defaults to 1. 47 | 48 | :return: 49 | The kernel values evaluated at `r`, an array of shape [...]. 50 | """ 51 | 52 | assert B.all(r >= 0.0) 53 | 54 | sqrt3 = sqrt(3.0) 55 | r = r / lengthscale 56 | return (1.0 + sqrt3 * r) * B.exp(-sqrt3 * r) 57 | 58 | 59 | def euclidean_matern_52_kernel( 60 | r: B.Numeric, 61 | lengthscale: Optional[float] = 1.0, 62 | ): 63 | """ 64 | Analytic formula for the Matérn 5/2 kernel on R^d, as a function of 65 | distance `r` between inputs. 66 | 67 | :param r: 68 | A batch of distances, an array of shape [...]. 69 | :param lengthscale: 70 | The length scale of the kernel, defaults to 1. 71 | 72 | :return: 73 | The kernel values evaluated at `r`, an array of shape [...]. 74 | """ 75 | 76 | assert B.all(r >= 0.0) 77 | 78 | sqrt5 = sqrt(5.0) 79 | r = r / lengthscale 80 | return (1.0 + sqrt5 * r + 5.0 / 3.0 * (r**2)) * B.exp(-sqrt5 * r) 81 | 82 | 83 | def euclidean_rbf_kernel( 84 | r: B.Numeric, 85 | lengthscale: Optional[float] = 1.0, 86 | ): 87 | """ 88 | Analytic formula for the RBF kernel on R^d, as a function of 89 | distance `r` between inputs. 90 | 91 | :param r: 92 | A batch of distances, an array of shape [...]. 93 | :param lengthscale: 94 | The length scale of the kernel, defaults to 1. 
95 | 96 | :return: 97 | The kernel values evaluated at `r`, an array of shape [...]. 98 | """ 99 | 100 | assert B.all(r >= 0.0) 101 | 102 | r = r / lengthscale 103 | return B.exp(-0.5 * r**2) 104 | -------------------------------------------------------------------------------- /geometric_kernels/utils/kernel_formulas/hypercube_graph.py: -------------------------------------------------------------------------------- 1 | """ 2 | Implements the closed form expression for the heat kernel on the hypercube graph. 3 | 4 | The implementation is provided mainly for testing purposes. 5 | """ 6 | 7 | from math import sqrt 8 | 9 | import lab as B 10 | from beartype.typing import Optional 11 | 12 | from geometric_kernels.lab_extras import float_like 13 | from geometric_kernels.utils.utils import hamming_distance 14 | 15 | 16 | def hypercube_graph_heat_kernel( 17 | lengthscale: B.Numeric, 18 | X: B.Numeric, 19 | X2: Optional[B.Numeric] = None, 20 | normalized_laplacian: bool = True, 21 | ): 22 | """ 23 | Analytic formula for the heat kernel on the hypercube graph, see 24 | Equation (14) in :cite:t:`borovitskiy2023`. 25 | 26 | :param lengthscale: 27 | The length scale of the kernel, an array of shape [1]. 28 | :param X: 29 | A batch of inputs, an array of shape [N, d]. 30 | :param X2: 31 | A batch of inputs, an array of shape [N2, d]. If None, defaults to X. 32 | 33 | :return: 34 | The kernel matrix, an array of shape [N, N2]. 35 | """ 36 | if X2 is None: 37 | X2 = X 38 | 39 | assert lengthscale.shape == (1,) 40 | assert X.ndim == 2 and X2.ndim == 2 41 | assert X.shape[-1] == X2.shape[-1] 42 | 43 | if normalized_laplacian: 44 | d = X.shape[-1] 45 | lengthscale = lengthscale / sqrt(d) 46 | 47 | # For TensorFlow, we need to explicitly cast the distances to double. 48 | # Note: if we use B.dtype_float(X) instead of float_like(X), it gives 49 | # float16 and TensorFlow is still complaining. 
50 | hamming_distances = B.cast(float_like(X), hamming_distance(X, X2)) 51 | 52 | return B.tanh(lengthscale**2 / 2) ** hamming_distances 53 | -------------------------------------------------------------------------------- /geometric_kernels/utils/kernel_formulas/spd.py: -------------------------------------------------------------------------------- 1 | """ 2 | Implements an alternative formula for the heat kernel on the manifold of 3 | symmetric positive definite matrices by :cite:t:`sawyer1992`. 4 | 5 | The implementation is adapted from https://github.com/imbirik/LieStationaryKernels. 6 | Since the resulting approximation 7 | * can fail to be positive semi-definite, 8 | * is very slow, 9 | * and is rather numerically unstable, 10 | it is not recommended to use it in practice. The implementation is provided 11 | mainly for testing purposes. 12 | """ 13 | 14 | import lab as B 15 | import numpy as np 16 | import scipy 17 | from beartype.typing import Optional 18 | 19 | 20 | def _spd_heat_kernel_2x2_base( 21 | t: float, 22 | x: B.NPNumeric, 23 | x2: Optional[B.NPNumeric] = None, 24 | ) -> float: 25 | """ 26 | The semi-analytic formula for the heat kernel on manifold of symmetric 27 | positive definite matrices 2x2 from :cite:t:`sawyer1992`. The implementation 28 | is adapted from https://github.com/imbirik/LieStationaryKernels. 29 | 30 | :param t: 31 | The time parameter, a positive float. 32 | :param x: 33 | A single input, an array of shape [2, 2]. 34 | :param x2: 35 | A single input, an array of shape [2, 2]. If None, defaults to x. 36 | 37 | :return: 38 | An approximation of the kernel value k(x, x2), a float. The kernel is not 39 | normalized, i.e. k(x, x) may be an arbitrary (implementation-dependent) 40 | positive number. For the normalized kernel which can also handle batch 41 | inputs outputting covariance matrices, use :func:`spd_heat_kernel_2x2`. 
42 | """ 43 | if x2 is None: 44 | x2 = x 45 | 46 | assert x.shape == (2, 2) 47 | assert x2.shape == (2, 2) 48 | 49 | cl_1 = np.linalg.cholesky(x) 50 | cl_2 = np.linalg.cholesky(x2) 51 | diff = np.linalg.inv(cl_2) @ cl_1 52 | _, singular_values, _ = np.linalg.svd(diff) 53 | # Note: singular values that np.linalg.svd outputs are sorted, the following 54 | # code relies on this fact. 55 | H1, H2 = np.log(singular_values[0]), np.log(singular_values[1]) 56 | assert H1 >= H2 57 | 58 | r_H_sq = H1 * H1 + H2 * H2 59 | alpha = H1 - H2 60 | 61 | # Non-integral part 62 | result = 1.0 63 | result *= np.exp(-r_H_sq / (4 * t)) 64 | 65 | # Integrand 66 | def link_function(x): 67 | if x < 1e-5: 68 | x = 1e-5 69 | res = 1.0 70 | res *= 2 * x + alpha 71 | res *= np.exp(-x * (x + alpha) / (2 * t)) 72 | res *= pow(np.sinh(x) * np.sinh(x + alpha), -1 / 2) 73 | return res 74 | 75 | # Evaluating the integral 76 | 77 | # scipy.integrate.quad is much more accurate than np.trapz with 78 | # b_vals = np.logspace(-3., 1, 1000), at least if we believe 79 | # that Mathematica's NIntegrate is accurate. Also, you might think that 80 | # scipy.integrate.quad_vec can be used to compute a whole covariance matrix 81 | # at once. However, it seems to show terrible accuracy in this case. 82 | 83 | integral, error = scipy.integrate.quad(link_function, 0, np.inf) 84 | 85 | result *= integral 86 | 87 | return result 88 | 89 | 90 | def spd_heat_kernel_2x2( 91 | t: float, 92 | X: B.NPNumeric, 93 | X2: Optional[B.NPNumeric] = None, 94 | ) -> B.NPNumeric: 95 | """ 96 | The semi-analytic formula for the heat kernel on manifold of symmetric 97 | positive definite matrices 2x2 from :cite:t:`sawyer1992`, normalized to 98 | have k(x, x) = 1 for all x. The implementation is adapted from 99 | https://github.com/imbirik/LieStationaryKernels. 100 | 101 | :param t: 102 | The time parameter, a positive float. 103 | :param X: 104 | A batch of inputs, an array of shape [N, 2, 2]. 
105 | :param X2: 106 | A batch of inputs, an array of shape [N2, 2, 2]. If None, defaults to X. 107 | 108 | :return: 109 | The kernel matrix, an array of shape [N, N2]. The kernel is normalized, 110 | i.e. k(x, x) = 1 for all x. 111 | """ 112 | 113 | if X2 is None: 114 | X2 = X 115 | 116 | normalization = _spd_heat_kernel_2x2_base(t, np.eye(2, 2)) 117 | 118 | result = np.zeros((X.shape[0], X2.shape[0])) 119 | 120 | # This is a very inefficient implementation, but it will do for tests. The 121 | # straightforward vectorization of _spd_heat_kernel_2x2_base is not possible 122 | # due to scipy.integrate.quad_vec giving very bad accuracy in this case. 123 | for i, x in enumerate(X): 124 | for j, x2 in enumerate(X2): 125 | result[i, j] = _spd_heat_kernel_2x2_base(t, x, x2) / normalization 126 | 127 | return result 128 | -------------------------------------------------------------------------------- /geometric_kernels/utils/manifold_utils.py: -------------------------------------------------------------------------------- 1 | """ Utilities for dealing with manifolds. """ 2 | 3 | import lab as B 4 | import numpy as np 5 | from beartype.typing import Optional 6 | 7 | from geometric_kernels.lab_extras import from_numpy 8 | 9 | 10 | def minkowski_inner_product(vector_a: B.Numeric, vector_b: B.Numeric) -> B.Numeric: 11 | r""" 12 | Computes the Minkowski inner product of vectors. 13 | 14 | .. math:: \langle a, b \rangle = a_0 b_0 - a_1 b_1 - \ldots - a_n b_n. 15 | 16 | :param vector_a: 17 | An [..., n+1]-shaped array of points in the hyperbolic space $\mathbb{H}_n$. 18 | :param vector_b: 19 | An [..., n+1]-shaped array of points in the hyperbolic space $\mathbb{H}_n$. 20 | 21 | :return: 22 | An [...,]-shaped array of inner products. 
23 | """ 24 | assert vector_a.shape == vector_b.shape 25 | n = vector_a.shape[-1] - 1 26 | assert n > 0 27 | diagonal = from_numpy(vector_a, [-1.0] + [1.0] * n) # (n+1) 28 | diagonal = B.cast(B.dtype(vector_a), diagonal) 29 | return B.einsum("...i,...i->...", diagonal * vector_a, vector_b) 30 | 31 | 32 | def hyperbolic_distance( 33 | x1: B.Numeric, x2: B.Numeric, diag: Optional[bool] = False 34 | ) -> B.Numeric: 35 | """ 36 | Compute the hyperbolic distance between `x1` and `x2`. 37 | 38 | The code is a reimplementation of 39 | `geomstats.geometry.hyperboloid.HyperbolicMetric` for `lab`. 40 | 41 | :param x1: 42 | An [N, n+1]-shaped array of points in the hyperbolic space. 43 | :param x2: 44 | An [M, n+1]-shaped array of points in the hyperbolic space. 45 | :param diag: 46 | If True, compute elementwise distance. Requires N = M. 47 | 48 | Default False. 49 | 50 | :return: 51 | An [N, M]-shaped array if diag=False or [N,]-shaped array 52 | if diag=True. 53 | """ 54 | if diag: 55 | # Compute a pointwise distance between `x1` and `x2` 56 | x1_ = x1 57 | x2_ = x2 58 | else: 59 | if B.rank(x1) == 1: 60 | x1 = B.expand_dims(x1) 61 | if B.rank(x2) == 1: 62 | x2 = B.expand_dims(x2) 63 | 64 | # compute pairwise distance between arrays of points `x1` and `x2` 65 | # `x1` (N, n+1) 66 | # `x2` (M, n+1) 67 | x1_ = B.tile(x1[..., None, :], 1, x2.shape[0], 1) # (N, M, n+1) 68 | x2_ = B.tile(x2[None], x1.shape[0], 1, 1) # (N, M, n+1) 69 | 70 | sq_norm_1 = minkowski_inner_product(x1_, x1_) 71 | sq_norm_2 = minkowski_inner_product(x2_, x2_) 72 | inner_prod = minkowski_inner_product(x1_, x2_) 73 | 74 | cosh_angle = -inner_prod / B.sqrt(sq_norm_1 * sq_norm_2) 75 | 76 | one = B.cast(B.dtype(cosh_angle), from_numpy(cosh_angle, [1.0])) 77 | large_constant = B.cast(B.dtype(cosh_angle), from_numpy(cosh_angle, [1e24])) 78 | 79 | # clip values into [1.0, 1e24] 80 | cosh_angle = B.where(cosh_angle < one, one, cosh_angle) 81 | cosh_angle = B.where(cosh_angle > large_constant, large_constant, 
cosh_angle) 82 | 83 | dist = B.log(cosh_angle + B.sqrt(cosh_angle**2 - 1)) # arccosh 84 | dist = B.cast(B.dtype(x1_), dist) 85 | return dist 86 | 87 | 88 | def manifold_laplacian(x: B.Numeric, manifold, egrad, ehess): 89 | r""" 90 | Computes the manifold Laplacian of a given function at a given point x. 91 | The manifold Laplacian equals the trace of the manifold Hessian, i.e., 92 | $\Delta_M f(x) = \sum_{i=1}^{d} \nabla^2 f(x_i, x_i)$, where 93 | $[x_i]_{i=1}^{d}$ is an orthonormal basis of the tangent space at x. 94 | 95 | .. warning:: 96 | This function only works for hyperspheres out of the box. We will 97 | need to change that in the future. 98 | 99 | .. todo:: 100 | See warning above. 101 | 102 | :param x: 103 | A point on the manifold at which to compute the Laplacian. 104 | :param manifold: 105 | A geomstats manifold. 106 | :param egrad: 107 | Euclidean gradient of the given function at x. 108 | :param ehess: 109 | Euclidean Hessian of the given function at x. 110 | 111 | :return: 112 | Manifold Laplacian of the given function at x. 113 | 114 | See :cite:t:`jost2011` (Chapter 3.1) for mathematical details. 115 | """ 116 | dim = manifold.dim 117 | 118 | onb = tangent_onb(manifold, B.to_numpy(x)) 119 | result = 0.0 120 | for j in range(dim): 121 | cur_vec = onb[:, j] 122 | egrad_x = B.to_numpy(egrad(x)) 123 | ehess_x = B.to_numpy(ehess(x, from_numpy(x, cur_vec))) 124 | hess_vec_prod = manifold.ehess2rhess(B.to_numpy(x), egrad_x, ehess_x, cur_vec) 125 | result += manifold.metric.inner_product( 126 | hess_vec_prod, cur_vec, base_point=B.to_numpy(x) 127 | ) 128 | 129 | return result 130 | 131 | 132 | def tangent_onb(manifold, x): 133 | r""" 134 | Computes an orthonormal basis on the tangent space at x. 135 | 136 | .. warning:: 137 | This function only works for hyperspheres out of the box. We will 138 | need to change that in the future. 139 | 140 | .. todo:: 141 | See warning above. 142 | 143 | :param manifold: 144 | A geomstats manifold. 
145 | :param x: 146 | A point on the manifold. 147 | 148 | :return: 149 | A [d, d]-shaped array containing the orthonormal basis 150 | on `manifold` at `x`. 151 | """ 152 | ambient_dim = manifold.dim + 1 153 | manifold_dim = manifold.dim 154 | ambient_onb = np.eye(ambient_dim) 155 | 156 | projected_onb = manifold.to_tangent(ambient_onb, base_point=x) 157 | 158 | projected_onb_eigvals, projected_onb_eigvecs = np.linalg.eigh(projected_onb) 159 | 160 | # Getting rid of the zero eigenvalues: 161 | projected_onb_eigvals = projected_onb_eigvals[ambient_dim - manifold_dim :] 162 | projected_onb_eigvecs = projected_onb_eigvecs[:, ambient_dim - manifold_dim :] 163 | 164 | assert np.all(np.isclose(projected_onb_eigvals, 1.0)) 165 | 166 | return projected_onb_eigvecs 167 | -------------------------------------------------------------------------------- /geometric_kernels/utils/product.py: -------------------------------------------------------------------------------- 1 | """ Utilities for dealing with product spaces and product kernels. """ 2 | 3 | import lab as B 4 | from beartype.typing import Dict, List 5 | 6 | from geometric_kernels.lab_extras import smart_cast 7 | 8 | 9 | def params_to_params_list( 10 | number_of_factors: int, params: Dict[str, B.Numeric] 11 | ) -> List[Dict[str, B.Numeric]]: 12 | """ 13 | Takes a dictionary of parameters of a product kernel and returns a list of 14 | dictionaries of parameters for the factor kernels. The shape of "lengthscale" 15 | should be the same as the shape of "nu", and the length of both should be 16 | either 1 or equal to `number_of_factors`. 17 | 18 | :param number_of_factors: 19 | Number of factors in the product kernel. 20 | :param params: 21 | Parameters of the product kernel. 
22 | """ 23 | assert params["lengthscale"].shape == params["nu"].shape 24 | assert len(params["nu"].shape) == 1 25 | 26 | if params["nu"].shape[0] == 1: 27 | return [params] * number_of_factors 28 | 29 | assert params["nu"].shape[0] == number_of_factors 30 | 31 | list_of_params: List[Dict[str, B.Numeric]] = [] 32 | for i in range(number_of_factors): 33 | list_of_params.append( 34 | { 35 | "lengthscale": params["lengthscale"][i : i + 1], 36 | "nu": params["nu"][i : i + 1], 37 | } 38 | ) 39 | 40 | return list_of_params 41 | 42 | 43 | def make_product(xs: List[B.Numeric]) -> B.Numeric: 44 | """ 45 | Embed a list of elements of factor spaces into the product space. 46 | Assumes that elements are batched along the first dimension. 47 | 48 | :param xs: 49 | List of the batches of elements, each of the shape [N, ], 50 | where `` is the shape of the elements of the respective 51 | space. 52 | 53 | :return: 54 | An [N, D]-shaped array, a batch of product space elements, where `D` is 55 | the sum, over all factor spaces, of `prod()`. 56 | """ 57 | common_dtype = B.promote_dtypes(*[B.dtype(x) for x in xs]) 58 | 59 | flat_xs = [B.cast(common_dtype, B.reshape(x, B.shape(x)[0], -1)) for x in xs] 60 | return B.concat(*flat_xs, axis=-1) 61 | 62 | 63 | def project_product( 64 | x: B.Numeric, 65 | dimension_indices: List[List[int]], 66 | element_shapes: List[List[int]], 67 | element_dtypes: List[B.DType], 68 | ) -> List[B.Numeric]: 69 | """ 70 | Project an element of the product space onto each factor. 71 | Assumes that elements are batched along the first dimension. 72 | 73 | :param x: 74 | An [N, D]-shaped array, a batch of N product space elements. 75 | :param dimension_indices: 76 | Determines how a product space element `x` is to be mapped to inputs 77 | `xi` of the factor kernels. `xi` are assumed to be equal to 78 | `x[dimension_indices[i]]`, possibly up to a reshape. 
Such a reshape 79 | might be necessary to accommodate the spaces whose elements are matrices 80 | rather than vectors, as determined by `element_shapes`. 81 | :param element_shapes: 82 | Shapes of the elements in each factor. Can be obtained as properties 83 | `space.element_shape` of any given factor `space`. 84 | :param element_dtypes: 85 | Abstract lab data types of the elements in each factor. Can be obtained 86 | as properties `space.element_dtype` of any given factor `space`. 87 | 88 | :return: 89 | A list of the batches of elements `xi` in factor spaces, each of the 90 | shape `[N, *element_shapes[i]]`. 91 | """ 92 | N = x.shape[0] 93 | xs = [ 94 | smart_cast(dtype, B.reshape(B.take(x, inds, axis=-1), N, *shape)) 95 | for inds, shape, dtype in zip(dimension_indices, element_shapes, element_dtypes) 96 | ] 97 | return xs 98 | -------------------------------------------------------------------------------- /geometric_kernels/utils/special_functions.py: -------------------------------------------------------------------------------- 1 | """ 2 | Special mathematical functions used in the library. 3 | """ 4 | 5 | import lab as B 6 | from beartype.typing import List, Optional 7 | 8 | from geometric_kernels.lab_extras import ( 9 | count_nonzero, 10 | from_numpy, 11 | int_like, 12 | take_along_axis, 13 | ) 14 | 15 | 16 | def walsh_function(d: int, combination: List[int], x: B.Bool) -> B.Float: 17 | r""" 18 | This function returns the value of the Walsh function 19 | 20 | .. math:: w_T(x_0, .., x_{d-1}) = (-1)^{\sum_{j \in T} x_j} 21 | 22 | where $d$ is `d`, $T$ is `combination`, and $x = (x_0, .., x_{d-1})$ is `x`. 23 | 24 | :param d: 25 | The degree of the Walsh function and the dimension of its inputs. 26 | An integer d > 0. 27 | :param combination: 28 | A subset of the set $\{0, .., d-1\}$ determining the particular Walsh 29 | function. A list of integers. 30 | :param x: 31 | A batch of binary vectors $x = (x_0, .., x_{d-1})$ of shape [N, d]. 
32 | 33 | :return: 34 | The value of the Walsh function $w_T(x)$ evaluated for every $x$ in the 35 | batch. An array of shape [N]. 36 | 37 | """ 38 | assert x.ndim == 2 39 | assert x.shape[-1] == d 40 | 41 | indices = B.cast(int_like(x), from_numpy(x, combination))[None, :] 42 | 43 | return (-1) ** count_nonzero(take_along_axis(x, indices, axis=-1), axis=-1) 44 | 45 | 46 | def kravchuk_normalized( 47 | d: int, 48 | j: int, 49 | m: B.Int, 50 | kravchuk_normalized_j_minus_1: Optional[B.Float] = None, 51 | kravchuk_normalized_j_minus_2: Optional[B.Float] = None, 52 | ) -> B.Float: 53 | r""" 54 | This function returns $G_{d, j, m}/G_{d, j, 0}$ where $G_{d, j, m}$ is the 55 | Kravchuk polynomial defined below. 56 | 57 | Define the Kravchuk polynomial of degree d > 0 and order 0 <= j <= d as the 58 | function $G_{d, j, m}$ of the independent variable 0 <= m <= d given by 59 | 60 | .. math:: G_{d, j, m} = \sum_{T \subseteq \{0, .., d-1\}, |T| = j} w_T(x). 61 | 62 | Here $w_T$ are the Walsh functions on the hypercube graph $C^d$ and 63 | $x \in C^d$ is an arbitrary binary vector with $m$ ones (the right-hand side 64 | does not depend on the choice of a particular vector of the kind). 65 | 66 | .. note:: 67 | We are using the three term recurrence relation to compute the Kravchuk 68 | polynomials. Cf. Equation (60) of Chapter 5 in MacWilliams and Sloane "The 69 | Theory of Error-Correcting Codes", 1977. The parameters q and $\gamma$ 70 | from :cite:t:`macwilliams1977` are set to be q = 2; $\gamma = q - 1 = 1$. 71 | 72 | .. note:: 73 | We use the fact that $G_{d, j, 0} = \binom{d}{j}$. 74 | 75 | :param d: 76 | The degree of Kravchuk polynomial, an integer d > 0. 77 | Maps to n in :cite:t:`macwilliams1977`. 78 | :param j: 79 | The order of Kravchuk polynomial, an integer 0 <= j <= d. 80 | Maps to k in :cite:t:`macwilliams1977`. 81 | :param m: 82 | The independent variable, an integer 0 <= m <= d. 83 | Maps to x in :cite:t:`macwilliams1977`. 
84 | :param kravchuk_normalized_j_minus_1: 85 | The optional precomputed value of $G_{d, j-1, m}/G_{d, j-1, 0}$, helps 86 | to avoid exponential complexity growth due to the recursion. 87 | :param kravchuk_normalized_j_minus_2: 88 | The optional precomputed value of $G_{d, j-2, m}/G_{d, j-2, 0}$, helps 89 | to avoid exponential complexity growth due to the recursion. 90 | 91 | :return: 92 | $G_{d, j, m}/G_{d, j, 0}$ where $G_{d, j, m}$ is the Kravchuk polynomial. 93 | """ 94 | assert d > 0 95 | assert 0 <= j and j <= d 96 | assert B.all(0 <= m) and B.all(m <= d) 97 | 98 | m = B.cast(B.dtype_float(m), m) 99 | 100 | if j == 0: 101 | return B.ones(m) 102 | elif j == 1: 103 | return 1 - 2 * m / d 104 | else: 105 | if kravchuk_normalized_j_minus_1 is None: 106 | kravchuk_normalized_j_minus_1 = kravchuk_normalized(d, j - 1, m) 107 | if kravchuk_normalized_j_minus_2 is None: 108 | kravchuk_normalized_j_minus_2 = kravchuk_normalized(d, j - 2, m) 109 | rhs_1 = (d - 2 * m) * kravchuk_normalized_j_minus_1 110 | rhs_2 = -(j - 1) * kravchuk_normalized_j_minus_2 111 | return (rhs_1 + rhs_2) / (d - j + 1) 112 | -------------------------------------------------------------------------------- /geometric_kernels/version.py: -------------------------------------------------------------------------------- 1 | """Adds `__version__`.""" 2 | 3 | import importlib.metadata 4 | 5 | __version__ = importlib.metadata.version(__package__ or __name__) 6 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["flit_core >=3.2,<4"] 3 | build-backend = "flit_core.buildapi" 4 | 5 | [project] 6 | name = "geometric_kernels" 7 | authors = [ 8 | {name = "The GeometricKernels Contributors", email = "geometric-kernels@googlegroups.com"}, 9 | ] 10 | description="A Python Package offering geometric kernels in NumPy, TensorFlow, PyTorch, and Jax." 
11 | readme = "README.md" 12 | classifiers = [ 13 | "License :: OSI Approved :: Apache Software License", 14 | "Programming Language :: Python :: 3.8", 15 | "Programming Language :: Python :: 3.9", 16 | "Programming Language :: Python :: 3.10", 17 | "Programming Language :: Python :: 3.11", 18 | "Operating System :: OS Independent", 19 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 20 | ] 21 | keywords=[ 22 | "geometric-kernels", 23 | ] 24 | requires-python = ">=3.8" 25 | dependencies = [ 26 | "backends>=1.7", 27 | "einops", 28 | "geomstats", 29 | "numpy>=1.16", 30 | "opt-einsum", 31 | "plum-dispatch", 32 | "potpourri3d", 33 | "robust_laplacian", 34 | "scipy>=1.3", 35 | "spherical-harmonics-basis", 36 | "sympy~=1.13", 37 | ] 38 | version="0.3" 39 | 40 | [project.urls] 41 | Documentation = "https://geometric-kernels.github.io/" 42 | Source = "https://github.com/geometric-kernels/GeometricKernels" 43 | 44 | [tool.isort] 45 | profile = "black" 46 | skip_glob = [ 47 | "geometric_kernels/__init__.py", 48 | ] 49 | known_third_party = [ 50 | "lab", 51 | ] 52 | 53 | [tool.mypy] 54 | ignore_missing_imports = true 55 | strict_optional = false 56 | allow_redefinition = true 57 | 58 | 59 | [tool.black] 60 | line-length = 88 61 | target-version = ['py38', 'py39', 'py310', 'py311'] 62 | -------------------------------------------------------------------------------- /scripts/add_toc.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script takes a Jupyter notebook, and adds a table of contents at the top. 3 | 4 | The TOC is added in a new markdown cell. 5 | The items appearing in the TOC are all the markdown headers of the notebook. 6 | 7 | Usage: 8 | > add_toc.py notebook.ipynb 9 | 10 | Created by davide.gerbaudo@gmail.com in 2020. Modified by Viacheslav 11 | Borovitskiy in 2024. 
12 | """ 13 | 14 | import os 15 | import re 16 | import sys 17 | from collections import namedtuple 18 | 19 | import nbformat 20 | from nbformat.v4.nbbase import new_markdown_cell 21 | 22 | TOC_COMMENT = "\n" 23 | 24 | 25 | Header = namedtuple("Header", ["level", "name"]) 26 | 27 | 28 | def is_toc_comment(cell): 29 | return cell.source.startswith(TOC_COMMENT) 30 | 31 | 32 | def collect_headers(nb_name): 33 | headers = [] 34 | RE = re.compile(r"(?:^|\n)(?P#{1,6})(?P
(?:\\.|[^\\])*?)#*(?:\n|$)") 35 | nb = nbformat.read(nb_name, as_version=4) 36 | for cell in nb.cells: 37 | if is_toc_comment(cell): 38 | continue 39 | elif cell.cell_type == "markdown": 40 | for m in RE.finditer(cell.source): 41 | header = m.group("header").strip() 42 | level = m.group("level").strip().count("#") 43 | headers.append(Header(level, header)) 44 | print(level * " ", "-", header) 45 | return headers 46 | 47 | 48 | def write_toc(nb_name, headers, top_level): 49 | nb = nbformat.read(nb_name, as_version=4) 50 | nb_file = os.path.basename(nb_name) 51 | 52 | def format(header): 53 | indent = (header.level - 1) * (2 * " ") 54 | name = header.name 55 | anchor = "#" + name.replace(" ", "-") 56 | if header.level <= top_level: 57 | name = f"**{name}**" 58 | result = f"{indent}- [{name}]({anchor})" 59 | return result 60 | 61 | toc = TOC_COMMENT 62 | toc += "## Contents\n" 63 | toc += "\n".join([format(h) for h in headers if h.level <= top_level + 1]) 64 | 65 | first_cell = nb.cells[0] 66 | if is_toc_comment(first_cell): 67 | print("- amending toc for {0}".format(nb_file)) 68 | first_cell.source = toc 69 | else: 70 | print("- inserting toc for {0}".format(nb_file)) 71 | nb.cells.insert(0, new_markdown_cell(source=toc)) 72 | nbformat.write(nb, nb_name) 73 | 74 | 75 | if __name__ == "__main__": 76 | nb_name = sys.argv[1] 77 | headers = collect_headers(nb_name) 78 | 79 | title_level = headers[0].level 80 | if [header.level for header in headers].count(title_level) > 1: 81 | top_level = title_level 82 | else: 83 | top_level = title_level + 1 84 | 85 | if len(sys.argv) >= 3 and sys.argv[2] == "--skip-title": 86 | del headers[0] # this is typically the title of the notebook 87 | write_toc(nb_name, headers, top_level) 88 | -------------------------------------------------------------------------------- /scripts/increment_header_levels.py: -------------------------------------------------------------------------------- 1 | """ 2 | This script takes a Jupyter notebook, and 
"""
This script takes a Jupyter notebook, and turns
# -> ##
## -> ###
### -> ####
...

Usage:
> increment_header_levels.py notebook.ipynb

Created by Viacheslav Borovitskiy in 2024, based on the add_toc.py script
by davide.gerbaudo@gmail.com.
"""

import re
import sys
from collections import namedtuple

import nbformat

Header = namedtuple("Header", ["level", "name"])


def increment_header_levels(nb_name, skip_title):
    """Demote every markdown header in the notebook by one level.

    An extra "#" is inserted in front of each header found in the markdown
    cells, and the notebook is rewritten in place. If `skip_title` is true,
    the very first header encountered (typically the notebook title) is
    left untouched.
    """
    RE = re.compile(r"(?:^|\n)(?P<level>#{1,6})(?P<header>(?:\\.|[^\\])*?)#*(?:\n|$)")
    nb = nbformat.read(nb_name, as_version=4)
    title_skipped = not skip_title
    for cell in nb.cells:
        if cell.cell_type != "markdown":
            continue
        updated = cell.source
        shift = 0  # number of "#" characters inserted so far in this cell
        for m in RE.finditer(cell.source):
            print("---", m.group("header"))
            if not title_skipped:
                print("Skipping title...")
                title_skipped = True
                continue
            # Match positions refer to the original source, so offset them
            # by the insertions already made.
            pos = m.start("header") + shift
            updated = updated[:pos] + "#" + updated[pos:]
            shift += 1
        cell.source = updated
    nbformat.write(nb, nb_name)


if __name__ == "__main__":
    nb_name = sys.argv[1]
    skip_title = len(sys.argv) >= 3 and sys.argv[2] == "--skip-title"
    increment_header_levels(nb_name, skip_title)
"""
This script creates .nblink files in the `--nblink-dir` for all the .ipynb
files in the `--ipynb-dir`. It recursively traverses the latter and recreates
the same folder structure in the former.

Created by Viacheslav Borovitskiy in 2024.
"""

import os
from argparse import ArgumentParser
from pathlib import Path

parser = ArgumentParser(
    prog="nblinks_to_ipynbs",
    description="""Create .nblink files for all the .ipynb
                files, recursively traversing the directory with
                .ipynb-s and recreating the same folder structure
                in the destination directory.""",
    # NOTE: the example below previously said `python docs/nblinks_for_ipynbs.py`,
    # but this script lives in the `scripts/` directory of the repository.
    epilog="""If the current directory is the root directory of the library,
            the .ipynb files are located in the `./notebooks` directory,
            and you want to put the respective .nblink files into the
            `./docs/examples` directory, run
            `python scripts/nblinks_for_ipynbs.py --ipynb-dir ./notebooks --nblink-dir ./docs/examples`
            """,
)
parser.add_argument(
    "--ipynb-dir",
    dest="ipynb_dir",
    help="Directory where the actual .ipynb files are stored.",
    required=True,
)
parser.add_argument(
    "--nblink-dir",
    dest="nblink_dir",
    help="Directory to put the .nblink files.",
    required=True,
)

args = parser.parse_args()

for root, subdirs, filenames in os.walk(args.ipynb_dir):
    # Filter out all the files/folders that start with . (are hidden) and
    # all the files without the .ipynb extension.
    filenames = [f for f in filenames if not f[0] == "." and f.endswith(".ipynb")]
    subdirs[:] = [d for d in subdirs if not d[0] == "."]

    # Modify filenames to include paths relative to the args.ipynb_dir.
    filenames = [
        os.path.relpath(os.path.join(root, f), args.ipynb_dir) for f in filenames
    ]

    for f in filenames:
        nblink_path = Path(os.path.join(args.nblink_dir, f)).with_suffix(".nblink")
        # An .nblink file is a tiny JSON document whose "path" points at the
        # actual notebook, relative to the .nblink file's own directory.
        nblink_content = '{"path": "%s"}' % os.path.relpath(
            os.path.join(args.ipynb_dir, f), nblink_path.parent
        )
        print('Writing "%s" to %s.' % (nblink_content, nblink_path))
        os.makedirs(os.path.dirname(nblink_path), exist_ok=True)
        with open(nblink_path, "w") as nblink_file:
            nblink_file.write(nblink_content)
| torch==2.1.2 13 | gpytorch 14 | botorch 15 | 16 | # TensorFlow 17 | gpflow==2.9.0 18 | tensorflow==2.13.0 19 | tensorflow-probability==0.20.1 20 | 21 | # JAX 22 | jax==0.4.13 23 | jaxlib==0.4.13 24 | jaxtyping 25 | optax==0.1.7 26 | -------------------------------------------------------------------------------- /test_requirements-3.9.txt: -------------------------------------------------------------------------------- 1 | # Version-independent requirements 2 | ################################## 3 | -r test_requirements.txt 4 | 5 | # Version-dependent requirements 6 | ################################ 7 | 8 | # Base 9 | scipy==1.12.0 10 | 11 | # Torch 12 | torch==2.1.2 13 | gpytorch 14 | botorch 15 | 16 | # TensorFlow 17 | gpflow==2.9.0 18 | tensorflow==2.13.1 19 | tensorflow-probability==0.20.1 20 | 21 | # JAX 22 | jax==0.4.13 23 | jaxlib==0.4.13 24 | jaxtyping==0.2.25 25 | optax 26 | -------------------------------------------------------------------------------- /test_requirements.txt: -------------------------------------------------------------------------------- 1 | # Base 2 | ipykernel 3 | backends>=1.5.4 4 | 5 | # Plotting 6 | plotly 7 | kaleido 8 | 9 | # Tests and style checks 10 | black==24.3.0 11 | flake8==7.0.0 12 | isort==5.13.2 13 | autoflake 14 | pytest 15 | pytest-cov 16 | nbmake 17 | mypy 18 | 19 | # For running some tests 20 | scikit-learn 21 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | import geometric_kernels # noqa 2 | import geometric_kernels.jax # noqa 3 | import geometric_kernels.tensorflow # noqa 4 | import geometric_kernels.torch # noqa 5 | -------------------------------------------------------------------------------- /tests/data.py: -------------------------------------------------------------------------------- 1 | from math import sqrt as sr 2 | from pathlib import Path 3 | 4 | import numpy as 
np 5 | 6 | TEST_MESH_PATH = str(Path(__file__).parent.resolve() / "teddy.obj") 7 | 8 | TEST_GRAPH_ADJACENCY = np.array( 9 | [ 10 | [0, 1, 0, 0, 0, 0, 0], 11 | [1, 0, 1, 1, 1, 0, 0], 12 | [0, 1, 0, 0, 0, 1, 0], 13 | [0, 1, 0, 0, 1, 0, 0], 14 | [0, 1, 0, 1, 0, 0, 0], 15 | [0, 0, 1, 0, 0, 0, 0], 16 | [0, 0, 0, 0, 0, 0, 0], 17 | ] 18 | ).astype(np.float64) 19 | 20 | 21 | TEST_GRAPH_LAPLACIAN = np.array( 22 | [ 23 | [1, -1, 0, 0, 0, 0, 0], 24 | [-1, 4, -1, -1, -1, 0, 0], 25 | [0, -1, 2, 0, 0, -1, 0], 26 | [0, -1, 0, 2, -1, 0, 0], 27 | [0, -1, 0, -1, 2, 0, 0], 28 | [0, 0, -1, 0, 0, 1, 0], 29 | [0, 0, 0, 0, 0, 0, 0], 30 | ] 31 | ).astype( 32 | np.float64 33 | ) # corresponds to TEST_GRAPH_ADJACENCY, unnormalized Laplacian 34 | 35 | TEST_GRAPH_LAPLACIAN_NORMALIZED = np.array( 36 | [ 37 | [1, -0.5, 0, 0, 0, 0, 0], # noqa: E241 38 | [-0.5, 1, -1 / sr(2) / 2, -1 / sr(2) / 2, -1 / sr(2) / 2, 0, 0], # noqa: E241 39 | [0, -1 / sr(2) / 2, 1, 0, 0, -1 / sr(2), 0], # noqa: E241 40 | [0, -1 / sr(2) / 2, 0, 1, -0.5, 0, 0], # noqa: E241 41 | [0, -1 / sr(2) / 2, 0, -0.5, 1, 0, 0], # noqa: E241 42 | [0, 0, -1 / sr(2), 0, 0, 1, 0], # noqa: E241 43 | [0, 0, 0, 0, 0, 0, 0], # noqa: E241 44 | ] 45 | ).astype( 46 | np.float64 47 | ) # corresponds to TEST_GRAPH_ADJACENCY, normalized Laplacian 48 | 49 | TEST_GRAPH_EDGES_NUM_NODES = 5 50 | 51 | TEST_GRAPH_EDGES_ADJACENCY = np.array( 52 | [ 53 | [0, 1, 1, 0, 1], 54 | [1, 0, 1, 1, 0], 55 | [1, 1, 0, 1, 0], 56 | [0, 1, 1, 0, 1], 57 | [1, 0, 0, 1, 0], 58 | ] 59 | ).astype(np.int32) 60 | 61 | TEST_GRAPH_EDGES_UP_LAPLACIAN = np.array( 62 | [ 63 | [1, -1, 0, 1, 0, 0, 0], 64 | [-1, 1, 0, -1, 0, 0, 0], 65 | [0, 0, 0, 0, 0, 0, 0], 66 | [1, -1, 0, 1, 0, 0, 0], 67 | [0, 0, 0, 0, 0, 0, 0], 68 | [0, 0, 0, 0, 0, 0, 0], 69 | [0, 0, 0, 0, 0, 0, 0], 70 | ] 71 | ).astype(np.float64) 72 | 73 | TEST_GRAPH_EDGES_DOWN_LAPLACIAN = np.array( 74 | [ 75 | [2, 1, 1, -1, -1, 0, 0], 76 | [1, 2, 1, 1, 0, -1, 0], 77 | [1, 1, 2, 0, 0, 0, 1], 78 | [-1, 1, 0, 2, 1, -1, 0], 79 | 
[-1, 0, 0, 1, 2, 1, -1], 80 | [0, -1, 0, -1, 1, 2, -1], 81 | [0, 0, 1, 0, -1, -1, 2], 82 | ] 83 | ).astype(np.float64) 84 | 85 | TEST_GRAPH_EDGES_ORIENTED_EDGES = np.array( 86 | [ 87 | [0, 1], 88 | [0, 2], 89 | [0, 4], 90 | [1, 2], 91 | [1, 3], 92 | [2, 3], 93 | [3, 4], 94 | ] 95 | ).astype(np.int32) 96 | 97 | TEST_GRAPH_EDGES_TRIANGLES = [(0, 1, 2)] 98 | 99 | TEST_GRAPH_EDGES_ORIENTED_TRIANGLES = np.array( 100 | [ 101 | [1, 4, -2], 102 | ] 103 | ).astype(np.int32) 104 | 105 | TEST_GRAPH_EDGES_ORIENTED_TRIANGLES_AUTO = np.array( 106 | [ 107 | [1, 4, -2], 108 | [4, 6, -5], 109 | ] 110 | ).astype(np.int32) 111 | -------------------------------------------------------------------------------- /tests/feature_maps/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geometric-kernels/GeometricKernels/fa53ebe14130c27530f4bda2c980b0189d1d388d/tests/feature_maps/__init__.py -------------------------------------------------------------------------------- /tests/feature_maps/test_feature_maps.py: -------------------------------------------------------------------------------- 1 | import lab as B 2 | import numpy as np 3 | import pytest 4 | 5 | from geometric_kernels.feature_maps import RandomPhaseFeatureMapCompact 6 | from geometric_kernels.kernels import MaternGeometricKernel, default_feature_map 7 | from geometric_kernels.kernels.matern_kernel import default_num 8 | from geometric_kernels.spaces import NoncompactSymmetricSpace 9 | from geometric_kernels.utils.utils import make_deterministic 10 | 11 | from ..helper import check_function_with_backend, create_random_state, spaces 12 | 13 | 14 | @pytest.fixture( 15 | params=spaces(), 16 | ids=str, 17 | ) 18 | def feature_map_and_friends(request, backend): 19 | """ 20 | Returns a tuple (feature_map, kernel, space) where: 21 | - feature_map is the `default_feature_map` of the `kernel`, 22 | - kernel is the `MaternGeometricKernel` on the `space`, with a 
reasonably 23 | small value of `num`, 24 | - space = request.param, 25 | 26 | `backend` parameter is required to create a random state for the feature 27 | map, if it requires one. 28 | """ 29 | space = request.param 30 | 31 | if isinstance(space, NoncompactSymmetricSpace): 32 | kernel = MaternGeometricKernel( 33 | space, key=create_random_state(backend), num=min(default_num(space), 100) 34 | ) 35 | else: 36 | kernel = MaternGeometricKernel(space, num=min(default_num(space), 3)) 37 | 38 | feature_map = default_feature_map(kernel=kernel) 39 | if isinstance(feature_map, RandomPhaseFeatureMapCompact): 40 | # RandomPhaseFeatureMapCompact requires a key. Note: normally, 41 | # RandomPhaseFeatureMapNoncompact, RejectionSamplingFeatureMapHyperbolic, 42 | # and RejectionSamplingFeatureMapSPD also require a key, but when they 43 | # are obtained from an already constructed kernel's feature map, the key 44 | # is already provided and fixed in the similar way as we do just below. 45 | feature_map = make_deterministic(feature_map, key=create_random_state(backend)) 46 | 47 | return feature_map, kernel, space 48 | 49 | 50 | @pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"]) 51 | def test_feature_map_approximates_kernel(backend, feature_map_and_friends): 52 | feature_map, kernel, space = feature_map_and_friends 53 | 54 | params = kernel.init_params() 55 | 56 | key = np.random.RandomState(0) 57 | key, X = space.random(key, 50) 58 | 59 | def diff_kern_mats(params, X): 60 | _, embedding = feature_map(X, params) 61 | 62 | kernel_mat = kernel.K(params, X, X) 63 | kernel_mat_alt = B.matmul(embedding, B.T(embedding)) 64 | 65 | return kernel_mat - kernel_mat_alt 66 | 67 | # Check that, approximately, k(X, X) = , where k is the 68 | # kernel and phi is the feature map. 
@pytest.mark.parametrize("deg_freedom, n", [(2, 3), (5, 5), (42, 10)])
def test_student_t_sample(deg_freedom, n):
    """Compare our multivariate Student-t sampler against scipy's.

    Both samplers draw from the same standard multivariate-t distribution;
    a two-sample Kolmogorov-Smirnov test on a shared random 1D projection
    should then fail to reject the hypothesis of equal distributions.
    """
    num_samples = (2048,)
    key = np.random.RandomState(0)

    scale_matrix = 1.0 * np.eye(n)
    location = 1.0 * np.zeros((n,))

    # Note: the three draws below deliberately share (and advance) the same
    # random state, in this order.
    _, ours = student_t_sample(
        key, location, scale_matrix, np.array([1.0 * deg_freedom]), num_samples
    )
    theirs = multivariate_t.rvs(location, scale_matrix, deg_freedom, num_samples, key)

    # Project both samples onto the same random unit vector to reduce the
    # comparison to one dimension.
    direction = key.standard_normal(n)
    direction = direction / np.linalg.norm(direction)

    ks_result = ks_2samp(ours @ direction, theirs @ direction)
    assert ks_result.pvalue > 0.05
geometric_kernels.kernels.matern_kernel import default_feature_map, default_num 7 | 8 | from ..helper import ( 9 | check_function_with_backend, 10 | create_random_state, 11 | noncompact_symmetric_spaces, 12 | ) 13 | 14 | 15 | @pytest.fixture( 16 | params=noncompact_symmetric_spaces(), 17 | ids=str, 18 | scope="module", 19 | ) 20 | def inputs(request): 21 | """ 22 | Returns a tuple (space, num_features, feature_map, X, X2) where: 23 | - space = request.param, 24 | - num_features = default_num(space) or 15, whichever is smaller, 25 | - feature_map = default_feature_map(space=space, num=num_features), 26 | - X is a random sample of random size from the space, 27 | - X2 is another random sample of random size from the space, 28 | """ 29 | space = request.param 30 | num_features = min(default_num(space), 15) 31 | feature_map = default_feature_map(space=space, num=num_features) 32 | 33 | key = np.random.RandomState(0) 34 | N, N2 = key.randint(low=1, high=100 + 1, size=2) 35 | key, X = space.random(key, N) 36 | key, X2 = space.random(key, N2) 37 | 38 | return space, num_features, feature_map, X, X2 39 | 40 | 41 | @pytest.fixture 42 | def kernel(inputs, backend, normalize=True): 43 | space, _, feature_map, _, _ = inputs 44 | 45 | key = create_random_state(backend) 46 | 47 | return MaternFeatureMapKernel(space, feature_map, key, normalize=normalize) 48 | 49 | 50 | @pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"]) 51 | def test_params(inputs, backend, kernel): 52 | params = kernel.init_params() 53 | 54 | assert "lengthscale" in params 55 | assert params["lengthscale"].shape == (1,) 56 | assert "nu" in params 57 | assert params["nu"].shape == (1,) 58 | 59 | 60 | @pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"]) 61 | @pytest.mark.parametrize("normalize", [True, False], ids=["normalize", "no_normalize"]) 62 | def test_K(inputs, backend, normalize, kernel): 63 | _, _, _, X, X2 = inputs 64 | params = kernel.init_params() 65 | 
@pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"])
@pytest.mark.parametrize("normalize", [True, False], ids=["normalize", "no_normalize"])
def test_K_one_param(inputs, backend, normalize, kernel):
    """kernel.K called with a single argument must equal kernel.K(X, X)."""
    X = inputs[3]
    params = kernel.init_params()

    def one_vs_two_arg_diff(params, X):
        # Zero everywhere iff the one-argument form matches the two-argument one.
        return kernel.K(params, X) - kernel.K(params, X, X)

    check_function_with_backend(
        backend,
        np.zeros((X.shape[0], X.shape[0])),
        one_vs_two_arg_diff,
        params,
        X,
    )
@pytest.mark.parametrize(
    "factor1, factor2", [(Circle(), Circle()), (Circle(), SpecialUnitary(2))], ids=str
)
@pytest.mark.parametrize("nu, lengthscale", [(1 / 2, 2.0), (5 / 2, 1.0), (np.inf, 0.1)])
@pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"])
def test_kernel_is_product_of_heat_kernels(factor1, factor2, nu, lengthscale, backend):
    """ProductGeometricKernel (without ARD) must coincide with the elementwise
    product of the factor kernels evaluated on the factor inputs."""
    key = np.random.RandomState(0)
    key, xs_factor1 = factor1.random(key, 10)
    key, xs_factor2 = factor2.random(key, 10)

    kernel_factor1 = MaternGeometricKernel(factor1)
    kernel_factor2 = MaternGeometricKernel(factor2)
    product_kernel = ProductGeometricKernel(kernel_factor1, kernel_factor2)

    def K_diff(nu, lengthscale, xs_factor1, xs_factor2):
        # Difference between the product-space kernel matrix and the
        # elementwise product of the factor kernel matrices.
        params = {"nu": nu, "lengthscale": lengthscale}

        xs_product = make_product([xs_factor1, xs_factor2])

        K_product = product_kernel.K(params, xs_product, xs_product)
        K_factor1 = kernel_factor1.K(params, xs_factor1, xs_factor1)
        K_factor2 = kernel_factor2.K(params, xs_factor2, xs_factor2)

        return K_product - K_factor1 * K_factor2

    # Check that ProductGeometricKernel without ARD coincides with the product
    # of the respective factor kernels. The difference matrix is square with
    # one row/column per product-space point. Bug fix: the expected shape
    # previously used xs_factor2.shape[0] for the second axis, which only
    # worked because both samples happened to have the same size (10).
    num_points = xs_factor1.shape[0]
    check_function_with_backend(
        backend,
        np.zeros((num_points, num_points)),
        K_diff,
        np.array([nu]),
        np.array([lengthscale]),
        xs_factor1,
        xs_factor2,
    )
@pytest.mark.parametrize(
    "fun, cls",
    [
        (
            hodge_discrete_spectrum_spaces,
            geometric_kernels.spaces.HodgeDiscreteSpectrumSpace,
        ),
        (compact_matrix_lie_groups, geometric_kernels.spaces.CompactMatrixLieGroup),
        (
            product_discrete_spectrum_spaces,
            geometric_kernels.spaces.ProductDiscreteSpectrumSpace,
        ),
        (discrete_spectrum_spaces, geometric_kernels.spaces.DiscreteSpectrumSpace),
        (
            noncompact_symmetric_spaces,
            geometric_kernels.spaces.NoncompactSymmetricSpace,
        ),
        (spaces, geometric_kernels.spaces.Space),
    ],
)
def test_all_discrete_spectrum_spaces_covered(fun, cls):
    """Every non-abstract subclass of `cls` exported by the
    geometric_kernels.spaces module must be represented by at least one
    instance in the list returned by the helper `fun`."""
    instances = fun()

    for cls_name, cls_obj in inspect.getmembers(geometric_kernels.spaces):
        # Only non-abstract classes that are subclasses of `cls` are relevant.
        if not inspect.isclass(cls_obj):
            continue
        if not issubclass(cls_obj, cls) or inspect.isabstract(cls_obj):
            continue
        covered = any(isinstance(instance, cls_obj) for instance in instances)
        assert (
            covered
        ), f"An instance of the class `{cls_name}` is missing from the list returned by the function `{fun.__name__}`"
B.abs(all_distances) 40 | result = B.to_numpy(B.sum(analytic_kernel(all_distances), axis=2)) 41 | 42 | kernel = MaternGeometricKernel(Circle()) 43 | 44 | # Check that MaternGeometricKernel on Circle() coincides with the 45 | # periodic summation of the respective Euclidean Matérn kernel. 46 | check_function_with_backend( 47 | backend, 48 | result, 49 | kernel.K, 50 | {"nu": np.array([nu]), "lengthscale": np.array([1.0])}, 51 | inputs, 52 | inputs2, 53 | atol=atol, 54 | ) 55 | -------------------------------------------------------------------------------- /tests/spaces/test_eigenvalues.py: -------------------------------------------------------------------------------- 1 | """ 2 | Note: We don't use `check_function_with_backend` throughout this module because 3 | eigenvalues are always represented by numpy arrays, regardless of the backend 4 | used for other routines. 5 | """ 6 | 7 | import numpy as np 8 | import pytest 9 | 10 | from geometric_kernels.kernels.matern_kernel import default_num 11 | 12 | from ..helper import discrete_spectrum_spaces 13 | 14 | 15 | @pytest.fixture( 16 | params=discrete_spectrum_spaces(), 17 | ids=str, 18 | ) 19 | def inputs(request): 20 | """ 21 | Returns a tuple (space, num_levels, eigenvalues) where: 22 | - space = request.param, 23 | - num_levels is the default number of levels for the `space`, if it does not 24 | exceed 100, and 100 otherwise, 25 | - eigenvalues = space.get_eigenvalues(num_levels), 26 | - eps, a small number, a technicality for using `assert_array_less`. 27 | """ 28 | space = request.param 29 | num_levels = min(default_num(space), 100) 30 | eigenvalues = space.get_eigenvalues(num_levels) 31 | eps = 1e-5 32 | 33 | return space, num_levels, eigenvalues, eps 34 | 35 | 36 | def test_shape(inputs): 37 | _, num_levels, eigenvalues, _ = inputs 38 | 39 | # Check that the eigenvalues have appropriate shape. 
40 | assert eigenvalues.shape == (num_levels, 1) 41 | 42 | 43 | def test_positive(inputs): 44 | _, _, eigenvalues, eps = inputs 45 | 46 | # Check that the eigenvalues are nonnegative. 47 | np.testing.assert_array_less(np.zeros_like(eigenvalues), eigenvalues + eps) 48 | 49 | 50 | def test_ordered(inputs): 51 | _, _, eigenvalues, eps = inputs 52 | 53 | # Check that the eigenvalues are sorted in ascending order. 54 | np.testing.assert_array_less(eigenvalues[:-1], eigenvalues[1:] + eps) 55 | -------------------------------------------------------------------------------- /tests/spaces/test_graph.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | 3 | import lab as B 4 | import numpy as np 5 | import pytest 6 | 7 | from geometric_kernels.jax import * # noqa 8 | from geometric_kernels.kernels import MaternGeometricKernel 9 | from geometric_kernels.spaces import Graph 10 | from geometric_kernels.tensorflow import * # noqa 11 | from geometric_kernels.torch import * # noqa 12 | 13 | from ..data import ( 14 | TEST_GRAPH_ADJACENCY, 15 | TEST_GRAPH_LAPLACIAN, 16 | TEST_GRAPH_LAPLACIAN_NORMALIZED, 17 | ) 18 | from ..helper import check_function_with_backend, np_to_backend 19 | 20 | warnings.filterwarnings("ignore", category=RuntimeWarning, module="scipy") 21 | 22 | A = TEST_GRAPH_ADJACENCY 23 | L = TEST_GRAPH_LAPLACIAN 24 | 25 | 26 | @pytest.mark.parametrize("normalized", [True, False]) 27 | @pytest.mark.parametrize( 28 | "backend", ["numpy", "tensorflow", "torch", "jax", "scipy_sparse"] 29 | ) 30 | def test_laplacian(normalized, backend): 31 | 32 | # Check that the Laplacian is computed correctly. 
@pytest.mark.parametrize(
    "L", [TEST_GRAPH_ADJACENCY.shape[0], TEST_GRAPH_ADJACENCY.shape[0] // 2]
)
@pytest.mark.parametrize("normalized", [True, False])
@pytest.mark.parametrize(
    "backend", ["numpy", "tensorflow", "torch", "jax", "scipy_sparse"]
)
def test_eigendecomposition(L, normalized, backend):
    """Check that Graph.get_eigenvalues/get_eigenvectors return genuine
    eigenpairs of the (possibly normalized) Laplacian, i.e. that
    laplacian @ V == V @ diag(lambda) for the first L eigenpairs.

    Note: here L is the number of requested eigenpairs, not the Laplacian.
    """
    # Reference Laplacian, converted to the backend under test.
    laplacian = np_to_backend(
        TEST_GRAPH_LAPLACIAN if not normalized else TEST_GRAPH_LAPLACIAN_NORMALIZED,
        backend,
    )

    def eigendiff(adj):
        # Build the Graph space from the adjacency and compute the residual
        # laplacian @ V - V @ diag(lambda), which must be (numerically) zero.
        graph = Graph(adj, normalize_laplacian=normalized)

        eigenvalue_mat = B.diag_construct(graph.get_eigenvalues(L)[:, 0])
        eigenvectors = graph.get_eigenvectors(L)
        # If the backend is scipy_sparse, convert eigenvalues/eigenvectors,
        # which are always supposed to be dense arrays, to sparse arrays.
        if backend == "scipy_sparse":
            import scipy.sparse as sp

            eigenvalue_mat = sp.csr_array(eigenvalue_mat)
            eigenvectors = sp.csr_array(eigenvectors)

        laplace_x_eigvecs = laplacian @ eigenvectors
        eigvals_x_eigvecs = eigenvectors @ eigenvalue_mat
        return laplace_x_eigvecs - eigvals_x_eigvecs

    # Check that the computed eigenpairs are actually eigenpairs
    check_function_with_backend(
        backend,
        np.zeros((TEST_GRAPH_ADJACENCY.shape[0], L)),
        eigendiff,
        TEST_GRAPH_ADJACENCY,
    )
@pytest.mark.parametrize("nu, lengthscale", [(1.0, 1.0), (2.0, 1.0), (np.inf, 1.0)])
@pytest.mark.parametrize("sparse_adj", [True, False])
@pytest.mark.parametrize("normalized", [True, False])
# The kernels never take sparse parameters and never output sparse matrices,
# thus we don't test scipy_sparse here. The fact that the adjacency matrix
# may be sparse is tested when sparse_adj is True.
@pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"])
def test_matern_kernels(nu, lengthscale, sparse_adj, normalized, backend):
    """Compare MaternGeometricKernel on a graph against the closed-form
    spectral expression built from a reference eigendecomposition."""
    laplacian = (
        TEST_GRAPH_LAPLACIAN_NORMALIZED if normalized else TEST_GRAPH_LAPLACIAN
    )

    # Reference eigendecomposition; eigenvectors scaled by sqrt(num nodes),
    # matching the library's normalization convention.
    evals_np, evecs_np = np.linalg.eigh(laplacian)
    evecs_np *= np.sqrt(laplacian.shape[0])

    def evaluate_kernel(adj, nu, lengthscale):
        dtype = B.dtype(adj)
        if sparse_adj:
            # Exercise the sparse-adjacency code path.
            adj = np_to_backend(B.to_numpy(adj), "scipy_sparse")
        graph = Graph(adj, normalize_laplacian=normalized)
        kernel = MaternGeometricKernel(graph)
        return kernel.K(
            {"nu": nu, "lengthscale": lengthscale},
            B.range(dtype, adj.shape[0])[:, None],
        )

    # Closed-form kernel matrix: Matern spectral density for finite nu,
    # heat-kernel density for nu = inf.
    if nu < np.inf:
        spectral_density = np.power(evals_np + 2 * nu / lengthscale**2, -nu)
    else:
        spectral_density = np.exp(-(lengthscale**2) / 2 * evals_np)
    K = evecs_np @ np.diag(spectral_density) @ evecs_np.T
    # Normalize so that the average variance is 1, as the kernel does.
    K = K / np.mean(K.diagonal())

    # Check that the kernel matrix is correctly computed.
    check_function_with_backend(
        backend,
        K,
        evaluate_kernel,
        TEST_GRAPH_ADJACENCY,
        np.array([nu]),
        np.array([lengthscale]),
    )
@pytest.mark.parametrize("dim", [2, 3, 5, 7])
@pytest.mark.parametrize("lengthscale", [2.0])
@pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"])
def test_equivalence_kernel(dim, lengthscale, backend):
    """MaternGeometricKernel on Hyperbolic(dim) with nu=inf should coincide
    with the analytic heat-kernel formula (odd dim) or the semi-analytic
    one (even dim). The comparison is on average: RMS difference between
    the two covariance matrices."""
    space = Hyperbolic(dim)

    key = np.random.RandomState(0)
    key, X = space.random(key, 6)
    X2 = X.copy()

    # Heat-kernel time corresponding to the given length scale.
    t = lengthscale * lengthscale / 2
    if dim % 2 == 1:
        result = hyperbolic_heat_kernel_odd(dim, t, X, X2)
    else:
        result = hyperbolic_heat_kernel_even(dim, t, X, X2)

    kernel = MaternGeometricKernel(space, key=create_random_state(backend))

    def compare_to_result(res, f_out):
        # RMS deviation between the reference and the kernel output.
        rms = np.linalg.norm(res - B.to_numpy(f_out)) / np.sqrt(
            res.shape[0] * res.shape[1]
        )
        return rms < 1e-1

    check_function_with_backend(
        backend,
        result,
        kernel.K,
        {"nu": np.array([np.inf]), "lengthscale": np.array([lengthscale])},
        X,
        X2,
        compare_to_result=compare_to_result,
    )


@pytest.fixture(params=[1, 2, 3, 5, 10])
def inputs(request) -> Tuple[B.Numeric]:
    """
    Returns a tuple (space, eigenfunctions, X, X2, weights) where:
    - space is a HypercubeGraph object with dimension equal to request.param,
    - eigenfunctions is the respective Eigenfunctions object with at most 5 levels,
    - X is a random sample of random size from the space,
    - X2 is another random sample of random size from the space,
    - weights is an array of positive numbers of shape (eigenfunctions.num_levels, 1).
    """
    d = request.param
    space = HypercubeGraph(d)
    # At most 5 levels; small hypercubes only have space.dim + 1 levels total.
    eigenfunctions = space.get_eigenfunctions(min(space.dim + 1, 5))

    key = np.random.RandomState(0)
    N, N2 = key.randint(low=1, high=min(2**d, 10) + 1, size=2)
    key, X = space.random(key, N)
    key, X2 = space.random(key, N2)

    # These weights are used for testing the weighted outerproduct; they
    # should be positive.
    weights = np.random.rand(eigenfunctions.num_levels, 1) ** 2 + 1e-5

    return space, eigenfunctions, X, X2, weights


def test_numbers_of_eigenfunctions(inputs):
    space, eigenfunctions, _, _, _ = inputs

    # With the maximal number of levels, the eigenfunctions must span all
    # functions on the hypercube: there are 2**dim of them.
    if eigenfunctions.num_levels == space.dim + 1:
        assert eigenfunctions.num_eigenfunctions == 2**space.dim


@pytest.mark.parametrize("lengthscale", [1.0, 5.0, 10.0])
@pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"])
def test_against_analytic_heat_kernel(inputs, lengthscale, backend):
    """MaternGeometricKernel on HypercubeGraph with nu=inf should coincide
    with the closed-form heat kernel on the hypercube graph."""
    space, _, X, X2, _ = inputs
    lengthscale = np.array([lengthscale])
    result = hypercube_graph_heat_kernel(lengthscale, X, X2)

    kernel = MaternGeometricKernel(space)

    check_function_with_backend(
        backend,
        result,
        kernel.K,
        {"nu": np.array([np.inf]), "lengthscale": lengthscale},
        X,
        X2,
        atol=1e-2,
    )


def test_sphere_heat_kernel():
    # Tests that the heat kernel on the sphere solves the heat equation.
    # This test only uses torch, as lab doesn't support backend-independent
    # autodiff.
    import torch

    import geometric_kernels.torch  # noqa
    from geometric_kernels.kernels import MaternKarhunenLoeveKernel
    from geometric_kernels.utils.manifold_utils import manifold_laplacian

    _TRUNCATION_LEVEL = 10

    # Parameters
    grid_size = 4
    nb_samples = 10
    dimension = 3

    # Create manifold
    hypersphere = Hypersphere(dim=dimension)

    # Time grid and random points on the sphere, both requiring gradients.
    ts = torch.linspace(0.1, 1, grid_size, requires_grad=True)
    xs = torch.tensor(
        np.array(hypersphere.random_point(nb_samples)), requires_grad=True
    )
    ys = xs

    # Define kernel
    kernel = MaternKarhunenLoeveKernel(hypersphere, _TRUNCATION_LEVEL, normalize=False)
    params = kernel.init_params()
    params["nu"] = torch.tensor([torch.inf])

    def heat_kernel(t, x, y):
        # nu=inf Matern with lengthscale sqrt(2 t) is the heat kernel at time t.
        params["lengthscale"] = B.reshape(B.sqrt(2 * t), 1)
        return kernel.K(params, x, y)

    for t in ts:
        for x in xs:
            for y in ys:
                # Compute the derivative of the kernel function wrt t.
                dfdt, _, _ = torch.autograd.grad(
                    heat_kernel(t, x[None], y[None]), (t, x, y)
                )
                # Euclidean gradient and Hessian-vector product, used to
                # assemble the Laplacian of the kernel on the manifold.
                egrad = lambda u: torch.autograd.grad(  # noqa
                    heat_kernel(t, u[None], y[None]), (t, u, y)
                )[1]
                fx = lambda u: heat_kernel(t, u[None], y[None])  # noqa
                ehess = lambda u, h: torch.autograd.functional.hvp(fx, u, h)[1]  # noqa
                lapf = manifold_laplacian(x, hypersphere, egrad, ehess)

                # Heat equation: d/dt k == Laplacian k.
                np.testing.assert_allclose(dfdt.detach().numpy(), lapf, atol=1e-3)


@pytest.fixture(
    params=compact_matrix_lie_groups(),
    ids=str,
)
def inputs(request):
    """
    Returns a tuple (space, eigenfunctions, X, X2) where:
    - space = request.param,
    - eigenfunctions = space.get_eigenfunctions(num_levels), with reasonable num_levels,
    - X is a random sample of random size from the space,
    - X2 is another random sample of random size from the space.
    """
    space = request.param
    num_levels = min(10, default_num(space))
    eigenfunctions = space.get_eigenfunctions(num_levels)

    key = np.random.RandomState(0)
    N, N2 = key.randint(low=1, high=100 + 1, size=2)
    key, X = space.random(key, N)
    key, X2 = space.random(key, N2)

    return space, eigenfunctions, X, X2


def get_dtype(group):
    """Return the numpy dtype of the group's matrix entries:
    real doubles for SO(n), complex doubles for SU(n)."""
    if isinstance(group, SpecialOrthogonal):
        return np.double
    elif isinstance(group, SpecialUnitary):
        return np.cdouble
    else:
        raise ValueError()
@pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"])
def test_group_inverse(inputs, backend):
    """X @ inverse(X) must be the identity for every sampled group element."""
    group, _, X, _ = inputs

    identity = np.eye(group.n, dtype=get_dtype(group))
    expected = np.broadcast_to(identity, (X.shape[0], group.n, group.n))

    check_function_with_backend(
        backend,
        expected,
        lambda X: B.matmul(X, group.inverse(X)),
        X,
    )


@pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"])
def test_character_conj_invariant(inputs, backend):
    """Characters are class functions: chi(g x g^{-1}) == chi(x)."""
    group, eigenfunctions, X, G = inputs

    # Truncate X and G to have the same length.
    n_xs = min(X.shape[0], G.shape[0])
    X = X[:n_xs, :, :]
    G = G[:n_xs, :, :]

    def gammas_diff(X, G, chi):
        # Conjugate each x by the corresponding g and compare character values.
        conjugates = B.matmul(B.matmul(G, X), group.inverse(G))
        conj_gammas = eigenfunctions._torus_representative(conjugates)
        xs_gammas = eigenfunctions._torus_representative(X)
        return chi(xs_gammas) - chi(conj_gammas)

    for chi in eigenfunctions._characters:
        check_function_with_backend(
            backend,
            np.zeros((n_xs,)),
            lambda X, G: gammas_diff(X, G, chi),
            X,
            G,
            atol=1e-3,
        )


@pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"])
def test_character_at_identity(inputs, backend):
    """chi(e) equals the dimension of the representation, and is real."""
    group, eigenfunctions, _, _ = inputs

    identity = np.eye(group.n, dtype=get_dtype(group))[None, ...]
    for chi, dim in zip(eigenfunctions._characters, eigenfunctions._dimensions):
        # Real part equals the representation dimension.
        check_function_with_backend(
            backend,
            np.array([dim], dtype=get_dtype(group)),
            lambda X: B.real(chi(eigenfunctions._torus_representative(X))),
            identity,
        )
        # Imaginary part vanishes at the identity.
        check_function_with_backend(
            backend,
            np.array([0], dtype=get_dtype(group)),
            lambda X: B.imag(chi(eigenfunctions._torus_representative(X))),
            identity,
        )
@pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"])
def test_characters_orthogonal(inputs, backend):
    """Characters of irreps are orthonormal w.r.t. the Haar measure,
    approximated here by a Monte Carlo average over group samples."""
    group, eigenfunctions, _, _ = inputs

    num_samples = 10000
    key = np.random.RandomState(0)
    _, X = group.random(key, num_samples)

    def all_char_vals(X):
        # [num_samples, num_levels] matrix of character values.
        gammas = eigenfunctions._torus_representative(X)
        values = [
            chi(gammas)[..., None]  # [num_samples, 1]
            for chi in eigenfunctions._characters
        ]
        return B.concat(*values, axis=-1)

    check_function_with_backend(
        backend,
        np.eye(eigenfunctions.num_levels, dtype=get_dtype(group)),
        lambda X: complex_conj(B.T(all_char_vals(X))) @ all_char_vals(X) / num_samples,
        X,
        atol=0.4,  # very loose, but helps make sure the diagonal is close to 1 while the rest is close to 0
    )


@pytest.fixture()
def mesh() -> Mesh:
    """Load the teddy mesh shipped with the test data."""
    filename = Path(__file__).parent / "../teddy.obj"
    return Mesh.load_mesh(str(filename))


def test_mesh_shapes():
    Nv = 11  # num vertices
    Nf = 13  # num faces
    dim = 3  # ambient dimension
    vertices = np.random.randn(Nv, dim)
    faces = np.random.randint(0, Nv, size=(Nf, 3))
    mesh = Mesh(vertices=vertices, faces=faces)
    assert mesh.vertices.shape == (Nv, dim)
    assert mesh.faces.shape == (Nf, 3)


def test_read_mesh(mesh: Mesh):
    assert mesh.vertices.shape == (mesh.num_vertices, mesh.dimension + 1)
    assert mesh.faces.shape == (mesh.num_faces, 3)


def test_eigenvalues(mesh: Mesh):
    # Eigenvalues come back as a column vector.
    assert mesh.get_eigenvalues(10).shape == (10, 1)
    assert mesh.get_eigenvalues(13).shape == (13, 1)


def test_eigenvectors(mesh: Mesh):
    assert mesh.get_eigenvectors(10).shape == (mesh.num_vertices, 10)
    assert mesh.get_eigenvectors(13).shape == (mesh.num_vertices, 13)
    # Both eigenvector computations should now be cached.
    assert set(mesh.cache.keys()) == set([10, 13])


@pytest.mark.parametrize(
    "factor1, factor2", [(Circle(), Circle()), (Circle(), SpecialUnitary(2))], ids=str
)
@pytest.mark.parametrize("lengthscale", [0.1, 0.5, 1.0, 2.0, 5.0])
@pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"])
def test_heat_kernel_is_product_of_heat_kernels(factor1, factor2, lengthscale, backend):
    """The heat kernel on a ProductDiscreteSpectrumSpace must factorize into
    the elementwise product of the factors' heat kernels."""
    product = ProductDiscreteSpectrumSpace(
        factor1, factor2, num_levels=_NUM_LEVELS**2, num_levels_per_space=_NUM_LEVELS
    )

    key = np.random.RandomState(0)
    key, xs_factor1 = factor1.random(key, 10)
    key, xs_factor2 = factor2.random(key, 10)

    kernel_product = MaternGeometricKernel(product, num=_NUM_LEVELS**2)
    kernel_factor1 = MaternGeometricKernel(factor1, num=_NUM_LEVELS)
    kernel_factor2 = MaternGeometricKernel(factor2, num=_NUM_LEVELS)

    def K_diff(nu, lengthscale, xs_factor1, xs_factor2):
        params = {"nu": nu, "lengthscale": lengthscale}

        xs_product = make_product([xs_factor1, xs_factor2])

        # The difference must vanish if the product kernel factorizes.
        K_product = kernel_product.K(params, xs_product, xs_product)
        K_factor1 = kernel_factor1.K(params, xs_factor1, xs_factor1)
        K_factor2 = kernel_factor2.K(params, xs_factor2, xs_factor2)
        return K_product - K_factor1 * K_factor2

    check_function_with_backend(
        backend,
        np.zeros((xs_factor1.shape[0], xs_factor2.shape[0])),
        K_diff,
        np.array([np.inf]),
        np.array([lengthscale]),
        xs_factor1,
        xs_factor2,
    )


@pytest.mark.parametrize("lengthscale", [2.0])
@pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"])
def test_equivalence_kernel(lengthscale, backend):
    """MaternGeometricKernel on SymmetricPositiveDefiniteMatrices(2) with
    nu=inf should coincide with the semi-analytic heat-kernel formula from
    :cite:t:`sawyer1992`, on average (RMS over the covariance matrices)."""
    space = SymmetricPositiveDefiniteMatrices(2)

    key = np.random.RandomState(0)
    key, X = space.random(key, 5)
    X2 = X.copy()

    # Heat-kernel time corresponding to the given length scale.
    t = lengthscale * lengthscale / 2
    result = spd_heat_kernel_2x2(t, X, X2)

    kernel = MaternGeometricKernel(space, key=create_random_state(backend))

    def compare_to_result(res, f_out):
        rms = np.linalg.norm(res - B.to_numpy(f_out)) / np.sqrt(
            res.shape[0] * res.shape[1]
        )
        return rms < 1e-1

    check_function_with_backend(
        backend,
        result,
        kernel.K,
        {"nu": np.array([np.inf]), "lengthscale": np.array([lengthscale])},
        X,
        X2,
        compare_to_result=compare_to_result,
    )


@pytest.mark.parametrize("d", [1, 5, 10])
@pytest.mark.parametrize("lengthscale", [1.0, 5.0, 10.0])
@pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"])
def test_hypercube_graph_heat_kernel(d, lengthscale, backend):
    """The heat kernel on the hypercube graph (unnormalized Laplacian) equals
    the RBF kernel restricted to binary vectors with gamma = -log(tanh(l^2/2)),
    and it factorizes over sub-hypercubes."""
    space = HypercubeGraph(d)

    key = np.random.RandomState(0)
    N, N2 = key.randint(low=1, high=min(2**d, 10) + 1, size=2)
    key, X = space.random(key, N)
    key, X2 = space.random(key, N2)

    gamma = -log(tanh(lengthscale**2 / 2))
    result = rbf_kernel(X, X2, gamma=gamma)

    def heat_kernel(lengthscale, X, X2):
        return hypercube_graph_heat_kernel(
            lengthscale, X, X2, normalized_laplacian=False
        )

    # Heat kernel == RBF with redefined length scale on binary inputs.
    check_function_with_backend(
        backend,
        result,
        heat_kernel,
        np.array([lengthscale]),
        X,
        X2,
        atol=1e-2,
    )

    if d > 5:
        # Split the first point pair's coordinates into two sub-hypercubes.
        X_first = X[0:1, :3]
        X2_first = X2[0:1, :3]
        X_second = X[0:1, 3:]
        X2_second = X2[0:1, 3:]

        K_first = hypercube_graph_heat_kernel(
            np.array([lengthscale]), X_first, X2_first, normalized_laplacian=False
        )
        K_second = hypercube_graph_heat_kernel(
            np.array([lengthscale]), X_second, X2_second, normalized_laplacian=False
        )

        # The heat kernel of the product equals the product of heat kernels.
        check_function_with_backend(
            backend,
            K_first * K_second,
            heat_kernel,
            np.array([lengthscale]),
            X[0:1, :],
            X2[0:1, :],
        )
@pytest.fixture(params=[1, 2, 3, 5, 10])
def all_xs_and_combs(request):
    """
    Returns a tuple (d, X, combs) where:
    - d is an integer equal to request.param,
    - X is a 2**d x d boolean matrix with all possible binary vectors of length d,
    - combs is a list of all possible combinations of indices of X.
    """
    d = request.param
    X, combs = binary_vectors_and_subsets(d)
    return d, X, combs


def walsh_matrix(d, combs, X):
    """Stack the Walsh functions indexed by combs, evaluated at the rows of X."""
    return B.stack(*[walsh_function(d, comb, X) for comb in combs])


@pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"])
def test_walsh_functions(all_xs_and_combs, backend):
    d, X, combs = all_xs_and_combs

    # Check that Walsh functions are orthogonal: W @ W.T == 2**d * I.
    check_function_with_backend(
        backend,
        2**d * np.eye(2**d),
        lambda X: B.matmul(walsh_matrix(d, combs, X), B.T(walsh_matrix(d, combs, X))),
        X,
    )

    # Check that Walsh functions only take values in the set {-1, 1}.
    check_function_with_backend(
        backend,
        np.ones((2**d, 2**d)),
        lambda X: B.abs(walsh_matrix(d, combs, X)),
        X,
    )


@pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"])
def test_kravchuk_polynomials(all_xs_and_combs, backend):
    d, X, combs = all_xs_and_combs

    x0 = np.zeros((1, d), dtype=bool)

    cur_ind = 0
    for j in range(d + 1):
        num_walsh = comb(d, j)

        # Sum of all level-j Walsh functions, evaluated at the rows of X.
        result = np.sum(
            walsh_matrix(d, combs, X)[cur_ind : cur_ind + num_walsh, :],
            axis=0,
            keepdims=True,
        )

        def krav(x0, X):
            return comb(d, j) * kravchuk_normalized(d, j, hamming_distance(x0, X))

        # Checks that Kravchuk polynomials coincide with certain sums of
        # the Walsh functions.
        check_function_with_backend(
            backend,
            result,
            krav,
            x0,
            X,
        )

        cur_ind += num_walsh


@pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"])
def test_kravchuk_precomputed(all_xs_and_combs, backend):
    d, X, _ = all_xs_and_combs

    x0 = np.zeros((1, d), dtype=bool)

    kravchuk_normalized_j_minus_1, kravchuk_normalized_j_minus_2 = None, None
    for j in range(d + 1):

        cur_kravchuk_normalized = kravchuk_normalized(d, j, hamming_distance(x0, X))

        def krav(x0, X, kn1, kn2):
            # Same polynomial, computed via the two-term recursion using the
            # precomputed previous-order values.
            return kravchuk_normalized(d, j, hamming_distance(x0, X), kn1, kn2)

        # Checks that the recursion with precomputed values reproduces the
        # directly computed polynomial.
        check_function_with_backend(
            backend,
            cur_kravchuk_normalized,
            krav,
            x0,
            X,
            kravchuk_normalized_j_minus_1,
            kravchuk_normalized_j_minus_2,
        )

        kravchuk_normalized_j_minus_2 = kravchuk_normalized_j_minus_1
        kravchuk_normalized_j_minus_1 = cur_kravchuk_normalized


@pytest.mark.parametrize("backend", ["numpy", "tensorflow", "torch", "jax"])
def test_hamming_distance(backend):
    """Check hamming_distance against hand-computed distance matrices."""
    X = np.array([[1, 0, 1]], dtype=bool)

    X2 = np.array([[0, 0, 1]], dtype=bool)

    # Check that hamming_distance gives the correct results for the given inputs.
    check_function_with_backend(backend, np.array([[1]]), hamming_distance, X, X2)
    check_function_with_backend(backend, np.array([[0]]), hamming_distance, X, X)
    check_function_with_backend(backend, np.array([[0]]), hamming_distance, X2, X2)
    check_function_with_backend(backend, np.array([[1]]), hamming_distance, X2, X)

    X = np.asarray(
        [
            [0, 0, 0, 1, 0],
            [1, 0, 0, 0, 0],
            [1, 1, 1, 1, 0],
            [1, 1, 1, 1, 1],
            [0, 1, 0, 1, 0],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0],
            [0, 0, 0, 0, 0],
        ],
        dtype=bool,
    )

    X2 = np.asarray(
        [
            [1, 1, 1, 0, 0],
            [1, 0, 1, 0, 0],
            [1, 0, 1, 0, 1],
        ],
        dtype=bool,
    )

    ham_X_X2 = np.asarray(
        [
            [4, 3, 4],
            [2, 1, 2],
            [1, 2, 3],
            [2, 3, 2],
            [3, 4, 5],
            [3, 2, 3],
            [3, 2, 3],
            [3, 2, 3],
        ],
        dtype=int,
    )

    ham_X_X = np.asarray(
        [
            [0, 2, 3, 4, 1, 1, 1, 1],
            [2, 0, 3, 4, 3, 1, 1, 1],
            [3, 3, 0, 1, 2, 4, 4, 4],
            [4, 4, 1, 0, 3, 5, 5, 5],
            [1, 3, 2, 3, 0, 2, 2, 2],
            [1, 1, 4, 5, 2, 0, 0, 0],
            [1, 1, 4, 5, 2, 0, 0, 0],
            [1, 1, 4, 5, 2, 0, 0, 0],
        ],
        dtype=int,
    )

    ham_X2_X2 = np.asarray(
        [
            [0, 1, 2],
            [1, 0, 1],
            [2, 1, 0],
        ],
        dtype=int,
    )

    # Check that hamming_distance gives the correct results for more given inputs.
    check_function_with_backend(backend, ham_X_X2, hamming_distance, X, X2)
    check_function_with_backend(backend, ham_X_X, hamming_distance, X, X)
    check_function_with_backend(backend, ham_X2_X2, hamming_distance, X2, X2)
    check_function_with_backend(backend, ham_X_X2.T, hamming_distance, X2, X)


@pytest.mark.parametrize("n", [0, 1, 2, 3, 4, 5])
def test_log_binomial(n):
    """log_binomial(n, k) must equal log(C(n, k)).

    Uses the stdlib math.comb: the original np.math.comb relied on the
    np.math alias, which was deprecated in NumPy 1.25 and removed in
    NumPy 2.0 (AttributeError there).
    """
    from math import comb as math_comb

    for k in range(n + 1):
        # Check that log_binomial gives the same result as the log of the
        # binomial coefficient.
        assert np.isclose(np.log(math_comb(n, k)), log_binomial(n, k), atol=1e-10)


@pytest.mark.parametrize("d", [0, 1, 2, 3, 5, 10])
def test_binary_vectors_and_subsets(d):
    """binary_vectors_and_subsets(d) must enumerate all 2**d binary vectors
    together with the index subsets of their nonzero positions."""
    X, subsets = binary_vectors_and_subsets(d)

    # Check the returned values have the correct types.
    assert isinstance(X, np.ndarray)
    assert isinstance(subsets, list)

    # Check the returned values have the correct shapes.
    assert X.shape == (2**d, d)
    assert X.dtype == bool
    assert len(subsets) == 2**d

    # Check that all x[i, :] are different and that they have ones at the
    # positions contained in subsets[i] and only there.
    for i in range(2**d):
        xi_alt = np.zeros(d, dtype=bool)
        assert isinstance(subsets[i], list)
        xi_alt[subsets[i]] = True
        assert np.all(X[i, :] == xi_alt)
        for j in range(i + 1, 2**d):
            assert np.any(X[i, :] != X[j, :])