├── benchmarks ├── __init__.py ├── test_tfga.py ├── README.md ├── graph.py ├── test_clifford.py └── generate.py ├── .gitattributes ├── tfga ├── __init__.py ├── cayley.py ├── mv_ops.py ├── blades.py ├── mv.py ├── layers.py └── tfga.py ├── notebooks ├── em_output │ ├── electric_field.webm │ └── electric_potential.webm ├── conv.ipynb ├── tfga.ipynb └── pga.ipynb ├── docs ├── index.rst └── conf.py ├── tests ├── test_autodiff.py ├── test_dual_cayley.py ├── test_pga.py ├── test_sta_cayley.py ├── test_dual_ga.py └── test_keras.py ├── LICENSE ├── .github └── workflows │ └── earthly.yml ├── pyproject.toml ├── Earthfile ├── .gitignore └── README.md /benchmarks/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto eol=lf -------------------------------------------------------------------------------- /tfga/__init__.py: -------------------------------------------------------------------------------- 1 | from tfga.tfga import GeometricAlgebra 2 | -------------------------------------------------------------------------------- /notebooks/em_output/electric_field.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobinKa/tfga/HEAD/notebooks/em_output/electric_field.webm -------------------------------------------------------------------------------- /notebooks/em_output/electric_potential.webm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RobinKa/tfga/HEAD/notebooks/em_output/electric_potential.webm -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to tfga's documentation! 2 | ================================ 3 | 4 | .. 
toctree:: 5 | :maxdepth: 2 6 | :caption: Contents: 7 | 8 | Indices and tables 9 | ================== 10 | 11 | * :ref:`genindex` 12 | * :ref:`modindex` 13 | * :ref:`search` 14 | -------------------------------------------------------------------------------- /benchmarks/test_tfga.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import tensorflow as tf 3 | 4 | from tfga import GeometricAlgebra 5 | 6 | 7 | def _tfga_add(a, b): 8 | return a + b 9 | 10 | 11 | def _tfga_mul(ga, a, b): 12 | return ga.geom_prod(a, b) 13 | 14 | 15 | @pytest.mark.parametrize( 16 | "num_elements", [1, 10, 100, 1_000, 10_000, 100_000, 1_000_000] 17 | ) 18 | def test_tfga_add_mv_mv(num_elements, benchmark): 19 | ga = GeometricAlgebra([1, -1, -1, -1]) 20 | a = tf.ones([num_elements, ga.num_blades]) 21 | b = tf.ones([num_elements, ga.num_blades]) 22 | benchmark(_tfga_add, a, b) 23 | 24 | 25 | @pytest.mark.parametrize( 26 | "num_elements", [1, 10, 100, 1_000, 10_000, 100_000, 1_000_000] 27 | ) 28 | def test_tfga_mul_mv_mv(num_elements, benchmark): 29 | ga = GeometricAlgebra([1, -1, -1, -1]) 30 | a = tf.ones([num_elements, ga.num_blades]) 31 | b = tf.ones([num_elements, ga.num_blades]) 32 | benchmark(_tfga_mul, ga, a, b) 33 | -------------------------------------------------------------------------------- /tests/test_autodiff.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | from tfga import GeometricAlgebra 3 | from tfga.blades import BladeKind 4 | 5 | algebra = GeometricAlgebra([1, 1, 1]) 6 | 7 | 8 | def get_rotor_loss(values): 9 | rotor = algebra.from_tensor_with_kind(values, BladeKind.BIVECTOR) 10 | s = algebra.geom_prod(rotor, algebra.reversion(rotor))[..., 0] 11 | return tf.reduce_sum(tf.math.square(s - 1)) 12 | 13 | 14 | def test_make_rotor(): 15 | rotor_values = tf.Variable([1, 2, 3], dtype=tf.float32) 16 | 17 | optimizer = tf.optimizers.Adam(1) 18 | 19 | def train_step(): 20 | with tf.GradientTape() as tape: 21 | tape.watch(rotor_values) 22 | loss = get_rotor_loss(rotor_values) 23 | 24 | grads = tape.gradient(loss, rotor_values) 25 | optimizer.apply_gradients(zip([grads], [rotor_values])) 26 | 27 | for _ in range(100): 28 | train_step() 29 | 30 | final_loss = get_rotor_loss(rotor_values) 31 | assert final_loss < 0.1 32 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 
2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | # -- Project information ----------------------------------------------------- 7 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 8 | import os 9 | import sys 10 | 11 | sys.path.insert(0, os.path.abspath(".")) 12 | project = "tfga" 13 | copyright = "2023, Robin 'Tora' Kahlow" 14 | author = "Robin 'Tora' Kahlow" 15 | 16 | # -- General configuration --------------------------------------------------- 17 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 18 | 19 | extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"] 20 | 21 | 22 | # -- Options for HTML output ------------------------------------------------- 23 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output 24 | 25 | html_theme = "sphinx_material" 26 | html_static_path = ["_static"] 27 | -------------------------------------------------------------------------------- /benchmarks/README.md: -------------------------------------------------------------------------------- 1 | # Benchmarks 2 | Here we compare the performance of TFGA (both CPU and GPU) against other libraries. The benchmark can be run using [generate.py](generate.py) and the plots created using [graph.py](graph.py). 3 | 4 | The following libraries were compared with: 5 | - [clifford](https://github.com/pygae/clifford) 6 | 7 | The benchmarks below were created with the following specs: 8 | - CPU: AMD Ryzen 7 2700X 9 | - GPU: Nvidia GTX 1070 10 | - RAM: 2x16GB (2800MHZ) 11 | - OS: Windows 10 Pro 1903 12 | 13 | And relevant libraries: 14 | - tfga: 0.1.10 15 | - tf-gpu-nightly: 2.3.0-dev20200515 16 | - clifford: 1.3.0 17 | - numpy (mkl): 1.18.1 18 | - numba: 0.49.1 19 | 20 | Also the environment variable `MKL_DEBUG_CPU_TYPE` was set to `5` in order to disable [crippling of AMD CPUs by MKL](https://www.reddit.com/r/MachineLearning/comments/f2pbvz/discussion_workaround_for_mkl_on_amd/). 21 | 22 | ### Addition A + B, Algebra=STA, A=Full Multivector, B=Full Multivector 23 | ![](output/add-mv-mv.svg) 24 | 25 | ### Geometric Product A * B, Algebra=STA, A=Full Multivector, B=Full Multivector 26 | ![](output/mul-mv-mv.svg) -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Robin Kahlow 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /tests/test_dual_cayley.py: -------------------------------------------------------------------------------- 1 | import unittest as ut 2 | 3 | from tfga.cayley import blades_from_bases, get_cayley_tensor 4 | 5 | dual_metric = [0] 6 | dual_bases = ["0"] 7 | dual_blades = ["", "0"] 8 | dual_blade_degrees = [len(blade) for blade in dual_blades] 9 | 10 | 11 | class TestDualCayleyTensor(ut.TestCase): 12 | def test_dual_blades_from_bases(self): 13 | blades, blade_degrees = blades_from_bases(dual_bases) 14 | self.assertCountEqual(blades, dual_blades) 15 | for blade, blade_degree in zip(dual_blades, dual_blade_degrees): 16 | blade_index = blades.index(blade) 17 | self.assertEqual(blade_degrees[blade_index], blade_degree) 18 | 19 | def test_cayley_tensor_correct(self): 20 | cayley, cayley_inner, cayley_outer = get_cayley_tensor( 21 | dual_metric, dual_bases, dual_blades 22 | ) 23 | 24 | self.assertSequenceEqual(cayley.shape, [2, 2, 2]) 25 | 26 | # Scalar * Scalar -> Scalar 27 | self.assertEqual(cayley[0, 0, 0], 1) 28 | self.assertEqual(cayley[0, 0, 1], 0) 29 | 30 | # Scalar * Dual -> Dual 31 | self.assertEqual(cayley[0, 1, 0], 0) 32 | self.assertEqual(cayley[0, 1, 1], 1) 33 | 34 | # Dual * Scalar -> Dual 35 | self.assertEqual(cayley[1, 0, 0], 0) 36 | self.assertEqual(cayley[1, 0, 1], 1) 37 | 38 | # Dual * Dual -> Zero 39 | self.assertEqual(cayley[1, 1, 0], 0) 40 | self.assertEqual(cayley[1, 1, 1], 0) 41 | -------------------------------------------------------------------------------- /tests/test_pga.py: -------------------------------------------------------------------------------- 1 | import unittest as ut 2 | 3 | import tensorflow as tf 4 | 5 | from tfga import GeometricAlgebra 6 | 7 | # Make tensorflow not take over the entire GPU memory 8 | for gpu in tf.config.experimental.list_physical_devices("GPU"): 9 | tf.config.experimental.set_memory_growth(gpu, True) 10 | 11 | 12 | pga_signature = [0, 1, 1, 1] 13 | 14 | 15 | class TestDualGeometricAlgebraMultiply(ut.TestCase): 16 | def assertTensorsApproxEqual(self, a, b, tolerance=1e-4): 17 | self.assertTrue( 18 | tf.reduce_all(tf.abs(a - b) < tolerance), "%s not equal to %s" % (a, b) 19 | ) 20 | 21 | def test_exp_eq_approx_exp_e01_e02(self): 22 | pga = GeometricAlgebra(pga_signature) 23 | 24 | # a = 3e01 + 5e02 25 | a = 3 * pga.e01 + 5 * pga.e02 26 | 27 | # exp(a) = 1 + 3e01 + 5e02 28 | self.assertTensorsApproxEqual(pga.approx_exp(a), pga.exp(a)) 29 | 30 | def test_exp_eq_approx_exp_e12_e23(self): 31 | pga = GeometricAlgebra(pga_signature) 32 | 33 | # a = 3e12 + 5e23 34 | a = 3 * pga.e12 + 5 * pga.e23 35 | 36 | # exp(a) ~= 0.90 - 0.22e12 -0.37e23 37 | self.assertTensorsApproxEqual(pga.approx_exp(a), pga.exp(a)) 38 | 39 | def test_inverse(self): 40 | pga = GeometricAlgebra(pga_signature) 41 | 42 | # a = 3e12 + 5e23 43 | a = 3 * pga.e12 + 5 * pga.e23 44 | 45 | # a_inv: -0.09*e_12 + -0.15*e_23 46 | a_inv = pga.inverse(a) 47 | 48 | # a a_inv should be 1 49 | self.assertTensorsApproxEqual(pga.geom_prod(a, a_inv), 1 * pga.e("")) 50 | -------------------------------------------------------------------------------- /.github/workflows/earthly.yml: 
-------------------------------------------------------------------------------- 1 | name: Earthly 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - master 8 | workflow_dispatch: 9 | 10 | concurrency: 11 | group: ${{ github.workflow }}-${{ github.ref }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | test: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v3 19 | - name: Download Earthly 20 | run: "sudo /bin/sh -c 'wget https://github.com/earthly/earthly/releases/download/v0.6.30/earthly-linux-amd64 -O /usr/local/bin/earthly && chmod +x /usr/local/bin/earthly'" 21 | - name: Run test 22 | run: | 23 | earthly --ci +test 24 | publish: 25 | runs-on: ubuntu-latest 26 | steps: 27 | - uses: actions/checkout@v3 28 | - name: Download Earthly 29 | run: "sudo /bin/sh -c 'wget https://github.com/earthly/earthly/releases/download/v0.6.30/earthly-linux-amd64 -O /usr/local/bin/earthly && chmod +x /usr/local/bin/earthly'" 30 | - name: Publish test 31 | run: | 32 | earthly --secret PYPI_TOKEN=${{ secrets.test_pypi_password }} --ci +publish --REPOSITORY=testpypi 33 | - name: Publish 34 | if: contains(github.ref, 'master') 35 | run: | 36 | earthly --secret PYPI_TOKEN=${{ secrets.pypi_password }} --ci +publish --REPOSITORY=pypi 37 | docs: 38 | runs-on: ubuntu-latest 39 | steps: 40 | - uses: actions/checkout@v3 41 | - name: Download Earthly 42 | run: "sudo /bin/sh -c 'wget https://github.com/earthly/earthly/releases/download/v0.6.30/earthly-linux-amd64 -O /usr/local/bin/earthly && chmod +x /usr/local/bin/earthly'" 43 | - name: Build docs 44 | run: | 45 | earthly --ci -a +docs/html . 46 | - name: Push documentation to pages 47 | if: contains(github.ref, 'master') 48 | uses: JamesIves/github-pages-deploy-action@v4 49 | with: 50 | branch: gh-pages 51 | folder: "html" 52 | -------------------------------------------------------------------------------- /benchmarks/graph.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from glob import glob 4 | 5 | import clifford 6 | import numpy as np 7 | import pandas as pd 8 | from matplotlib import pyplot as plt 9 | 10 | 11 | def load_results(path): 12 | with open(path, "r", encoding="utf-8") as result_file: 13 | data = json.load(result_file) 14 | 15 | file_name = os.path.splitext(os.path.basename(path))[0] 16 | 17 | # file name: __ 18 | lib_name, fn_name, num_elements = file_name.split("_") 19 | num_elements = int(num_elements) 20 | 21 | benchmarks = data["benchmarks"] 22 | 23 | assert len(benchmarks) == 1 24 | benchmark = benchmarks[0] 25 | 26 | mean, stddev = benchmark["stats"]["mean"], benchmark["stats"]["stddev"] 27 | 28 | return { 29 | "lib_name": lib_name, 30 | "fn_name": fn_name, 31 | "num_elements": num_elements, 32 | "mean": mean, 33 | "stddev": stddev, 34 | } 35 | 36 | 37 | def main(): 38 | result_paths = sorted(glob(os.path.join("results", "*.json"))) 39 | out_path = "output" 40 | 41 | os.makedirs(out_path, exist_ok=True) 42 | 43 | all_results = list(map(load_results, result_paths)) 44 | 45 | df = pd.DataFrame(all_results) 46 | print(df) 47 | 48 | with plt.style.context("seaborn-darkgrid"): 49 | for fn_name, fn_df in df.groupby(by="fn_name"): 50 | plt.figure(figsize=(6, 4)) 51 | for lib_name, lib_df in fn_df.groupby(by="lib_name"): 52 | plt.errorbar( 53 | lib_df["num_elements"], 54 | lib_df["mean"], 55 | lib_df["stddev"], 56 | label=lib_name, 57 | ) 58 | plt.xscale("log") 59 | plt.yscale("log") 60 | plt.xlabel("Number of elements") 61 | plt.ylabel("Runtime [s]") 
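# Mean runtime per library with standard-deviation error bars; log-log axes since both element counts and runtimes span several orders of magnitude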
62 | plt.title(fn_name) 63 | plt.legend() 64 | plt.savefig(os.path.join(out_path, "%s.svg" % fn_name)) 65 | 66 | 67 | if __name__ == "__main__": 68 | main() 69 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "tfga" 3 | packages = [{ include = "tfga" }] 4 | version = "0.2.0" 5 | authors = ["Robin Kahlow "] 6 | readme = "README.md" 7 | description = "Clifford and Geometric Algebra with TensorFlow" 8 | homepage = "https://github.com/RobinKa/tfga" 9 | repository = "https://github.com/RobinKa/tfga" 10 | documentation = "https://tfga.warlock.ai/" 11 | license = "MIT" 12 | keywords = [ 13 | "geometric-algebra", 14 | "clifford-algebra", 15 | "tensorflow", 16 | "multi-vector", 17 | "para-vector", 18 | "mathematics", 19 | "machine-learning", 20 | ] 21 | classifiers = [ 22 | "License :: OSI Approved :: MIT License", 23 | "Development Status :: 3 - Alpha", 24 | 25 | "Programming Language :: Python :: 3", 26 | "Programming Language :: Python :: 3.8", 27 | "Programming Language :: Python :: 3.9", 28 | "Programming Language :: Python :: 3.10", 29 | "Programming Language :: Python :: 3.11", 30 | "Programming Language :: Python :: 3 :: Only", 31 | 32 | "Intended Audience :: Education", 33 | "Intended Audience :: Developers", 34 | "Intended Audience :: Science/Research", 35 | 36 | "Topic :: Scientific/Engineering", 37 | "Topic :: Scientific/Engineering :: Physics", 38 | "Topic :: Scientific/Engineering :: Mathematics", 39 | "Topic :: Scientific/Engineering :: Artificial Intelligence", 40 | 41 | "Topic :: Software Development", 42 | "Topic :: Software Development :: Libraries", 43 | "Topic :: Software Development :: Libraries :: Python Modules", 44 | ] 45 | 46 | [tool.poetry.dependencies] 47 | python = ">=3.8,<3.12" 48 | tensorflow = "^2.11.0" 49 | numpy = "^1.20.0" 50 | 51 | [tool.poetry.extras] 52 | tf = ["tensorflow"] 53 | 54 | [tool.poetry.group.dev.dependencies] 55 | pytest = "^7.2.1" 56 | black = "^23.1.0" 57 | isort = "^5.12.0" 58 | 59 | [tool.poetry.group.docs.dependencies] 60 | sphinx = "^6.1.3" 61 | sphinx-material = "^0.0.35" 62 | 63 | [build-system] 64 | requires = ["poetry-core>=1.0.0"] 65 | build-backend = "poetry.core.masonry.api" 66 | -------------------------------------------------------------------------------- /benchmarks/test_clifford.py: -------------------------------------------------------------------------------- 1 | import numba 2 | import numpy as np 3 | import pytest 4 | from clifford import MVArray 5 | from clifford.sta import D, D_blades 6 | 7 | 8 | def _clifford_add(a, b): 9 | return a + b 10 | 11 | 12 | @numba.njit(parallel=True, nogil=True) 13 | def _clifford_raw_add(a, b): 14 | return a + b 15 | 16 | 17 | def _clifford_mul(a, b): 18 | return a * b 19 | 20 | 21 | gmt_func = D.gmt_func 22 | 23 | 24 | @numba.njit(parallel=True, nogil=True) 25 | def _clifford_raw_mul(a, b): 26 | op = np.empty(a.shape, dtype=np.float32) 27 | for i in numba.prange(op.shape[0]): 28 | op[i, :] = gmt_func(a[i, :], b[i, :]) 29 | return a * b 30 | 31 | 32 | def _mv_ones(num_elements): 33 | return MVArray( 34 | [ 35 | D.MultiVector(value=np.ones(2**4, dtype=np.float32)) 36 | for i in range(num_elements) 37 | ] 38 | ) 39 | 40 | 41 | @pytest.mark.parametrize( 42 | "num_elements", [1, 10, 100, 1_000, 10_000, 100_000, 1_000_000] 43 | ) 44 | def test_clifford_add_mv_mv(num_elements, benchmark): 45 | a = _mv_ones(num_elements) 46 | b = 
_mv_ones(num_elements) 47 | benchmark(_clifford_add, a, b) 48 | 49 | 50 | @pytest.mark.parametrize( 51 | "num_elements", [1, 10, 100, 1_000, 10_000, 100_000, 1_000_000] 52 | ) 53 | def test_clifford_raw_mul_mv_mv(num_elements, benchmark): 54 | a = _mv_ones(num_elements).value 55 | b = _mv_ones(num_elements).value 56 | benchmark(_clifford_raw_mul, a, b) 57 | 58 | 59 | @pytest.mark.parametrize( 60 | "num_elements", [1, 10, 100, 1_000, 10_000, 100_000, 1_000_000] 61 | ) 62 | def test_clifford_raw_add_mv_mv(num_elements, benchmark): 63 | a = _mv_ones(num_elements).value 64 | b = _mv_ones(num_elements).value 65 | benchmark(_clifford_raw_add, a, b) 66 | 67 | 68 | @pytest.mark.parametrize( 69 | "num_elements", [1, 10, 100, 1_000, 10_000, 100_000, 1_000_000] 70 | ) 71 | def test_clifford_mul_mv_mv(num_elements, benchmark): 72 | a = _mv_ones(num_elements) 73 | b = _mv_ones(num_elements) 74 | benchmark(_clifford_mul, a, b) 75 | 76 | 77 | if __name__ == "__main__": 78 | pytest.main() 79 | -------------------------------------------------------------------------------- /Earthfile: -------------------------------------------------------------------------------- 1 | VERSION 0.6 2 | 3 | base-python: 4 | FROM python:3.8-slim 5 | ENV PIP_CACHE_DIR /pip-cache 6 | 7 | requirements: 8 | FROM +base-python 9 | RUN --mount=type=cache,target=$PIP_CACHE_DIR \ 10 | pip install poetry 11 | COPY pyproject.toml poetry.lock . 12 | RUN poetry export -f requirements.txt -E tf -o requirements.txt 13 | RUN poetry export -f requirements.txt -E tf --with dev -o dev-requirements.txt 14 | RUN poetry export -f requirements.txt -E tf --with docs -o docs-requirements.txt 15 | SAVE ARTIFACT requirements.txt /requirements.txt 16 | SAVE ARTIFACT dev-requirements.txt /dev-requirements.txt 17 | SAVE ARTIFACT docs-requirements.txt /docs-requirements.txt 18 | 19 | build: 20 | FROM +base-python 21 | 22 | WORKDIR /app 23 | 24 | COPY +requirements/requirements.txt . 25 | RUN --mount=type=cache,target=$PIP_CACHE_DIR \ 26 | pip install -r requirements.txt 27 | 28 | COPY tfga tfga 29 | ENV PYTHONPATH /app:$PYTHONPATH 30 | 31 | test: 32 | FROM +build 33 | 34 | COPY +requirements/dev-requirements.txt . 35 | RUN --mount=type=cache,target=$PIP_CACHE_DIR \ 36 | pip install -r dev-requirements.txt 37 | 38 | COPY tests tests 39 | RUN pytest tests 40 | 41 | publish: 42 | FROM +base-python 43 | 44 | ARG --required REPOSITORY 45 | 46 | RUN --mount=type=cache,target=$PIP_CACHE_DIR \ 47 | pip install poetry 48 | RUN poetry config repositories.pypi https://upload.pypi.org/legacy/ 49 | RUN poetry config repositories.testpypi https://test.pypi.org/legacy/ 50 | COPY pyproject.toml poetry.lock README.md . 51 | COPY tfga tfga 52 | RUN --mount=type=cache,target=$PIP_CACHE_DIR \ 53 | --secret PYPI_TOKEN=+secrets/PYPI_TOKEN \ 54 | poetry publish \ 55 | --build --skip-existing -r $REPOSITORY \ 56 | -u __token__ -p $PYPI_TOKEN 57 | 58 | docs: 59 | FROM +build 60 | 61 | COPY +requirements/docs-requirements.txt . 62 | RUN --mount=type=cache,target=$PIP_CACHE_DIR \ 63 | pip install -r docs-requirements.txt 64 | 65 | COPY docs . 66 | 67 | RUN sphinx-apidoc -o _build tfga 68 | RUN sphinx-build -M html . 
_build 69 | 70 | SAVE ARTIFACT _build/html /html 71 | -------------------------------------------------------------------------------- /benchmarks/generate.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | 4 | 5 | def main(): 6 | results_dir = "results" 7 | os.makedirs(results_dir, exist_ok=True) 8 | 9 | def _call_pytest(name, *paths, env=None): 10 | subprocess.call( 11 | [ 12 | "pytest", 13 | *paths, 14 | "--benchmark-json", 15 | os.path.join(results_dir, "%s.json" % name), 16 | ], 17 | env=env, 18 | ) 19 | 20 | def _run_parameterized(lib_name, fn_name, fn_path, num_elements, env=None): 21 | for i in num_elements: 22 | _call_pytest("%s_%s_%d" % (lib_name, fn_name, i), fn_path % i, env=env) 23 | 24 | num_elements = [1, 10, 100, 1_000, 10_000, 100_000, 1_000_000] 25 | 26 | cpu_env = os.environ.copy() 27 | cpu_env["CUDA_VISIBLE_DEVICES"] = "-1" 28 | 29 | # Multiply multivector batches 30 | _run_parameterized( 31 | "tfga-gpu", "mul-mv-mv", "test_tfga.py::test_tfga_mul_mv_mv[%d]", num_elements 32 | ) 33 | _run_parameterized( 34 | "tfga", 35 | "mul-mv-mv", 36 | "test_tfga.py::test_tfga_mul_mv_mv[%d]", 37 | num_elements, 38 | env=cpu_env, 39 | ) 40 | _run_parameterized( 41 | "clifford", 42 | "mul-mv-mv", 43 | "test_clifford.py::test_clifford_mul_mv_mv[%d]", 44 | num_elements, 45 | ) 46 | _run_parameterized( 47 | "clifford-raw", 48 | "mul-mv-mv", 49 | "test_clifford.py::test_clifford_raw_mul_mv_mv[%d]", 50 | num_elements, 51 | ) 52 | 53 | # Add multivector batches 54 | _run_parameterized( 55 | "tfga-gpu", "add-mv-mv", "test_tfga.py::test_tfga_add_mv_mv[%d]", num_elements 56 | ) 57 | _run_parameterized( 58 | "tfga", 59 | "add-mv-mv", 60 | "test_tfga.py::test_tfga_add_mv_mv[%d]", 61 | num_elements, 62 | env=cpu_env, 63 | ) 64 | _run_parameterized( 65 | "clifford", 66 | "add-mv-mv", 67 | "test_clifford.py::test_clifford_add_mv_mv[%d]", 68 | num_elements, 69 | ) 70 | _run_parameterized( 71 | "clifford-raw", 72 | "add-mv-mv", 73 | "test_clifford.py::test_clifford_raw_add_mv_mv[%d]", 74 | num_elements, 75 | ) 76 | 77 | 78 | if __name__ == "__main__": 79 | main() 80 | -------------------------------------------------------------------------------- /tfga/cayley.py: -------------------------------------------------------------------------------- 1 | """Operations for constructing the cayley 3-tensor needed 2 | for the geometric product. Used internally. 
3 | """ 4 | from itertools import combinations 5 | 6 | import numpy as np 7 | 8 | from tfga.blades import get_normal_ordered 9 | 10 | 11 | def _collapse_same(x): 12 | for i in range(len(x) - 1): 13 | a, b = x[i], x[i + 1] 14 | if a == b: 15 | return False, x[:i] + x[i + 2 :], a 16 | return True, x, None 17 | 18 | 19 | def _reduce_bases(a, b, metric): 20 | if a == "": 21 | return 1, b 22 | elif b == "": 23 | return 1, a 24 | 25 | combined = list(a + b) 26 | 27 | # Bring into normal order: 28 | sign, combined = get_normal_ordered(combined) 29 | 30 | done = False 31 | while not done: 32 | done, combined, combined_elem = _collapse_same(combined) 33 | if not done: 34 | sign *= metric[combined_elem] 35 | 36 | return sign, "".join(combined) 37 | 38 | 39 | def blades_from_bases(vector_bases): 40 | all_combinations = [""] 41 | degrees = [0] 42 | for i in range(1, len(vector_bases) + 1): 43 | combs = combinations(vector_bases, i) 44 | combs = ["".join(c) for c in combs] 45 | all_combinations += combs 46 | degrees += [i] * len(combs) 47 | return all_combinations, degrees 48 | 49 | 50 | def get_cayley_tensor(metric, bases, blades): 51 | num_blades = len(blades) 52 | 53 | t_geom = np.zeros((num_blades, num_blades, num_blades), dtype=np.int32) 54 | t_inner = np.zeros((num_blades, num_blades, num_blades), dtype=np.int32) 55 | t_outer = np.zeros((num_blades, num_blades, num_blades), dtype=np.int32) 56 | 57 | metric_dict = {v: metric[i] for i, v in enumerate(bases)} 58 | 59 | for a in blades: 60 | for b in blades: 61 | sign, result = _reduce_bases(a, b, metric_dict) 62 | a_index = blades.index(a) 63 | b_index = blades.index(b) 64 | out_index = blades.index(result) 65 | t_geom[a_index, b_index, out_index] = sign 66 | 67 | # Degree went down -> part of inner 68 | if len(result) == abs(len(a) - len(b)): 69 | t_inner[a_index, b_index, out_index] = sign 70 | 71 | # Degree went up -> part of outer 72 | if len(result) == len(a) + len(b): 73 | t_outer[a_index, b_index, out_index] = sign 74 | 75 | return t_geom, t_inner, t_outer 76 | -------------------------------------------------------------------------------- /tests/test_sta_cayley.py: -------------------------------------------------------------------------------- 1 | import unittest as ut 2 | 3 | import numpy as np 4 | 5 | from tfga.cayley import blades_from_bases, get_cayley_tensor 6 | 7 | sta_metric = [1, -1, -1, -1] 8 | sta_bases = ["0", "1", "2", "3"] 9 | sta_blades = [ 10 | "", 11 | "0", 12 | "1", 13 | "2", 14 | "3", 15 | "01", 16 | "02", 17 | "03", 18 | "12", 19 | "13", 20 | "23", 21 | "012", 22 | "013", 23 | "023", 24 | "123", 25 | "0123", 26 | ] 27 | sta_blade_degrees = [len(blade) for blade in sta_blades] 28 | 29 | 30 | class TestStaCayleyTensor(ut.TestCase): 31 | def test_sta_blades_from_bases(self): 32 | blades, blade_degrees = blades_from_bases(sta_bases) 33 | self.assertCountEqual(blades, sta_blades) 34 | for blade, blade_degree in zip(sta_blades, sta_blade_degrees): 35 | blade_index = blades.index(blade) 36 | self.assertEqual(blade_degrees[blade_index], blade_degree) 37 | 38 | def test_sta_cayley_tensor_scalar_scalar_scalar(self): 39 | cayley, cayley_inner, cayley_outer = get_cayley_tensor( 40 | sta_metric, sta_bases, sta_blades 41 | ) 42 | 43 | # Scalar * Scalar -> Scalar 44 | self.assertEqual(cayley[0, 0, 0], 1) 45 | self.assertTrue(np.all(cayley[0, 0, 1:] == 0)) 46 | 47 | def test_sta_cayley_tensor_e12_e23_me13(self): 48 | cayley, cayley_inner, cayley_outer = get_cayley_tensor( 49 | sta_metric, sta_bases, sta_blades 50 | ) 51 | 52 | # e12 * 
e23 -> -e13 53 | e12_index = sta_blades.index("12") 54 | e23_index = sta_blades.index("23") 55 | e13_index = sta_blades.index("13") 56 | self.assertEqual(cayley[e12_index, e23_index, e13_index], -1) 57 | self.assertTrue(np.all(cayley[e12_index, e23_index, :e13_index] == 0)) 58 | self.assertTrue(np.all(cayley[e12_index, e23_index, e13_index + 1 :] == 0)) 59 | 60 | def test_sta_cayley_tensor_e01_e23_e0123(self): 61 | cayley, cayley_inner, cayley_outer = get_cayley_tensor( 62 | sta_metric, sta_bases, sta_blades 63 | ) 64 | 65 | # e01 * e23 -> e0123 66 | e01_index = sta_blades.index("01") 67 | e23_index = sta_blades.index("23") 68 | e0123_index = sta_blades.index("0123") 69 | self.assertEqual(cayley[e01_index, e23_index, e0123_index], 1) 70 | self.assertTrue(np.all(cayley[e01_index, e23_index, :e0123_index] == 0)) 71 | self.assertTrue(np.all(cayley[e01_index, e23_index, e0123_index + 1 :] == 0)) 72 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # PEP 582; used by e.g. 
github.com/David-OConnor/pyflow 98 | __pypackages__/ 99 | 100 | # Celery stuff 101 | celerybeat-schedule 102 | celerybeat.pid 103 | 104 | # SageMath parsed files 105 | *.sage.py 106 | 107 | # Environments 108 | .env 109 | .venv 110 | env/ 111 | venv/ 112 | ENV/ 113 | env.bak/ 114 | venv.bak/ 115 | 116 | # Spyder project settings 117 | .spyderproject 118 | .spyproject 119 | 120 | # Rope project settings 121 | .ropeproject 122 | 123 | # mkdocs documentation 124 | /site 125 | 126 | # mypy 127 | .mypy_cache/ 128 | .dmypy.json 129 | dmypy.json 130 | 131 | # Pyre type checker 132 | .pyre/ 133 | 134 | # pytype static type analyzer 135 | .pytype/ 136 | 137 | # Cython debug symbols 138 | cython_debug/ 139 | 140 | # Visual Studio Code 141 | .vscode/ 142 | 143 | /benchmarks/results/ 144 | /html/ 145 | -------------------------------------------------------------------------------- /tfga/mv_ops.py: -------------------------------------------------------------------------------- 1 | """Operations on geometric algebra tensors used internally.""" 2 | from typing import Union 3 | 4 | import tensorflow as tf 5 | 6 | 7 | def mv_multiply( 8 | a_blade_values: tf.Tensor, b_blade_values: tf.Tensor, cayley: tf.Tensor 9 | ) -> tf.Tensor: 10 | # ...i, ijk -> ...jk 11 | x = tf.tensordot(a_blade_values, cayley, axes=[-1, 0]) 12 | 13 | # ...1j, ...jk -> ...1k 14 | x = tf.expand_dims(b_blade_values, axis=b_blade_values.shape.ndims - 1) @ x 15 | 16 | # ...1k -> ...k 17 | x = tf.squeeze(x, axis=-2) 18 | 19 | return x 20 | 21 | 22 | def mv_conv1d( 23 | a_blade_values: tf.Tensor, 24 | k_blade_values: tf.Tensor, 25 | cayley: tf.Tensor, 26 | stride: int, 27 | padding: str, 28 | dilations: Union[int, None] = None, 29 | ) -> tf.Tensor: 30 | # Winograd convolution 31 | 32 | # A: [..., S, CI, BI] 33 | # K: [K, CI, CO, BK] 34 | # C: [BI, BK, BO] 35 | 36 | kernel_size = k_blade_values.shape[0] 37 | 38 | a_batch_shape = tf.shape(a_blade_values)[:-3] 39 | 40 | # Reshape a_blade_values to a 2d image (since that's what the tf op expects) 41 | # [*, S, 1, CI*BI] 42 | a_image_shape = tf.concat( 43 | [ 44 | a_batch_shape, 45 | tf.shape(a_blade_values)[-3:-2], 46 | [1, tf.reduce_prod(tf.shape(a_blade_values)[-2:])], 47 | ], 48 | axis=0, 49 | ) 50 | a_image = tf.reshape(a_blade_values, a_image_shape) 51 | 52 | sizes = [1, kernel_size, 1, 1] 53 | strides = [1, stride, 1, 1] 54 | 55 | # [*, P, 1, K*CI*BI] where eg. number of patches P = S * K for 56 | # stride=1 and "SAME", (S-K+1) * K for "VALID", ... 
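# Each extracted patch flattens its K window rows together with the CI*BI channel values, so the single reshape below recovers the [..., P, K, CI, BI] layout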
57 | a_slices = tf.image.extract_patches( 58 | a_image, sizes=sizes, strides=strides, rates=[1, 1, 1, 1], padding=padding 59 | ) 60 | 61 | # [..., P, K, CI, BI] 62 | out_shape = tf.concat( 63 | [ 64 | a_batch_shape, 65 | tf.shape(a_slices)[-3:-2], 66 | tf.shape(k_blade_values)[:1], 67 | tf.shape(a_blade_values)[-2:], 68 | ], 69 | axis=0, 70 | ) 71 | 72 | a_slices = tf.reshape(a_slices, out_shape) 73 | 74 | # TODO: Optimize this to not use einsum (since it's slow with ellipses) 75 | # a_...p,k,ci,bi; k_k,ci,co,bk; c_bi,bk,bo -> y_...p,co,bo 76 | # ...a b c d , e c f g , d g h -> ...a f h 77 | x = tf.einsum("...abcd,bcfg,dgh->...afh", a_slices, k_blade_values, cayley) 78 | 79 | return x 80 | 81 | 82 | def mv_reversion(a_blade_values, algebra_blade_degrees): 83 | algebra_blade_degrees = tf.cast(algebra_blade_degrees, tf.float32) 84 | 85 | # for each blade, 0 if even number of swaps required, else 1 86 | odd_swaps = tf.cast( 87 | tf.floor(algebra_blade_degrees * (algebra_blade_degrees - 0.5)) % 2, tf.float32 88 | ) 89 | 90 | # [0, 1] -> [-1, 1] 91 | reversion_signs = 1.0 - 2.0 * odd_swaps 92 | 93 | return reversion_signs * a_blade_values 94 | 95 | 96 | def mv_grade_automorphism(a_blade_values, algebra_blade_degrees): 97 | algebra_blade_degrees = tf.cast(algebra_blade_degrees, tf.float32) 98 | signs = 1.0 - 2.0 * (algebra_blade_degrees % 2.0) 99 | return signs * a_blade_values 100 | -------------------------------------------------------------------------------- /notebooks/conv.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "tags": [] 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "%load_ext autoreload\n", 12 | "%autoreload 2\n", 13 | "\n", 14 | "import matplotlib.pyplot as plt\n", 15 | "import tensorflow as tf\n", 16 | "# Make tensorflow not take over the entire GPU memory\n", 17 | "for gpu in tf.config.experimental.list_physical_devices('GPU'):\n", 18 | " tf.config.experimental.set_memory_growth(gpu, True)\n", 19 | "from tfga import GeometricAlgebra\n", 20 | "from tfga.blades import BladeKind\n", 21 | "from tfga.layers import GeometricProductConv1D" 22 | ] 23 | }, 24 | { 25 | "cell_type": "code", 26 | "execution_count": 2, 27 | "metadata": { 28 | "tags": [] 29 | }, 30 | "outputs": [ 31 | { 32 | "output_type": "stream", 33 | "name": "stdout", 34 | "text": "(2, 4, 4, 16)\ntf.Tensor(\n[[[[ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]]\n\n [[ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]]\n\n [[ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]]\n\n [[ 0. -24. 0. 24. 0. 24. 24. -24. 24. 0. 24. 24. 24.\n 24. 24. 24.]\n [ 0. -24. 0. 24. 0. 24. 24. -24. 24. 0. 24. 24. 24.\n 24. 24. 24.]\n [ 0. -24. 0. 24. 0. 24. 24. -24. 24. 0. 24. 24. 24.\n 24. 24. 24.]\n [ 0. -24. 0. 24. 0. 24. 24. -24. 24. 0. 
24. 24. 24.\n 24. 24. 24.]]]\n\n\n [[[ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]]\n\n [[ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]]\n\n [[ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]\n [ 0. -36. 0. 36. 0. 36. 36. -36. 36. 0. 36. 36. 36.\n 36. 36. 36.]]\n\n [[ 0. -24. 0. 24. 0. 24. 24. -24. 24. 0. 24. 24. 24.\n 24. 24. 24.]\n [ 0. -24. 0. 24. 0. 24. 24. -24. 24. 0. 24. 24. 24.\n 24. 24. 24.]\n [ 0. -24. 0. 24. 0. 24. 24. -24. 24. 0. 24. 24. 24.\n 24. 24. 24.]\n [ 0. -24. 0. 24. 0. 24. 24. -24. 24. 0. 24. 24. 24.\n 24. 24. 24.]]]], shape=(2, 4, 4, 16), dtype=float32)\n" 35 | } 36 | ], 37 | "source": [ 38 | "ga = GeometricAlgebra([0, 1, 1, 1])\n", 39 | "\n", 40 | "batch_size = 2\n", 41 | "sequence_length = 8\n", 42 | "c_in = 3\n", 43 | "c_out = 4\n", 44 | "kernel_size = 3\n", 45 | "\n", 46 | "a = ga.from_tensor_with_kind(tf.ones([batch_size, sequence_length, c_in, ga.num_blades]), BladeKind.MV)\n", 47 | "k = ga.from_tensor_with_kind(tf.ones([kernel_size, c_in, c_out, ga.num_blades]), BladeKind.MV)\n", 48 | "\n", 49 | "y = ga.geom_conv1d(a, k, 2, \"SAME\")\n", 50 | "\n", 51 | "print(y.shape)\n", 52 | "print(y)" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": 3, 58 | "metadata": { 59 | "tags": [] 60 | }, 61 | "outputs": [ 62 | { 63 | "output_type": "stream", 64 | "name": "stdout", 65 | "text": "(2, 4, 4, 16)\nMultiVector[batch_shape=(2, 4, 4)]\nMultiVector[-0.86*1 + 0.12*e_0 + -0.86*e_1 + 0.24*e_2 + 0.55*e_3 + -1.85*e_01 + -1.05*e_02 + 2.10*e_03 + 0.24*e_12 + 0.55*e_13 + -1.29*e_23 + 1.53*e_012 + -1.01*e_013 + -1.56*e_023 + -1.29*e_123 + -1.02*e_0123]\n" 66 | } 67 | ], 68 | "source": [ 69 | "mv_indices = tf.range(ga.num_blades, dtype=tf.int64)\n", 70 | "\n", 71 | "conv_layer = GeometricProductConv1D(\n", 72 | " ga, filters=c_out, kernel_size=kernel_size, stride=2, padding=\"SAME\",\n", 73 | " blade_indices_kernel=tf.range(ga.num_blades, dtype=tf.int64),\n", 74 | " blade_indices_bias=tf.range(ga.num_blades, dtype=tf.int64)\n", 75 | ")\n", 76 | "\n", 77 | "y2 = conv_layer(a)\n", 78 | "print(y2.shape)\n", 79 | "ga.print(y2)\n", 80 | "ga.print(y2[0, 0, 0])" 81 | ] 82 | } 83 | ], 84 | "metadata": { 85 | "language_info": { 86 | "codemirror_mode": { 87 | "name": "ipython", 88 | "version": 3 89 | }, 90 | "file_extension": ".py", 91 | "mimetype": "text/x-python", 92 | "name": "python", 93 | "nbconvert_exporter": "python", 94 | "pygments_lexer": "ipython3", 95 | "version": "3.7.6-final" 96 | }, 97 | "orig_nbformat": 2, 98 | "kernelspec": { 99 | "name": "python37664bittf2conda034469ea11204d31b38329519e9d7dbe", 100 | "display_name": "Python 3.7.6 64-bit ('tf2': conda)" 101 | } 102 | }, 103 | "nbformat": 4, 104 | "nbformat_minor": 2 105 | } -------------------------------------------------------------------------------- /tfga/blades.py: -------------------------------------------------------------------------------- 1 | """Blade-related definitions and functions used across 
the library.""" 2 | from enum import Enum 3 | from typing import List, Tuple, Union 4 | 5 | import tensorflow as tf 6 | 7 | 8 | class BladeKind(Enum): 9 | """Kind of blade depending on its degree.""" 10 | 11 | MV = "mv" 12 | EVEN = "even" 13 | ODD = "odd" 14 | SCALAR = "scalar" 15 | VECTOR = "vector" 16 | BIVECTOR = "bivector" 17 | TRIVECTOR = "trivector" 18 | PSEUDOSCALAR = "pseudoscalar" 19 | PSEUDOVECTOR = "pseudovector" 20 | PSEUDOBIVECTOR = "pseudobivector" 21 | PSEUDOTRIVECTOR = "pseudotrivector" 22 | 23 | 24 | def get_blade_repr(blade_name: str) -> str: 25 | """Returns the representation to use 26 | for a given blade. 27 | 28 | Examples: 29 | - `"12"` -> `"e_12"` 30 | - `""` -> `"1"` 31 | 32 | Args: 33 | blade_name: name of the blade in the algebra (eg. `"12"`) 34 | 35 | Returns: 36 | Representation to use for a given blade 37 | """ 38 | if blade_name == "": 39 | return "1" 40 | return "e_%s" % blade_name 41 | 42 | 43 | def is_blade_kind( 44 | blade_degrees: tf.Tensor, kind: Union[BladeKind, str], max_degree: int 45 | ) -> tf.Tensor: 46 | """Finds a boolean mask for whether blade degrees are of a given kind. 47 | 48 | Args: 49 | blade_degrees: list of blade degrees 50 | kind: kind of blade to check for 51 | max_degree: maximum blade degree in the algebra 52 | 53 | Returns: 54 | boolean mask for whether blade degrees are of a given kind 55 | """ 56 | # Convert kind to string representation 57 | # for comparison. 58 | kind = kind.value if isinstance(kind, BladeKind) else kind 59 | 60 | if kind == BladeKind.MV.value: 61 | return tf.constant(True, shape=[len(blade_degrees)]) 62 | elif kind == BladeKind.EVEN.value: 63 | return blade_degrees % 2 == 0 64 | elif kind == BladeKind.ODD.value: 65 | return blade_degrees % 2 == 1 66 | elif kind == BladeKind.SCALAR.value: 67 | return blade_degrees == 0 68 | elif kind == BladeKind.VECTOR.value: 69 | return blade_degrees == 1 70 | elif kind == BladeKind.BIVECTOR.value: 71 | return blade_degrees == 2 72 | elif kind == BladeKind.TRIVECTOR.value: 73 | return blade_degrees == 3 74 | elif kind == BladeKind.PSEUDOSCALAR.value: 75 | return blade_degrees == max_degree 76 | elif kind == BladeKind.PSEUDOVECTOR.value: 77 | return blade_degrees == max_degree - 1 78 | elif kind == BladeKind.PSEUDOBIVECTOR.value: 79 | return blade_degrees == max_degree - 2 80 | elif kind == BladeKind.PSEUDOTRIVECTOR.value: 81 | return blade_degrees == max_degree - 3 82 | raise Exception("Unknown blade kind: %s" % kind) 83 | 84 | 85 | def invert_blade_indices(num_blades: int, blade_indices: tf.Tensor) -> tf.Tensor: 86 | """Returns all blade indices except for the given ones. 87 | 88 | Args: 89 | num_blades: Total number of blades in the algebra 90 | blade_indices: blade indices to exclude 91 | 92 | Returns: 93 | All blade indices except for the given ones 94 | """ 95 | 96 | all_blades = tf.range(num_blades, dtype=blade_indices.dtype) 97 | return tf.sparse.to_dense( 98 | tf.sets.difference( 99 | tf.expand_dims(all_blades, axis=0), tf.expand_dims(blade_indices, axis=0) 100 | ) 101 | )[0] 102 | 103 | 104 | def get_blade_of_kind_indices( 105 | blade_degrees: tf.Tensor, kind: BladeKind, max_degree: int, invert: bool = False 106 | ) -> tf.Tensor: 107 | """Finds a boolean mask for whether blades are of a given kind. 
108 | 109 | Args: 110 | blade_degrees: List of blade degrees 111 | kind: kind of blade for which the mask will be true 112 | max_degree: maximum blade degree in the algebra 113 | invert: whether to invert the result 114 | 115 | Returns: 116 | boolean mask for whether blades are of a given kind 117 | """ 118 | cond = is_blade_kind(blade_degrees, kind, max_degree) 119 | cond = tf.math.logical_xor(cond, invert) 120 | return tf.where(cond)[:, 0] 121 | 122 | 123 | def _normal_swap(x: List[str]) -> List[str]: 124 | """Swaps the first unordered blade pair and returns the new list as well 125 | as whether a swap was performed.""" 126 | for i in range(len(x) - 1): 127 | a, b = x[i], x[i + 1] 128 | if a > b: # string comparison 129 | x[i], x[i + 1] = b, a 130 | return False, x 131 | return True, x 132 | 133 | 134 | def get_normal_ordered(blade_name: str) -> Tuple[int, str]: 135 | """Returns the normal ordered blade name and its sign. 136 | Example: 21 => -1, 12 137 | 138 | Args: 139 | blade_name: Blade name for which to return normal ordered 140 | name and sign 141 | 142 | Returns: 143 | sign: sign of the blade 144 | blade_name: normalized name of the blade 145 | """ 146 | blade_name = list(blade_name) 147 | sign = -1 148 | done = False 149 | while not done: 150 | sign *= -1 151 | done, blade_name = _normal_swap(blade_name) 152 | return sign, "".join(blade_name) 153 | 154 | 155 | def get_blade_indices_from_names( 156 | blade_names: List[str], all_blade_names: List[str] 157 | ) -> tf.Tensor: 158 | """Finds blade signs and indices for given blade names in a list of blade 159 | names. Blade names can be unnormalized and their correct sign will be 160 | returned. 161 | 162 | Args: 163 | blade_names: Blade names to return indices for. May be unnormalized. 164 | all_blade_names: Blade names to use as index 165 | 166 | Returns: 167 | blade_signs: signs for the passed blades in same order as passed 168 | blade_indices: blade indices in the same order as passed 169 | """ 170 | signs_and_names = [get_normal_ordered(b) for b in blade_names] 171 | 172 | blade_signs = [sign for sign, blade_name in signs_and_names] 173 | 174 | blade_indices = [ 175 | all_blade_names.index(blade_name) for sign, blade_name in signs_and_names 176 | ] 177 | 178 | return ( 179 | tf.convert_to_tensor(blade_signs, dtype=tf.float32), 180 | tf.convert_to_tensor(blade_indices, dtype=tf.int64), 181 | ) 182 | -------------------------------------------------------------------------------- /tests/test_dual_ga.py: -------------------------------------------------------------------------------- 1 | import unittest as ut 2 | 3 | import tensorflow as tf 4 | 5 | from tfga import GeometricAlgebra 6 | 7 | # Make tensorflow not take over the entire GPU memory 8 | for gpu in tf.config.experimental.list_physical_devices("GPU"): 9 | tf.config.experimental.set_memory_growth(gpu, True) 10 | 11 | dual_metric = [0] 12 | dual_bases = ["0"] 13 | dual_blades = ["", "0"] 14 | dual_blade_degrees = [len(blade) for blade in dual_blades] 15 | 16 | 17 | class TestDualGeometricAlgebraMultiply(ut.TestCase): 18 | def assertTensorsEqual(self, a, b): 19 | self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b)) 20 | 21 | def test_mul_mv_mv(self): 22 | ga = GeometricAlgebra(metric=dual_metric) 23 | 24 | zero = ga.from_scalar(0.0) 25 | one = ga.from_scalar(1.0) 26 | eps = ga.from_tensor_with_kind(tf.ones(1), kind="pseudoscalar") 27 | ten = ga.from_scalar(10.0) 28 | 29 | self.assertTensorsEqual(ga.geom_prod(eps, eps), zero) 30 | 
self.assertTensorsEqual(ga.geom_prod(one, one), one) 31 | self.assertTensorsEqual(ga.geom_prod(zero, one), zero) 32 | self.assertTensorsEqual(ga.geom_prod(one, zero), zero) 33 | self.assertTensorsEqual(ga.geom_prod(one, eps), eps) 34 | self.assertTensorsEqual(ga.geom_prod(eps, one), eps) 35 | self.assertTensorsEqual(ga.geom_prod(zero, zero), zero) 36 | self.assertTensorsEqual(ga.geom_prod(ten, zero), zero) 37 | self.assertTensorsEqual(ga.geom_prod(zero, ten), zero) 38 | self.assertTensorsEqual(ga.geom_prod(ga.geom_prod(ten, eps), eps), zero) 39 | self.assertTensorsEqual(ga.geom_prod(ten, one), ten) 40 | self.assertTensorsEqual(ga.geom_prod(one, ten), ten) 41 | 42 | def test_mul_tf_mv(self): 43 | ga = GeometricAlgebra(metric=dual_metric) 44 | 45 | zero = ga.from_scalar(0.0) 46 | one = ga.from_scalar(1.0) 47 | eps = ga.from_tensor_with_kind(tf.ones(1), kind="pseudoscalar") 48 | ten = ga.from_scalar(10.0) 49 | 50 | zero_tf = tf.convert_to_tensor([0, 0], dtype=tf.float32) 51 | one_tf = tf.convert_to_tensor([1, 0], dtype=tf.float32) 52 | eps_tf = tf.convert_to_tensor([0, 1], dtype=tf.float32) 53 | ten_tf = tf.convert_to_tensor([10, 0], dtype=tf.float32) 54 | 55 | self.assertTensorsEqual(ga.geom_prod(one, one_tf), one) 56 | self.assertTensorsEqual(ga.geom_prod(one_tf, one), one) 57 | self.assertTensorsEqual(ga.geom_prod(zero, one_tf), zero) 58 | self.assertTensorsEqual(ga.geom_prod(one_tf, zero), zero) 59 | self.assertTensorsEqual(ga.geom_prod(zero_tf, one), zero) 60 | self.assertTensorsEqual(ga.geom_prod(one, zero_tf), zero) 61 | self.assertTensorsEqual(ga.geom_prod(one_tf, eps), eps) 62 | self.assertTensorsEqual(ga.geom_prod(eps, one_tf), eps) 63 | self.assertTensorsEqual(ga.geom_prod(zero_tf, zero), zero) 64 | self.assertTensorsEqual(ga.geom_prod(zero, zero_tf), zero) 65 | self.assertTensorsEqual(ga.geom_prod(ten_tf, zero), zero) 66 | self.assertTensorsEqual(ga.geom_prod(zero, ten_tf), zero) 67 | self.assertTensorsEqual(ga.geom_prod(ten, zero_tf), zero) 68 | self.assertTensorsEqual(ga.geom_prod(zero_tf, ten), zero) 69 | self.assertTensorsEqual(ga.geom_prod(ga.geom_prod(ten_tf, eps), eps), zero) 70 | self.assertTensorsEqual(ga.geom_prod(ten_tf, one), ten) 71 | self.assertTensorsEqual(ga.geom_prod(one, ten_tf), ten) 72 | self.assertTensorsEqual(ga.geom_prod(ten, one_tf), ten) 73 | self.assertTensorsEqual(ga.geom_prod(one_tf, ten), ten) 74 | 75 | 76 | class TestDualGeometricAlgebraMisc(ut.TestCase): 77 | def assertTensorsEqual(self, a, b): 78 | self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b)) 79 | 80 | def test_auto_diff_square(self): 81 | """Test automatic differentiation using 82 | dual numbers for the square function. 
83 | f(x) = x^2 84 | f'(x) = d/dx f(x) = 2x 85 | """ 86 | ga = GeometricAlgebra(metric=dual_metric) 87 | 88 | one = ga.from_scalar(1.0) 89 | five = ga.from_scalar(5.0) 90 | eps = ga.from_tensor_with_kind(tf.ones(1), kind="pseudoscalar") 91 | 92 | x = one + eps 93 | 94 | # f(1) = 1^2 = 1, f'(1) = 2 95 | x_squared = ga.geom_prod(x, x) 96 | self.assertTensorsEqual(ga.select_blades_with_name(x_squared, ""), 1.0) 97 | self.assertTensorsEqual(ga.select_blades_with_name(x_squared, "0"), 2.0) 98 | 99 | y = five + eps 100 | 101 | # f(5) = 5^2 = 25, f'(5) = 10 102 | y_squared = ga.geom_prod(y, y) 103 | self.assertTensorsEqual(ga.select_blades_with_name(y_squared, ""), 25.0) 104 | self.assertTensorsEqual(ga.select_blades_with_name(y_squared, "0"), 10.0) 105 | 106 | def test_batched_auto_diff_square(self): 107 | """Test automatic differentiation using 108 | dual numbers for the square function. 109 | Use batch with identical elements. 110 | f(x) = x^2 111 | f'(x) = d/dx f(x) = 2x 112 | """ 113 | ga = GeometricAlgebra(metric=dual_metric) 114 | 115 | one = ga.from_tensor_with_kind(tf.ones([3, 4, 1]), kind="scalar") 116 | five = ga.from_tensor_with_kind(tf.fill([3, 4, 1], 5.0), kind="scalar") 117 | eps = ga.from_tensor_with_kind(tf.ones([3, 4, 1]), kind="pseudoscalar") 118 | 119 | x = one + eps 120 | 121 | # f(1) = 1^2 = 1, f'(1) = 2 122 | x_squared = ga.geom_prod(x, x) 123 | self.assertTensorsEqual(ga.select_blades_with_name(x_squared, ""), 1.0) 124 | self.assertTensorsEqual(ga.select_blades_with_name(x_squared, "0"), 2.0) 125 | 126 | y = five + eps 127 | 128 | # f(5) = 5^2 = 25, f'(5) = 10 129 | y_squared = ga.geom_prod(y, y) 130 | self.assertTensorsEqual(ga.select_blades_with_name(y_squared, ""), 25.0) 131 | self.assertTensorsEqual(ga.select_blades_with_name(y_squared, "0"), 10.0) 132 | 133 | def test_mul_inverse(self): 134 | ga = GeometricAlgebra(metric=dual_metric) 135 | 136 | # a = 2 137 | a = ga.from_tensor_with_kind(tf.fill([1], 2.0), kind="scalar") 138 | 139 | # b = 3 + 3e0 140 | b = ga.from_tensor_with_kind(tf.fill([2], 3.0), kind="mv") 141 | 142 | # a * b = 2 * (3 + 3e0) = 6 + 6e0 143 | c = ga.geom_prod(a, b) 144 | self.assertTensorsEqual(c, ga.from_scalar(6.0) + 6.0 * ga.e("0")) 145 | 146 | # a^-1 = 1 / 2 147 | a_inv = ga.inverse(a) 148 | self.assertTensorsEqual(ga.select_blades_with_name(a_inv, ""), 0.5) 149 | 150 | # c = a * b 151 | # => a_inv * c = b 152 | self.assertTensorsEqual(ga.geom_prod(a_inv, c), b) 153 | 154 | # Since a is scalar, should commute too. 155 | # => c * a_inv = b 156 | self.assertTensorsEqual(ga.geom_prod(c, a_inv), b) 157 | 158 | # b is not simply invertible (because it does not square to a scalar) 159 | # and will throw an exception 160 | self.assertRaises(Exception, ga.simple_inverse, b) 161 | 162 | # b is invertible with the shirokov inverse 163 | b_inv = ga.inverse(b) 164 | self.assertTensorsEqual(ga.geom_prod(b, b_inv), 1 * ga.e("")) 165 | -------------------------------------------------------------------------------- /tfga/mv.py: -------------------------------------------------------------------------------- 1 | """Defines the `MultiVector` class which is used as a convenience wrapper 2 | for `GeometricAlgebra` operations. 3 | """ 4 | 5 | from typing import List, Union 6 | 7 | import tensorflow as tf 8 | 9 | from tfga.blades import BladeKind 10 | 11 | 12 | class MultiVector: 13 | """Wrapper for geometric algebra tensors using `GeometricAlgebra` 14 | operations in a less verbose way using operators. 
15 | """ 16 | 17 | def __init__(self, blade_values: tf.Tensor, algebra: "GeometricAlgebra"): 18 | """Initializes a MultiVector from a geometric algebra `tf.Tensor` 19 | and its corresponding `GeometricAlgebra`. 20 | 21 | Args: 22 | blade_values: Geometric algebra `tf.Tensor` with as many elements 23 | on its last axis as blades in the algebra 24 | algebra: `GeometricAlgebra` instance corresponding to the geometric 25 | algebra tensor 26 | """ 27 | 28 | self._blade_values = blade_values 29 | self._algebra = algebra 30 | 31 | @property 32 | def tensor(self): 33 | """Geometric algebra tensor holding the values of this multivector.""" 34 | return self._blade_values 35 | 36 | @property 37 | def algebra(self): 38 | """`GeometricAlgebra` instance this multivector belongs to.""" 39 | return self._algebra 40 | 41 | @property 42 | def batch_shape(self): 43 | """Batch shape of the multivector (ie. the shape of all axes except 44 | for the last one in the geometric algebra tensor). 45 | """ 46 | return self._blade_values.shape[:-1] 47 | 48 | def __len__(self) -> int: 49 | """Number of elements on the first axis of the geometric algebra 50 | tensor.""" 51 | return self._blade_values.shape[0] 52 | 53 | def __iter__(self): 54 | for n in range(self._blade_values.shape[0]): 55 | # If we only have one axis left, return the 56 | # actual numbers, otherwise return a new 57 | # multivector. 58 | if self._blade_values.shape.ndims == 1: 59 | yield self._blade_values[n] 60 | else: 61 | yield MultiVector(self._blade_values[n], self._algebra) 62 | 63 | def __xor__(self, other: "MultiVector") -> "MultiVector": 64 | """Exterior product. See `GeometricAlgebra.ext_prod()`""" 65 | assert isinstance(other, MultiVector) 66 | 67 | return MultiVector( 68 | self._algebra.ext_prod(self._blade_values, other._blade_values), 69 | self._algebra, 70 | ) 71 | 72 | def __or__(self, other: "MultiVector") -> "MultiVector": 73 | """Inner product. See `GeometricAlgebra.inner_prod()`""" 74 | assert isinstance(other, MultiVector) 75 | 76 | return MultiVector( 77 | self._algebra.inner_prod(self._blade_values, other._blade_values), 78 | self._algebra, 79 | ) 80 | 81 | def __mul__(self, other: "MultiVector") -> "MultiVector": 82 | """Geometric product. See `GeometricAlgebra.geom_prod()`""" 83 | assert isinstance(other, MultiVector) 84 | 85 | return MultiVector( 86 | self._algebra.geom_prod(self._blade_values, other._blade_values), 87 | self._algebra, 88 | ) 89 | 90 | def __truediv__(self, other: "MultiVector") -> "MultiVector": 91 | """Division, ie. multiplication with the inverse.""" 92 | assert isinstance(other, MultiVector) 93 | 94 | return MultiVector( 95 | self._algebra.geom_prod( 96 | self._blade_values, self._algebra.inverse(other._blade_values) 97 | ), 98 | self._algebra, 99 | ) 100 | 101 | def __and__(self, other: "MultiVector") -> "MultiVector": 102 | """Regressive product. See `GeometricAlgebra.reg_prod()`""" 103 | assert isinstance(other, MultiVector) 104 | 105 | return MultiVector( 106 | self._algebra.reg_prod(self._blade_values, other._blade_values), 107 | self._algebra, 108 | ) 109 | 110 | def __invert__(self) -> "MultiVector": 111 | """Reversion. 
See `GeometricAlgebra.reversion()`""" 112 | return MultiVector(self._algebra.reversion(self._blade_values), self._algebra) 113 | 114 | def __neg__(self) -> "MultiVector": 115 | """Negation.""" 116 | return MultiVector(-self._blade_values, self._algebra) 117 | 118 | def __add__(self, other: "MultiVector") -> "MultiVector": 119 | """Addition of multivectors.""" 120 | assert isinstance(other, MultiVector) 121 | 122 | return MultiVector(self._blade_values + other._blade_values, self._algebra) 123 | 124 | def __sub__(self, other: "MultiVector") -> "MultiVector": 125 | """Subtraction of multivectors.""" 126 | assert isinstance(other, MultiVector) 127 | 128 | return MultiVector(self._blade_values - other._blade_values, self._algebra) 129 | 130 | def __pow__(self, n: int) -> "MultiVector": 131 | """Multivector raised to an integer power.""" 132 | return MultiVector(self._algebra.int_pow(self._blade_values, n), self._algebra) 133 | 134 | def __getitem__(self, key: Union[str, List[str]]) -> "MultiVector": 135 | """`MultiVector` with only passed blade names as non-zeros.""" 136 | return MultiVector( 137 | self._algebra.keep_blades_with_name(self._blade_values, key), self._algebra 138 | ) 139 | 140 | def __call__(self, key: Union[str, List[str]]): 141 | """`tf.Tensor` with passed blade names on last axis.""" 142 | return self._algebra.select_blades_with_name(self._blade_values, key) 143 | 144 | def __repr__(self) -> str: 145 | return self._algebra.mv_repr(self._blade_values) 146 | 147 | def inverse(self) -> "MultiVector": 148 | """Inverse. See `GeometricAlgebra.inverse()`.""" 149 | return MultiVector(self._algebra.inverse(self._blade_values), self._algebra) 150 | 151 | def simple_inverse(self) -> "MultiVector": 152 | """Simple inverse. See `GeometricAlgebra.simple_inverse()`.""" 153 | return MultiVector( 154 | self._algebra.simple_inverse(self._blade_values), self._algebra 155 | ) 156 | 157 | def dual(self) -> "MultiVector": 158 | """Dual. See `GeometricAlgebra.dual()`.""" 159 | return MultiVector(self._algebra.dual(self._blade_values), self._algebra) 160 | 161 | def conjugation(self) -> "MultiVector": 162 | """Conjugation. See `GeometricAlgebra.conjugation()`.""" 163 | return MultiVector(self._algebra.conjugation(self._blade_values), self._algebra) 164 | 165 | def grade_automorphism(self) -> "MultiVector": 166 | """Grade automorphism. See `GeometricAlgebra.grade_automorphism()`.""" 167 | return MultiVector( 168 | self._algebra.grade_automorphism(self._blade_values), self._algebra 169 | ) 170 | 171 | def approx_exp(self, order: int = 50) -> "MultiVector": 172 | """Approximate exponential. See `GeometricAlgebra.approx_exp()`.""" 173 | return MultiVector( 174 | self._algebra.approx_exp(self._blade_values, order=order), self._algebra 175 | ) 176 | 177 | def exp(self, square_scalar_tolerance: Union[float, None] = 1e-4) -> "MultiVector": 178 | """Exponential. See `GeometricAlgebra.exp()`.""" 179 | return MultiVector( 180 | self._algebra.exp( 181 | self._blade_values, square_scalar_tolerance=square_scalar_tolerance 182 | ), 183 | self._algebra, 184 | ) 185 | 186 | def approx_log(self, order: int = 50) -> "MultiVector": 187 | """Approximate logarithm. 
See `GeometricAlgebra.approx_log()`.""" 188 | return MultiVector( 189 | self._algebra.approx_log(self._blade_values, order=order), self._algebra 190 | ) 191 | 192 | def is_pure_kind(self, kind: BladeKind) -> bool: 193 | """Whether the `MultiVector` is of a pure kind.""" 194 | return self._algebra.is_pure_kind(self._blade_values, kind=kind) 195 | 196 | def geom_conv1d( 197 | self, 198 | kernel: "MultiVector", 199 | stride: int, 200 | padding: str, 201 | dilations: Union[int, None] = None, 202 | ) -> "MultiVector": 203 | """1D convolution. See `GeometricAlgebra.geom_conv1d().`""" 204 | return MultiVector( 205 | self._algebra.geom_conv1d( 206 | self._blade_values, 207 | kernel._blade_values, 208 | stride=stride, 209 | padding=padding, 210 | dilations=dilations, 211 | ), 212 | self._algebra, 213 | ) 214 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TFGA - TensorFlow Geometric Algebra 2 | [![Build status](https://github.com/RobinKa/tfga/workflows/Build%20Test%20Publish/badge.svg)](https://github.com/RobinKa/tfga/actions) [![PyPI](https://badge.fury.io/py/tfga.svg)](https://badge.fury.io/py/tfga) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3902404.svg)](https://doi.org/10.5281/zenodo.3902404) 3 | 4 | [GitHub](https://github.com/RobinKa/tfga) | [Docs](https://tfga.warlock.ai) | [Benchmarks](https://github.com/RobinKa/tfga/tree/master/benchmarks) | [Slides](https://tfgap.warlock.ai) 5 | 6 | Python package for Geometric / Clifford Algebra with TensorFlow 2. 7 | 8 | **This project is a work in progress. Its API may change and the examples aren't polished yet.** 9 | 10 | Pull requests and suggestions either by opening an issue or by [sending me an email](mailto:tora@warlock.ai) are welcome. 11 | 12 | ## Installation 13 | Install using pip: `pip install tfga` 14 | 15 | Requirements: 16 | - Python 3 17 | - tensorflow 2 18 | - numpy 19 | 20 | ## Basic usage 21 | There are two ways to use this library. In both ways we first create a [`GeometricAlgebra`](https://tfga.warlock.ai/tfga.html#tfga.tfga.GeometricAlgebra) instance given a metric. 22 | Then we can either work on [`tf.Tensor`](https://www.tensorflow.org/api_docs/python/tf/Tensor) instances directly where the last axis is assumed to correspond to 23 | the algebra's blades. 24 | ```python 25 | import tensorflow as tf 26 | from tfga import GeometricAlgebra 27 | 28 | # Create an algebra with 3 basis vectors given their metric. 29 | # Contains geometric algebra operations. 30 | ga = GeometricAlgebra(metric=[1, 1, 1]) 31 | 32 | # Create geometric algebra tf.Tensor for vector blades (ie. e_0 + e_1 + e_2). 33 | # Represented as tf.Tensor with shape [8] (one value for each blade of the algebra). 34 | # tf.Tensor: [0, 1, 1, 1, 0, 0, 0, 0] 35 | ordinary_vector = ga.from_tensor_with_kind(tf.ones(3), kind="vector") 36 | 37 | # 5 + 5 e_01 + 5 e_02 + 5 e_12 38 | quaternion = ga.from_tensor_with_kind(tf.fill(dims=4, value=5), kind="even") 39 | 40 | # 5 + 1 e_0 + 1 e_1 + 1 e_2 + 5 e_01 + 5 e_02 + 5 e_12 41 | multivector = ordinary_vector + quaternion 42 | 43 | # Inner product e_0 | (e_0 + e_1 + e_2) = 1 44 | # ga.print is like print, but has extra formatting for geometric algebra tf.Tensor instances. 45 | ga.print(ga.inner_prod(ga.e0, ordinary_vector)) 46 | 47 | # Exterior product e_0 ^ e_1 = e_01. 
48 | ga.print(ga.ext_prod(ga.e0, ga.e1))
49 |
50 | # Grade reversal ~(5 + 5 e_01 + 5 e_02 + 5 e_12)
51 | # = 5 + 5 e_10 + 5 e_20 + 5 e_21
52 | # = 5 - 5 e_01 - 5 e_02 - 5 e_12
53 | ga.print(ga.reversion(quaternion))
54 |
55 | # tf.Tensor 5
56 | ga.print(quaternion[0])
57 |
58 | # tf.Tensor of shape [1]: -5 (ie. reversed sign of e_01 component)
59 | ga.print(ga.select_blades_with_name(quaternion, "10"))
60 |
61 | # tf.Tensor of shape [8] with only e_01 component equal to 5
62 | ga.print(ga.keep_blades_with_name(quaternion, "10"))
63 | ```
64 |
65 | Alternatively we can convert geometric algebra [`tf.Tensor`](https://www.tensorflow.org/api_docs/python/tf/Tensor) instances to [`MultiVector`](https://tfga.warlock.ai/tfga.html#tfga.mv.MultiVector)
66 | instances which wrap the operations and provide operator overloads for convenience.
67 | This can be done by using the `__call__` operator of the [`GeometricAlgebra`](https://tfga.warlock.ai/tfga.html#tfga.tfga.GeometricAlgebra) instance.
68 | ```python
69 | # Create geometric algebra tf.Tensor instances
70 | a = ga.e123
71 | b = ga.e1
72 |
73 | # Wrap them as `MultiVector` instances
74 | mv_a = ga(a)
75 | mv_b = ga(b)
76 |
77 | # Reversion ((~mv_a).tensor equivalent to ga.reversion(a))
78 | print(~mv_a)
79 |
80 | # Geometric / inner / exterior product
81 | print(mv_a * mv_b)
82 | print(mv_a | mv_b)
83 | print(mv_a ^ mv_b)
84 | ```
85 |
86 | ## Keras layers
87 | TFGA also provides [Keras](https://www.tensorflow.org/guide/keras/sequential_model) layers that mirror
88 | existing Keras layers but operate on multivectors instead. For example the [`GeometricProductDense`](https://tfga.warlock.ai/tfga.html#tfga.layers.GeometricProductDense)
89 | layer works like the [`Dense`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense) layer but uses
90 | multivector-valued weights and biases instead of scalar ones. The kind of multivector used for the
91 | weights and biases can be specified as well. Example:
92 |
93 | ```python
94 | import tensorflow as tf
95 | from tfga import GeometricAlgebra
96 | from tfga.layers import TensorToGeometric, GeometricToTensor, GeometricProductDense
97 | from tfga.blades import BladeKind
98 | # 4 basis vectors (e0^2=+1, e1^2=-1, e2^2=-1, e3^2=-1)
99 | sta = GeometricAlgebra([1, -1, -1, -1])
100 |
101 | # We want our dense layer to perform a matrix multiply
102 | # with a matrix that has vector-valued entries.
103 | vector_blade_indices = sta.get_kind_blade_indices(BladeKind.VECTOR)
104 |
105 | # Create our input of shape [Batch, Units, BladeValues]
106 | tensor = tf.ones([20, 6, 4])
107 |
108 | # The matrix-multiply will perform vector * vector
109 | # so our result will be scalar + bivector.
110 | # Use the resulting blade type for the bias too which is
111 | # added to the result.
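# (For this 16-blade algebra that is blade index 0 for the scalar and indices 5-10 for the bivectors.)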
112 | result_indices = tf.concat([
113 |     sta.get_kind_blade_indices(BladeKind.SCALAR), # 1 index
114 |     sta.get_kind_blade_indices(BladeKind.BIVECTOR) # 6 indices
115 | ], axis=0)
116 |
117 | sequence = tf.keras.Sequential([
118 |     # Converts the last axis to a dense multivector
119 |     # (so, 4 -> 16 (total number of blades in the algebra))
120 |     TensorToGeometric(sta, blade_indices=vector_blade_indices),
121 |     # Perform matrix multiply with vector-valued matrix
122 |     GeometricProductDense(
123 |         algebra=sta, units=8, # units is analogous to Keras' Dense layer
124 |         blade_indices_kernel=vector_blade_indices,
125 |         blade_indices_bias=result_indices
126 |     ),
127 |     # Extract our wanted blade indices (last axis 16 -> 7 (1+6))
128 |     GeometricToTensor(sta, blade_indices=result_indices)
129 | ])
130 |
131 | # Result will have shape [20, 8, 7]
132 | result = sequence(tensor)
133 | ```
134 |
135 | ### Available layers
136 | | Class | Description |
137 | |--|--|
138 | | [`GeometricProductDense`](https://tfga.warlock.ai/tfga.html#tfga.layers.GeometricProductDense) | Analogous to Keras' [`Dense`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense) with multivector-valued weights and biases. Each term in the matrix multiplication does the geometric product `x * w`. |
139 | | [`GeometricSandwichProductDense`](https://tfga.warlock.ai/tfga.html#tfga.layers.GeometricSandwichProductDense) | Analogous to Keras' [`Dense`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense) with multivector-valued weights and biases. Each term in the matrix multiplication does the geometric product `w * x * ~w`. |
140 | | [`GeometricProductElementwise`](https://tfga.warlock.ai/tfga.html#tfga.layers.GeometricProductElementwise) | Performs multivector-valued elementwise geometric product of the input units with a different weight for each unit. |
141 | | [`GeometricSandwichProductElementwise`](https://tfga.warlock.ai/tfga.html#tfga.layers.GeometricSandwichProductElementwise) | Performs multivector-valued elementwise geometric sandwich product of the input units with a different weight for each unit. |
142 | | [`GeometricProductConv1D`](https://tfga.warlock.ai/tfga.html#tfga.layers.GeometricProductConv1D) | Analogous to Keras' [`Conv1D`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv1D) with multivector-valued kernels and biases. Each term in the kernel multiplication does the geometric product `x * k`. |
143 | | [`TensorToGeometric`](https://tfga.warlock.ai/tfga.html#tfga.layers.TensorToGeometric) | Converts from a [`tf.Tensor`](https://www.tensorflow.org/api_docs/python/tf/Tensor) to the geometric algebra [`tf.Tensor`](https://www.tensorflow.org/api_docs/python/tf/Tensor) with as many blades on the last axis as basis blades in the algebra, where the blade indices determine which basis blades the input's values belong to. |
144 | | [`GeometricToTensor`](https://tfga.warlock.ai/tfga.html#tfga.layers.GeometricToTensor) | Converts from a geometric algebra [`tf.Tensor`](https://www.tensorflow.org/api_docs/python/tf/Tensor) with as many blades on the last axis as basis blades in the algebra to a [`tf.Tensor`](https://www.tensorflow.org/api_docs/python/tf/Tensor), where the blade indices determine which basis blades we extract for the output.
| 145 | | [`TensorWithKindToGeometric`](https://tfga.warlock.ai/tfga.html#tfga.layers.TensorWithKindToGeometric) | Same as [`TensorToGeometric`](https://tfga.warlock.ai/tfga.html#tfga.layers.TensorToGeometric) but using [`BladeKind`](https://tfga.warlock.ai/tfga.html#tfga.blades.BladeKind) (eg. `"bivector"`, `"even"`) instead of blade indices. | 146 | | [`GeometricToTensorWithKind`](https://tfga.warlock.ai/tfga.html#tfga.layers.GeometricToTensorWithKind) | Same as [`GeometricToTensor`](https://tfga.warlock.ai/tfga.html#tfga.layers.GeometricToTensor) but using [`BladeKind`](https://tfga.warlock.ai/tfga.html#tfga.blades.BladeKind) (eg. `"bivector"`, `"even"`) instead of blade indices. | 147 | | [`GeometricAlgebraExp`](https://tfga.warlock.ai/tfga.html#tfga.layers.GeometricAlgebraExp) | Calculates the exponential function of the input. Input must square to a scalar. | 148 | 149 | ## Notebooks 150 | [Generic examples](https://github.com/RobinKa/tfga/tree/master/notebooks/tfga.ipynb) 151 | 152 | [Using Keras layers to estimate triangle area](https://github.com/RobinKa/tfga/tree/master/notebooks/keras-triangles.ipynb) 153 | 154 | [Classical Electromagnetism using Geometric Algebra](https://github.com/RobinKa/tfga/tree/master/notebooks/em.ipynb) 155 | 156 | [Quantum Electrodynamics using Geometric Algebra](https://github.com/RobinKa/tfga/tree/master/notebooks/qed.ipynb) 157 | 158 | [Projective Geometric Algebra](https://github.com/RobinKa/tfga/tree/master/notebooks/pga.ipynb) 159 | 160 | [1D Multivector-valued Convolution Example](https://github.com/RobinKa/tfga/tree/master/notebooks/conv.ipynb) 161 | 162 | ## Tests 163 | Tests using Python's built-in [`unittest`](https://docs.python.org/3/library/unittest.html) module are available in the `tests` directory. All tests can be run by 164 | executing `python -m unittest discover tests` from the root directory of the repository. 165 | 166 | ## Citing 167 | See our [Zenodo](https://doi.org/10.5281/zenodo.3902404) page. For citing all versions the following BibTeX can be used 168 | 169 | ``` 170 | @software{python_tfga, 171 | author = {Kahlow, Robin}, 172 | title = {TensorFlow Geometric Algebra}, 173 | publisher = {Zenodo}, 174 | doi = {10.5281/zenodo.3902404}, 175 | url = {https://doi.org/10.5281/zenodo.3902404} 176 | } 177 | ``` 178 | 179 | ## Disclaimer 180 | TensorFlow, the TensorFlow logo and any related marks are trademarks of Google Inc. 
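181 |
182 | ## Saving and loading models with TFGA layers
183 | The layers above implement Keras' `get_config()`/`from_config()` and are registered as Keras-serializable,
184 | so models containing them can be saved and loaded like any other Keras model; the algebra is recreated
185 | from its stored metric on load. The snippet below is a minimal sketch: the `tfga_model.h5` filename is
186 | arbitrary and the layer setup simply mirrors the Keras example above (the test suite does the same
187 | round-trip through an in-memory HDF5 file).
188 |
189 | ```python
190 | import tensorflow as tf
191 | from tfga import GeometricAlgebra
192 | from tfga.layers import TensorToGeometric, GeometricProductDense, GeometricToTensor
193 |
194 | sta = GeometricAlgebra([1, -1, -1, -1])
195 | vector_blade_indices = [1, 2, 3, 4]
196 |
197 | model = tf.keras.Sequential([
198 |     TensorToGeometric(sta, blade_indices=vector_blade_indices),
199 |     GeometricProductDense(
200 |         algebra=sta, units=8,
201 |         blade_indices_kernel=vector_blade_indices,
202 |         blade_indices_bias=vector_blade_indices
203 |     ),
204 |     GeometricToTensor(sta, blade_indices=vector_blade_indices)
205 | ])
206 |
207 | # Build the weights by calling the model once on input of shape [Batch, Units, BladeValues].
208 | model(tf.ones([20, 6, 4]))
209 |
210 | # Save and load; no custom_objects argument is needed because the layers
211 | # are registered with Keras' serialization registry.
212 | model.save("tfga_model.h5")
213 | loaded_model = tf.keras.models.load_model("tfga_model.h5")
214 | ```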
-------------------------------------------------------------------------------- /tests/test_keras.py: -------------------------------------------------------------------------------- 1 | import unittest as ut 2 | from io import BytesIO 3 | 4 | import h5py 5 | import tensorflow as tf 6 | 7 | from tfga import GeometricAlgebra 8 | from tfga.blades import BladeKind 9 | from tfga.layers import ( 10 | GeometricAlgebraExp, 11 | GeometricProductConv1D, 12 | GeometricProductDense, 13 | GeometricProductElementwise, 14 | GeometricSandwichProductDense, 15 | GeometricSandwichProductElementwise, 16 | GeometricToTensor, 17 | GeometricToTensorWithKind, 18 | TensorToGeometric, 19 | TensorWithKindToGeometric, 20 | ) 21 | 22 | # Make tensorflow not take over the entire GPU memory 23 | for gpu in tf.config.experimental.list_physical_devices("GPU"): 24 | tf.config.experimental.set_memory_growth(gpu, True) 25 | 26 | 27 | class TestKerasLayers(ut.TestCase): 28 | def assertTensorsEqual(self, a, b): 29 | self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b)) 30 | 31 | def test_tensor_to_geometric(self): 32 | sta = GeometricAlgebra([1, -1, -1, -1]) 33 | tensor = tf.ones([32, 4]) 34 | gt_geom_tensor = tf.concat( 35 | [tf.zeros([32, 1]), tf.ones([32, 4]), tf.zeros([32, 11])], axis=-1 36 | ) 37 | 38 | vector_blade_indices = [1, 2, 3, 4] 39 | 40 | tensor_to_geom_layer = TensorToGeometric(sta, vector_blade_indices) 41 | 42 | self.assertTensorsEqual(tensor_to_geom_layer(tensor), gt_geom_tensor) 43 | 44 | def test_tensor_with_kind_to_geometric(self): 45 | sta = GeometricAlgebra([1, -1, -1, -1]) 46 | tensor = tf.ones([32, 4]) 47 | gt_geom_tensor = tf.concat( 48 | [tf.zeros([32, 1]), tf.ones([32, 4]), tf.zeros([32, 11])], axis=-1 49 | ) 50 | 51 | vector_blade_indices = [1, 2, 3, 4] 52 | 53 | tensor_kind_to_geom_layer = TensorWithKindToGeometric(sta, BladeKind.VECTOR) 54 | 55 | self.assertTensorsEqual(tensor_kind_to_geom_layer(tensor), gt_geom_tensor) 56 | 57 | def test_geometric_to_tensor(self): 58 | sta = GeometricAlgebra([1, -1, -1, -1]) 59 | gt_tensor = tf.ones([32, 4]) 60 | geom_tensor = tf.concat( 61 | [tf.zeros([32, 1]), tf.ones([32, 4]), tf.zeros([32, 11])], axis=-1 62 | ) 63 | 64 | vector_blade_indices = [1, 2, 3, 4] 65 | 66 | geom_to_tensor_layer = GeometricToTensor(sta, vector_blade_indices) 67 | 68 | self.assertTensorsEqual(geom_to_tensor_layer(geom_tensor), gt_tensor) 69 | 70 | def test_geometric_to_tensor_with_kind(self): 71 | sta = GeometricAlgebra([1, -1, -1, -1]) 72 | gt_tensor = tf.ones([32, 4]) 73 | geom_tensor = tf.concat( 74 | [tf.zeros([32, 1]), tf.ones([32, 4]), tf.zeros([32, 11])], axis=-1 75 | ) 76 | 77 | vector_blade_indices = [1, 2, 3, 4] 78 | 79 | geom_to_tensor_kind_layer = GeometricToTensorWithKind(sta, BladeKind.VECTOR) 80 | 81 | self.assertTensorsEqual(geom_to_tensor_kind_layer(geom_tensor), gt_tensor) 82 | 83 | def test_geometric_product_dense_v_v(self): 84 | sta = GeometricAlgebra([1, -1, -1, -1]) 85 | 86 | geom_tensor = tf.concat( 87 | [tf.zeros([32, 6, 1]), tf.ones([32, 6, 4]), tf.zeros([32, 6, 11])], axis=-1 88 | ) 89 | 90 | vector_blade_indices = [1, 2, 3, 4] 91 | 92 | geom_prod_layer = GeometricProductDense( 93 | sta, 94 | 8, 95 | blade_indices_kernel=vector_blade_indices, 96 | blade_indices_bias=vector_blade_indices, 97 | bias_initializer=tf.keras.initializers.RandomNormal(), 98 | ) 99 | 100 | result = geom_prod_layer(geom_tensor) 101 | 102 | # vector * vector + vector -> scalar + bivector + vector 103 | expected_result_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] 104 | 
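        # Blade indices for this algebra: 0 = scalar, 1-4 = vectors, 5-10 = bivectors (16 blades total).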
105 | self.assertTrue(sta.is_pure(result, expected_result_indices)) 106 | 107 | def test_geometric_product_dense_s_mv(self): 108 | sta = GeometricAlgebra([1, -1, -1, -1]) 109 | 110 | geom_tensor = tf.concat([tf.ones([20, 6, 1]), tf.zeros([20, 6, 15])], axis=-1) 111 | 112 | mv_blade_indices = list(range(16)) 113 | 114 | geom_prod_layer = GeometricProductDense( 115 | sta, 116 | 8, 117 | blade_indices_kernel=mv_blade_indices, 118 | blade_indices_bias=mv_blade_indices, 119 | ) 120 | 121 | result = geom_prod_layer(geom_tensor) 122 | 123 | # scalar * multivector + multivector -> multivector 124 | # Check that nothing is zero (it would be extremely unlikely 125 | # but not impossible to randomly get a zero here). 126 | self.assertTrue(tf.reduce_all(result != 0.0)) 127 | 128 | def test_geometric_product_dense_sequence(self): 129 | sta = GeometricAlgebra([1, -1, -1, -1]) 130 | 131 | tensor = tf.ones([20, 6, 4]) 132 | 133 | vector_blade_indices = [1, 2, 3, 4] 134 | mv_blade_indices = list(range(16)) 135 | 136 | # vector * vector + vector -> scalar + bivector + vector 137 | scalar_bivector_blade_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] 138 | 139 | sequence = tf.keras.Sequential( 140 | [ 141 | TensorToGeometric(sta, blade_indices=vector_blade_indices), 142 | GeometricProductDense( 143 | sta, 144 | 8, 145 | blade_indices_kernel=vector_blade_indices, 146 | blade_indices_bias=vector_blade_indices, 147 | bias_initializer=tf.keras.initializers.RandomNormal(), 148 | ), 149 | GeometricToTensor(sta, blade_indices=scalar_bivector_blade_indices), 150 | ] 151 | ) 152 | 153 | result = sequence(tensor) 154 | 155 | self.assertEqual(result.shape[-1], len(scalar_bivector_blade_indices)) 156 | 157 | def test_geometric_sandwich_product_dense_v_v(self): 158 | sta = GeometricAlgebra([1, -1, -1, -1]) 159 | 160 | geom_tensor = tf.concat( 161 | [tf.zeros([32, 6, 1]), tf.ones([32, 6, 4]), tf.zeros([32, 6, 11])], axis=-1 162 | ) 163 | 164 | vector_blade_indices = [1, 2, 3, 4] 165 | 166 | result_indices = tf.concat( 167 | [ 168 | sta.get_kind_blade_indices(BladeKind.VECTOR), 169 | sta.get_kind_blade_indices(BladeKind.TRIVECTOR), 170 | ], 171 | axis=0, 172 | ) 173 | 174 | geom_prod_layer = GeometricSandwichProductDense( 175 | sta, 176 | 8, 177 | blade_indices_kernel=vector_blade_indices, 178 | blade_indices_bias=result_indices, 179 | bias_initializer=tf.keras.initializers.RandomNormal(), 180 | ) 181 | 182 | result = geom_prod_layer(geom_tensor) 183 | 184 | # vector * vector * ~vector + vector -> vector + trivector 185 | 186 | self.assertTrue(sta.is_pure(result, result_indices)) 187 | 188 | 189 | class TestKerasLayersSerializable(ut.TestCase): 190 | def assertTensorsEqual(self, a, b): 191 | self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b)) 192 | 193 | def _test_layer_serializable(self, layer, inputs): 194 | # Create algebra 195 | algebra = layer.algebra 196 | 197 | # Create model 198 | model = tf.keras.Sequential([layer]) 199 | 200 | # Predict on inputs to compare later 201 | model_output = model(inputs) 202 | 203 | # Serialize model to virtual file 204 | model_file = h5py.File(BytesIO(), mode="w") 205 | model.save(model_file) 206 | 207 | # Load model from stream 208 | loaded_model = tf.keras.models.load_model(model_file) 209 | 210 | # Predict on same inputs as before 211 | loaded_output = loaded_model(inputs) 212 | 213 | # Check same output for original and loaded model 214 | self.assertTensorsEqual(model_output, loaded_output) 215 | 216 | # Check same recreated algebra 217 | 
self.assertTensorsEqual(algebra.metric, loaded_model.layers[0].algebra.metric) 218 | self.assertTensorsEqual(algebra.cayley, loaded_model.layers[0].algebra.cayley) 219 | 220 | def test_geom_dense_serializable(self): 221 | # Create algebra 222 | sta = GeometricAlgebra([1, -1, -1, -1]) 223 | vector_blade_indices = [1, 2, 3, 4] 224 | mv_blade_indices = list(range(16)) 225 | 226 | # Create model 227 | self._test_layer_serializable( 228 | GeometricProductDense( 229 | sta, 230 | units=8, 231 | blade_indices_kernel=mv_blade_indices, 232 | blade_indices_bias=vector_blade_indices, 233 | ), 234 | tf.random.normal([3, 6, sta.num_blades], seed=0), 235 | ) 236 | 237 | def test_sandwich_dense_serializable(self): 238 | # Create algebra 239 | sta = GeometricAlgebra([1, -1, -1, -1]) 240 | vector_blade_indices = [1, 2, 3, 4] 241 | mv_blade_indices = list(range(16)) 242 | 243 | # Create model 244 | self._test_layer_serializable( 245 | GeometricSandwichProductDense( 246 | sta, 247 | units=8, 248 | blade_indices_kernel=mv_blade_indices, 249 | blade_indices_bias=vector_blade_indices, 250 | ), 251 | tf.random.normal([3, 6, sta.num_blades], seed=0), 252 | ) 253 | 254 | def test_geom_elementwise_serializable(self): 255 | # Create algebra 256 | sta = GeometricAlgebra([1, -1, -1, -1]) 257 | vector_blade_indices = [1, 2, 3, 4] 258 | mv_blade_indices = list(range(16)) 259 | 260 | # Create model 261 | self._test_layer_serializable( 262 | GeometricProductElementwise( 263 | sta, 264 | blade_indices_kernel=mv_blade_indices, 265 | blade_indices_bias=vector_blade_indices, 266 | ), 267 | tf.random.normal([3, 6, sta.num_blades], seed=0), 268 | ) 269 | 270 | def test_sandwich_elementwise_serializable(self): 271 | # Create algebra 272 | sta = GeometricAlgebra([1, -1, -1, -1]) 273 | vector_blade_indices = [1, 2, 3, 4] 274 | mv_blade_indices = list(range(16)) 275 | 276 | # Create model 277 | self._test_layer_serializable( 278 | GeometricSandwichProductElementwise( 279 | sta, 280 | blade_indices_kernel=mv_blade_indices, 281 | blade_indices_bias=vector_blade_indices, 282 | ), 283 | tf.random.normal([3, 6, sta.num_blades], seed=0), 284 | ) 285 | 286 | def test_geom_prod_conv1d_serializable(self): 287 | # Create algebra 288 | sta = GeometricAlgebra([1, -1, -1, -1]) 289 | vector_blade_indices = [1, 2, 3, 4] 290 | mv_blade_indices = list(range(16)) 291 | 292 | # Create model 293 | self._test_layer_serializable( 294 | GeometricProductConv1D( 295 | sta, 296 | filters=8, 297 | kernel_size=3, 298 | padding="SAME", 299 | stride=2, 300 | blade_indices_kernel=mv_blade_indices, 301 | blade_indices_bias=vector_blade_indices, 302 | ), 303 | tf.random.normal([3, 8, 4, sta.num_blades], seed=0), 304 | ) 305 | 306 | def test_tensor_to_geom_serializable(self): 307 | # Create algebra 308 | sta = GeometricAlgebra([1, -1, -1, -1]) 309 | vector_blade_indices = [1, 2, 3, 4] 310 | 311 | # Create model 312 | self._test_layer_serializable( 313 | TensorToGeometric(sta, blade_indices=vector_blade_indices), 314 | tf.random.normal([1, 2, 3, len(vector_blade_indices)], seed=0), 315 | ) 316 | 317 | def test_geom_to_tensor_serializable(self): 318 | # Create algebra 319 | sta = GeometricAlgebra([1, -1, -1, -1]) 320 | vector_blade_indices = [1, 2, 3, 4] 321 | 322 | # Create model 323 | self._test_layer_serializable( 324 | GeometricToTensor(sta, blade_indices=vector_blade_indices), 325 | tf.random.normal([1, 2, 3, sta.num_blades], seed=0), 326 | ) 327 | 328 | def test_geom_exp_serializable(self): 329 | # Create algebra 330 | ga = GeometricAlgebra([1, 1, 1]) 331 | 332 
| inputs = ga.from_tensor_with_kind( 333 | tf.random.normal([3], seed=0), BladeKind.BIVECTOR 334 | ) 335 | 336 | # Create model 337 | self._test_layer_serializable(GeometricAlgebraExp(ga), inputs) 338 | -------------------------------------------------------------------------------- /notebooks/tfga.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "%load_ext autoreload\n", 10 | "%autoreload 2\n", 11 | "\n", 12 | "import tensorflow as tf\n", 13 | "\n", 14 | "# Make tensorflow not take over the entire GPU memory\n", 15 | "for gpu in tf.config.experimental.list_physical_devices('GPU'):\n", 16 | " tf.config.experimental.set_memory_growth(gpu, True)\n", 17 | "\n", 18 | "import numpy as np\n", 19 | "\n", 20 | "from tfga import GeometricAlgebra" 21 | ] 22 | }, 23 | { 24 | "cell_type": "code", 25 | "execution_count": 2, 26 | "metadata": {}, 27 | "outputs": [], 28 | "source": [ 29 | "sta = GeometricAlgebra([1, -1, -1, -1])" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": 3, 35 | "metadata": {}, 36 | "outputs": [ 37 | { 38 | "output_type": "stream", 39 | "name": "stdout", 40 | "text": [ 41 | "MultiVector[1.00*e_01]\nMultiVector[4.00*e_0]\nMultiVector[9.00*e_1]\nMultiVector[4.00*e_0] MultiVector[9.00*e_1]\nMultiVector[1.00*e_0] MultiVector[1.00*e_1] MultiVector[1.00*e_0 + 1.00*e_1] MultiVector[1.00*e_01] MultiVector[-1.00*e_01]\n" 42 | ] 43 | } 44 | ], 45 | "source": [ 46 | "sta.print(sta.geom_prod(sta.e0, sta.e1))\n", 47 | "a = sta.geom_prod(sta.e0, sta.from_scalar(4.0))\n", 48 | "b = sta.geom_prod(sta.from_scalar(9.0), sta.e1)\n", 49 | "sta.print(a)\n", 50 | "sta.print(b)\n", 51 | "sta.print(a, b)\n", 52 | "sta.print(\n", 53 | " sta.e0,\n", 54 | " sta.e1,\n", 55 | " sta.e(\"0\", \"1\"),\n", 56 | " sta.e01,\n", 57 | " sta.e10\n", 58 | ")" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "execution_count": 4, 64 | "metadata": {}, 65 | "outputs": [ 66 | { 67 | "output_type": "stream", 68 | "name": "stdout", 69 | "text": [ 70 | "a: MultiVector[4.00*e_0]\n~a: MultiVector[4.00*e_0]\ninv a: MultiVector[0.25*e_0]\nb: MultiVector[9.00*e_1]\n~b: MultiVector[9.00*e_1]\ninv b: MultiVector[-0.11*e_1]\n" 71 | ] 72 | } 73 | ], 74 | "source": [ 75 | "sta.print(\"a:\", a)\n", 76 | "sta.print(\"~a:\", sta.reversion(a))\n", 77 | "sta.print(\"inv a:\", sta.inverse(a))\n", 78 | "sta.print(\"b:\", b)\n", 79 | "sta.print(\"~b:\", sta.reversion(b))\n", 80 | "sta.print(\"inv b:\", sta.inverse(b))" 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": 5, 86 | "metadata": {}, 87 | "outputs": [ 88 | { 89 | "output_type": "stream", 90 | "name": "stdout", 91 | "text": [ 92 | "a: MultiVector[4.00*e_0]\n~a: MultiVector[4.00*e_0]\ninv a: MultiVector[0.25*e_0]\nb: MultiVector[9.00*e_1]\n~b: MultiVector[9.00*e_1]\ninv a: MultiVector[-0.11*e_1]\na + b: MultiVector[4.00*e_0 + 9.00*e_1]\na / b: MultiVector[-0.44*e_01]\n" 93 | ] 94 | } 95 | ], 96 | "source": [ 97 | "mv_a = sta(a)\n", 98 | "mv_b = sta(b)\n", 99 | "\n", 100 | "print(\"a:\", mv_a)\n", 101 | "print(\"~a:\", ~mv_a)\n", 102 | "print(\"inv a:\", mv_a.inverse())\n", 103 | "print(\"b:\", mv_b)\n", 104 | "print(\"~b:\", ~mv_b)\n", 105 | "print(\"inv a:\", mv_b.inverse())\n", 106 | "print(\"a + b:\", mv_a + mv_b)\n", 107 | "print(\"a / b:\", mv_a / mv_b)" 108 | ] 109 | }, 110 | { 111 | "cell_type": "code", 112 | "execution_count": 6, 113 | "metadata": {}, 114 | 
"outputs": [ 115 | { 116 | "output_type": "stream", 117 | "name": "stdout", 118 | "text": [ 119 | "c = a * b: MultiVector[36.00*e_01]\nc * c^-1: MultiVector[1.00*1]\na * a^-1 MultiVector[1.00*1]\nb * b^-1: MultiVector[1.00*1]\na^-1 * c: MultiVector[9.00*e_1] should be b: MultiVector[9.00*e_1] tf.Tensor(True, shape=(), dtype=bool)\nc * b^-1: MultiVector[4.00*e_0] should be a: MultiVector[4.00*e_0] tf.Tensor(True, shape=(), dtype=bool)\n" 120 | ] 121 | } 122 | ], 123 | "source": [ 124 | "c = sta.geom_prod(a, b)\n", 125 | "sta.print(\"c = a * b:\", c)\n", 126 | "sta.print(\"c * c^-1:\", sta.geom_prod(c, sta.inverse(c)))\n", 127 | "sta.print(\"a * a^-1\", sta.geom_prod(a, sta.inverse(a)))\n", 128 | "sta.print(\"b * b^-1:\", sta.geom_prod(b, sta.inverse(b)))\n", 129 | "a_inv_c = sta.geom_prod(sta.inverse(a), c)\n", 130 | "sta.print(\"a^-1 * c:\", a_inv_c, \"should be b:\", b, tf.reduce_all(a_inv_c == b))\n", 131 | "c_b_inv = sta.geom_prod(c, sta.inverse(b))\n", 132 | "sta.print(\"c * b^-1:\", c_b_inv, \"should be a:\", a, tf.reduce_all(c_b_inv == a))" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": 7, 138 | "metadata": {}, 139 | "outputs": [ 140 | { 141 | "output_type": "stream", 142 | "name": "stdout", 143 | "text": [ 144 | "c: MultiVector[36.00*e_01]\nc^-1: MultiVector[0.03*e_01]\nc^-1 shirokov: MultiVector[0.03*e_01]\n" 145 | ] 146 | } 147 | ], 148 | "source": [ 149 | "sta.print(\"c:\", c)\n", 150 | "sta.print(\"c^-1:\", sta.simple_inverse(c)) # Faster, only works if c ~c is a scalar \n", 151 | "sta.print(\"c^-1 shirokov:\", sta.inverse(c)) # Always works if an inverse exists" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": 8, 157 | "metadata": {}, 158 | "outputs": [ 159 | { 160 | "output_type": "stream", 161 | "name": "stdout", 162 | "text": [ 163 | "MultiVector[9.00*e_1 + 36.00*e_01]\n" 164 | ] 165 | } 166 | ], 167 | "source": [ 168 | "d = sta.geom_prod(a, b) + b\n", 169 | "sta.print(d)" 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": 9, 175 | "metadata": {}, 176 | "outputs": [ 177 | { 178 | "output_type": "stream", 179 | "name": "stdout", 180 | "text": [ 181 | "MultiVector[]\n", 182 | "MultiVector[36.00*e_01]\n" 183 | ] 184 | } 185 | ], 186 | "source": [ 187 | "sta.print(sta.inner_prod(a, b))\n", 188 | "sta.print(sta.ext_prod(a, b))" 189 | ] 190 | }, 191 | { 192 | "cell_type": "code", 193 | "execution_count": 10, 194 | "metadata": {}, 195 | "outputs": [ 196 | { 197 | "output_type": "stream", 198 | "name": "stdout", 199 | "text": [ 200 | "m: MultiVector[1.00*1 + 1.00*e_0 + 1.00*e_1 + 1.00*e_2 + 1.00*e_3 + 1.00*e_01 + 1.00*e_02 + 1.00*e_03 + 1.00*e_12 + 1.00*e_13 + 1.00*e_23 + 1.00*e_012 + 1.00*e_013 + 1.00*e_023 + 1.00*e_123 + 1.00*e_0123]\n", 201 | "~m: MultiVector[1.00*1 + 1.00*e_0 + 1.00*e_1 + 1.00*e_2 + 1.00*e_3 + -1.00*e_01 + -1.00*e_02 + -1.00*e_03 + -1.00*e_12 + -1.00*e_13 + -1.00*e_23 + -1.00*e_012 + -1.00*e_013 + -1.00*e_023 + -1.00*e_123 + 1.00*e_0123]\n", 202 | "bar m: MultiVector[1.00*1 + -1.00*e_0 + -1.00*e_1 + -1.00*e_2 + -1.00*e_3 + -1.00*e_01 + -1.00*e_02 + -1.00*e_03 + -1.00*e_12 + -1.00*e_13 + -1.00*e_23 + 1.00*e_012 + 1.00*e_013 + 1.00*e_023 + 1.00*e_123 + 1.00*e_0123]\n", 203 | "bar~m: MultiVector[1.00*1 + -1.00*e_0 + -1.00*e_1 + -1.00*e_2 + -1.00*e_3 + 1.00*e_01 + 1.00*e_02 + 1.00*e_03 + 1.00*e_12 + 1.00*e_13 + 1.00*e_23 + -1.00*e_012 + -1.00*e_013 + -1.00*e_023 + -1.00*e_123 + 1.00*e_0123]\n" 204 | ] 205 | } 206 | ], 207 | "source": [ 208 | "m = tf.ones(16)\n", 209 | 
"sta.print(\"m:\", m)\n", 210 | "sta.print(\"~m:\", sta.reversion(m))\n", 211 | "sta.print(\"bar m:\", sta.conjugation(m))\n", 212 | "sta.print(\"bar~m:\", sta.grade_automorphism(m))" 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": 11, 218 | "metadata": {}, 219 | "outputs": [ 220 | { 221 | "output_type": "stream", 222 | "name": "stdout", 223 | "text": [ 224 | "tf.Tensor(\n[[0. 1. 0. 0.]\n [0. 0. 1. 0.]], shape=(2, 4), dtype=float32)\n" 225 | ] 226 | } 227 | ], 228 | "source": [ 229 | "complex_ga = GeometricAlgebra([1, 1])\n", 230 | "print(complex_ga.basis_mvs)" 231 | ] 232 | }, 233 | { 234 | "cell_type": "code", 235 | "execution_count": 12, 236 | "metadata": {}, 237 | "outputs": [ 238 | { 239 | "output_type": "stream", 240 | "name": "stdout", 241 | "text": [ 242 | "x: MultiVector[5.00*1]\ne0: MultiVector[1.00*e_0]\ne1: MultiVector[1.00*e_1]\ni = e01: MultiVector[1.00*e_01]\ni^2: MultiVector[-1.00*1]\nr = e^(45° * e12): MultiVector[0.71*1 + 0.71*e_01]\nx * r (x rotated 45°): MultiVector[3.54*1 + 3.54*e_01]\nx * ~r (x rotated -45°): MultiVector[3.54*1 + -3.54*e_01]\n" 243 | ] 244 | } 245 | ], 246 | "source": [ 247 | "x = complex_ga.from_scalar(5.0)\n", 248 | "imag = complex_ga.e01\n", 249 | "r = complex_ga.approx_exp(complex_ga.geom_prod(complex_ga.from_scalar(np.deg2rad(45).astype(np.float32)), imag))\n", 250 | "complex_ga.print(\"x:\", x)\n", 251 | "complex_ga.print(\"e0:\", complex_ga.e0)\n", 252 | "complex_ga.print(\"e1:\", complex_ga.e1)\n", 253 | "complex_ga.print(\"i = e01:\", imag)\n", 254 | "complex_ga.print(\"i^2:\", complex_ga.geom_prod(imag, imag))\n", 255 | "complex_ga.print(\"r = e^(45° * e12):\", r)\n", 256 | "complex_ga.print(\"x * r (x rotated 45°):\", complex_ga.geom_prod(x, r))\n", 257 | "complex_ga.print(\"x * ~r (x rotated -45°):\", complex_ga.geom_prod(x, complex_ga.reversion(r)))" 258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": 13, 263 | "metadata": {}, 264 | "outputs": [ 265 | { 266 | "output_type": "stream", 267 | "name": "stdout", 268 | "text": [ 269 | "0 0° MultiVector[1.00*1]\n1 45° MultiVector[0.71*1 + 0.71*e_01]\n2 90° MultiVector[1.00*e_01]\n3 135° MultiVector[-0.71*1 + 0.71*e_01]\n4 180° MultiVector[-1.00*1]\n5 225° MultiVector[-0.71*1 + -0.71*e_01]\n6 270° MultiVector[-1.00*e_01]\n7 315° MultiVector[0.71*1 + -0.71*e_01]\n8 360° MultiVector[1.00*1]\n" 270 | ] 271 | } 272 | ], 273 | "source": [ 274 | "for i in range(9):\n", 275 | " complex_ga.print(i, \"%d°\" % (i * 45), complex_ga.int_pow(r, i))" 276 | ] 277 | }, 278 | { 279 | "cell_type": "code", 280 | "execution_count": 14, 281 | "metadata": {}, 282 | "outputs": [ 283 | { 284 | "output_type": "stream", 285 | "name": "stdout", 286 | "text": [ 287 | "MultiVector[0.71*1 + 0.71*e_01]\n" 288 | ] 289 | } 290 | ], 291 | "source": [ 292 | "complex_ga.print(complex_ga.int_pow(r, 25))" 293 | ] 294 | }, 295 | { 296 | "cell_type": "code", 297 | "execution_count": 15, 298 | "metadata": {}, 299 | "outputs": [ 300 | { 301 | "output_type": "stream", 302 | "name": "stdout", 303 | "text": [ 304 | "MultiVector[0.80*1]\nMultiVector[-0.22*1] expected -0.2231435513142097\nMultiVector[0.80*1] expected 0.8\nMultiVector[-283.90*1] expected 0.8\n" 305 | ] 306 | } 307 | ], 308 | "source": [ 309 | "y = complex_ga.from_scalar(0.8)\n", 310 | "complex_ga.print(y)\n", 311 | "complex_ga.print(complex_ga.approx_log(y), \"expected\", np.log(0.8))\n", 312 | "complex_ga.print(complex_ga.approx_exp(complex_ga.approx_log(y)), \"expected\", 0.8)\n", 313 | 
"complex_ga.print(complex_ga.approx_log(complex_ga.approx_exp(y)), \"expected\", 0.8) # doesn't work because approx_log only works for |x -1| < 1" 314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "execution_count": 16, 319 | "metadata": {}, 320 | "outputs": [ 321 | { 322 | "output_type": "stream", 323 | "name": "stdout", 324 | "text": [ 325 | "MultiVector[batch_shape=(3,)]\nMultiVector[4.00*e_0]\n" 326 | ] 327 | } 328 | ], 329 | "source": [ 330 | "u = tf.tile(tf.expand_dims(a, axis=0), [3, 1])\n", 331 | "sta.print(u)\n", 332 | "sta.print(u[0])" 333 | ] 334 | }, 335 | { 336 | "cell_type": "code", 337 | "execution_count": 17, 338 | "metadata": {}, 339 | "outputs": [ 340 | { 341 | "output_type": "stream", 342 | "name": "stdout", 343 | "text": [ 344 | "MultiVector[1.00*1 + 1.00*e_0 + 1.00*e_1 + 1.00*e_2 + 1.00*e_3 + 1.00*e_01 + 1.00*e_02 + 1.00*e_03 + 1.00*e_12 + 1.00*e_13 + 1.00*e_23 + 1.00*e_012 + 1.00*e_013 + 1.00*e_023 + 1.00*e_123 + 1.00*e_0123]\nMultiVector[1.00*e_1 + 1.00*e_01]\nMultiVector[1.00*e_2]\nR: tf.Tensor([ 1. 1. -1.], shape=(3,), dtype=float32)\nR: tf.Tensor([1. 1. 1. 1.], shape=(4,), dtype=float32)\nR: tf.Tensor(1.0, shape=(), dtype=float32)\ntf.Tensor(1.0, shape=(), dtype=float32)\n" 345 | ] 346 | } 347 | ], 348 | "source": [ 349 | "v = sta.from_tensor_with_kind(tf.ones(16, dtype=tf.float32), \"mv\")\n", 350 | "sta.print(v)\n", 351 | "sta.print(sta.keep_blades_with_name(v, [\"10\", \"1\"]))\n", 352 | "sta.print(sta.keep_blades_with_name(v, \"2\"))\n", 353 | "sta.print(\"R:\", sta.select_blades_with_name(v, [\"0\", \"01\", \"10\"]))\n", 354 | "sta.print(\"R:\", sta.select_blades_with_name(v, [\"123\", \"01\", \"0\", \"0\"]))\n", 355 | "sta.print(\"R:\", sta.select_blades_with_name(v, \"312\"))\n", 356 | "sta.print(v[..., 0])" 357 | ] 358 | } 359 | ], 360 | "metadata": { 361 | "language_info": { 362 | "codemirror_mode": { 363 | "name": "ipython", 364 | "version": 3 365 | }, 366 | "file_extension": ".py", 367 | "mimetype": "text/x-python", 368 | "name": "python", 369 | "nbconvert_exporter": "python", 370 | "pygments_lexer": "ipython3", 371 | "version": "3.9.5" 372 | }, 373 | "orig_nbformat": 2, 374 | "kernelspec": { 375 | "name": "python3", 376 | "display_name": "Python 3.9.5 64-bit" 377 | }, 378 | "interpreter": { 379 | "hash": "cced1986960fe3b129f0d210d1c871ce83f42220302c405c0426e934ac950470" 380 | } 381 | }, 382 | "nbformat": 4, 383 | "nbformat_minor": 2 384 | } -------------------------------------------------------------------------------- /tfga/layers.py: -------------------------------------------------------------------------------- 1 | """Provides Geometric Algebra Keras layers.""" 2 | from typing import List, Union 3 | 4 | import tensorflow as tf 5 | from tensorflow.keras import (activations, constraints, initializers, layers, 6 | regularizers) 7 | from tensorflow.keras.utils import register_keras_serializable 8 | 9 | from tfga.blades import BladeKind 10 | from tfga.tfga import GeometricAlgebra 11 | 12 | 13 | class GeometricAlgebraLayer(layers.Layer): 14 | def __init__(self, algebra: GeometricAlgebra, **kwargs): 15 | self.algebra = algebra 16 | super().__init__(**kwargs) 17 | 18 | @classmethod 19 | def from_config(cls, config): 20 | # Create algebra if necessary (should only occur once, assumes that 21 | # config is actually mutable). 
22 | if "algebra" not in config: 23 | assert "metric" in config 24 | config["algebra"] = GeometricAlgebra(config["metric"]) 25 | del config["metric"] 26 | return cls(**config) 27 | 28 | def get_config(self): 29 | # Store metric of the algebra. In from_config() we will recreate the 30 | # algebra from the metric. 31 | config = super().get_config() 32 | config.update({"metric": self.algebra.metric.numpy()}) 33 | return config 34 | 35 | 36 | @register_keras_serializable(package="TFGA") 37 | class TensorToGeometric(GeometricAlgebraLayer): 38 | """Layer for converting tensors with given blade indices to 39 | geometric algebra tensors. 40 | 41 | Args: 42 | algebra: GeometricAlgebra instance to use 43 | blade_indices: blade indices to interpret the last axis of the 44 | input tensor as 45 | """ 46 | 47 | def __init__(self, algebra: GeometricAlgebra, blade_indices: List[int], **kwargs): 48 | super().__init__(algebra=algebra, **kwargs) 49 | self.blade_indices = tf.convert_to_tensor(blade_indices, dtype=tf.int64) 50 | 51 | def compute_output_shape(self, input_shape): 52 | return tf.TensorShape([*input_shape[:-1], self.algebra.num_blades]) 53 | 54 | def call(self, inputs): 55 | return self.algebra.from_tensor(inputs, blade_indices=self.blade_indices) 56 | 57 | def get_config(self): 58 | config = super().get_config() 59 | config.update({"blade_indices": self.blade_indices.numpy()}) 60 | return config 61 | 62 | 63 | @register_keras_serializable(package="TFGA") 64 | class TensorWithKindToGeometric(GeometricAlgebraLayer): 65 | """Layer for converting tensors with given blade kind to 66 | geometric algebra tensors. 67 | 68 | Args: 69 | algebra: GeometricAlgebra instance to use 70 | kind: blade kind indices to interpret the last axis of the 71 | input tensor as 72 | """ 73 | 74 | def __init__(self, algebra: GeometricAlgebra, kind: BladeKind, **kwargs): 75 | super().__init__(algebra=algebra, **kwargs) 76 | self.kind = kind 77 | 78 | def compute_output_shape(self, input_shape): 79 | return tf.TensorShape( 80 | [*input_shape[:-1], self.algebra.get_kind_blade_indices(self.kind).shape[0]] 81 | ) 82 | 83 | def call(self, inputs): 84 | return self.algebra.from_tensor_with_kind(inputs, kind=self.kind) 85 | 86 | def get_config(self): 87 | config = super().get_config() 88 | config.update({"kind": self.kind}) 89 | return config 90 | 91 | 92 | @register_keras_serializable(package="TFGA") 93 | class GeometricToTensor(GeometricAlgebraLayer): 94 | """Layer for extracting given blades from geometric algebra tensors. 95 | 96 | Args: 97 | algebra: GeometricAlgebra instance to use 98 | blade_indices: blade indices to extract 99 | """ 100 | 101 | def __init__(self, algebra: GeometricAlgebra, blade_indices: List[int], **kwargs): 102 | super().__init__(algebra=algebra, **kwargs) 103 | self.blade_indices = tf.convert_to_tensor(blade_indices, dtype=tf.int64) 104 | 105 | def compute_output_shape(self, input_shape): 106 | return tf.TensorShape([*input_shape[:-1], self.blade_indices.shape[0]]) 107 | 108 | def call(self, inputs): 109 | return tf.gather(inputs, self.blade_indices, axis=-1) 110 | 111 | def get_config(self): 112 | config = super().get_config() 113 | config.update({"blade_indices": self.blade_indices.numpy()}) 114 | return config 115 | 116 | 117 | @register_keras_serializable(package="TFGA") 118 | class GeometricToTensorWithKind(GeometricToTensor): 119 | """Layer for extracting blades of a kind from geometric algebra tensors. 
120 | 121 | Args: 122 | algebra: GeometricAlgebra instance to use 123 | kind: blade indices of kind to extract 124 | """ 125 | 126 | def __init__(self, algebra: GeometricAlgebra, kind: BladeKind, **kwargs): 127 | blade_indices = algebra.get_kind_blade_indices(kind) 128 | super().__init__(algebra=algebra, blade_indices=blade_indices, **kwargs) 129 | 130 | 131 | @register_keras_serializable(package="TFGA") 132 | class GeometricProductDense(GeometricAlgebraLayer): 133 | """Analagous to Keras' Dense layer but using multivector-valued matrices 134 | instead of scalar ones and geometric multiplication instead of standard 135 | multiplication. 136 | 137 | Args: 138 | algebra: GeometricAlgebra instance to use for the parameters 139 | blade_indices_kernel: Blade indices to use for the kernel parameter 140 | blade_indices_bias: Blade indices to use for the bias parameter (if used) 141 | """ 142 | 143 | def __init__( 144 | self, 145 | algebra: GeometricAlgebra, 146 | units: int, 147 | blade_indices_kernel: List[int], 148 | blade_indices_bias: Union[None, List[int]] = None, 149 | activation=None, 150 | use_bias=True, 151 | kernel_initializer="glorot_uniform", 152 | bias_initializer="zeros", 153 | kernel_regularizer=None, 154 | bias_regularizer=None, 155 | activity_regularizer=None, 156 | kernel_constraint=None, 157 | bias_constraint=None, 158 | **kwargs 159 | ): 160 | super().__init__( 161 | algebra=algebra, activity_regularizer=activity_regularizer, **kwargs 162 | ) 163 | 164 | self.units = units 165 | self.blade_indices_kernel = tf.convert_to_tensor( 166 | blade_indices_kernel, dtype_hint=tf.int64 167 | ) 168 | if use_bias: 169 | self.blade_indices_bias = tf.convert_to_tensor( 170 | blade_indices_bias, dtype_hint=tf.int64 171 | ) 172 | 173 | self.activation = activations.get(activation) 174 | self.use_bias = use_bias 175 | self.kernel_initializer = initializers.get(kernel_initializer) 176 | self.bias_initializer = initializers.get(bias_initializer) 177 | self.kernel_regularizer = regularizers.get(kernel_regularizer) 178 | self.bias_regularizer = regularizers.get(bias_regularizer) 179 | self.kernel_constraint = constraints.get(kernel_constraint) 180 | self.bias_constraint = constraints.get(bias_constraint) 181 | 182 | def build(self, input_shape: tf.TensorShape): 183 | self.num_input_units = input_shape[-2] 184 | shape_kernel = [ 185 | self.units, 186 | self.num_input_units, 187 | self.blade_indices_kernel.shape[0], 188 | ] 189 | self.kernel = self.add_weight( 190 | "kernel", 191 | shape=shape_kernel, 192 | initializer=self.kernel_initializer, 193 | regularizer=self.kernel_regularizer, 194 | constraint=self.kernel_constraint, 195 | dtype=self.dtype, 196 | trainable=True, 197 | ) 198 | if self.use_bias: 199 | shape_bias = [self.units, self.blade_indices_bias.shape[0]] 200 | self.bias = self.add_weight( 201 | "bias", 202 | shape=shape_bias, 203 | initializer=self.bias_initializer, 204 | regularizer=self.bias_regularizer, 205 | constraint=self.bias_constraint, 206 | dtype=self.dtype, 207 | trainable=True, 208 | ) 209 | else: 210 | self.bias = None 211 | self.built = True 212 | 213 | def compute_output_shape(self, input_shape): 214 | return tf.TensorShape([*input_shape[:-2], self.units, self.algebra.num_blades]) 215 | 216 | def call(self, inputs): 217 | w_geom = self.algebra.from_tensor(self.kernel, self.blade_indices_kernel) 218 | 219 | # Perform a matrix-multiply, but using geometric product instead of 220 | # standard multiplication. 
To do this we do the geometric product 221 | # elementwise and then sum over the common axis. 222 | # [..., 1, I, X] * [..., O, I, X] -> [..., O, I, X] -> [..., O, X] 223 | inputs_expanded = tf.expand_dims(inputs, axis=inputs.shape.ndims - 2) 224 | result = tf.reduce_sum(self.algebra.geom_prod(inputs_expanded, w_geom), axis=-2) 225 | 226 | if self.bias is not None: 227 | b_geom = self.algebra.from_tensor(self.bias, self.blade_indices_bias) 228 | result += b_geom 229 | 230 | return self.activation(result) 231 | 232 | def get_config(self): 233 | config = super().get_config() 234 | config.update( 235 | { 236 | "blade_indices_kernel": self.blade_indices_kernel.numpy(), 237 | "blade_indices_bias": self.blade_indices_bias.numpy(), 238 | "units": self.units, 239 | "activation": activations.serialize(self.activation), 240 | "use_bias": self.use_bias, 241 | "kernel_initializer": initializers.serialize(self.kernel_initializer), 242 | "bias_initializer": initializers.serialize(self.bias_initializer), 243 | "kernel_regularizer": regularizers.serialize(self.kernel_regularizer), 244 | "bias_regularizer": regularizers.serialize(self.bias_regularizer), 245 | "activity_regularizer": regularizers.serialize( 246 | self.activity_regularizer 247 | ), 248 | "kernel_constraint": constraints.serialize(self.kernel_constraint), 249 | "bias_constraint": constraints.serialize(self.bias_constraint), 250 | } 251 | ) 252 | return config 253 | 254 | 255 | @register_keras_serializable(package="TFGA") 256 | class GeometricSandwichProductDense(GeometricProductDense): 257 | """Analagous to Keras' Dense layer but using multivector-valued matrices 258 | instead of scalar ones and geometric sandwich multiplication instead of 259 | standard multiplication. 260 | 261 | Args: 262 | algebra: GeometricAlgebra instance to use for the parameters 263 | blade_indices_kernel: Blade indices to use for the kernel parameter 264 | blade_indices_bias: Blade indices to use for the bias parameter (if used) 265 | """ 266 | 267 | def __init__( 268 | self, 269 | algebra, 270 | units, 271 | blade_indices_kernel, 272 | blade_indices_bias=None, 273 | activation=None, 274 | use_bias=True, 275 | kernel_initializer="glorot_uniform", 276 | bias_initializer="zeros", 277 | kernel_regularizer=None, 278 | bias_regularizer=None, 279 | activity_regularizer=None, 280 | kernel_constraint=None, 281 | bias_constraint=None, 282 | **kwargs 283 | ): 284 | super().__init__( 285 | algebra, 286 | units, 287 | blade_indices_kernel, 288 | blade_indices_bias=blade_indices_bias, 289 | activation=activation, 290 | use_bias=use_bias, 291 | kernel_initializer=kernel_initializer, 292 | bias_initializer=bias_initializer, 293 | kernel_regularizer=kernel_regularizer, 294 | bias_regularizer=bias_regularizer, 295 | activity_regularizer=activity_regularizer, 296 | kernel_constraint=kernel_constraint, 297 | bias_constraint=bias_constraint, 298 | **kwargs 299 | ) 300 | 301 | def call(self, inputs): 302 | w_geom = self.algebra.from_tensor(self.kernel, self.blade_indices_kernel) 303 | 304 | # Same as GeometricProductDense but using R*x*~R instead of just R*x 305 | inputs_expanded = tf.expand_dims(inputs, axis=inputs.shape.ndims - 2) 306 | result = tf.reduce_sum( 307 | self.algebra.geom_prod( 308 | w_geom, 309 | self.algebra.geom_prod(inputs_expanded, self.algebra.reversion(w_geom)), 310 | ), 311 | axis=-2, 312 | ) 313 | 314 | if self.bias is not None: 315 | b_geom = self.algebra.from_tensor(self.bias, self.blade_indices_bias) 316 | result += b_geom 317 | 318 | return 
self.activation(result) 319 | 320 | 321 | @register_keras_serializable(package="TFGA") 322 | class GeometricProductElementwise(GeometricAlgebraLayer): 323 | """Performs the elementwise geometric product with a list of multivectors 324 | with as many elements as there are input units. 325 | 326 | Args: 327 | algebra: GeometricAlgebra instance to use for the parameters 328 | blade_indices_kernel: Blade indices to use for the kernel parameter 329 | blade_indices_bias: Blade indices to use for the bias parameter (if used) 330 | """ 331 | 332 | def __init__( 333 | self, 334 | algebra: GeometricAlgebra, 335 | blade_indices_kernel: List[int], 336 | blade_indices_bias: Union[None, List[int]] = None, 337 | activation=None, 338 | use_bias=True, 339 | kernel_initializer="glorot_uniform", 340 | bias_initializer="zeros", 341 | kernel_regularizer=None, 342 | bias_regularizer=None, 343 | activity_regularizer=None, 344 | kernel_constraint=None, 345 | bias_constraint=None, 346 | **kwargs 347 | ): 348 | super().__init__( 349 | algebra=algebra, activity_regularizer=activity_regularizer, **kwargs 350 | ) 351 | 352 | self.blade_indices_kernel = tf.convert_to_tensor( 353 | blade_indices_kernel, dtype_hint=tf.int64 354 | ) 355 | if use_bias: 356 | self.blade_indices_bias = tf.convert_to_tensor( 357 | blade_indices_bias, dtype_hint=tf.int64 358 | ) 359 | 360 | self.activation = activations.get(activation) 361 | self.use_bias = use_bias 362 | self.kernel_initializer = initializers.get(kernel_initializer) 363 | self.bias_initializer = initializers.get(bias_initializer) 364 | self.kernel_regularizer = regularizers.get(kernel_regularizer) 365 | self.bias_regularizer = regularizers.get(bias_regularizer) 366 | self.kernel_constraint = constraints.get(kernel_constraint) 367 | self.bias_constraint = constraints.get(bias_constraint) 368 | 369 | def build(self, input_shape: tf.TensorShape): 370 | self.num_input_units = input_shape[-2] 371 | shape_kernel = [self.num_input_units, self.blade_indices_kernel.shape[0]] 372 | self.kernel = self.add_weight( 373 | "kernel", 374 | shape=shape_kernel, 375 | initializer=self.kernel_initializer, 376 | regularizer=self.kernel_regularizer, 377 | constraint=self.kernel_constraint, 378 | dtype=self.dtype, 379 | trainable=True, 380 | ) 381 | if self.use_bias: 382 | shape_bias = [self.num_input_units, self.blade_indices_bias.shape[0]] 383 | self.bias = self.add_weight( 384 | "bias", 385 | shape=shape_bias, 386 | initializer=self.bias_initializer, 387 | regularizer=self.bias_regularizer, 388 | constraint=self.bias_constraint, 389 | dtype=self.dtype, 390 | trainable=True, 391 | ) 392 | else: 393 | self.bias = None 394 | self.built = True 395 | 396 | def compute_output_shape(self, input_shape): 397 | return tf.TensorShape([*input_shape[:-1], self.algebra.num_blades]) 398 | 399 | def call(self, inputs): 400 | w_geom = self.algebra.from_tensor(self.kernel, self.blade_indices_kernel) 401 | 402 | # Elementwise multiplication for each unit with a multivector. 
403 | # [..., U, X] * [U, X] -> [..., U, X] 404 | result = self.algebra.geom_prod(inputs, w_geom) 405 | 406 | if self.bias is not None: 407 | b_geom = self.algebra.from_tensor(self.bias, self.blade_indices_bias) 408 | result += b_geom 409 | 410 | return self.activation(result) 411 | 412 | def get_config(self): 413 | config = super().get_config() 414 | config.update( 415 | { 416 | "blade_indices_kernel": self.blade_indices_kernel.numpy(), 417 | "blade_indices_bias": self.blade_indices_bias.numpy(), 418 | "activation": activations.serialize(self.activation), 419 | "use_bias": self.use_bias, 420 | "kernel_initializer": initializers.serialize(self.kernel_initializer), 421 | "bias_initializer": initializers.serialize(self.bias_initializer), 422 | "kernel_regularizer": regularizers.serialize(self.kernel_regularizer), 423 | "bias_regularizer": regularizers.serialize(self.bias_regularizer), 424 | "activity_regularizer": regularizers.serialize( 425 | self.activity_regularizer 426 | ), 427 | "kernel_constraint": constraints.serialize(self.kernel_constraint), 428 | "bias_constraint": constraints.serialize(self.bias_constraint), 429 | } 430 | ) 431 | return config 432 | 433 | 434 | @register_keras_serializable(package="TFGA") 435 | class GeometricSandwichProductElementwise(GeometricProductElementwise): 436 | """Performs the elementwise geometric sandwich product with a list of 437 | multivectors with as many elements as there are input units. 438 | 439 | Args: 440 | algebra: GeometricAlgebra instance to use for the parameters 441 | blade_indices_kernel: Blade indices to use for the kernel parameter 442 | blade_indices_bias: Blade indices to use for the bias parameter (if used) 443 | """ 444 | 445 | def __init__( 446 | self, 447 | algebra, 448 | blade_indices_kernel, 449 | blade_indices_bias=None, 450 | activation=None, 451 | use_bias=True, 452 | kernel_initializer="glorot_uniform", 453 | bias_initializer="zeros", 454 | kernel_regularizer=None, 455 | bias_regularizer=None, 456 | activity_regularizer=None, 457 | kernel_constraint=None, 458 | bias_constraint=None, 459 | **kwargs 460 | ): 461 | super().__init__( 462 | algebra, 463 | blade_indices_kernel, 464 | blade_indices_bias=blade_indices_bias, 465 | activation=activation, 466 | use_bias=use_bias, 467 | kernel_initializer=kernel_initializer, 468 | bias_initializer=bias_initializer, 469 | kernel_regularizer=kernel_regularizer, 470 | bias_regularizer=bias_regularizer, 471 | activity_regularizer=activity_regularizer, 472 | kernel_constraint=kernel_constraint, 473 | bias_constraint=bias_constraint, 474 | **kwargs 475 | ) 476 | 477 | def call(self, inputs): 478 | w_geom = self.algebra.from_tensor(self.kernel, self.blade_indices_kernel) 479 | 480 | # Elementwise multiplication Rx~R for each unit with a multivector. 481 | # [..., U, X] * [U, X] -> [..., U, X] 482 | result = self.algebra.geom_prod( 483 | w_geom, self.algebra.geom_prod(inputs, self.algebra.reversion(w_geom)) 484 | ) 485 | 486 | if self.bias is not None: 487 | b_geom = self.algebra.from_tensor(self.bias, self.blade_indices_bias) 488 | result += b_geom 489 | 490 | return self.activation(result) 491 | 492 | 493 | @register_keras_serializable(package="TFGA") 494 | class GeometricProductConv1D(GeometricAlgebraLayer): 495 | """Analagous to Keras' Conv1D layer but using multivector-valued kernels 496 | instead of scalar ones and geometric product instead of 497 | standard multiplication. 
498 | 499 | Args: 500 | algebra: GeometricAlgebra instance to use for the parameters 501 | filters: How many channels the output will have 502 | kernel_size: Size for the convolution kernel 503 | stride: Stride to use for the convolution 504 | padding: "SAME" (zero-pad input length so output 505 | length == input length / stride) or "VALID" (no padding) 506 | blade_indices_kernel: Blade indices to use for the kernel parameter 507 | blade_indices_bias: Blade indices to use for the bias parameter (if used) 508 | """ 509 | 510 | def __init__( 511 | self, 512 | algebra: GeometricAlgebra, 513 | filters: int, 514 | kernel_size: int, 515 | stride: int, 516 | padding: str, 517 | blade_indices_kernel: List[int], 518 | blade_indices_bias: Union[None, List[int]] = None, 519 | dilations: Union[None, int] = None, 520 | activation=None, 521 | use_bias=True, 522 | kernel_initializer="glorot_uniform", 523 | bias_initializer="zeros", 524 | kernel_regularizer=None, 525 | bias_regularizer=None, 526 | activity_regularizer=None, 527 | kernel_constraint=None, 528 | bias_constraint=None, 529 | **kwargs 530 | ): 531 | super().__init__( 532 | algebra=algebra, activity_regularizer=activity_regularizer, **kwargs 533 | ) 534 | 535 | self.filters = filters 536 | self.kernel_size = kernel_size 537 | self.stride = stride 538 | self.padding = padding 539 | self.dilations = dilations 540 | 541 | self.blade_indices_kernel = tf.convert_to_tensor( 542 | blade_indices_kernel, dtype_hint=tf.int64 543 | ) 544 | if use_bias: 545 | self.blade_indices_bias = tf.convert_to_tensor( 546 | blade_indices_bias, dtype_hint=tf.int64 547 | ) 548 | 549 | self.activation = activations.get(activation) 550 | self.use_bias = use_bias 551 | self.kernel_initializer = initializers.get(kernel_initializer) 552 | self.bias_initializer = initializers.get(bias_initializer) 553 | self.kernel_regularizer = regularizers.get(kernel_regularizer) 554 | self.bias_regularizer = regularizers.get(bias_regularizer) 555 | self.kernel_constraint = constraints.get(kernel_constraint) 556 | self.bias_constraint = constraints.get(bias_constraint) 557 | 558 | def build(self, input_shape: tf.TensorShape): 559 | # I: [..., S, C, B] 560 | self.num_input_filters = input_shape[-2] 561 | 562 | # K: [K, IC, OC, B] 563 | shape_kernel = [ 564 | self.kernel_size, 565 | self.num_input_filters, 566 | self.filters, 567 | self.blade_indices_kernel.shape[0], 568 | ] 569 | self.kernel = self.add_weight( 570 | "kernel", 571 | shape=shape_kernel, 572 | initializer=self.kernel_initializer, 573 | regularizer=self.kernel_regularizer, 574 | constraint=self.kernel_constraint, 575 | dtype=self.dtype, 576 | trainable=True, 577 | ) 578 | if self.use_bias: 579 | shape_bias = [self.filters, self.blade_indices_bias.shape[0]] 580 | self.bias = self.add_weight( 581 | "bias", 582 | shape=shape_bias, 583 | initializer=self.bias_initializer, 584 | regularizer=self.bias_regularizer, 585 | constraint=self.bias_constraint, 586 | dtype=self.dtype, 587 | trainable=True, 588 | ) 589 | else: 590 | self.bias = None 591 | self.built = True 592 | 593 | def call(self, inputs): 594 | k_geom = self.algebra.from_tensor(self.kernel, self.blade_indices_kernel) 595 | 596 | result = self.algebra.geom_conv1d( 597 | inputs, 598 | k_geom, 599 | stride=self.stride, 600 | padding=self.padding, 601 | dilations=self.dilations, 602 | ) 603 | 604 | if self.bias is not None: 605 | b_geom = self.algebra.from_tensor(self.bias, self.blade_indices_bias) 606 | result += b_geom 607 | 608 | return self.activation(result) 609 | 610 | def 
get_config(self): 611 | config = super().get_config() 612 | config.update( 613 | { 614 | "filters": self.filters, 615 | "kernel_size": self.kernel_size, 616 | "stride": self.stride, 617 | "padding": self.padding, 618 | "dilations": self.dilations, 619 | "blade_indices_kernel": self.blade_indices_kernel.numpy(), 620 | "blade_indices_bias": self.blade_indices_bias.numpy(), 621 | "activation": activations.serialize(self.activation), 622 | "use_bias": self.use_bias, 623 | "kernel_initializer": initializers.serialize(self.kernel_initializer), 624 | "bias_initializer": initializers.serialize(self.bias_initializer), 625 | "kernel_regularizer": regularizers.serialize(self.kernel_regularizer), 626 | "bias_regularizer": regularizers.serialize(self.bias_regularizer), 627 | "activity_regularizer": regularizers.serialize( 628 | self.activity_regularizer 629 | ), 630 | "kernel_constraint": constraints.serialize(self.kernel_constraint), 631 | "bias_constraint": constraints.serialize(self.bias_constraint), 632 | } 633 | ) 634 | 635 | return config 636 | 637 | 638 | @register_keras_serializable(package="TFGA") 639 | class GeometricAlgebraExp(GeometricAlgebraLayer): 640 | """ 641 | Calculates the exponential function of the input. Input must square to 642 | a scalar. 643 | 644 | Args: 645 | algebra: GeometricAlgebra instance to use 646 | square_scalar_tolerance: Tolerance to use for the square scalar check 647 | or None if the check should be skipped 648 | """ 649 | 650 | def __init__( 651 | self, 652 | algebra: GeometricAlgebra, 653 | square_scalar_tolerance: Union[float, None] = 1e-4, 654 | **kwargs 655 | ): 656 | super().__init__(algebra=algebra, **kwargs) 657 | self.square_scalar_tolerance = square_scalar_tolerance 658 | 659 | def compute_output_shape(self, input_shape): 660 | return tf.TensorShape([*input_shape[:-1], self.algebra.num_blades]) 661 | 662 | def call(self, inputs): 663 | return self.algebra.exp( 664 | inputs, square_scalar_tolerance=self.square_scalar_tolerance 665 | ) 666 | 667 | def get_config(self): 668 | config = super().get_config() 669 | config.update({"square_scalar_tolerance": self.square_scalar_tolerance}) 670 | return config 671 | -------------------------------------------------------------------------------- /tfga/tfga.py: -------------------------------------------------------------------------------- 1 | """Provides classes and operations for performing geometric algebra 2 | with TensorFlow. 3 | 4 | The `GeometricAlgebra` class is used to construct the algebra given a metric. 5 | It exposes methods for operating on `tf.Tensor` instances where their last 6 | axis is interpreted as blades of the algebra. 7 | """ 8 | import numbers 9 | from typing import List, Union 10 | 11 | import tensorflow as tf 12 | 13 | from tfga.blades import ( 14 | BladeKind, 15 | get_blade_indices_from_names, 16 | get_blade_of_kind_indices, 17 | get_blade_repr, 18 | invert_blade_indices, 19 | ) 20 | from tfga.cayley import blades_from_bases, get_cayley_tensor 21 | from tfga.mv import MultiVector 22 | from tfga.mv_ops import mv_conv1d, mv_grade_automorphism, mv_multiply, mv_reversion 23 | 24 | 25 | class GeometricAlgebra: 26 | """Class used for performing geometric algebra operations on `tf.Tensor` instances. 27 | Exposes methods for operating on `tf.Tensor` instances where their last 28 | axis is interpreted as blades of the algebra. 29 | Holds the metric and other quantities derived from it. 
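    Example (an illustrative sketch using the methods defined below; the
    printed formatting comes from `mv_repr()` and may differ slightly):

        ga = GeometricAlgebra([1, -1, -1, -1])
        v = ga.from_tensor_with_kind(tf.ones(4), BladeKind.VECTOR)
        ga.print(ga.geom_prod(v, v))  # a pure vector squares to a scalar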
30 | """ 31 | 32 | def __init__(self, metric: List[float]): 33 | """Creates a GeometricAlgebra object given a metric. 34 | The algebra will have as many basis vectors as there are 35 | elements in the metric. 36 | 37 | Args: 38 | metric: Metric as a list. Specifies what basis vectors square to 39 | """ 40 | self._metric = tf.convert_to_tensor(metric, dtype=tf.float32) 41 | 42 | self._num_bases = len(metric) 43 | self._bases = list(map(str, range(self._num_bases))) 44 | 45 | self._blades, self._blade_degrees = blades_from_bases(self._bases) 46 | self._blade_degrees = tf.convert_to_tensor(self._blade_degrees) 47 | self._num_blades = len(self._blades) 48 | self._max_degree = tf.reduce_max(self._blade_degrees) 49 | 50 | # [Blades, Blades, Blades] 51 | self._cayley, self._cayley_inner, self._cayley_outer = tf.convert_to_tensor( 52 | get_cayley_tensor(self.metric, self._bases, self._blades), dtype=tf.float32 53 | ) 54 | 55 | self._blade_mvs = tf.eye(self._num_blades) 56 | self._basis_mvs = self._blade_mvs[1 : 1 + self._num_bases] 57 | 58 | # Find the dual by looking at the anti-diagonal in the Cayley tensor. 59 | self._dual_blade_indices = [] 60 | self._dual_blade_signs = [] 61 | 62 | for blade_index in range(self._num_blades): 63 | dual_index = self.num_blades - blade_index - 1 64 | anti_diag = self._cayley[blade_index, dual_index] 65 | dual_sign = tf.gather(anti_diag, tf.where(anti_diag != 0.0)[..., 0])[..., 0] 66 | self._dual_blade_indices.append(dual_index) 67 | self._dual_blade_signs.append(dual_sign) 68 | 69 | self._dual_blade_indices = tf.convert_to_tensor( 70 | self._dual_blade_indices, dtype=tf.int64 71 | ) 72 | self._dual_blade_signs = tf.convert_to_tensor( 73 | self._dual_blade_signs, dtype=tf.float32 74 | ) 75 | 76 | def print(self, *args, **kwargs): 77 | """Same as the default `print` function but formats `tf.Tensor` 78 | instances that have as many elements on their last axis 79 | as the algebra has blades using `mv_repr()`. 80 | """ 81 | 82 | def _is_mv(arg): 83 | return ( 84 | isinstance(arg, tf.Tensor) 85 | and arg.shape.ndims > 0 86 | and arg.shape[-1] == self.num_blades 87 | ) 88 | 89 | new_args = [self.mv_repr(arg) if _is_mv(arg) else arg for arg in args] 90 | 91 | print(*new_args, **kwargs) 92 | 93 | @property 94 | def metric(self) -> tf.Tensor: 95 | """Metric list which contains the number that each 96 | basis vector in the algebra squares to 97 | (ie. the diagonal of the metric tensor). 98 | """ 99 | return self._metric 100 | 101 | @property 102 | def cayley(self) -> tf.Tensor: 103 | """`MxMxM` tensor where `M` is the number of basis 104 | blades in the algebra. Used for calculating the 105 | geometric product: 106 | 107 | `a_i, b_j, cayley_ijk -> c_k` 108 | """ 109 | return self._cayley 110 | 111 | @property 112 | def cayley_inner(self) -> tf.Tensor: 113 | """Analagous to cayley but for inner product.""" 114 | return self._cayley_inner 115 | 116 | @property 117 | def cayley_outer(self) -> tf.Tensor: 118 | """Analagous to cayley but for outer product.""" 119 | return self._cayley_outer 120 | 121 | @property 122 | def blades(self) -> List[str]: 123 | """List of all blade names. 124 | 125 | Blades are all possible independent combinations of 126 | basis vectors. Basis vectors are named starting 127 | from `"0"` and counting up. The scalar blade is the 128 | empty string `""`. 
129 | 130 | Example 131 | - Bases: `["0", "1", "2"]` 132 | - Blades: `["", "0", "1", "2", "01", "02", "12", "012"]` 133 | """ 134 | return self._blades 135 | 136 | @property 137 | def blade_mvs(self) -> tf.Tensor: 138 | """List of all blade tensors in the algebra.""" 139 | return self._blade_mvs 140 | 141 | @property 142 | def dual_blade_indices(self) -> tf.Tensor: 143 | """Indices of the dual blades for each blade.""" 144 | return self._dual_blade_indices 145 | 146 | @property 147 | def dual_blade_signs(self) -> tf.Tensor: 148 | """Signs of the dual blades for each blade.""" 149 | return self._dual_blade_signs 150 | 151 | @property 152 | def num_blades(self) -> int: 153 | """Total number of blades in the algebra.""" 154 | return self._num_blades 155 | 156 | @property 157 | def blade_degrees(self) -> tf.Tensor: 158 | """List of blade-degree for each blade in the algebra.""" 159 | return self._blade_degrees 160 | 161 | @property 162 | def max_degree(self) -> int: 163 | """Highest blade degree in the algebra.""" 164 | return self._max_degree 165 | 166 | @property 167 | def basis_mvs(self) -> tf.Tensor: 168 | """List of basis vectors as tf.Tensor.""" 169 | return self._basis_mvs 170 | 171 | def get_kind_blade_indices( 172 | self, kind: BladeKind, invert: bool = False 173 | ) -> tf.Tensor: 174 | """Find all indices of blades of a given kind in the algebra. 175 | 176 | Args: 177 | kind: kind of blade to give indices for 178 | invert: whether to return all blades not of the kind 179 | 180 | Returns: 181 | indices of blades of a given kind in the algebra 182 | """ 183 | return get_blade_of_kind_indices( 184 | self.blade_degrees, kind, self.max_degree, invert=invert 185 | ) 186 | 187 | def get_blade_indices_of_degree(self, degree: int) -> tf.Tensor: 188 | """Find all indices of blades of the given degree. 189 | 190 | Args: 191 | degree: degree to return blades for 192 | 193 | Returns: 194 | indices of blades with the given degree in the algebra 195 | """ 196 | return tf.gather( 197 | tf.range(self.num_blades), tf.where(self.blade_degrees == degree)[..., 0] 198 | ) 199 | 200 | def is_pure(self, tensor: tf.Tensor, blade_indices: tf.Tensor) -> bool: 201 | """Returns whether the given tensor is purely of the given blades 202 | and has no non-zero values for blades not in the given blades. 203 | 204 | Args: 205 | tensor: tensor to check purity for 206 | blade_indices: blade indices to check purity for 207 | 208 | Returns: 209 | Whether the tensor is purely of the given blades 210 | and has no non-zero values for blades not in the given blades 211 | """ 212 | tensor = tf.convert_to_tensor(tensor, dtype_hint=tf.float32) 213 | blade_indices = tf.convert_to_tensor(blade_indices, dtype_hint=tf.int64) 214 | 215 | inverted_blade_indices = invert_blade_indices(self.num_blades, blade_indices) 216 | 217 | return tf.reduce_all(tf.gather(tensor, inverted_blade_indices, axis=-1) == 0) 218 | 219 | def is_pure_kind(self, tensor: tf.Tensor, kind: BladeKind) -> bool: 220 | """Returns whether the given tensor is purely of a given kind 221 | and has no non-zero values for blades not of the kind. 
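        Example (illustrative, using the `BladeKind` enum imported at the top
        of this module):

            ga = GeometricAlgebra([1, 1, 1])
            ga.is_pure_kind(ga.e("12"), BladeKind.BIVECTOR)  # -> True
            ga.is_pure_kind(ga.e("1"), BladeKind.BIVECTOR)   # -> False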
222 | 223 | Args: 224 | tensor: tensor to check purity for 225 | kind: kind of blade to check purity for 226 | 227 | Returns: 228 | Whether the tensor is purely of a given kind 229 | and has no non-zero values for blades not of the kind 230 | """ 231 | tensor = tf.convert_to_tensor(tensor, dtype_hint=tf.float32) 232 | inverted_kind_indices = self.get_kind_blade_indices(kind, invert=True) 233 | 234 | return tf.reduce_all(tf.gather(tensor, inverted_kind_indices, axis=-1) == 0) 235 | 236 | def from_tensor(self, tensor: tf.Tensor, blade_indices: tf.Tensor) -> tf.Tensor: 237 | """Creates a geometric algebra tf.Tensor from a tf.Tensor and blade 238 | indices. The blade indices have to align with the last axis of the 239 | tensor. 240 | 241 | Args: 242 | tensor: tf.Tensor to take as values for the geometric algebra tensor 243 | blade_indices: Blade indices corresponding to the tensor. Can 244 | be obtained from blade names eg. using get_kind_blade_indices() 245 | or as indices from the blades list property. 246 | 247 | Returns: 248 | Geometric algebra tf.Tensor from tensor and blade indices 249 | """ 250 | blade_indices = tf.cast( 251 | tf.convert_to_tensor(blade_indices, dtype_hint=tf.int64), dtype=tf.int64 252 | ) 253 | tensor = tf.convert_to_tensor(tensor, dtype_hint=tf.float32) 254 | 255 | # Put last axis on first axis so scatter_nd becomes easier. 256 | # Later undo the transposition again. 257 | t = tf.concat( 258 | [[tensor.shape.ndims - 1], tf.range(0, tensor.shape.ndims - 1)], axis=0 259 | ) 260 | t_inv = tf.concat([tf.range(1, tensor.shape.ndims), [0]], axis=0) 261 | 262 | tensor = tf.transpose(tensor, t) 263 | 264 | shape = tf.concat( 265 | [ 266 | tf.convert_to_tensor([self.num_blades], dtype=tf.int64), 267 | tf.shape(tensor, tf.int64)[1:], 268 | ], 269 | axis=0, 270 | ) 271 | 272 | tensor = tf.scatter_nd(tf.expand_dims(blade_indices, axis=-1), tensor, shape) 273 | 274 | return tf.transpose(tensor, t_inv) 275 | 276 | def from_tensor_with_kind(self, tensor: tf.Tensor, kind: BladeKind) -> tf.Tensor: 277 | """Creates a geometric algebra tf.Tensor from a tf.Tensor and a kind. 278 | The kind's blade indices have to align with the last axis of the 279 | tensor. 280 | 281 | Args: 282 | tensor: tf.Tensor to take as values for the geometric algebra tensor 283 | kind: Kind corresponding to the tensor 284 | 285 | Returns: 286 | Geometric algebra tf.Tensor from tensor and kind 287 | """ 288 | # Put last axis on first axis so scatter_nd becomes easier. 289 | # Later undo the transposition again. 290 | tensor = tf.convert_to_tensor(tensor, dtype_hint=tf.float32) 291 | kind_indices = self.get_kind_blade_indices(kind) 292 | return self.from_tensor(tensor, kind_indices) 293 | 294 | def from_scalar(self, scalar: numbers.Number) -> tf.Tensor: 295 | """Creates a geometric algebra tf.Tensor with scalar elements. 296 | 297 | Args: 298 | scalar: Elements to be used as scalars 299 | 300 | Returns: 301 | Geometric algebra tf.Tensor from scalars 302 | """ 303 | return self.from_tensor_with_kind( 304 | tf.expand_dims(scalar, axis=-1), BladeKind.SCALAR 305 | ) 306 | 307 | def e(self, *blades: List[str]) -> tf.Tensor: 308 | """Returns a geometric algebra tf.Tensor with the given blades set 309 | to 1. 
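        Example (illustrative; the printed form comes from `mv_repr()` and may
        differ slightly):

            ga = GeometricAlgebra([1, 1])
            ga.print(ga.e("01"))       # MultiVector[1.00*e_01]
            ga.print(ga.e("0", "01"))  # MultiVector[1.00*e_0 + 1.00*e_01]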
310 | 311 | Args: 312 | blades: list of blade names, can be unnormalized 313 | 314 | Returns: 315 | tf.Tensor with blades set to 1 316 | """ 317 | blade_signs, blade_indices = get_blade_indices_from_names(blades, self.blades) 318 | 319 | blade_indices = tf.convert_to_tensor(blade_indices) 320 | 321 | # Don't allow duplicate indices 322 | tf.Assert( 323 | blade_indices.shape[0] == tf.unique(blade_indices)[0].shape[0], [blades] 324 | ) 325 | 326 | x = tf.expand_dims(blade_signs, axis=-1) * tf.gather( 327 | self.blade_mvs, blade_indices 328 | ) 329 | 330 | # a, b -> b 331 | return tf.reduce_sum(x, axis=-2) 332 | 333 | def __getattr__(self, name: str) -> tf.Tensor: 334 | """Returns basis blade tensors if name was a basis.""" 335 | if name.startswith("e") and (name[1:] == "" or int(name[1:]) >= 0): 336 | return self.e(name[1:]) 337 | raise AttributeError 338 | 339 | def dual(self, tensor: tf.Tensor) -> tf.Tensor: 340 | """Returns the dual of the geometric algebra tensor. 341 | 342 | Args: 343 | tensor: Geometric algebra tensor to return dual for 344 | 345 | Returns: 346 | Dual of the geometric algebra tensor 347 | """ 348 | tensor = tf.convert_to_tensor(tensor, dtype_hint=tf.float32) 349 | return self.dual_blade_signs * tf.gather( 350 | tensor, self.dual_blade_indices, axis=-1 351 | ) 352 | 353 | def grade_automorphism(self, tensor: tf.Tensor) -> tf.Tensor: 354 | """Returns the geometric algebra tensor with odd grades negated. 355 | See https://en.wikipedia.org/wiki/Paravector#Grade_automorphism. 356 | 357 | Args: 358 | tensor: Geometric algebra tensor to return grade automorphism for 359 | 360 | Returns: 361 | Geometric algebra tensor with odd grades negated 362 | """ 363 | tensor = tf.convert_to_tensor(tensor, dtype_hint=tf.float32) 364 | return mv_grade_automorphism(tensor, self.blade_degrees) 365 | 366 | def reversion(self, tensor: tf.Tensor) -> tf.Tensor: 367 | """Returns the grade-reversed geometric algebra tensor. 368 | See https://en.wikipedia.org/wiki/Paravector#Reversion_conjugation. 369 | 370 | Args: 371 | tensor: Geometric algebra tensor to return grade-reversion for 372 | 373 | Returns: 374 | Grade-reversed geometric algebra tensor 375 | """ 376 | tensor = tf.convert_to_tensor(tensor, dtype_hint=tf.float32) 377 | return mv_reversion(tensor, self.blade_degrees) 378 | 379 | def conjugation(self, tensor: tf.Tensor) -> tf.Tensor: 380 | """Combines reversion and grade automorphism. 381 | See https://en.wikipedia.org/wiki/Paravector#Clifford_conjugation. 382 | 383 | Args: 384 | tensor: Geometric algebra tensor to return conjugate for 385 | 386 | Returns: 387 | Geometric algebra tensor after `reversion()` and `grade_automorphism()` 388 | """ 389 | tensor = tf.convert_to_tensor(tensor, dtype_hint=tf.float32) 390 | return self.grade_automorphism(self.reversion(tensor)) 391 | 392 | def simple_inverse(self, a: tf.Tensor) -> tf.Tensor: 393 | """Returns the inverted geometric algebra tensor 394 | `X^-1` such that `X * X^-1 = 1`. Only works for elements that 395 | square to scalars. Faster than the general inverse. 396 | 397 | Args: 398 | a: Geometric algebra tensor to return inverse for 399 | 400 | Returns: 401 | inverted geometric algebra tensor 402 | """ 403 | a = tf.convert_to_tensor(a, dtype_hint=tf.float32) 404 | 405 | rev_a = self.reversion(a) 406 | divisor = self.geom_prod(a, rev_a) 407 | if not self.is_pure_kind(divisor, BladeKind.SCALAR): 408 | raise Exception( 409 | "Can't invert multi-vector (inversion divisor V ~V not scalar: %s)." 
410 | % divisor 411 | ) 412 | 413 | # Divide by scalar part 414 | return rev_a / divisor[..., :1] 415 | 416 | def reg_prod(self, a: tf.Tensor, b: tf.Tensor) -> tf.Tensor: 417 | """Returns the regressive product of two geometric 418 | algebra tensors. 419 | 420 | Args: 421 | a: Geometric algebra tensor on the left hand side of 422 | the regressive product 423 | b: Geometric algebra tensor on the right hand side of 424 | the regressive product 425 | 426 | Returns: 427 | regressive product of a and b 428 | """ 429 | a = tf.convert_to_tensor(a, dtype_hint=tf.float32) 430 | b = tf.convert_to_tensor(b, dtype_hint=tf.float32) 431 | 432 | return self.dual(self.ext_prod(self.dual(a), self.dual(b))) 433 | 434 | def ext_prod(self, a: tf.Tensor, b: tf.Tensor) -> tf.Tensor: 435 | """Returns the exterior product of two geometric 436 | algebra tensors. 437 | 438 | Args: 439 | a: Geometric algebra tensor on the left hand side of 440 | the exterior product 441 | b: Geometric algebra tensor on the right hand side of 442 | the exterior product 443 | 444 | Returns: 445 | exterior product of a and b 446 | """ 447 | a = tf.convert_to_tensor(a, dtype_hint=tf.float32) 448 | b = tf.convert_to_tensor(b, dtype_hint=tf.float32) 449 | 450 | return mv_multiply(a, b, self._cayley_outer) 451 | 452 | def geom_prod(self, a: tf.Tensor, b: tf.Tensor) -> tf.Tensor: 453 | """Returns the geometric product of two geometric 454 | algebra tensors. 455 | 456 | Args: 457 | a: Geometric algebra tensor on the left hand side of 458 | the geometric product 459 | b: Geometric algebra tensor on the right hand side of 460 | the geometric product 461 | 462 | Returns: 463 | geometric product of a and b 464 | """ 465 | a = tf.convert_to_tensor(a, dtype_hint=tf.float32) 466 | b = tf.convert_to_tensor(b, dtype_hint=tf.float32) 467 | 468 | a = tf.convert_to_tensor(a) 469 | b = tf.convert_to_tensor(b) 470 | return mv_multiply(a, b, self._cayley) 471 | 472 | def inner_prod(self, a: tf.Tensor, b: tf.Tensor) -> tf.Tensor: 473 | """Returns the inner product of two geometric 474 | algebra tensors. 475 | 476 | Args: 477 | a: Geometric algebra tensor on the left hand side of 478 | the inner product 479 | b: Geometric algebra tensor on the right hand side of 480 | the inner product 481 | 482 | Returns: 483 | inner product of a and b 484 | """ 485 | a = tf.convert_to_tensor(a, dtype_hint=tf.float32) 486 | b = tf.convert_to_tensor(b, dtype_hint=tf.float32) 487 | 488 | return mv_multiply(a, b, self._cayley_inner) 489 | 490 | def geom_conv1d( 491 | self, 492 | a: tf.Tensor, 493 | k: tf.Tensor, 494 | stride: int, 495 | padding: str, 496 | dilations: Union[int, None] = None, 497 | ) -> tf.Tensor: 498 | """Returns the 1D convolution of a sequence with a geometric algebra 499 | tensor kernel. The convolution is performed using the geometric 500 | product. 
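        Example (a minimal shape sketch; the values are placeholders):

            ga = GeometricAlgebra([1, 1, 1])         # 8 blades
            a = tf.ones([1, 10, 4, ga.num_blades])   # [batch, Length, ChannelsIn, Blades]
            k = tf.ones([3, 4, 6, ga.num_blades])    # [KernelSize, ChannelsIn, ChannelsOut, Blades]
            y = ga.geom_conv1d(a, k, stride=1, padding="SAME")
            # y has shape [1, 10, 6, ga.num_blades]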
501 | 502 | Args: 503 | a: Input geometric algebra tensor of shape 504 | [..., Length, ChannelsIn, Blades] 505 | k: Geometric algebra tensor for the convolution kernel of shape 506 | [KernelSize, ChannelsIn, ChannelsOut, Blades] 507 | stride: Stride to use for the convolution 508 | padding: "SAME" (zero-pad input length so output 509 | length == ceil(input length / stride)) or "VALID" (no padding) dilations: Dilation rate to use for the convolution (currently not forwarded to the underlying convolution) 510 | Returns: 511 | Geometric algebra tensor of shape 512 | [..., OutputLength, ChannelsOut, Blades] 513 | representing `a` convolved with `k` 514 | """ 515 | a = tf.convert_to_tensor(a, dtype_hint=tf.float32) 516 | k = tf.convert_to_tensor(k, dtype_hint=tf.float32) 517 | 518 | return mv_conv1d(a, k, self._cayley, stride=stride, padding=padding) 519 | 520 | def mv_repr(self, a: tf.Tensor) -> str: 521 | """Returns a string representation for the given 522 | geometric algebra tensor. 523 | 524 | Args: 525 | a: Geometric algebra tensor to return the representation for 526 | 527 | Returns: 528 | string representation for `a` 529 | """ 530 | a = tf.convert_to_tensor(a, dtype_hint=tf.float32) 531 | 532 | if len(a.shape) == 1: 533 | return "MultiVector[%s]" % " + ".join( 534 | "%.2f*%s" % (value, get_blade_repr(blade_name)) 535 | for value, blade_name in zip(a, self.blades) 536 | if value != 0 537 | ) 538 | else: 539 | return "MultiVector[batch_shape=%s]" % a.shape[:-1] 540 | 541 | def approx_exp(self, a: tf.Tensor, order: int = 50) -> tf.Tensor: 542 | """Returns an approximation of the exponential using a Taylor series centered at zero. 543 | 544 | Args: 545 | a: Geometric algebra tensor to return exponential for 546 | order: order of the approximation 547 | 548 | Returns: 549 | Approximation of `exp(a)` 550 | """ 551 | a = tf.convert_to_tensor(a, dtype_hint=tf.float32) 552 | 553 | v = self.from_scalar(1.0) 554 | result = self.from_scalar(1.0) 555 | for i in range(1, order + 1): 556 | v = self.geom_prod(a, v) 557 | i_factorial = tf.exp(tf.math.lgamma(i + 1.0)) 558 | result += v / i_factorial 559 | return result 560 | 561 | def exp( 562 | self, a: tf.Tensor, square_scalar_tolerance: Union[float, None] = 1e-4 563 | ) -> tf.Tensor: 564 | """Returns the exponential of the passed geometric algebra tensor. 565 | Only works for multivectors that square to scalars. 566 | 567 | Args: 568 | a: Geometric algebra tensor to return exponential for 569 | square_scalar_tolerance: Tolerance to use for the square scalar check 570 | or None if the check should be skipped 571 | 572 | Returns: 573 | `exp(a)` 574 | """ 575 | # See https://www.euclideanspace.com/maths/algebra/clifford/algebra/functions/exponent/index.htm 576 | # for an explanation of how to exponentiate multivectors. 577 | 578 | self_sq = self.geom_prod(a, a) 579 | 580 | if square_scalar_tolerance is not None: 581 | tf.Assert( 582 | tf.reduce_all(tf.abs(self_sq[..., 1:]) < square_scalar_tolerance), 583 | [self_sq], 584 | ) 585 | 586 | scalar_self_sq = self_sq[..., :1] 587 | 588 | # "Complex" square root (argument can be negative) 589 | s_sqrt = tf.sign(scalar_self_sq) * tf.sqrt(tf.abs(scalar_self_sq)) 590 | 591 | # Square to +1: cosh(sqrt(||a||)) + a / sqrt(||a||) sinh(sqrt(||a||)) 592 | # Square to -1: cos(sqrt(||a||)) + a / sqrt(||a||) sin(sqrt(||a||)) 593 | # TODO: Does this work for values other than 1 too? e.g. square to +0.5? 594 | # TODO: Find a solution that doesn't require calculating all possibilities 595 | # first.
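        # Worked example (illustrative): for a unit bivector B with
        # B B = -1 and a scalar angle t, the input a = t*B squares to the
        # negative scalar -t^2, so the cos/sin branch below yields
        #     exp(t*B) = cos(t) + B*sin(t),
        # which is the usual rotor construction (up to sign conventions,
        # a vector x is rotated as R x ~R with R = exp(-t/2 * B)).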
596 | non_zero_result = tf.where( 597 | scalar_self_sq < 0, 598 | (self.from_tensor(tf.cos(s_sqrt), [0]) + a / s_sqrt * tf.sin(s_sqrt)), 599 | (self.from_tensor(tf.cosh(s_sqrt), [0]) + a / s_sqrt * tf.sinh(s_sqrt)), 600 | ) 601 | 602 | return tf.where(scalar_self_sq == 0, self.from_scalar(1.0) + a, non_zero_result) 603 | 604 | def approx_log(self, a: tf.Tensor, order: int = 50) -> tf.Tensor: 605 | """Returns an approximation of the natural logarithm using a Taylor 606 | series centered at one. Only converges for multivectors where `||mv - 1|| < 1`. 607 | 608 | Args: 609 | a: Geometric algebra tensor to return logarithm for 610 | order: order of the approximation 611 | 612 | Returns: 613 | Approximation of `log(a)` 614 | """ 615 | a = tf.convert_to_tensor(a, dtype_hint=tf.float32) 616 | 617 | result = self.from_scalar(0.0) 618 | 619 | a_minus_one = a - self.from_scalar(1.0) 620 | v = None 621 | 622 | for i in range(1, order + 1):  # powers of a multivector use the geometric product 623 | v = a_minus_one if v is None else self.geom_prod(v, a_minus_one) 624 | result += (((-1.0) ** i) / i) * v 625 | 626 | return -result 627 | 628 | def int_pow(self, a: tf.Tensor, n: int) -> tf.Tensor: 629 | """Returns the geometric algebra tensor to the power of an integer 630 | using repeated multiplication. 631 | 632 | Args: 633 | a: Geometric algebra tensor to raise 634 | n: integer power to raise the multivector to 635 | 636 | Returns: 637 | `a` to the power of `n` 638 | """ 639 | a = tf.convert_to_tensor(a, dtype_hint=tf.float32) 640 | 641 | if not isinstance(n, int): 642 | raise Exception("n must be an integer.") 643 | if n < 0: 644 | raise Exception("Can't raise to negative powers.") 645 | 646 | if n == 0: 647 | # TODO: more efficient (ones only in scalar) 648 | return tf.ones_like(a) * self.e("") 649 | 650 | result = a 651 | for _ in range(n - 1): 652 | result = self.geom_prod(result, a) 653 | return result 654 | 655 | def keep_blades(self, a: tf.Tensor, blade_indices: List[int]) -> tf.Tensor: 656 | """Takes a geometric algebra tensor and returns it with only the given 657 | blade_indices as non-zeros. 658 | 659 | Args: 660 | a: Geometric algebra tensor to copy 661 | blade_indices: Indices for blades to keep 662 | 663 | Returns: 664 | `a` with only `blade_indices` components as non-zeros 665 | """ 666 | a = tf.convert_to_tensor(a, dtype_hint=tf.float32) 667 | blade_indices = tf.cast( 668 | tf.convert_to_tensor(blade_indices, dtype_hint=tf.int64), dtype=tf.int64 669 | ) 670 | 671 | blade_values = tf.gather(a, blade_indices, axis=-1) 672 | 673 | return self.from_tensor(blade_values, blade_indices) 674 | 675 | def keep_blades_with_name( 676 | self, a: tf.Tensor, blade_names: Union[List[str], str] 677 | ) -> tf.Tensor: 678 | """Takes a geometric algebra tensor and returns it with only the given 679 | blades as non-zeros. 680 | 681 | Args: 682 | a: Geometric algebra tensor to copy 683 | blade_names: Blades to keep 684 | 685 | Returns: 686 | `a` with only `blade_names` components as non-zeros 687 | """ 688 | if isinstance(blade_names, str): 689 | blade_names = [blade_names] 690 | 691 | _, blade_indices = get_blade_indices_from_names(blade_names, self.blades) 692 | 693 | return self.keep_blades(a, blade_indices) 694 | 695 | def select_blades(self, a: tf.Tensor, blade_indices: List[int]) -> tf.Tensor: 696 | """Takes a geometric algebra tensor and returns a `tf.Tensor` with the 697 | blades in blade_indices on the last axis.
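        Example (illustrative, for some geometric algebra tensor `mv`):

            ga.select_blades(mv, [0])  # shape [..., 1], just the scalar coefficients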
698 | 699 | 700 | Args: 701 | a: Geometric algebra tensor to copy 702 | blade_indices: Indices for blades to select 703 | 704 | Returns: 705 | `tf.Tensor` based on `a` with `blade_indices` on last axis. 706 | """ 707 | a = tf.convert_to_tensor(a, dtype_hint=tf.float32) 708 | blade_indices = tf.cast( 709 | tf.convert_to_tensor(blade_indices, dtype_hint=tf.int64), dtype=tf.int64 710 | ) 711 | 712 | result = tf.gather(a, blade_indices, axis=-1) 713 | 714 | return result 715 | 716 | def select_blades_with_name( 717 | self, a: tf.Tensor, blade_names: Union[List[str], str] 718 | ) -> tf.Tensor: 719 | """Takes a geometric algebra tensor and returns a `tf.Tensor` with the 720 | blades in blade_names on the last axis. 721 | 722 | 723 | Args: 724 | a: Geometric algebra tensor to copy 725 | blade_names: Blades to keep 726 | 727 | Returns: 728 | `tf.Tensor` based on `a` with `blade_names` on last axis. 729 | """ 730 | a = tf.convert_to_tensor(a, dtype_hint=tf.float32) 731 | 732 | is_single_blade = isinstance(blade_names, str) 733 | if is_single_blade: 734 | blade_names = [blade_names] 735 | 736 | blade_signs, blade_indices = get_blade_indices_from_names( 737 | blade_names, self.blades 738 | ) 739 | 740 | result = blade_signs * self.select_blades(a, blade_indices) 741 | 742 | if is_single_blade: 743 | return result[..., 0] 744 | 745 | return result 746 | 747 | def inverse(self, a: tf.Tensor) -> tf.Tensor: 748 | """Returns the inverted geometric algebra tensor 749 | `X^-1` such that `X * X^-1 = 1`. 750 | 751 | Using Shirokov's inverse algorithm that works in arbitrary dimensions, 752 | see https://arxiv.org/abs/2005.04015 Theorem 4. 753 | 754 | Args: 755 | a: Geometric algebra tensor to return inverse for 756 | 757 | Returns: 758 | inverted geometric algebra tensor 759 | """ 760 | a = tf.convert_to_tensor(a, dtype_hint=tf.float32) 761 | 762 | n = 2 ** ((len(self.metric) + 1) // 2) 763 | 764 | u = a 765 | for k in range(1, n): 766 | c = n / k * self.keep_blades_with_name(u, "") 767 | u_minus_c = u - c 768 | u = self.geom_prod(a, u_minus_c) 769 | 770 | if not self.is_pure_kind(u, BladeKind.SCALAR): 771 | raise Exception("Can't invert multi-vector (det U not scalar: %s)." % u) 772 | 773 | # adj / det 774 | return u_minus_c / u[..., :1] 775 | 776 | def __call__(self, a: tf.Tensor) -> MultiVector: 777 | """Creates a `MultiVector` from a geometric algebra tensor. 778 | Mainly used as a wrapper for the algebra's functions for convenience. 
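        The returned `MultiVector` provides operator overloads such as `*`,
        `~`, `^`, `|` and `&` (see `notebooks/pga.ipynb` for usage).

        Example (illustrative):

            ga = GeometricAlgebra([0, 1, 1])
            p = ga(ga.e12 + ga.e01)     # wrap a tf.Tensor as a MultiVector
            ga.print((p * ~p).tensor)   # unwrap back to a tf.Tensor via .tensor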
779 | 780 | Args: 781 | a: Geometric algebra tensor to return `MultiVector` for 782 | 783 | Returns: 784 | `MultiVector` for `a` 785 | """ 786 | return MultiVector(tf.convert_to_tensor(a), self) 787 | -------------------------------------------------------------------------------- /notebooks/pga.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "%load_ext autoreload\n", 10 | "%autoreload 2\n", 11 | "\n", 12 | "import tensorflow as tf\n", 13 | "\n", 14 | "# Make tensorflow not take over the entire GPU memory\n", 15 | "for gpu in tf.config.experimental.list_physical_devices('GPU'):\n", 16 | " tf.config.experimental.set_memory_growth(gpu, True)\n", 17 | "\n", 18 | "from matplotlib import pyplot as plt\n", 19 | "from tfga import GeometricAlgebra" 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": 2, 25 | "metadata": {}, 26 | "outputs": [ 27 | { 28 | "output_type": "stream", 29 | "name": "stdout", 30 | "text": "tf.Tensor(\n[[0. 1. 0. 0. 0. 0. 0. 0.]\n [0. 0. 1. 0. 0. 0. 0. 0.]\n [0. 0. 0. 1. 0. 0. 0. 0.]], shape=(3, 8), dtype=float32)\n" 31 | } 32 | ], 33 | "source": [ 34 | "ga = GeometricAlgebra([0, 1, 1])\n", 35 | "print(ga.basis_mvs)" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": 5, 41 | "metadata": {}, 42 | "outputs": [ 43 | { 44 | "output_type": "stream", 45 | "name": "stdout", 46 | "text": "P1: MultiVector[-1.00*e_01 + 1.00*e_12]\nP2: MultiVector[0.50*e_01 + 1.00*e_02 + 1.00*e_12]\nP3: MultiVector[0.50*e_01 + -1.00*e_02 + 1.00*e_12]\nP4: MultiVector[1.00*e_01 + 1.00*e_12]\nP5: MultiVector[1.00*e_12]\nL14: MultiVector[-2.00*e_1]\nSigned distance between P2 and L14: MultiVector[1.00*1]\nSigned distance between P3 and L14: MultiVector[-1.00*1]\nP2 on L14: MultiVector[2.00*e_01 + 4.00*e_12]\n" 47 | } 48 | ], 49 | "source": [ 50 | "\"\"\"\n", 51 | " p_4\n", 52 | "\n", 53 | "p_2 p_3\n", 54 | "\n", 55 | " p_5\n", 56 | "\n", 57 | " p_1\n", 58 | "\n", 59 | "p: x e_20 + y e_01 + e_12\n", 60 | "\"\"\"\n", 61 | "\n", 62 | "def mv_length(mv):\n", 63 | " return tf.sqrt((mv * ~mv).tensor)[..., 0]\n", 64 | "\n", 65 | "def dist_point_line(point, line):\n", 66 | " point_normalized = point.tensor / mv_length(point)\n", 67 | " line_normalized = line.tensor / mv_length(line)\n", 68 | " return ga(point_normalized) & ga(line_normalized)\n", 69 | "\n", 70 | "def dist_points(point_a, point_b):\n", 71 | " point_a_normalized = point_a.tensor / mv_length(point_a)\n", 72 | " point_b_normalized = point_b.tensor / mv_length(point_b)\n", 73 | "\n", 74 | " return ga(point_a_normalized) & ga(point_b_normalized)\n", 75 | "\n", 76 | "def proj_point_line(point, line):\n", 77 | " return (point | line) * line\n", 78 | "\n", 79 | "def intersect_lines(line_a, line_b):\n", 80 | " return line_a ^ line_b\n", 81 | "\n", 82 | "def point_coordinates(point):\n", 83 | " z = point(\"12\")\n", 84 | " x = point(\"20\") / z\n", 85 | " y = point(\"01\") / z\n", 86 | " return x, y\n", 87 | "\n", 88 | "# Shift up vertically\n", 89 | "shift_23 = 0.5 * ga.e01\n", 90 | "\n", 91 | "p_1 = ga(ga.e12 - ga.e01)\n", 92 | "p_2 = ga(ga.e12 - ga.e20 + shift_23)\n", 93 | "p_3 = ga(ga.e12 + ga.e20 + shift_23)\n", 94 | "p_4 = ga(ga.e12 + ga.e01)\n", 95 | "p_5 = ga(ga.e12)\n", 96 | "\n", 97 | "l_14 = p_1 & p_4\n", 98 | "l_23 = p_2 & p_3\n", 99 | "\n", 100 | "p2_on_l14 = proj_point_line(p_2, l_14)\n", 101 | "\n", 102 | "print(\"P1:\", 
p_1)\n", 103 | "print(\"P2:\", p_2)\n", 104 | "print(\"P3:\", p_3)\n", 105 | "print(\"P4:\", p_4)\n", 106 | "print(\"P5:\", p_5)\n", 107 | "print(\"L14:\", l_14)\n", 108 | "print(\"Signed distance between P2 and L14:\", dist_point_line(p_2, l_14))\n", 109 | "print(\"Signed distance between P3 and L14:\", dist_point_line(p_3, l_14))\n", 110 | "print(\"P2 on L14:\", p2_on_l14)" 111 | ] 112 | }, 113 | { 114 | "cell_type": "code", 115 | "execution_count": 6, 116 | "metadata": {}, 117 | "outputs": [ 118 | { 119 | "output_type": "display_data", 120 | "data": { 121 | "text/plain": "
", 122 | "image/svg+xml": "\r\n\r\n\r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n", 123 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAgAAAAHwCAYAAADQAtd+AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4yLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+j8jraAAAgAElEQVR4nO3dfXRddZ3v8feXlEKFIiAtFkoszK1aaDFCBpSHEUQY6AULeusCeaha7VJvx0Gc8ZbljHhn1hq44MPIDNBbFeRp6FJHxwqUZ71IFSFlSilUpDIFQjsFGbRAizTle//ISThNkzRpknPS/N6vtc7K3vv32/t8fzlnZ3+y985JZCaSJKksO9W7AEmSVHsGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJA06CLi0Yg4rt51SOqZAUBSryJidURsjIiXI2JdRFwTEbv3tk5mHpKZP+vH9j8wKMVK6jMDgKS+OC0zdwcOA/4U+Js61yNpgAwAkvosM58FFgNTI+KDlVP9v4+In0XElI5+1b/VR8RXIuJ7EXFdRLxUWae50nY90Aj8pHKG4YsRsWtE3BARL1S2/WBE7FuP8UojmQFAUp9FxAHAdOAl4CbgfGAccCvtB/HRPaz6QWAhsCewCPhngMw8F3iayhmGzLwUmAW8GTgAeAvwaWDjUI1JKpUBQFJf/FtE/B64D/h/wGPALZl5Z2ZuAr4KjAGO6mH9+zLz1szcDFwPvKuX59pE+4H/v2Xm5sxcmpnrB20kkgADgKS+OT0z98zMt2XmZ4H9gKc6GjPzdeAZYP8e1v/PqukNwK4RMaqHvtcDtwMLI2JNRFwaETsPfAiSqhkAJG2PNcDbOmYiImg/Zf/sdmxri39JmpmbMvN/Z+bBtJ9ROBU4bwC1SuqGAUDS9vge8N8j4oTKb+dfAP4I/GI7trUOOKhjJiKOj4hpEdEArKf9ksDmQahZUhUDgKR+y8zHgXOAfwJ+B5xG+418r23H5i4G/qZyx/9fAW8FfkD7wX8l7fcc3DAohUvqFJm57V6SJGlE8QyAJEkFMgBIklQgA4AkSQUyAEiSVCADgCRJBerpk7hGpH322ScnTZpU7zIkSaqJpUuX/i4zx3XXVlQAmDRpEi0tLfUuQ5KkmoiIp3pq8xKAJEkFMgBIklQgA4AkSQUyAEiSVCADgCRJBTIASJJUIAOAJEkFMgBIklQgA4AkSQUyAEiSVCADgKSaaGhooKmpialTpzJz5kw2bNjQ2bZ582be/e53c+qpp9axQqksBgBJNTFmzBiWLVvGihUrGD16NPPnz+9s++Y3v8mUKVPqWJ1UHgOApJo79thjWbVqFQCtra3ccsstfPKTn6xzVVJZDACSaqqtrY3Fixczbdo0AM4//3wuvfRSdtrJH0dSLdV1j4uIqyPiuYhY0UN7RMTlEbEqIpZHxGFVbSdHxOOVtnm1q1pSX2Vm5/TGjRtpamqiubmZxsZGZs+ezc0338z48eM5/PDD61ilVKZRdX7+7wL/DFzXQ/spwOTK40jgKuDIiGgArgBOBFqBByNiUWY+NuQVS+qT5//pn9n80nr2vfBCIoIxY8Zw20c+QsPYPRj3F3MBWLJkCYsWLeLWW2/l1VdfZf369ZxzzjnccMMNda5eGvnqegYgM+8F/quXLjOA67Ld/cCeETEBOAJYlZlPZuZrwMJKX0nDQGay+aX1vHjd9ay7+GIyk9y0iRevu57NL63vPDNw8cUX09rayurVq1m4cCHvf//7PfhLNVLvMwDbsj/wTNV8a2VZd8uPrGFdknoREex74YUAvHjd9bx43fXkpk3sdd65nWcEJNXXcL/rprufEtnL8q03EDEnIloiouX5558f1OIk9aw6BAAsffs7ej34H3fccdx88821Kk8q3nAPAK3AAVXzE4E1vSzfSmYuyMzmzGweN27ckBUqaUuZybqLL95iWcflAEn1N9wDwCLgvMpfA7wH+ENmrgUeBCZHxIERMRo4s9JX0jDQcfB/8brr2eu8c3nnysfY67xzt7gnQFJ91fUegIi4CTgO2CciWoGLgJ0BMnM+cCswHVgFbAA+Xmlri4i5wO1AA3B1Zj5a8wFI6lZE0DB2jy2u+XdcDmgYu4f3AEjDQJSUxJubm7OlpaXeZUjFyMwtDvZd5yUNrYhYmpnN3bUN90sAknZgXQ/2Hvyl4cMAIElSgQwAkiQVyAAgSVKBDACSJBXIACBJUoEMAJIkFcgAIElSgQwAkiQVyAAgSVKBDACSJBXIACBJUoEMAJIkFcgAIElSgQwAkiQVyAAgSVKBDACSJBXIACBJUoEMAJIkFcgAIElSgQwAkiQVyAAgSVKBDACSJBXIACBJUoEMAJIkFcgAIElSgQwAkiQVyAAgSVKBDACSJBXIACBJUoEMAJIkFcgAIElSgQwAkiQVyAAgSVKBDACSJBXIACBJUoEMAJIkFcgAIElSgQwAkiQVyAAgSVKB6hoAIuLkiHg8IlZFxLxu2v86IpZVHisiYnNE7F1pWx0Rj1TaWmpfvSRJO65R9XriiGgArgBOBFqBByNiUWY+1tEnMy8DLqv0Pw34fGb+V9Vmjs/M39WwbEmSRoR6ngE4AliVmU9m5mvAQmBGL/3PAm6qSWWSJI1w9QwA+wPPVM23VpZtJSLeBJwM/GvV4gTuiIilETFnyKqUJGkEqtslACC6WZY99D0NWNLl9P/RmbkmIsYDd0bErzPz3q2epD0czAFobGwcaM2SJI0I9TwD0AocUDU/EVjTQ98z6XL6PzPXVL4+B/yI9ksKW8nMBZnZnJnN48aNG3DRkiSNBPUMAA8CkyPiwIgYTftBflHXThHxZuB9wI+rlu0WEWM7poGTgBU1qVqSpBGgbpcAMrMtIuYCtwMNwNWZ+WhEfLrSPr/S9Qzgjsx8pWr1fYEfRQS0j+FfMvO22lUvSdKOLTJ7uuw+8jQ3N2dLix8ZIEkqQ0Qszczm7tr8JEBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkABighoYGmpqamDp1KjNnzmTDhg0888wzHH/88UyZMoVDDjmEb37zm/UuU6q7eu4ru++++1bL7r33Xg477DBGjRrFD37wg63a169fz/7778/cuXOHpCapJ93tK6+++ipHHHEE73rXuzjkkEO46KKLBvw8BoABGjNmDMuWLWPFihWMHj2a+fPnM2rUKL72ta+xcuVK7r//fq644goee+yxepcq1dVw21caGxv57ne/y0c/+tFu2//2b/+W973vfTWpRarW3b6yyy67cM899/Dwww+zbNkybr
vtNu6///4BPY8BYBAde+yxrFq1igkTJnDYYYcBMHbsWKZMmcKzzz5b5+qk4aO/+8pTTz3FCSecwKGHHsoJJ5zA008/DcDHPvYxPve5z3HUUUdx0EEHdfubfE8mTZrEoYceyk47bf1jcOnSpaxbt46TTjppO0coDY6OfSUiOs9kbdq0iU2bNhERA9q2AWCQtLW1sXjxYqZNm7bF8tWrV/Pv//7vHHnkkXWqTBpetmdfmTt3Lueddx7Lly/n7LPP5nOf+1xn29q1a7nvvvu4+eabmTdv3oDre/311/nCF77AZZddNuBtSQPRdV/ZvHkzTU1NjB8/nhNPPHHAx5W6BoCIODkiHo+IVRGx1Z4bEcdFxB8iYlnl8eW+rjukMjsnN27cSFNTE83NzTQ2NjJ79uzOtpdffpkPf/jD/OM//iN77LFHTUuUhoMcpH3ll7/8Zeep+nPPPZf77ruvs+30009np5124uCDD2bdunUDrvnKK69k+vTpHHDAAQPeltRXfdlXGhoaWLZsGa2trTzwwAOsWLFiQM85akBrD0BENABXACcCrcCDEbEoM7teAPx5Zp66nesOvp9eDK/+AU6+GCLar9VcfDLs+mY4/sLObps2beLDH/4wZ599Nh/60IeGvCxpuPnF92/kj6+8wnGzPkVU9pVvnP9ZdtltN46aeXZnv+3ZV6pPfe6yyy6d09U/RLfXL3/5S37+859z5ZVX8vLLL/Paa6+x++67c8kllwx421J3rlx2JS+99hJf/NMvdu4rZy04i7Gjx/LZps9u1X/PPffkuOOO47bbbmPq1Knb/bz1PANwBLAqM5/MzNeAhcCMGqy7/TLbD/6/ugpuu7B9fvNr7fOv/qHzzEBmMnv2bKZMmcIFF1ww5GVJw01m8sdXXuGhxYv42bXfIjPZ3NbGQ4sX8cdXXuk8UPd1XznqqKNYuHAhADfeeCPHHHPMkNV+44038vTTT7N69Wq++tWvct5553nw15DJTF567SVuWHkDlz54KZnJptc3ccPKG3jptZc695Xnn3+e3//+90D7GYK77rqLd77znQN67rqdAQD2B56pmm8Furug8d6IeBhYA/xVZj7aj3UHV0T7b/7QftD/1VWweRMceUHnGQGAJUuWcP311zNt2jSampoA+Id/+AemT58+5CVKw0FEcNysTwHw0OJFPLR4Ea9vbuOwUz7YeUYA+r6vXH755XziE5/gsssuY9y4cVxzzTX9qmfDhg1MnDixc/6CCy7g2GOP5YwzzuDFF1/kJz/5CRdddBGPPvroQIYt9VtE8MU//SIAN6y8gRtW3kDb622cM+WczjMC0H6vy6xZs9i8eTOvv/46H/nIRzj11FN72/S2n3swTplt1xNHzAT+PDM/WZk/FzgiM/+iqs8ewOuZ+XJETAe+mZmT+7Ju1TbmAHMAGhsbD3/qqacGXnwm/O8935i/6PedB39Jb8hMvn7maZ3zFyz8yYDvXJZGoszk0OsO7Zxfft7yQdlXImJpZjZ311bPSwCtQPVdNhNp/y2/U2auz8yXK9O3AjtHxD59WbdqGwsyszkzm8eNGzfwqjPbT/9X67gcIKlTZvKza7+1xbKOywGS3pCZXPrgpVss67gcMJTqGQAeBCZHxIERMRo4E1hU3SEi3hqVCBQRR9Be7wt9WXdIdBz8f3UVHPmZ9t/8j/zMlvcESOo8+D+0eBGHnfJBLlj4Ew475YNb3BMg6Y2D/w0rb+CcKeew/LzlnDPlnC3uCRgqdbsHIDPbImIucDvQAFydmY9GxKcr7fOB/wF8JiLagI3Amdn+3eh23SEvOqL9bv8jP/PGNf+OewJ2fbOXAaSKiGCX3Xbb4pp/xz0Bu+y2m5cBpIqIYOzosVtc8++4J2Ds6LFDuq/U7R6Aemhubs6WlpaBbyhzy4N913lJQPtvN9U/wLrOS2o3VPvKcL0HYMfV9UXxB5rUra4/wDz4S92rx75iAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQHUNABFxckQ8HhGrImJeN+1nR8TyyuMXEfGuqrbVEfFIRCyLiJbaVi5J0o5tVL2eOCIagCuAE4FW4MGIWJSZj1V1+w/gfZn5YkScAiwAjqxqPz4zf1ezoiVJGiHqeQbgCGBVZj6Zma8BC4EZ1R0y8xeZ+WJl9n5gYo1rlCRpRKpnANgfeKZqvrWyrCezgcVV8wncERFLI2LOENQnSdKIVbdLAEB0syy77RhxPO0B4JiqxUdn5pqIGA/cGRG/zsx7u1l3DjAHoLGxceBVS5I0AtTzDEArcEDV/ERgTddOEXEo8G1gRma+0LE8M9dUvj4H/Ij2SwpbycwFmdmcmc3jxo0bxPIlSdpx1TMAPAhMjogDI2I0cCawqLpDRDQCPwTOzczfVC3fLSLGdkwDJwErala5JEk7uLpdAsjMtoiYC9wONABXZ+ajEfHpSvt84MvAW4ArIwKgLTObgX2BH1WWjQL+JTNvq8MwJEnaIUVmt5fdR6Tm5uZsafEjAyRJZYiIpZVfnLfiJwFKklQgA4AkSQUyAEiSVCADgCRJBTIASJJUIAOAJEkFMgBIklQgA4AkSQUyAEiSVCADgCRJBTIASJJUIAOAJEkFMgBIklQgA4AkSQUyAEiSVCADgCRJBTIASJJUIAOAJEkFMgBIklQgA4AkSQUyAEiSVCADgCRJBTIASJJUIAOAJEkFMgBIklQgA4AkSQUyAEiSVCADgCRJBTIASJJUIAOAJEkFMgBIklQgA4AkSQUyAEiSVCADgCRJBTIASJJUIAOAJEkFMgBIklSgUfUuQFIZGhoamDZtGm1tbUyZMoVrr72WN73pTUyaNImxY8fS0NDAqFGjaGlpqXepUhE8AyCpJsaMGcOyZctYsWIFo0ePZv78+Z1tP/3pT1m2bJkHf6mGDACSau7YY49l1apV9S5DKpoBQNKQycyt5tva2li8eDHTpk0DICI46aSTOPzww1mwYEE9ypSKVNcAEBEnR8TjEbEqIuZ10x4RcXmlfXlEHNbXdSXV1wM/eZL7vv9EZwjYuHEjbz/wYA5++6E0NjYye/ZsAJYsWcJDDz3E4sWLueKKK7j33nvrWbZUjB4DQETcGhGThuqJI6IBuAI4BTgYOCsiDu7S7RRgcuUxB7iqH+tKqpPM5I8b21h+T2tnCNhl5105/+Qr+M4l/8bll1/O6NGjAdhvv/0AGD9+PGeccQYPPPBAPUuXitHbGYDvAndExJciYucheO4jgFWZ+WRmvgYsBGZ06TMDuC7b3Q/sGRET+riupDqJCI6ZOZlD3z+R5fe0cuVnf
srmza9z6PsncszMyUQEAK+88govvfRS5/Qdd9zB1KlT61m6VIwe/wwwM78XEbcAXwZaIuJ64PWq9q8P8Ln3B56pmm8FjuxDn/37uC4AETGH9rMHNDY2DqxiSX3WEQKW39Pauaz64A+wbt06zjjjDADa2tr46Ec/ysknn1zzWqUSbetzADYBrwC7AGOpCgCDILpZln3s05d12xdmLgAWADQ3N3fbR9Lgy0zu+/4TnfNfn30L933/iS1CwEEHHcTDDz9crxKlovUYACLiZODrwCLgsMzcMMjP3QocUDU/EVjTxz6j+7CupDrpOPgvv6e187R/xzxsfSZAUu31dgbgS8DMzHx0iJ77QWByRBwIPAucCXy0S59FwNyIWEj7Kf4/ZObaiHi+D+tKqpOIYJcxo7a45n/MzMkA7DJmlAd/aRjo7R6AY4fyiTOzLSLmArcDDcDVmfloRHy60j4fuBWYDqwCNgAf723doaxXUv8ccdpBZGbnwb4jBHjwl4aH6PpBHSNZc3Nz+lGjkqRSRMTSzGzurs1PApQkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKVJcAEBF7R8SdEfFE5ete3fQ5ICJ+GhErI+LRiPjLqravRMSzEbGs8phe2xFIkrRjq9cZgHnA3Zk5Gbi7Mt9VG/CFzJwCvAf4nxFxcFX7NzKzqfK4dehLliRp5KhXAJgBXFuZvhY4vWuHzFybmQ9Vpl8CVgL716xCSZJGsHoFgH0zcy20H+iB8b11johJwLuBX1UtnhsRyyPi6u4uIUiSpJ4NWQCIiLsiYkU3jxn93M7uwL8C52fm+sriq4A/AZqAtcDXell/TkS0RETL888/v52jkSRpZBk1VBvOzA/01BYR6yJiQmaujYgJwHM99NuZ9oP/jZn5w6ptr6vq8y3g5l7qWAAsAGhubs5+D0SSpBGoXpcAFgGzKtOzgB937RARAXwHWJmZX+/SNqFq9gxgxRDVKUnSiFSvAHAJcGJEPAGcWJknIvaLiI47+o8GzgXe382f+10aEY9ExHLgeODzNa5fkqQd2pBdAuhNZr4AnNDN8jXA9Mr0fUD0sP65Q1qgJEkjnJ8EKElSgQwAkiQVyAAgSVKBDACSJBXIACBJUoEMAJIkFcgAIElSgQwAkiQVyAAgSVKBDACSJBXIACBJUoEMAJIkFcgAIElSgQwAkiQVyAAgSVKBDACSJBXIACBJUoEMAJIkFcgAIElSgQwAkiQVyAAgSVKBDACSJBXIACBJUoEMAJIkFcgAIElSgQwAkiQVyAAgSVKBDACSJBXIACBJUoEMAJIkFcgAIElSgQwAkiQVyAAgSVKBDACSJBXIACBJUoEMAJIkFcgAIElSgQwAkiQVyAAgSVKBDACSJBWoLgEgIvaOiDsj4onK17166Lc6Ih6JiGUR0dLf9SVJUvfqdQZgHnB3Zk4G7q7M9+T4zGzKzObtXF+SJHVRrwAwA7i2Mn0tcHqN15ckqWj1CgD7ZuZagMrX8T30S+COiFgaEXO2Y31JktSNUUO14Yi4C3hrN01f6sdmjs7MNRExHrgzIn6dmff2s445wByAxsbG/qwqSdKINWQBIDM/0FNbRKyLiAmZuTYiJgDP9bCNNZWvz0XEj4AjgHuBPq1fWXcBsACgubk5t39EkiSNHPW6BLAImFWZngX8uGuHiNgtIsZ2TAMnASv6ur4kSepZvQLAJcCJEfEEcGJlnojYLyJurfTZF7gvIh4GHgBuyczbeltfkiT1zZBdAuhNZr4AnNDN8jXA9Mr0k8C7+rO+JEnqGz8JUJKkAhkAJEkqkAFAkqQCGQAkSSqQAUCSpAIZACRJKpABQJKkAhkAJEkqkAFAkqQCGQAkSSqQAUCSpAIZACRJKpABQJKkAhkAJEkqkAFAkqQCGQAkSSqQAUCSpAIZACRJKpABQJKkAhkAJEkqkAFAkqQCGQAkSSqQAUCSpAIZACRJKpABQJKkAhkAJEkqkAFAkqQCGQAkSSqQAUCSpAIZACRJKpABQJKkAhkAJEkqkAFAkqQCGQAkSSqQAUCSpAIZACRJKpABQJKkAhkAJEkqkAFAkqQCGQAkSSqQAUCSpALVJQBExN4RcWdEPFH5ulc3fd4REcuqHusj4vxK21ci4tmqtum1H4UkSTuuep0BmAfcnZmTgbsr81vIzMczsykzm4DDgQ3Aj6q6fKOjPTNvrUnVkiSNEPUKADOAayvT1wKnb6P/CcBvM/OpIa1KkqRC1CsA7JuZawEqX8dvo/+ZwE1dls2NiOURcXV3lxAkSVLPhiwARMRdEbGim8eMfm5nNPBB4PtVi68C/gRoAtYCX+tl/TkR0RIRLc8///x2jESSpJFn1FBtODM/0FNbRKyLiAmZuTYiJgDP9bKpU4CHMnNd1bY7pyPiW8DNvdSxAFgA0NzcnP0YgiRJI1a9LgEsAmZVpmcBP+6l71l0Of1fCQ0dzgBWDGp1kiSNcPUKAJcAJ0bEE8CJlXkiYr+I6LyjPyLeVGn/YZf1L42IRyJiOXA88PnalC1J0sgwZJcAepOZL9B+Z3/X5WuA6VXzG4C3dNPv3CEtUJKkEc5PApQkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJBUEw0NDTQ1NTF16lRmzpzJhg0bAPjEJz7B+PHjmTp1ap0rlMpiAJBUE2PGjGHZsmWsWLGC0aNHM3/+fAA+9rGPcdttt9W5Oqk8BgBJNXfssceyatUqAP7sz/6Mvffeu84VSeUxAEiqqba2NhYvXsy0adPqXYpUtLoEgIiYGRGPRsTrEdHcS7+TI+LxiFgVEfOqlu8dEXdGxBOVr3vVpnJJ/ZGZndMbN26kqamJ5uZmGhsbmT17dh0rkzSqTs+7AvgQ8H976hARDcAVwIlAK/BgRCzKzMeAecDdmXlJJRjMA/7X0Jctqa++cedvWP/qJr586sFEBGPGjOGMv7+RPXbdmc+f+PZ6lycVry5nADJzZWY+vo1uRwCrMvPJzHwNWAjMqLTNAK6tTF8LnD40lUraHpnJ+lc3cc2S1fzdzY+Rmby2+XWuWbKa9a9u2uLMgKT6GM73
AOwPPFM131pZBrBvZq4FqHwdX+PaJPUiIvjyqQfz8aMncc2S1Rx44a20bU4+fvSkzjMCHc466yze+9738vjjjzNx4kS+853v1LFyqRxDdgkgIu4C3tpN05cy88d92UQ3y/r9a0NEzAHmADQ2NvZ3dUnbqSMEXLNkNQCNF/xgq4M/wE033VSH6iQN2RmAzPxAZk7t5tGXgz+0/8Z/QNX8RGBNZXpdREwAqHx9rpc6FmRmc2Y2jxs3bnuGImk7ZCZ/d/NjWyzruBwgqf6G8yWAB4HJEXFgRIwGzgQWVdoWAbMq07OAvoYKSTXQcfC/ZslqPn70JP7j4umdlwMMAdLwUJe/AoiIM4B/AsYBt0TEssz884jYD/h2Zk7PzLaImAvcDjQAV2fmo5VNXAJ8LyJmA08DM+swDEk9iAj22HXnLa75f/nUgwHYY9edt7oMIKn2oqQk3uaJhVcAAAbwSURBVNzcnC0tLfUuQypGZm5xsO86L2loRcTSzOz283aG8yUASTu4rgd7D/7S8GEAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKZACQJKlABgBJkgpkAJAkqUAGAEmSCmQAkCSpQAYASZIKFJlZ7xpqJiKeB54axE3uA/xuELdXT45l+Bkp4wDHMlyNlLGMlHHA4I/lbZk5rruGogLAYIuIlsxsrncdg8GxDD8jZRzgWIarkTKWkTIOqO1YvAQgSVKBDACSJBXIADAwC+pdwCByLMPPSBkHOJbhaqSMZaSMA2o4Fu8BkCSpQJ4BkCSpQAaAbYiImRHxaES8HhE93pkZESdHxOMRsSoi5lUt3zsi7oyIJypf96pN5d3WuM1aIuIdEbGs6rE+Is6vtH0lIp6tapte+1H0/XsaEasj4pFKrS39Xb8W+viaHBARP42IlZX34l9WtdX9NenpvV/VHhFxeaV9eUQc1td1a6kP4zi7Uv/yiPhFRLyrqq3b91q99GEsx0XEH6reN1/u67q11oex/HXVOFZExOaI2LvSNmxel4i4OiKei4gVPbTXfj/JTB+9PIApwDuAnwHNPfRpAH4LHASMBh4GDq60XQrMq0zPA/5PHcfSr1oq4/pP2v+OFOArwF8Ng9ekT+MAVgP7DPT7UO+xABOAwyrTY4HfVL2/6vqa9Pber+ozHVgMBPAe4Fd9XXeYjeMoYK/K9Ckd4+jtvTaMx3IccPP2rDvcxtKl/2nAPcP0dfkz4DBgRQ/tNd9PPAOwDZm5MjMf30a3I4BVmflkZr4GLARmVNpmANdWpq8FTh+aSvukv7WcAPw2Mwfzw5MGw0C/pzvUa5KZazPzocr0S8BKYP+aVdi73t77HWYA12W7+4E9I2JCH9etlW3Wkpm/yMwXK7P3AxNrXGNfDeT7Opxek+2p5yzgpppU1k+ZeS/wX710qfl+YgAYHPsDz1TNt/LGD+h9M3MttP8gB8bXuLZq/a3lTLbemeZWTk9dXcdT530dRwJ3RMTSiJizHevXQr9qiYhJwLuBX1Utrudr0tt7f1t9+rJurfS3ltm0/7bWoaf3Wj30dSzvjYiHI2JxRBzSz3Vrpc/1RMSbgJOBf61aPJxel22p+X4yajA2sqOLiLuAt3bT9KXM/HFfNtHNsrr8eUVvY+nndkYDHwQurFp8FfD3tI/t74GvAZ/Yvkq3+fyDMY6jM3NNRIwH7oyIX1dSeE0N4muyO+0/3M7PzPWVxTV7TXoqq5tlXd/7PfUZNvsN/aglIo6nPQAcU7V4WLzXKvoylodov7T3cuW+kX8DJvdx3VrqTz2nAUsys/q37OH0umxLzfcTAwCQmR8Y4CZagQOq5icCayrT6yJiQmaurZzOeW6Az9Wr3sYSEf2p5RTgocxcV7XtzumI+BZw82DU3J3BGEdmrql8fS4ifkT7qbR72QFfk4jYmfaD/42Z+cOqbdfsNelBb+/9bfUZ3Yd1a6Uv4yAiDgW+DZySmS90LO/lvVYP2xxLVYAkM2+NiCsjYp++rFtj/alnqzOWw+x12Zaa7ydeAhgcDwKTI+LAym/OZwKLKm2LgFmV6VlAX84oDJX+1LLVtbTKAarDGUC3d7PWwDbHERG7RcTYjmngJN6od4d6TSIigO8AKzPz613a6v2a9Pbe77AIOK9yl/N7gD9ULnf0Zd1a2WYtEdEI/BA4NzN/U7W8t/daPfRlLG+tvK+IiCNoPxa80Jd1a6xP9UTEm4H3UbX/DMPXZVtqv58M5V2PI+FB+w/VVuCPwDrg9sry/YBbq/pNp/3u7N/SfumgY/lbgLuBJypf967jWLqtpZuxvIn2HwZv7rL+9cAjwPLKG3DCcB0H7XfMPlx5PLojvya0n2rOyvd9WeUxfbi8Jt2994FPA5+uTAdwRaX9Ear+mqan/aZOr8W2xvFt4MWq16BlW++1YTyWuZVaH6b9hsajhuNr0pexVOY/Bizsst6wel1o/4VqLbCJ9mPK7HrvJ34SoCRJBfISgCRJBTIASJJUIAOAJEkFMgBIklQgA4AkSQUyAEgaEtH+Xwz/I974z2x7VebfVu/aJBkAJA2RzHyG9o8qvqSy6BJgQQ6/fy4lFcnPAZA0ZCofYbwUuBr4FPDubP+PZpLqzP8FIGnIZOamiPhr4DbgJA/+0vDhJQBJQ+0U2j8CdWq9C5H0BgOApCETEU3AicB7gM93+edFkurIACBpSFT+29xVwPmZ+TRwGfDV+lYlqYMBQNJQ+RTwdGbeWZm/EnhnRLyvjjVJqvCvACRJKpBnACRJKpABQJKkAhkAJEkqkAFAkqQCGQAkSSqQAUCSpAIZACRJKpABQJKkAv1/vSuIzKSq93UAAAAASUVORK5CYII=\n" 124 | }, 125 | "metadata": { 126 | "needs_background": "light" 127 | } 128 | } 129 | ], 130 | "source": [ 131 | "# Plot the results\n", 132 | "\n", 133 | "def plot_point(point, name):\n", 134 | " xy = point_coordinates(point)\n", 135 | " plt.scatter(*xy, marker=\"x\")\n", 136 | " plt.annotate(name, xy)\n", 137 | "\n", 138 | "plt.figure(figsize=(8, 8))\n", 139 | "plot_point(p_1, \"P1\")\n", 140 | "plot_point(p_2, \"P2\")\n", 141 | "plot_point(p_3, \"P3\")\n", 142 | "plot_point(p_4, \"P4\")\n", 143 | "plot_point(p_5, \"P5\")\n", 144 | "plot_point(p2_on_l14, \"P2 on L14\")\n", 145 | "plt.xlabel(\"X\")\n", 146 | "plt.ylabel(\"Y\")\n", 147 | "plt.title(\"Points\")\n", 148 | "plt.show()" 
149 | ] 150 | }, 151 | { 152 | "cell_type": "code", 153 | "execution_count": null, 154 | "metadata": {}, 155 | "outputs": [], 156 | "source": [] 157 | } 158 | ], 159 | "metadata": { 160 | "language_info": { 161 | "codemirror_mode": { 162 | "name": "ipython", 163 | "version": 3 164 | }, 165 | "file_extension": ".py", 166 | "mimetype": "text/x-python", 167 | "name": "python", 168 | "nbconvert_exporter": "python", 169 | "pygments_lexer": "ipython3", 170 | "version": "3.7.6-final" 171 | }, 172 | "orig_nbformat": 2, 173 | "kernelspec": { 174 | "name": "python37664bittf2conda034469ea11204d31b38329519e9d7dbe", 175 | "display_name": "Python 3.7.6 64-bit ('tf2': conda)" 176 | } 177 | }, 178 | "nbformat": 4, 179 | "nbformat_minor": 2 180 | } --------------------------------------------------------------------------------