├── examples │   ├── lapy │   ├── README.rst │   ├── Test_Plot.ipynb │   ├── Test_TetMesh.ipynb │   ├── Test_ShapeDNA.ipynb │   └── Test_TetMesh_Geodesics.ipynb ├── lapy │   ├── commands │   │   ├── __init__.py │   │   └── sys_info.py │   ├── utils │   │   ├── tests │   │   │   ├── __init__.py │   │   │   ├── test_config.py │   │   │   ├── test_imports.py │   │   │   ├── test_visualization_meshes.py │   │   │   ├── expected_outcomes.json │   │   │   ├── test_TriaMesh_Geodesics.py │   │   │   ├── test_tet_mesh.py │   │   │   ├── test_TetMesh_Geodesics.py │   │   │   ├── test_shape_DNA.py │   │   │   └── test_polygon.py │   │   ├── __init__.py │   │   ├── _imports.py │   │   └── _config.py │   ├── _version.py │   ├── __init__.py │   ├── heat.py │   ├── _read_geometry.py │   ├── _tet_io.py │   ├── shapedna.py │   ├── io.py │   ├── tet_mesh.py │   └── polygon.py ├── .codespellignore ├── doc │   ├── tutorials │   │   ├── examples │   │   └── index.rst │   ├── _static │   │   └── css │   │       └── style.css │   ├── changes │   │   ├── index.rst │   │   ├── authors.inc │   │   ├── latest.rst.template │   │   └── latest.rst │   ├── _templates │   │   └── autosummary │   │       ├── module.rst │   │       ├── function.rst │   │       └── class.rst │   ├── api │   │   ├── lapy.solver.rst │   │   ├── lapy.meshes.rst │   │   ├── lapy.modules.rst │   │   └── index.rst │   ├── Makefile │   ├── make.bat │   ├── links.inc │   ├── index.rst │   ├── references.bib │   └── conf.py ├── tutorials │   └── README.rst ├── setup.py ├── .github │   ├── dependabot.yml │   ├── ISSUE_TEMPLATE │   │   ├── documentation.md │   │   ├── feature-request.md │   │   ├── questions-help-support.md │   │   └── bug-report.md │   └── workflows │       ├── publish.yml │       ├── code-style.yml │       ├── build.yml │       ├── doc.yml │       └── pytest.yml ├── .codecov.yml ├── data │   └── icosahedron.off ├── LICENSE ├── .gitignore ├── pyproject.toml └── README.md /examples/lapy: -------------------------------------------------------------------------------- 1 | ../lapy -------------------------------------------------------------------------------- /lapy/commands/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /lapy/utils/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.codespellignore: -------------------------------------------------------------------------------- 1 | coo 2 | daty 3 | -------------------------------------------------------------------------------- /doc/tutorials/examples: -------------------------------------------------------------------------------- 1 | ../../examples -------------------------------------------------------------------------------- /examples/README.rst: -------------------------------------------------------------------------------- 1 | Tutorials 2 | ========= 3 | -------------------------------------------------------------------------------- /tutorials/README.rst: -------------------------------------------------------------------------------- 1 | Tutorials 2 | ========= 3 | -------------------------------------------------------------------------------- /lapy/utils/__init__.py: -------------------------------------------------------------------------------- 1 | """Utilities module.""" 2 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | setup() 4 | -------------------------------------------------------------------------------- /doc/_static/css/style.css: -------------------------------------------------------------------------------- 1 |
div.sphx-glr-download-link-note { 2 | height: 0px; 3 | visibility: hidden; 4 | } 5 | -------------------------------------------------------------------------------- /doc/changes/index.rst: -------------------------------------------------------------------------------- 1 | Changelog 2 | ========= 3 | 4 | .. toctree:: 5 | :titlesonly: 6 | 7 | latest.rst 8 | -------------------------------------------------------------------------------- /lapy/_version.py: -------------------------------------------------------------------------------- 1 | """Version number.""" 2 | 3 | from importlib.metadata import version 4 | 5 | __version__ = version(__package__) 6 | -------------------------------------------------------------------------------- /doc/_templates/autosummary/module.rst: -------------------------------------------------------------------------------- 1 | {{ fullname }} 2 | {{ underline }} 3 | 4 | .. automodule:: {{ fullname }} 5 | :members: 6 | 7 | -------------------------------------------------------------------------------- /doc/api/lapy.solver.rst: -------------------------------------------------------------------------------- 1 | Solver 2 | ====== 3 | 4 | .. currentmodule:: lapy 5 | 6 | .. autosummary:: 7 | :toctree: generated/ 8 | 9 | Solver 10 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | schedule: 6 | interval: "weekly" 7 | -------------------------------------------------------------------------------- /doc/api/lapy.meshes.rst: -------------------------------------------------------------------------------- 1 | Geometries 2 | ========== 3 | 4 | .. currentmodule:: lapy 5 | 6 | .. autosummary:: 7 | :toctree: generated/ 8 | 9 | TriaMesh 10 | TetMesh 11 | Polygon 12 | -------------------------------------------------------------------------------- /doc/_templates/autosummary/function.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline }} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. autofunction:: {{ objname }} 6 | 7 | .. minigallery:: {{ fullname }} 8 | :add-heading: 9 | -------------------------------------------------------------------------------- /.codecov.yml: -------------------------------------------------------------------------------- 1 | github_checks: 2 | annotations: false 3 | 4 | coverage: 5 | status: 6 | project: 7 | default: 8 | informational: true 9 | patch: 10 | default: 11 | informational: true 12 | -------------------------------------------------------------------------------- /doc/changes/authors.inc: -------------------------------------------------------------------------------- 1 | .. _Andreas Girodi: https://github.com/agirodi 2 | .. _Kersten Diers: https://github.com/kdiers 3 | .. _Martin Reuter: https://github.com/m-reuter 4 | .. _Mathieu Scheltienne: https://github.com/mscheltienne 5 | -------------------------------------------------------------------------------- /doc/api/lapy.modules.rst: -------------------------------------------------------------------------------- 1 | Modules 2 | ======= 3 | 4 | .. currentmodule:: lapy 5 | 6 | .. 
autosummary:: 7 | :toctree: generated/ 8 | 9 | io 10 | shapedna 11 | heat 12 | diffgeo 13 | conformal 14 | plot 15 | -------------------------------------------------------------------------------- /doc/_templates/autosummary/class.rst: -------------------------------------------------------------------------------- 1 | {{ fullname | escape | underline }} 2 | 3 | .. currentmodule:: {{ module }} 4 | 5 | .. autoclass:: {{ objname }} 6 | :members: 7 | :inherited-members: 8 | 9 | .. minigallery:: {{ fullname }} 10 | :add-heading: 11 | -------------------------------------------------------------------------------- /lapy/__init__.py: -------------------------------------------------------------------------------- 1 | from ._version import __version__ # noqa: F401 2 | from .polygon import Polygon # noqa: F401 3 | from .solver import Solver # noqa: F401 4 | from .tet_mesh import TetMesh # noqa: F401 5 | from .tria_mesh import TriaMesh # noqa: F401 6 | from .utils._config import sys_info # noqa: F401 7 | -------------------------------------------------------------------------------- /doc/api/index.rst: -------------------------------------------------------------------------------- 1 | API References 2 | ============== 3 | 4 | This is the reference for classes (``CamelCase`` names) and functions 5 | (``underscore_case`` names) of ``lapy`` grouped thematically. 6 | 7 | .. toctree:: 8 | :maxdepth: 2 9 | 10 | lapy.meshes.rst 11 | lapy.solver.rst 12 | lapy.modules.rst 13 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/documentation.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Documentation 3 | about: Report an issue or make a suggestion related to LaPy documentation 4 | title: '' 5 | labels: documentation 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Documentation 11 | 12 | ... 13 | -------------------------------------------------------------------------------- /doc/tutorials/index.rst: -------------------------------------------------------------------------------- 1 | Tutorials 2 | ========= 3 | 4 | Here you can find Notebooks with examples highlighting some of LaPy's functionality. 5 | 6 | .. toctree:: 7 | :maxdepth: 1 8 | 9 | examples/Test_TriaMesh.ipynb 10 | examples/Test_TetMesh.ipynb 11 | examples/Test_Plot.ipynb 12 | examples/Test_ShapeDNA.ipynb 13 | examples/Test_TriaMesh_Geodesics.ipynb 14 | examples/Test_TetMesh_Geodesics.ipynb 15 | -------------------------------------------------------------------------------- /lapy/commands/sys_info.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from .. 
import sys_info 4 | 5 | 6 | def run(): 7 | """Run sys_info() command.""" 8 | parser = argparse.ArgumentParser( 9 | prog=f"{__package__.split('.')[0]}-sys_info", description="sys_info" 10 | ) 11 | parser.add_argument( 12 | "--developer", 13 | help="display information for optional dependencies", 14 | action="store_true", 15 | ) 16 | args = parser.parse_args() 17 | 18 | sys_info(developer=args.developer) 19 | -------------------------------------------------------------------------------- /data/icosahedron.off: -------------------------------------------------------------------------------- 1 | OFF 2 | 12 20 0 3 | 0.0 0.0 2.0 4 | 1.788854 0.000000 0.894427 5 | 0.552786 1.701302 0.894427 6 | -1.447214 1.051462 0.894427 7 | -1.447214 -1.051462 0.894427 8 | 0.552786 -1.701302 0.894427 9 | 1.447214 1.051462 -0.894427 10 | -0.552786 1.701302 -0.894427 11 | -1.788854 0.000000 -0.894427 12 | -0.552786 -1.701302 -0.894427 13 | 1.447214 -1.051462 -0.894427 14 | 0.0 0.0 -2.0 15 | 3 2 0 1 16 | 3 3 0 2 17 | 3 4 0 3 18 | 3 5 0 4 19 | 3 1 0 5 20 | 3 2 1 6 21 | 3 7 2 6 22 | 3 3 2 7 23 | 3 8 3 7 24 | 3 4 3 8 25 | 3 9 4 8 26 | 3 5 4 9 27 | 3 10 5 9 28 | 3 6 1 10 29 | 3 1 5 10 30 | 3 6 11 7 31 | 3 7 11 8 32 | 3 8 11 9 33 | 3 9 11 10 34 | 3 10 11 6 35 | -------------------------------------------------------------------------------- /doc/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature-request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature Request 3 | about: Submit a proposal/request for a new LaPy feature 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Feature Description 11 | 12 | ... 13 | 14 | ## Motivation 15 | 16 | ... 17 | 18 | ## Alternatives 19 | 20 | ... 21 | 22 | ## Additional Context 23 | 24 | ... 25 | -------------------------------------------------------------------------------- /doc/changes/latest.rst.template: -------------------------------------------------------------------------------- 1 | .. NOTE: we use cross-references to highlight new functions and classes. 2 | Please follow the examples below, so the changelog page will have a link to 3 | the function/class documentation. 4 | 5 | .. NOTE: there are 3 separate sections for changes, based on type: 6 | - "Enhancements" for new features 7 | - "Bugs" for bug fixes 8 | - "API changes" for backward-incompatible changes 9 | 10 | .. NOTE: You can use the :pr:`xx` and :issue:`xx` role to x-ref to a GitHub PR 11 | or issue from this project. 12 | 13 | .. include:: ./authors.inc 14 | 15 | .. 
_latest: 16 | 17 | Version x.x 18 | =========== 19 | 20 | Enhancements 21 | ------------ 22 | 23 | - xxx 24 | 25 | Bugs 26 | ---- 27 | 28 | - xxx 29 | 30 | API and behavior changes 31 | ------------------------ 32 | 33 | - xxx 34 | 35 | Authors 36 | ------- 37 | 38 | * `Mathieu Scheltienne`_ 39 | -------------------------------------------------------------------------------- /lapy/utils/tests/test_config.py: -------------------------------------------------------------------------------- 1 | from io import StringIO 2 | 3 | from .._config import sys_info 4 | 5 | 6 | def test_sys_info(): 7 | """Test info-showing utility.""" 8 | out = StringIO() 9 | sys_info(fid=out) 10 | value = out.getvalue() 11 | out.close() 12 | assert "Platform:" in value 13 | assert "Executable:" in value 14 | assert "CPU:" in value 15 | assert "Physical cores:" in value 16 | assert "Logical cores" in value 17 | assert "RAM:" in value 18 | assert "SWAP:" in value 19 | 20 | assert "numpy" in value 21 | assert "psutil" in value 22 | 23 | assert "style" not in value 24 | assert "test" not in value 25 | 26 | out = StringIO() 27 | sys_info(fid=out, developer=True) 28 | value = out.getvalue() 29 | out.close() 30 | 31 | assert "build" in value 32 | assert "style" in value 33 | assert "test" in value 34 | -------------------------------------------------------------------------------- /lapy/utils/tests/test_imports.py: -------------------------------------------------------------------------------- 1 | """Test _imports.py""" 2 | 3 | import pytest 4 | 5 | from .._imports import import_optional_dependency 6 | 7 | 8 | def test_import_optional_dependency(): 9 | """Test the import of optional dependencies.""" 10 | # Test import of present package 11 | numpy = import_optional_dependency("numpy") 12 | assert isinstance(numpy.__version__, str) 13 | 14 | # Test import of absent package 15 | with pytest.raises(ImportError, match="Missing optional dependency"): 16 | import_optional_dependency("non_existing_pkg", raise_error=True) 17 | 18 | # Test import of absent package without raise 19 | pkg = import_optional_dependency("non_existing_pkg", raise_error=False) 20 | assert pkg is None 21 | 22 | # Test extra 23 | with pytest.raises(ImportError, match="blabla"): 24 | import_optional_dependency("non_existing_pkg", extra="blabla") 25 | -------------------------------------------------------------------------------- /doc/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | %SPHINXBUILD% >NUL 2>NUL 14 | if errorlevel 9009 ( 15 | echo. 16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 17 | echo.installed, then set the SPHINXBUILD environment variable to point 18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 19 | echo.may add the Sphinx directory to PATH. 20 | echo. 
21 | echo.If you don't have Sphinx installed, grab it from 22 | echo.https://www.sphinx-doc.org/ 23 | exit /b 1 24 | ) 25 | 26 | if "%1" == "" goto help 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/questions-help-support.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Questions/Help/Support 3 | about: Submit a request for support or a question 4 | title: '' 5 | labels: question 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Question/Support Request 11 | 12 | ... 13 | 14 | ## Screenshots 15 | 16 | ... 17 | 18 | 19 | 20 | ## Environment 21 | - LaPy Version: ... 22 | - OS: ... 23 | - CPU: ... 24 | 25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: publish 2 | on: 3 | workflow_dispatch: 4 | release: 5 | types: [published] 6 | 7 | jobs: 8 | pypi: 9 | timeout-minutes: 10 10 | runs-on: ubuntu-latest 11 | steps: 12 | - name: Checkout repository 13 | uses: actions/checkout@v6 14 | - name: Setup Python 3.10 15 | uses: actions/setup-python@v6 16 | with: 17 | python-version: '3.10' 18 | - name: Install dependencies 19 | run: | 20 | python -m pip install --progress-bar off --upgrade pip setuptools wheel 21 | python -m pip install --progress-bar off .[build] 22 | - name: Display system information 23 | run: lapy-sys_info --developer 24 | - name: Build and publish 25 | env: 26 | TWINE_USERNAME: __token__ 27 | TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }} 28 | run: | 29 | python -m build 30 | twine upload dist/* 31 | -------------------------------------------------------------------------------- /doc/links.inc: -------------------------------------------------------------------------------- 1 | .. This (-*- rst -*-) format file contains commonly used link targets and name 2 | substitutions. It may be included in many files, therefore it should only 3 | contain link targets and name substitutions. Try grepping for "^\.\. _" to 4 | find plausible candidates for this list. 5 | 6 | .. NOTE: reST targets are 7 | __not_case_sensitive__, so only one target definition is needed for: 8 | nipy, NIPY, Nipy, etc... 9 | 10 | 11 | .. project 12 | 13 | .. _project pypi: https://pypi.org/project/lapy/ 14 | .. _project conda: https://anaconda.org/conda-forge/lapy 15 | .. _project github: https://github.com/Deep-MI/LaPy 16 | .. _project license: https://github.com/Deep-MI/LaPy/blob/master/LICENSE 17 | 18 | 19 | .. license 20 | 21 | .. _MIT license: https://opensource.org/licenses/MIT 22 | 23 | 24 | .. numpy 25 | 26 | .. _numpy: https://numpy.org/ 27 | 28 | 29 | .. sklearn 30 | 31 | .. _scikit-learn: https://scikit-learn.org/stable/ 32 | 33 | 34 | .. scipy 35 | 36 | .. 
_scipy: https://scipy.org/ 37 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Deep Medical Imaging Lab (PI Reuter) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug-report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug Report 3 | about: Create a bug report to help us improve LaPy 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Description 11 | 12 | .... 13 | 14 | ## Steps to Reproduce 15 | 22 | 23 | 24 | ... 25 | 26 | ## Expected Behavior 27 | 28 | ... 29 | 30 | ## Screenshots 31 | 32 | ... 33 | 34 | ## Environment 35 | - LaPy Version: ... 36 | - OS: ... 37 | - CPU: ... 38 | 39 | 40 | 41 | 42 | 43 | ## Additional Context 44 | 45 | ... 46 | -------------------------------------------------------------------------------- /.github/workflows/code-style.yml: -------------------------------------------------------------------------------- 1 | name: code-style 2 | concurrency: 3 | group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }} 4 | cancel-in-progress: true 5 | on: 6 | pull_request: 7 | push: 8 | branches: [main] 9 | workflow_dispatch: 10 | 11 | jobs: 12 | style: 13 | timeout-minutes: 10 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout repository 17 | uses: actions/checkout@v6 18 | - name: Setup Python 3.10 19 | uses: actions/setup-python@v6 20 | with: 21 | python-version: '3.10' 22 | - name: Install dependencies 23 | run: | 24 | python -m pip install --progress-bar off --upgrade pip setuptools wheel 25 | python -m pip install --progress-bar off .[style] 26 | - name: Run Ruff 27 | run: ruff check . 28 | - name: Run codespell 29 | uses: codespell-project/actions-codespell@master 30 | with: 31 | check_filenames: true 32 | check_hidden: true 33 | skip: './.git,./build,./.mypy_cache,./.pytest_cache' 34 | ignore_words_file: ./.codespellignore 35 | - name: Run pydocstyle 36 | run: pydocstyle . 37 | - name: Run bibclean 38 | run: bibclean-check doc/references.bib 39 | -------------------------------------------------------------------------------- /doc/index.rst: -------------------------------------------------------------------------------- 1 | .. 
include:: ./links.inc 2 | 3 | **LaPy** 4 | ======== 5 | 6 | .. toctree:: 7 | :hidden: 8 | 9 | api/index 10 | tutorials/index 11 | changes/index 12 | 13 | LaPy is an `open-source Python package <project github_>`_ for differential 14 | geometry on triangle and tetrahedral meshes. It includes an FEM solver for 15 | the Laplace, Poisson, and Heat equations. Further functionality 16 | includes the computation of gradients, divergence, mean-curvature flow, 17 | conformal mappings, geodesics, ShapeDNA (Laplace spectra), and IO and 18 | plotting methods. 19 | 20 | LaPy is written purely in Python 3 without sacrificing speed as almost all 21 | loops are vectorized, drawing upon efficient and sparse mesh data structures. 22 | 23 | 24 | Install 25 | ------- 26 | 27 | LaPy is available on `PyPI <project pypi_>`_ and on 28 | `conda-forge <project conda_>`_. 29 | 30 | .. tab-set:: 31 | 32 | .. tab-item:: PyPI 33 | 34 | .. code-block:: bash 35 | 36 | pip install lapy 37 | 38 | .. tab-item:: Conda 39 | 40 | .. code-block:: bash 41 | 42 | conda install -c conda-forge lapy 43 | 44 | .. tab-item:: Source 45 | 46 | .. code-block:: bash 47 | 48 | pip install git+https://github.com/Deep-MI/LaPy 49 | 50 | License 51 | ------- 52 | 53 | ``lapy`` is licensed under the `MIT license`_. 54 | A full copy of the license can be found `on GitHub <project license_>`_. 55 | -------------------------------------------------------------------------------- /doc/changes/latest.rst: -------------------------------------------------------------------------------- 1 | .. NOTE: we use cross-references to highlight new functions and classes. 2 | Please follow the examples below, so the changelog page will have a link to 3 | the function/class documentation. 4 | 5 | .. NOTE: there are 3 separate sections for changes, based on type: 6 | - "Enhancements" for new features 7 | - "Bugs" for bug fixes 8 | - "API changes" for backward-incompatible changes 9 | 10 | .. NOTE: You can use the :pr:`xx` and :issue:`xx` role to x-ref to a GitHub PR 11 | or issue from this project. 12 | 13 | .. include:: ./authors.inc 14 | 15 | .. _latest: 16 | 17 | Version 1.0 18 | =========== 19 | 20 | API changes 21 | ----------- 22 | 23 | - Classes: TriaMesh, TetMesh, and Solver are still available at the top level and can be imported directly from lapy. 24 | - Mesh IO: mesh classes have been extended with IO class member functions and TriaIO and TetIO have been deprecated. Use read\_* and write\_* class members to load and write meshes, for example, TriaMesh.read_vtk() to import a VTK triangle mesh file. This simplifies IO greatly. 25 | - Module names have been changed to comply with PEP8 conventions (lower case). For example, DiffGeo to diffgeo, FuncIO to io, Plot to plot, etc. 26 | 27 | Bugs 28 | ---- 29 | 30 | - Fixed numpy deprecation issue in import_vfunc and import_ev functions.
31 | 32 | Enhancements 33 | ------------ 34 | 35 | - Comply with the numpy convention for docstrings (by `Andreas Girodi`_, `Kersten Diers`_ and `Martin Reuter`_ in :pr:`19` and :pr:`21`) 36 | - Add initial documentation build (by `Mathieu Scheltienne`_ in :pr:`22`) 37 | 38 | 39 | Authors 40 | ------- 41 | 42 | * `Andreas Girodi`_ 43 | * `Kersten Diers`_ 44 | * `Martin Reuter`_ 45 | * `Mathieu Scheltienne`_ 46 | -------------------------------------------------------------------------------- /.github/workflows/build.yml: -------------------------------------------------------------------------------- 1 | name: build 2 | concurrency: 3 | group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }} 4 | cancel-in-progress: true 5 | on: 6 | pull_request: 7 | push: 8 | branches: [main] 9 | workflow_dispatch: 10 | 11 | jobs: 12 | build: 13 | timeout-minutes: 10 14 | strategy: 15 | fail-fast: false 16 | matrix: 17 | os: [ubuntu, macos, windows] 18 | python-version: ["3.9", "3.10", "3.11", "3.12"] 19 | name: ${{ matrix.os }} - py${{ matrix.python-version }} 20 | runs-on: ${{ matrix.os }}-latest 21 | defaults: 22 | run: 23 | shell: bash 24 | steps: 25 | - name: Checkout repository 26 | uses: actions/checkout@v6 27 | - name: Setup Python ${{ matrix.python-version }} 28 | uses: actions/setup-python@v6 29 | with: 30 | python-version: ${{ matrix.python-version }} 31 | - name: Install dependencies 32 | run: | 33 | python -m pip install --progress-bar off --upgrade pip setuptools wheel 34 | python -m pip install --progress-bar off .[build] 35 | - name: Test package install 36 | run: lapy-sys_info 37 | - name: Remove package install 38 | run: python -m pip uninstall -yq lapy 39 | - name: Build package 40 | run: python -m build 41 | - name: Install sdist 42 | run: pip install ./dist/*.tar.gz 43 | - name: Test sdist install 44 | run: lapy-sys_info 45 | - name: Remove sdist install 46 | run: python -m pip uninstall -yq lapy 47 | - name: Install wheel 48 | run: pip install ./dist/*.whl 49 | - name: Test wheel install 50 | run: lapy-sys_info 51 | - name: Remove wheel install 52 | run: python -m pip uninstall -yq lapy 53 | -------------------------------------------------------------------------------- /lapy/utils/_imports.py: -------------------------------------------------------------------------------- 1 | """Handle optional dependency imports. 2 | 3 | Inspired from pandas: https://pandas.pydata.org/ 4 | """ 5 | 6 | import importlib 7 | 8 | # A mapping from import name to package name (on PyPI) when the package name 9 | # is different. 10 | INSTALL_MAPPING = { 11 | "sksparse": "scikit-sparse", 12 | } 13 | 14 | 15 | def import_optional_dependency( 16 | name: str, 17 | extra: str = "", 18 | raise_error: bool = True, 19 | ): 20 | """Import an optional dependency. 21 | 22 | By default, if a dependency is missing an ImportError with a nice message 23 | will be raised. 24 | 25 | Parameters 26 | ---------- 27 | name : str 28 | The module name. 29 | extra : str, default="" 30 | Additional text to include in the ImportError message. 31 | raise_error : bool, default=True 32 | What to do when a dependency is not found. 33 | * True : Raise an ImportError. 34 | * False: Return None. 35 | 36 | Returns 37 | ------- 38 | module : Optional[ModuleType] 39 | The imported module when found. 40 | None is returned when the package is not found and raise_error is 41 | False. 
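        Examples
        --------
        A minimal usage sketch, mirroring this package's own tests
        ("non_existing_pkg" stands in for any package that is not installed)::

            >>> numpy = import_optional_dependency("numpy")
            >>> pkg = import_optional_dependency("non_existing_pkg", raise_error=False)
            >>> pkg is None
            True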
42 | 43 | Raises 44 | ------- 45 | ImportError 46 | dependency not found; see raise_error 47 | """ 48 | 49 | package_name = INSTALL_MAPPING.get(name) 50 | install_name = package_name if package_name is not None else name 51 | 52 | try: 53 | module = importlib.import_module(name) 54 | except ImportError: 55 | if raise_error: 56 | raise ImportError( 57 | f"Missing optional dependency '{install_name}'. {extra} " 58 | f"Use pip or conda to install {install_name}." 59 | ) from None 60 | else: 61 | return None 62 | 63 | return module 64 | -------------------------------------------------------------------------------- /.github/workflows/doc.yml: -------------------------------------------------------------------------------- 1 | name: doc 2 | concurrency: 3 | group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }} 4 | cancel-in-progress: true 5 | on: 6 | pull_request: 7 | push: 8 | branches: [main] 9 | workflow_dispatch: 10 | 11 | jobs: 12 | build: 13 | timeout-minutes: 10 14 | runs-on: ubuntu-latest 15 | defaults: 16 | run: 17 | shell: bash 18 | steps: 19 | - name: Checkout repository 20 | uses: actions/checkout@v6 21 | with: 22 | path: ./main 23 | - name: Setup Python 3.10 24 | uses: actions/setup-python@v6 25 | with: 26 | python-version: '3.10' 27 | - name: Install package 28 | run: | 29 | python -m pip install --progress-bar off --upgrade pip setuptools wheel 30 | python -m pip install --progress-bar off main/.[doc] 31 | - name: Display system information 32 | run: lapy-sys_info --developer 33 | - name: Build doc 34 | run: TZ=UTC sphinx-build ./main/doc ./doc-build/dev -W --keep-going 35 | - name: Upload documentation 36 | uses: actions/upload-artifact@v6 37 | with: 38 | name: doc-dev 39 | path: ./doc-build/dev 40 | 41 | deploy: 42 | if: github.event_name == 'push' 43 | needs: build 44 | timeout-minutes: 10 45 | runs-on: ubuntu-latest 46 | permissions: 47 | contents: write 48 | defaults: 49 | run: 50 | shell: bash 51 | steps: 52 | - name: Download documentation 53 | uses: actions/download-artifact@v7 54 | with: 55 | name: doc-dev 56 | path: ./doc-dev 57 | - name: Deploy dev documentation 58 | uses: peaceiris/actions-gh-pages@v4 59 | with: 60 | github_token: ${{ secrets.GITHUB_TOKEN }} 61 | publish_dir: ./doc-dev 62 | destination_dir: ./dev 63 | user_name: 'github-actions[bot]' 64 | user_email: 'github-actions[bot]@users.noreply.github.com' 65 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # macOS 2 | .DS_Store 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | *$py.class 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | MANIFEST 30 | 31 | # PyInstaller 32 | # Usually these files are written by a python script from a template 33 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
34 | *.manifest 35 | *.spec 36 | 37 | # Installer logs 38 | pip-log.txt 39 | pip-delete-this-directory.txt 40 | 41 | # Unit test / coverage reports 42 | htmlcov/ 43 | .tox/ 44 | .nox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | junit-results.xml 54 | 55 | # Translations 56 | *.mo 57 | *.pot 58 | 59 | # Django stuff: 60 | *.log 61 | local_settings.py 62 | db.sqlite3 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | doc/bin 73 | doc/_build/ 74 | doc/generated/ 75 | doc/api/generated/ 76 | 77 | # PyBuilder 78 | target/ 79 | 80 | # Jupyter Notebook 81 | .ipynb_checkpoints 82 | 83 | # IPython 84 | profile_default/ 85 | ipython_config.py 86 | 87 | # pyenv 88 | .python-version 89 | 90 | # celery beat schedule file 91 | celerybeat-schedule 92 | 93 | # SageMath parsed files 94 | *.sage.py 95 | 96 | # Environments 97 | .env 98 | .venv 99 | env/ 100 | venv/ 101 | ENV/ 102 | env.bak/ 103 | venv.bak/ 104 | 105 | # Spyder project settings 106 | .spyderproject 107 | .spyproject 108 | 109 | # Rope project settings 110 | .ropeproject 111 | 112 | # mkdocs documentation 113 | /site 114 | 115 | # mypy 116 | .mypy_cache/ 117 | .dmypy.json 118 | dmypy.json 119 | 120 | # Pyre type checker 121 | .pyre/ 122 | 123 | # PyCharm 124 | **/.idea/ 125 | -------------------------------------------------------------------------------- /.github/workflows/pytest.yml: -------------------------------------------------------------------------------- 1 | name: pytest 2 | concurrency: 3 | group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }} 4 | cancel-in-progress: true 5 | on: 6 | pull_request: 7 | paths: 8 | - '**.py' 9 | push: 10 | branches: [main] 11 | paths: 12 | - '**.py' 13 | workflow_dispatch: 14 | 15 | jobs: 16 | pytest: 17 | timeout-minutes: 30 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | os: [ubuntu, macos, windows] 22 | python-version: ["3.9", "3.10", "3.11", "3.12"] 23 | # some tests fail (numerical issues) in older python on mac, so we ... 
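        # (the matrix exclude below drops the macos + python 3.9 combination)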
24 | exclude: 25 | - os: macos 26 | python-version: '3.9' 27 | name: ${{ matrix.os }} - py${{ matrix.python-version }} 28 | runs-on: ${{ matrix.os }}-latest 29 | defaults: 30 | run: 31 | shell: bash 32 | steps: 33 | - name: Checkout repository 34 | uses: actions/checkout@v6 35 | - name: Setup Python ${{ matrix.python-version }} 36 | uses: actions/setup-python@v6 37 | with: 38 | python-version: ${{ matrix.python-version }} 39 | - name: Install package 40 | run: | 41 | python -m pip install --progress-bar off --upgrade pip setuptools wheel 42 | python -m pip install --progress-bar off .[test] 43 | - name: Display system information 44 | run: lapy-sys_info --developer 45 | - name: Run pytest 46 | run: pytest lapy --cov=lapy --cov-report=xml --cov-config=pyproject.toml 47 | - name: Upload to codecov 48 | if: ${{ matrix.os == 'ubuntu' && matrix.python-version == '3.10' && github.repository == 'Deep-MI/LaPy' }} 49 | uses: codecov/codecov-action@v5 50 | with: 51 | files: ./coverage.xml 52 | flags: unittests # optional 53 | name: codecov-umbrella # optional 54 | #fail_ci_if_error: true # optional (default = false) 55 | verbose: true # optional (default = false) 56 | token: ${{ secrets.CODECOV_TOKEN }} 57 | #slug: deep-mi/LaPy 58 | -------------------------------------------------------------------------------- /doc/references.bib: -------------------------------------------------------------------------------- 1 | @article{conformal_parameterization_2020, 2 | author = {Choi, Gary P. T. and Leung-Liu, Yusan and Gu, Xianfeng and Lui, Lok Ming}, 3 | doi = {10.1137/19M125337X}, 4 | journal = {SIAM Journal on Imaging Sciences}, 5 | number = {3}, 6 | pages = {1049-1083}, 7 | title = {Parallelizable Global Conformal Parameterization of Simply-Connected Surfaces via Partial Welding}, 8 | volume = {13}, 9 | year = {2020} 10 | } 11 | 12 | @article{numpy_2020, 13 | author = {Harris, Charles R. and Millman, K. Jarrod and van der Walt, Stéfan J. and Gommers, Ralf and Virtanen, Pauli and Cournapeau, David and Wieser, Eric and Taylor, Julian and Berg, Sebastian and Smith, Nathaniel J. and Kern, Robert and Picus, Matti and Hoyer, Stephan and van Kerkwijk, Marten H. and Brett, Matthew and Haldane, Allan and del Río, Jaime Fernández and Wiebe, Mark and Peterson, Pearu and Gérard-Marchant, Pierre and Sheppard, Kevin and Reddy, Tyler and Weckesser, Warren and Abbasi, Hameer and Gohlke, Christoph and Oliphant, Travis E.}, 14 | doi = {10.1038/s41586-020-2649-2}, 15 | journal = {Nature}, 16 | month = {September}, 17 | number = {7825}, 18 | pages = {357--362}, 19 | title = {Array programming with {NumPy}}, 20 | volume = {585}, 21 | year = {2020} 22 | } 23 | 24 | @article{scipy_2020, 25 | author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and van der Walt, Stéfan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, İlhan and Feng, Yu and Moore, Eric W. and VanderPlas, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antônio H. 
and Pedregosa, Fabian and van Mulbregt, Paul and {SciPy 1.0 Contributors} and Vijaykumar, Aditya and Bardelli, Alessandro Pietro and Rothberg, Alex and Hilboll, Andreas and Kloeckner, Andreas and Scopatz, Anthony and Lee, Antony and Rokem, Ariel and Woods, C. Nathan and Fulton, Chad and Masson, Charles and Häggström, Christian and Fitzgerald, Clark and Nicholson, David A. and Hagen, David R. and Pasechnik, Dmitrii V. and Olivetti, Emanuele and Martin, Eric and Wieser, Eric and Silva, Fabrice and Lenders, Felix and Wilhelm, Florian and Young, G. and Price, Gavin A. and Ingold, Gert-Ludwig and Allen, Gregory E. and Lee, Gregory R. and Audren, Hervé and Probst, Irvin and Dietrich, Jörg P. and Silterra, Jacob and Webber, James T and Slavič, Janko and Nothman, Joel and Buchner, Johannes and Kulick, Johannes and Schönberger, Johannes L. and de Miranda Cardoso, José Vinícius and Reimer, Joscha and Harrington, Joseph and Rodríguez, Juan Luis Cano and Nunez-Iglesias, Juan and Kuczynski, Justin and Tritz, Kevin and Thoma, Martin and Newville, Matthew and Kümmerer, Matthias and Bolingbroke, Maximilian and Tartre, Michael and Pak, Mikhail and Smith, Nathaniel J. and Nowaczyk, Nikolai and Shebanov, Nikolay and Pavlyk, Oleksandr and Brodtkorb, Per A. and Lee, Perry and McGibbon, Robert T. and Feldbauer, Roman and Lewis, Sam and Tygier, Sam and Sievert, Scott and Vigna, Sebastiano and Peterson, Stefan and More, Surhud and Pudlik, Tadeusz and Oshima, Takuya and Pingel, Thomas J. and Robitaille, Thomas P. and Spura, Thomas and Jones, Thouis R. and Cera, Tim and Leslie, Tim and Zito, Tiziano and Krauss, Tom and Upadhyay, Utkarsh and Halchenko, Yaroslav O. and Vázquez-Baeza, Yoshiki}, 26 | doi = {10.1038/s41592-019-0686-2}, 27 | journal = {Nature Methods}, 28 | month = {March}, 29 | number = {3}, 30 | pages = {261--272}, 31 | title = {{SciPy} 1.0: fundamental algorithms for scientific computing in {Python}}, 32 | volume = {17}, 33 | year = {2020} 34 | } 35 | -------------------------------------------------------------------------------- /lapy/utils/_config.py: -------------------------------------------------------------------------------- 1 | import platform 2 | import re 3 | import sys 4 | from functools import partial 5 | from importlib.metadata import requires, version 6 | from typing import IO, Callable, Optional 7 | 8 | import psutil 9 | 10 | 11 | def sys_info(fid: Optional[IO] = None, developer: bool = False): 12 | """Print the system information for debugging. 13 | 14 | Parameters 15 | ---------- 16 | fid : file-like, default=None 17 | The file to write to, passed to :func:`print`. 18 | Can be None to use :data:`sys.stdout`. 19 | developer : bool, default=False 20 | If True, display information about optional dependencies. 
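    Examples
    --------
    A minimal usage sketch (prints to stdout; output abbreviated)::

        >>> from lapy import sys_info
        >>> sys_info()                # platform, Python, CPU, RAM, dependency versions
        >>> sys_info(developer=True)  # additionally lists the optional extras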
21 | """ 22 | 23 | ljust = 26 24 | out = partial(print, end="", file=fid) 25 | package = __package__.split(".")[0] 26 | 27 | # OS information - requires python 3.8 or above 28 | out("Platform:".ljust(ljust) + platform.platform() + "\n") 29 | # Python information 30 | out("Python:".ljust(ljust) + sys.version.replace("\n", " ") + "\n") 31 | out("Executable:".ljust(ljust) + sys.executable + "\n") 32 | # CPU information 33 | out("CPU:".ljust(ljust) + platform.processor() + "\n") 34 | out("Physical cores:".ljust(ljust) + str(psutil.cpu_count(False)) + "\n") 35 | out("Logical cores:".ljust(ljust) + str(psutil.cpu_count(True)) + "\n") 36 | # Memory information 37 | out("RAM:".ljust(ljust)) 38 | out(f"{psutil.virtual_memory().total / float(2 ** 30):0.1f} GB\n") 39 | out("SWAP:".ljust(ljust)) 40 | out(f"{psutil.swap_memory().total / float(2 ** 30):0.1f} GB\n") 41 | 42 | # dependencies 43 | out("\nDependencies info\n") 44 | out(f"{package}:".ljust(ljust) + version(package) + "\n") 45 | dependencies = [ 46 | elt.split(";")[0].rstrip() for elt in requires(package) if "extra" not in elt 47 | ] 48 | _list_dependencies_info(out, ljust, dependencies) 49 | 50 | # extras 51 | if developer: 52 | keys = ( 53 | "build", 54 | "chol", 55 | "doc", 56 | "test", 57 | "style", 58 | ) 59 | for key in keys: 60 | dependencies = [ 61 | elt.split(";")[0].rstrip() 62 | for elt in requires(package) 63 | if f"extra == '{key}'" in elt or f'extra == "{key}"' in elt 64 | ] 65 | if len(dependencies) == 0: 66 | continue 67 | out(f"\nOptional '{key}' info\n") 68 | _list_dependencies_info(out, ljust, dependencies) 69 | 70 | 71 | def _list_dependencies_info(out: Callable, ljust: int, dependencies: list[str]): 72 | """List dependencies names and versions. 73 | 74 | Parameters 75 | ---------- 76 | out : Callable 77 | output function 78 | ljust : int 79 | length of returned string 80 | dependencies : List[str] 81 | list of dependencies 82 | 83 | """ 84 | 85 | for dep in dependencies: 86 | # handle dependencies with version specifiers 87 | specifiers_pattern = r"(~=|==|!=|<=|>=|<|>|===)" 88 | specifiers = re.findall(specifiers_pattern, dep) 89 | if len(specifiers) != 0: 90 | dep, _ = dep.split(specifiers[0]) 91 | while not dep[-1].isalpha(): 92 | dep = dep[:-1] 93 | # handle dependencies provided with a [key], e.g. pydocstyle[toml] 94 | if "[" in dep: 95 | dep = dep.split("[")[0] 96 | try: 97 | version_ = version(dep) 98 | except Exception: 99 | version_ = "Not found." 100 | 101 | # handle special dependencies with backends, C dep, .. 
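        # (for plotting packages, the active matplotlib backend is appended to
        # the reported version, since plotting behavior often depends on it)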
102 | if dep in ("matplotlib", "seaborn") and version_ != "Not found.": 103 | try: 104 | from matplotlib import pyplot as plt 105 | 106 | backend = plt.get_backend() 107 | except Exception: 108 | backend = "Not found" 109 | 110 | out(f"{dep}:".ljust(ljust) + version_ + f" (backend: {backend})\n") 111 | 112 | else: 113 | out(f"{dep}:".ljust(ljust) + version_ + "\n") 114 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | 'setuptools >= 61.0.0', 4 | 'numpy>=2', 5 | ] 6 | build-backend = 'setuptools.build_meta' 7 | 8 | [project] 9 | name = 'lapy' 10 | version = '1.5.0-dev' 11 | description = 'A package for differential geometry on meshes (Laplace, FEM)' 12 | readme = 'README.md' 13 | license = {file = 'LICENSE'} 14 | requires-python = '>=3.9' 15 | authors = [ 16 | {name = 'Martin Reuter', email = 'martin.reuter@dzne.de'}, 17 | ] 18 | maintainers = [ 19 | {name = 'Martin Reuter', email = 'martin.reuter@dzne.de'}, 20 | ] 21 | keywords = [ 22 | 'python', 23 | 'Laplace', 24 | 'FEM', 25 | 'ShapeDNA', 26 | 'BrainPrint', 27 | 'Triangle Mesh', 28 | 'Tetrahedra Mesh', 29 | 'Geodesics in Heat', 30 | 'Mean Curvature Flow', 31 | ] 32 | classifiers = [ 33 | 'Operating System :: Microsoft :: Windows', 34 | 'Operating System :: Unix', 35 | 'Operating System :: MacOS', 36 | 'Programming Language :: Python :: 3 :: Only', 37 | 'Programming Language :: Python :: 3.9', 38 | 'Programming Language :: Python :: 3.10', 39 | 'Programming Language :: Python :: 3.11', 40 | 'Programming Language :: Python :: 3.12', 41 | 'Natural Language :: English', 42 | 'License :: OSI Approved :: MIT License', 43 | 'Intended Audience :: Science/Research', 44 | ] 45 | dependencies = [ 46 | 'nibabel', 47 | 'numpy>=1.21', 48 | 'plotly', 49 | 'psutil', 50 | 'scipy!=1.13.0', 51 | ] 52 | 53 | [project.optional-dependencies] 54 | build = [ 55 | 'build', 56 | 'twine', 57 | ] 58 | chol = [ 59 | 'scikit-sparse>=0.4.16', 60 | ] 61 | doc = [ 62 | 'furo!=2023.8.17', 63 | 'matplotlib', 64 | 'memory-profiler', 65 | 'numpydoc', 66 | 'sphinx!=7.2.*', 67 | 'sphinxcontrib-bibtex', 68 | 'sphinx-copybutton', 69 | 'sphinx-design', 70 | 'sphinx-gallery', 71 | 'sphinx-issues', 72 | 'pypandoc', 73 | 'nbsphinx', 74 | 'IPython', # For syntax highlighting in notebooks 75 | 'ipykernel', 76 | ] 77 | style = [ 78 | 'bibclean', 79 | 'codespell', 80 | 'pydocstyle[toml]', 81 | 'ruff', 82 | ] 83 | test = [ 84 | 'pytest', 85 | 'pytest-cov', 86 | 'pytest-timeout', 87 | ] 88 | all = [ 89 | 'lapy[build]', 90 | 'lapy[chol]', 91 | 'lapy[doc]', 92 | 'lapy[style]', 93 | 'lapy[test]', 94 | ] 95 | full = [ 96 | 'lapy[all]', 97 | ] 98 | 99 | [project.urls] 100 | homepage = 'https://Deep-MI.github.io/LaPy/dev/index.html' 101 | documentation = 'https://Deep-MI.github.io/LaPy/dev/index.html' 102 | source = 'https://github.com/Deep-MI/LaPy' 103 | tracker = 'https://github.com/Deep-MI/LaPy/issues' 104 | 105 | [project.scripts] 106 | lapy-sys_info = 'lapy.commands.sys_info:run' 107 | 108 | [tool.setuptools] 109 | include-package-data = false 110 | 111 | [tool.setuptools.packages.find] 112 | include = ['lapy*'] 113 | exclude = ['lapy*tests'] 114 | 115 | [tool.pydocstyle] 116 | convention = 'numpy' 117 | ignore-decorators = '(copy_doc|property|.*setter|.*getter|pyqtSlot|Slot)' 118 | match = '^(?!setup|__init__|test_).*\.py' 119 | match-dir = '^lapy.*' 120 | add_ignore = 'D100,D104,D107' 121 | 122 | [tool.ruff] 123 | 
line-length = 120 124 | extend-exclude = [ 125 | "doc", 126 | ".github", 127 | "data", 128 | ] 129 | 130 | [tool.ruff.lint] 131 | # https://docs.astral.sh/ruff/linter/#rule-selection 132 | select = [ 133 | "E", # pycodestyle 134 | "F", # Pyflakes 135 | "UP", # pyupgrade 136 | "B", # flake8-bugbear 137 | "I", # isort 138 | # "SIM", # flake8-simplify 139 | ] 140 | 141 | [tool.ruff.lint.per-file-ignores] 142 | "__init__.py" = ["F401"] 143 | "examples/*" = ["E501"] # ignore too long lines in example ipynb 144 | 145 | [tool.pytest.ini_options] 146 | minversion = '6.0' 147 | addopts = '--durations 20 --junit-xml=junit-results.xml --verbose' 148 | filterwarnings = [] 149 | 150 | [tool.coverage.run] 151 | branch = true 152 | cover_pylib = false 153 | omit = [ 154 | '**/__init__.py', 155 | '**/lapy/_version.py', 156 | '**/lapy/commands/*', 157 | '**/tests/**', 158 | ] 159 | 160 | [tool.coverage.report] 161 | exclude_lines = [ 162 | 'pragma: no cover', 163 | 'if __name__ == .__main__.:', 164 | ] 165 | precision = 2 166 | -------------------------------------------------------------------------------- /lapy/utils/tests/test_visualization_meshes.py: -------------------------------------------------------------------------------- 1 | import json 2 | from pathlib import Path 3 | 4 | import pytest 5 | 6 | from ...io import write_ev 7 | from ...solver import Solver 8 | from ...tet_mesh import TetMesh 9 | from ...tria_mesh import TriaMesh 10 | 11 | 12 | # Fixture to load the TetMesh 13 | @pytest.fixture 14 | def load_tria_mesh(): 15 | tria = TriaMesh.read_vtk("data/cubeTria.vtk") 16 | return tria 17 | 18 | 19 | @pytest.fixture 20 | def load_tet_mesh(): 21 | tetra = TetMesh.read_vtk("data/cubeTetra.vtk") 22 | return tetra 23 | 24 | 25 | @pytest.fixture 26 | def loaded_data(): 27 | """ 28 | Load and provide the expected outcomes data from a JSON file. 29 | 30 | Returns: 31 | dict: Dictionary containing the expected outcomes data. 32 | """ 33 | with open("lapy/utils/tests/expected_outcomes.json") as f: 34 | expected_outcomes = json.load(f) 35 | return expected_outcomes 36 | 37 | 38 | def test_visualization_triangle_mesh(load_tria_mesh, loaded_data): 39 | """ 40 | Test visualization of a triangle mesh using expected outcomes. 41 | 42 | Parameters: 43 | - load_tria_mesh (fixture): Fixture for loading a triangle mesh. 44 | - loaded_data (fixture): Fixture for loading expected outcomes. 45 | 46 | Raises: 47 | - AssertionError: If any test assertions fail. 
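    Notes:
    - Writes "data/cubeTria.ev" to disk as a side effect; the test then checks
      that the file exists.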
48 | """ 49 | tria = load_tria_mesh 50 | fem = Solver(tria) 51 | evals, evecs = fem.eigs(k=3) 52 | evDict = dict() 53 | evDict["Refine"] = 0 54 | evDict["Degree"] = 1 55 | evDict["Dimension"] = 2 56 | evDict["Elements"] = len(tria.t) 57 | evDict["DoF"] = len(tria.v) 58 | evDict["NumEW"] = 3 59 | evDict["Eigenvalues"] = evals 60 | evDict["Eigenvectors"] = evecs 61 | write_ev("data/cubeTria.ev", evDict) 62 | output_file = Path("data/cubeTria.ev") 63 | assert output_file.exists() # Check if the output file exists 64 | expected_elements = loaded_data["expected_outcomes"][ 65 | "test_visualization_triangle_mesh" 66 | ]["expected_elements"] 67 | expected_dof = loaded_data["expected_outcomes"]["test_visualization_triangle_mesh"][ 68 | "expected_dof" 69 | ] 70 | expected_ev = loaded_data["expected_outcomes"]["test_visualization_triangle_mesh"][ 71 | "expected_ev" 72 | ] 73 | 74 | expected_evec_shape = (2402, 3) 75 | assert evDict["Elements"] == expected_elements 76 | assert evDict["DoF"] == expected_dof 77 | assert evals == pytest.approx(expected_ev, rel=1e-5, abs=1e-5) 78 | assert evecs.shape == expected_evec_shape 79 | 80 | 81 | def test_visualization_tetrahedral_mesh(load_tet_mesh, loaded_data): 82 | """ 83 | Test visualization of a tetrahedral mesh using expected outcomes. 84 | 85 | Parameters: 86 | - load_tet_mesh (fixture): Fixture for loading a tetrahedral mesh. 87 | - loaded_data (fixture): Fixture for loading expected outcomes. 88 | 89 | Raises: 90 | - AssertionError: If any test assertions fail. 91 | """ 92 | tetra = load_tet_mesh 93 | fem = Solver(tetra) 94 | evals, evecs = fem.eigs(k=3) 95 | evDict = dict() 96 | evDict["Refine"] = 0 97 | evDict["Degree"] = 1 98 | evDict["Dimension"] = 2 99 | evDict["Elements"] = len(tetra.t) 100 | evDict["DoF"] = len(tetra.v) 101 | evDict["NumEW"] = 3 102 | evDict["Eigenvalues"] = evals 103 | evDict["Eigenvectors"] = evecs 104 | write_ev("data/cubeTetra.ev", evDict) 105 | output_file = Path("data/cubeTetra.ev") 106 | assert output_file.exists() # Check if the output file exists 107 | expected_elements = loaded_data["expected_outcomes"][ 108 | "test_visualization_tetrahedral_mesh" 109 | ]["expected_elements"] 110 | expected_dof = loaded_data["expected_outcomes"][ 111 | "test_visualization_tetrahedral_mesh" 112 | ]["expected_dof"] 113 | expected_ev = loaded_data["expected_outcomes"][ 114 | "test_visualization_tetrahedral_mesh" 115 | ]["expected_ev"] 116 | expected_evec_shape = (9261, 3) 117 | assert evDict["Elements"] == expected_elements 118 | assert evDict["DoF"] == expected_dof 119 | assert evals == pytest.approx(expected_ev, rel=1e-4, abs=1e-4) 120 | assert evecs.shape == expected_evec_shape 121 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![PyPI version](https://badge.fury.io/py/lapy.svg)](https://pypi.org/project/lapy/) 2 | # LaPy 3 | 4 | LaPy is an open-source Python library for differential geometry and finite-element computations on triangle and tetrahedral meshes. It provides mesh data structures and fast, vectorized algorithms to compute differential operators, curvature flows, spectral descriptors and to solve PDEs such as Laplace, Poisson and Heat equations on surfaces and volumes. 5 | 6 | Key design goals: 7 | - Pure Python 3 implementation with heavy use of NumPy and SciPy for performance. 8 | - Sparse, memory-efficient mesh data structures and vectorized algorithms. 
9 | - Utilities for IO, visualization, and common geometry processing tasks. 10 | 11 | ## Table of contents 12 | - [Features](#features) 13 | - [Quick start](#quick-start) 14 | - [Installation](#installation) 15 | - [Solver backends](#solver-backends) 16 | - [Examples and documentation](#examples-and-documentation) 17 | - [Development and testing](#development-and-testing) 18 | - [References](#references) 19 | 20 | ## Features 21 | - `TriaMesh`: triangle mesh class with orientation checking, boundary handling, normals, smoothing and quality metrics; efficient edge and adjacency representations; IO for OFF, VTK and related formats. 22 | - `TetMesh`: tetrahedral mesh utilities, boundary handling and IO. 23 | - `Solver`: FEM routines producing stiffness and mass matrices, sparse eigenvalue solvers, Poisson and heat equation solvers, and support for anisotropic operators. 24 | - `diffgeo`: gradient, divergence, mean-curvature flow and related differential operators. 25 | - `heat`: heat kernel and diffusion utilities, geodesics via the heat method. 26 | - `shapedna`: compute ShapeDNA (Laplace spectra) for shape descriptors. 27 | - `conformal`: conformal mapping methods for genus-0 surfaces. 28 | - `io`: read/write vertex functions and eigenvector files. 29 | - `plot`: lightweight Plotly wrappers for interactive visualization. 30 | 31 | ## Quick start 32 | 33 | Install the released package: 34 | ``` 35 | python3 -m pip install lapy 36 | ``` 37 | 38 | Import and inspect main classes: 39 | ``` 40 | import lapy as lp 41 | help(lp.TriaMesh) 42 | help(lp.Solver) 43 | ``` 44 | 45 | A minimal example (compute eigenpairs of a triangular mesh): 46 | ``` 47 | import lapy as lp 48 | mesh = lp.TriaMesh.read_vtk('data/cubeTria.vtk') # or another read_* member for other formats 49 | solver = lp.Solver(mesh) 50 | evals, evecs = solver.eigs(k=20) # compute the first 20 eigenvalues and eigenvectors 51 | ``` 52 | 53 | ## Installation 54 | 55 | Install the development version to a chosen source location: 56 | ``` 57 | python3 -m pip install --user --src /my/preferred/location --editable git+https://github.com/Deep-MI/LaPy.git#egg=lapy 58 | ``` 59 | 60 | ### Dependencies and optional backends 61 | - Core: Python 3, NumPy, SciPy. 62 | - Optional (recommended): scikit-sparse (for CHOLMOD) to accelerate Cholesky sparse solves. 63 | Note: CHOLMOD (via scikit-sparse) is not currently installable via plain `pip` on all platforms; use `conda` when possible. If `use_cholmod=True` is requested, LaPy attempts to import CHOLMOD and will raise if it is unavailable. Install ordering: install `numpy` and `scipy` first, then `scikit-sparse`. 64 | 65 | ## Solver backends 66 | - Default: SciPy sparse LU/QR routines. 67 | - Optional: CHOLMOD (faster for symmetric positive definite systems). Toggle with `use_cholmod=True` when constructing `Solver`. 68 | 69 | ## Examples and documentation 70 | - Example Jupyter notebooks in the `examples` directory demonstrate common workflows (mesh IO, curvature, diffusion, ShapeDNA, geodesics). 71 | - Full API documentation: https://deep-mi.org/LaPy 72 | 73 | ## Development and testing 74 | - The project includes unit tests and example notebooks. Use the development installation command above and run tests with your preferred test runner (e.g., `pytest`). 75 | - Contributions and issues are welcome via the repository issue tracker. 76 | 77 | ## References 78 | If you use LaPy in publications, please cite: 79 | 80 | 1. Reuter M, Wolter F-E, Peinecke N.
"Laplace-Beltrami spectra as 'Shape-DNA' of surfaces and solids." Computer-Aided Design. 2006;38(4):342–366. http://dx.doi.org/10.1016/j.cad.2005.10.011 81 | 82 | 2. Wachinger C, Golland P, Kremen W, Fischl B, Reuter M. "BrainPrint: a discriminative characterization of brain morphology." NeuroImage. 2015;109:232–248. http://dx.doi.org/10.1016/j.neuroimage.2015.01.032 http://www.ncbi.nlm.nih.gov/pubmed/25613439 83 | 84 | Additional algorithmic sources: 85 | - Crane K, Weischedel C, Wardetzky M. "Geodesics in heat." ACM Trans. Graph. (use for heat-based geodesics) https://doi.org/10.1145/2516971.2516977 86 | - Kazhdan M, Solomon J, Ben-Chen M. "Can Mean-Curvature Flow be Modified to be Non-singular?" Comput. Graph. Forum (2012) (for non-singular mean curvature flow) https://doi.org/10.1111/j.1467-8659.2012.03179.x 87 | - Choi PT, Lam KC, Lui LM. "FLASH: Fast Landmark Aligned Spherical Harmonic Parameterization for Genus-0 Closed Brain Surfaces." SIAM J. Imaging Sci. (for conformal mapping methods) https://doi.org/10.1137/130950008 88 | 89 | ## Website 90 | - Lab / project page: https://deep-mi.org 91 | 92 | ## License 93 | - See the repository `LICENSE` file for license terms. 94 | 95 | ## Contact 96 | - Report issues or feature requests via the repository issue tracker on GitHub. 97 | -------------------------------------------------------------------------------- /examples/Test_Plot.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Visualization" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "## Triangle Mesh" 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": null, 20 | "metadata": {}, 21 | "outputs": [], 22 | "source": [ 23 | "import plotly.io as pio\n", 24 | "\n", 25 | "from lapy import Solver, TetMesh, TriaMesh, io, plot\n", 26 | "\n", 27 | "pio.renderers.default = \"sphinx_gallery\"" 28 | ] 29 | }, 30 | { 31 | "cell_type": "markdown", 32 | "metadata": {}, 33 | "source": [ 34 | "This tutorial will show you some of our visualization functionality. For that we load a larger mesh of the cube and compute the first three eigenvalues and eigenvectors. We also show how to save the eigenfunctions to disk." 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": null, 40 | "metadata": {}, 41 | "outputs": [], 42 | "source": [ 43 | "tria = TriaMesh.read_vtk(\"../data/cubeTria.vtk\")\n", 44 | "fem = Solver(tria)\n", 45 | "evals, evecs = fem.eigs(k=3)\n", 46 | "evDict = dict()\n", 47 | "evDict[\"Refine\"] = 0\n", 48 | "evDict[\"Degree\"] = 1\n", 49 | "evDict[\"Dimension\"] = 2\n", 50 | "evDict[\"Elements\"] = len(tria.t)\n", 51 | "evDict[\"DoF\"] = len(tria.v)\n", 52 | "evDict[\"NumEW\"] = 3\n", 53 | "evDict[\"Eigenvalues\"] = evals\n", 54 | "evDict[\"Eigenvectors\"] = evecs\n", 55 | "io.write_ev(\"../data/cubeTria.ev\", evDict)" 56 | ] 57 | }, 58 | { 59 | "cell_type": "markdown", 60 | "metadata": {}, 61 | "source": [ 62 | "Let's look at the result by visualizing the first non-constant eigenfunction on top of the cube mesh. You can see that the extrema localize in two diametrically opposed corners." 
63 |    ]
64 |   },
65 |   {
66 |    "cell_type": "code",
67 |    "execution_count": null,
68 |    "metadata": {},
69 |    "outputs": [],
70 |    "source": [
71 |     "plot.plot_tria_mesh(\n",
72 |     "    tria,\n",
73 |     "    vfunc=evecs[:, 1],\n",
74 |     "    xrange=None,\n",
75 |     "    yrange=None,\n",
76 |     "    zrange=None,\n",
77 |     "    showcaxis=False,\n",
78 |     "    caxis=None,\n",
79 |     ")"
80 |    ]
81 |   },
82 |   {
83 |    "cell_type": "markdown",
84 |    "metadata": {},
85 |    "source": [
86 |     "We can also adjust the axes and add a color scale."
87 |    ]
88 |   },
89 |   {
90 |    "cell_type": "code",
91 |    "execution_count": null,
92 |    "metadata": {},
93 |    "outputs": [],
94 |    "source": [
95 |     "plot.plot_tria_mesh(\n",
96 |     "    tria,\n",
97 |     "    vfunc=evecs[:, 1],\n",
98 |     "    xrange=[-2, 2],\n",
99 |     "    yrange=[-2, 2],\n",
100 |     "    zrange=[-2, 2],\n",
101 |     "    showcaxis=True,\n",
102 |     "    caxis=[-0.3, 0.5],\n",
103 |     ")"
104 |    ]
105 |   },
106 |   {
107 |    "cell_type": "markdown",
108 |    "metadata": {},
109 |    "source": [
110 |     "## Tetrahedral Mesh"
111 |    ]
112 |   },
113 |   {
114 |    "cell_type": "markdown",
115 |    "metadata": {},
116 |    "source": [
117 |     "Next, we load a tetrahedral mesh and again compute the first three eigenvalues and eigenvectors."
118 |    ]
119 |   },
120 |   {
121 |    "cell_type": "code",
122 |    "execution_count": null,
123 |    "metadata": {},
124 |    "outputs": [],
125 |    "source": [
126 |     "tetra = TetMesh.read_vtk(\"../data/cubeTetra.vtk\")\n",
127 |     "fem = Solver(tetra)\n",
128 |     "evals, evecs = fem.eigs(k=3)\n",
129 |     "evDict = dict()\n",
130 |     "evDict[\"Refine\"] = 0\n",
131 |     "evDict[\"Degree\"] = 1\n",
132 |     "evDict[\"Dimension\"] = 3\n",
133 |     "evDict[\"Elements\"] = len(tetra.t)\n",
134 |     "evDict[\"DoF\"] = len(tetra.v)\n",
135 |     "evDict[\"NumEW\"] = 3\n",
136 |     "evDict[\"Eigenvalues\"] = evals\n",
137 |     "evDict[\"Eigenvectors\"] = evecs\n",
138 |     "io.write_ev(\"../data/cubeTetra.ev\", evDict)"
139 |    ]
140 |   },
141 |   {
142 |    "cell_type": "markdown",
143 |    "metadata": {},
144 |    "source": [
145 |     "The eigenvector defines a function on all vertices, also inside the cube. Here we can see it as a color overlay on the boundary."
146 |    ]
147 |   },
148 |   {
149 |    "cell_type": "code",
150 |    "execution_count": null,
151 |    "metadata": {},
152 |    "outputs": [],
153 |    "source": [
154 |     "plot.plot_tet_mesh(\n",
155 |     "    tetra,\n",
156 |     "    vfunc=evecs[:, 1],\n",
157 |     "    xrange=None,\n",
158 |     "    yrange=None,\n",
159 |     "    zrange=None,\n",
160 |     "    showcaxis=False,\n",
161 |     "    caxis=None,\n",
162 |     ")"
163 |    ]
164 |   },
165 |   {
166 |    "cell_type": "markdown",
167 |    "metadata": {},
168 |    "source": [
169 |     "The plot function allows cutting the solid object open (here we keep every vertex where the function is larger than 0)."
170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": null, 175 | "metadata": {}, 176 | "outputs": [], 177 | "source": [ 178 | "plot.plot_tet_mesh(\n", 179 | " tetra,\n", 180 | " cutting=(\"f>0\"),\n", 181 | " vfunc=evecs[:, 1],\n", 182 | " xrange=[-2, 2],\n", 183 | " yrange=[-2, 2],\n", 184 | " zrange=[-2, 2],\n", 185 | " showcaxis=True,\n", 186 | " caxis=[-0.3, 0.5],\n", 187 | ")" 188 | ] 189 | } 190 | ], 191 | "metadata": { 192 | "kernelspec": { 193 | "display_name": "Python3", 194 | "language": "python", 195 | "name": "python3" 196 | }, 197 | "language_info": { 198 | "codemirror_mode": { 199 | "name": "ipython", 200 | "version": 3 201 | }, 202 | "file_extension": ".py", 203 | "mimetype": "text/x-python", 204 | "name": "python", 205 | "nbconvert_exporter": "python", 206 | "pygments_lexer": "ipython3", 207 | "version": "3" 208 | } 209 | }, 210 | "nbformat": 4, 211 | "nbformat_minor": 4 212 | } 213 | -------------------------------------------------------------------------------- /lapy/utils/tests/expected_outcomes.json: -------------------------------------------------------------------------------- 1 | { 2 | "expected_outcomes": { 3 | "test_tria_mesh": { 4 | "expected_euler_value": 2, 5 | "expected_area": [ 6 | 0.5, 7 | 0.5, 8 | 0.5, 9 | 0.5, 10 | 0.5, 11 | 0.5, 12 | 0.5, 13 | 0.5, 14 | 0.5, 15 | 0.5, 16 | 0.5, 17 | 0.5 18 | ], 19 | "expected_mesh_area": 5.999999999999998, 20 | "expected_vertex_degrees": [6, 4, 4, 4, 4, 4, 6, 4], 21 | "expected_vertex_area": [ 22 | 1.0, 23 | 0.66666667, 24 | 0.66666667, 25 | 0.66666667, 26 | 0.66666667, 27 | 0.66666667, 28 | 1.0, 29 | 0.66666667 30 | ], 31 | "expected_edge_length": 1.1380711874576983, 32 | "expected_triangle_normals": [ 33 | [0.0, 0.0, -1.0], 34 | [0.0, -0.0, -1.0], 35 | [0.0, 0.0, -1.0], 36 | [0.0, -0.0, -1.0], 37 | [0.0, 1.0, 0.0], 38 | [0.0, 1.0, 0.0], 39 | [-1.0, 0.0, 0.0], 40 | [-1.0, 0.0, -0.0], 41 | [0.0, 1.0, 0.0], 42 | [0.0, 1.0, 0.0], 43 | [-1.0, 0.0, 0.0], 44 | [-1.0, 0.0, -0.0] 45 | ], 46 | "expected_triangle": [ 47 | 0.8660254, 48 | 0.8660254, 49 | 0.8660254, 50 | 0.8660254, 51 | 0.8660254, 52 | 0.8660254, 53 | 0.8660254, 54 | 0.8660254, 55 | 0.8660254, 56 | 0.8660254, 57 | 0.8660254, 58 | 0.8660254 59 | ], 60 | "expected_vertices": [0, 1, 2, 3, 4, 5, 6, 7], 61 | "expected_flips": 6, 62 | "expected_result": [ 63 | [-0.57735027, -0.57735027, -0.57735027], 64 | [-0.40824829, 0.81649658, -0.40824829], 65 | [0.40824829, 0.40824829, -0.81649658], 66 | [0.81649658, -0.40824829, -0.40824829], 67 | [-0.40824829, -0.40824829, 0.81649658], 68 | [-0.81649658, 0.40824829, 0.40824829], 69 | [0.57735027, 0.57735027, 0.57735027], 70 | [0.40824829, -0.81649658, 0.40824829] 71 | ], 72 | "expected_result_offset": [ 73 | [-0.57735027, -0.57735027, -0.57735027], 74 | [-0.40824829, 0.81649658, -0.40824829], 75 | [0.40824829, 0.40824829, -0.81649658], 76 | [0.81649658, -0.40824829, -0.40824829], 77 | [-0.40824829, -0.40824829, 0.81649658], 78 | [-0.81649658, 0.40824829, 0.40824829], 79 | [0.57735027, 0.57735027, 0.57735027], 80 | [0.40824829, -0.81649658, 0.40824829] 81 | ], 82 | "expected_boundary_loop": [0, 8, 1, 13, 2, 16, 3, 9] 83 | }, 84 | "test_tet_mesh": {"expected_vertices": [0, 1, 2, 3, 4, 5, 6, 7, 8]}, 85 | "test_compute_shapedna": { 86 | "expected_eigenvalues": [-4.0165149e-05, 4.169641, 4.1704664], 87 | "tolerance": 1e-4 88 | }, 89 | "test_normalize_ev_geometry": { 90 | "expected_normalized_values": [-0.00024099089, 25.017845, 25.022799], 91 | "tolerance": 1e-4 92 | }, 93 | "test_reweight_ev": { 94 | 
"expected_reweighted_values": [ 95 | -4.01651487e-05, 96 | 2.08482051e00, 97 | 1.39015547e00 98 | ], 99 | "tolerance": 1e-4 100 | }, 101 | "test_compute_distance": {"expected_compute_distance": 0.0}, 102 | "test_compute_shapedna_tet": { 103 | "expected_eigen_values": [8.4440224e-05, 9.8897915e00, 9.8898811e00], 104 | "tolerance": 1e-4 105 | }, 106 | "test_normalize_ev_geometry_tet": { 107 | "expected_normalized_values": [8.4440224e-05, 9.8897915e00, 9.8898811e00], 108 | "tolerance": 1e-4 109 | }, 110 | "test_reweight_ev_tet": { 111 | "expected_reweighted_values": [ 112 | 8.44402239e-05, 113 | 4.94489574e00, 114 | 3.29662704e00 115 | ], 116 | "tolerance": 1e-4 117 | }, 118 | "test_compute_distance_tet": { 119 | "exp_compute_distance": 0.0 120 | }, 121 | "test_Geodesics_format": { 122 | "expected_matrix_format": "csc", 123 | "max_distance": 0.60497826, 124 | "expected_sqrt_2_div_2": 0.7071067811865476, 125 | "expected_max_abs_diff": 0.0 126 | }, 127 | "test_TetMesh_Geodesics": { 128 | "expected_evals_len": 10, 129 | "expected_max_col_values": [1.0, 1.0, 1.0], 130 | "expected_min_col_values": [-1.0, -1.0, -1.0], 131 | "expected_matrix_format": "csc", 132 | "max_distance": 0.69931495, 133 | "expected_sqrt": 0.8660254037844386, 134 | "expected_divx": [ 135 | 5.9999948, 136 | 6.0000215, 137 | 6.0000215, 138 | 5.999988, 139 | 6.000053, 140 | 5.999975, 141 | 5.9999676, 142 | 6.000024, 143 | 6.000013, 144 | 6.000008 145 | ] 146 | }, 147 | "test_visualization_triangle_mesh": { 148 | "expected_elements": 4800, 149 | "expected_dof": 2402, 150 | "expected_ev": [-4.1549094e-05, 4.169634, 4.170457] 151 | }, 152 | "test_visualization_tetrahedral_mesh": { 153 | "expected_elements": 48000, 154 | "expected_dof": 9261, 155 | "expected_ev": [8.4652565e-05, 9.889787, 9.889887] 156 | } 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /lapy/utils/tests/test_TriaMesh_Geodesics.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import numpy as np 4 | import pytest 5 | from scipy.sparse.linalg import splu 6 | 7 | from ...diffgeo import compute_divergence, compute_geodesic_f, compute_gradient 8 | from ...heat import diffusion 9 | from ...solver import Solver 10 | from ...tria_mesh import TriaMesh 11 | 12 | 13 | @pytest.fixture 14 | def loaded_data(): 15 | """ 16 | Load and provide the expected outcomes data from a JSON file. 17 | 18 | Returns: 19 | dict: Dictionary containing the expected outcomes data. 20 | """ 21 | with open("lapy/utils/tests/expected_outcomes.json") as f: 22 | expected_outcomes = json.load(f) 23 | return expected_outcomes 24 | 25 | 26 | # Fixture to load the TetMesh 27 | @pytest.fixture 28 | def load_square_mesh(): 29 | T = TriaMesh.read_off("data/square-mesh.off") 30 | return T 31 | 32 | 33 | def test_tria_qualities(load_square_mesh): 34 | """ 35 | Test triangle mesh quality computation. 36 | """ 37 | T = load_square_mesh 38 | computed_q = T.tria_qualities() 39 | expected_q_length = 768 40 | assert len(computed_q) == expected_q_length 41 | 42 | 43 | # Laplace 44 | def test_Laplace_Geodesics(load_square_mesh): 45 | """ 46 | Test Laplace solver for geodesics on a mesh. 
47 | """ 48 | 49 | T = load_square_mesh 50 | 51 | # compute first eigenfunction 52 | fem = Solver(T, lump=True) 53 | eval, evec = fem.eigs() 54 | # vfunc = evec[:, 1] 55 | 56 | # Get A,B (lumped), and inverse of B (as it is diagonal due to lumping) 57 | A, B = fem.stiffness, fem.mass 58 | Bi = B.copy() 59 | Bi.data **= -1 60 | 61 | assert B.sum() == 1.0 62 | assert Bi is not B 63 | # Convert A to a dense NumPy array 64 | A_dense = A.toarray() 65 | 66 | # Assert that A is symmetric 67 | assert (A_dense == A_dense.T).all() 68 | 69 | expected_eval_length = 10 70 | assert len(eval) == expected_eval_length 71 | 72 | 73 | # Geodesics 74 | def test_Laplace_Geodesics_with_Gradient_Divergence(load_square_mesh): 75 | """ 76 | Test Laplace geodesics using gradient and divergence. 77 | """ 78 | T = load_square_mesh 79 | 80 | # Load eigenfunction 81 | fem = Solver(T, lump=True) 82 | eval, evec = fem.eigs() 83 | vfunc = evec[:, 1] 84 | 85 | # Compute Laplacian using -div(grad(f)) 86 | grad = compute_gradient(T, vfunc) 87 | divx = -compute_divergence(T, grad) 88 | 89 | # Get the lumped mass matrix B 90 | fem = Solver(T, lump=True) 91 | B = fem.mass 92 | Bi = B.copy() 93 | Bi.data **= -1 94 | 95 | # Apply Laplacian operator and then the inverse of B 96 | Laplacian_result = -divx # The Laplacian result 97 | 98 | # Apply the inverse of B to recover vfunc 99 | recovered_vfunc = Bi.dot(Laplacian_result) 100 | 101 | # Check if the original vfunc and the recovered vfunc length are equal 102 | assert len(recovered_vfunc) == len(vfunc) 103 | 104 | expected_eval_length = 10 105 | assert len(eval) == expected_eval_length 106 | 107 | 108 | def test_heat_diffusion_shape(load_square_mesh): 109 | """ 110 | Test the shape of the heat diffusion result on a square mesh. 111 | 112 | Parameters: 113 | load_square_mesh: Fixture providing a loaded square mesh. 114 | 115 | This test function computes the heat diffusion and verifies that the shape 116 | of the result matches the expected shape. 117 | 118 | Returns: 119 | None 120 | """ 121 | T = load_square_mesh 122 | bvert = T.boundary_loops() 123 | u = diffusion(T, bvert, m=1) 124 | expected_shape = (len(T.v),) 125 | assert u.shape == expected_shape 126 | 127 | 128 | def test_Geodesics_format(loaded_data, load_square_mesh): 129 | """ 130 | Test geodesics format and accuracy. 
131 | """ 132 | T = load_square_mesh 133 | bvert = T.boundary_loops() 134 | u = diffusion(T, bvert, m=1) 135 | # compute gradient of heat diffusion 136 | tfunc = compute_gradient(T, u) 137 | 138 | # normalize gradient 139 | X = -tfunc / np.sqrt((tfunc**2).sum(1))[:, np.newaxis] 140 | X = np.nan_to_num(X) 141 | divx = compute_divergence(T, X) 142 | # compute distance 143 | 144 | useCholmod = True 145 | try: 146 | from sksparse.cholmod import cholesky 147 | except ImportError: 148 | useCholmod = False 149 | 150 | fem = Solver(T, lump=True) 151 | A, B = fem.stiffness, fem.mass 152 | H = -A 153 | b0 = divx 154 | 155 | # solve H x = b0 156 | # we don't need the B matrix here, as divx is the integrated divergence 157 | print("Matrix Format now: " + H.getformat()) 158 | if useCholmod: 159 | print("Solver: cholesky decomp - performance optimal ...") 160 | chol = cholesky(H) 161 | x = chol(b0) 162 | else: 163 | print("Solver: spsolve (LU decomp) - performance not optimal ...") 164 | lu = splu(H) 165 | x = lu.solve(b0) 166 | 167 | # remove shift 168 | x = x - min(x) 169 | 170 | Bi = B.copy() 171 | vf = fem.poisson(-Bi * divx) 172 | vf = vf - min(vf) 173 | gf = compute_geodesic_f(T, u) 174 | expected_matrix_format = loaded_data["expected_outcomes"]["test_Geodesics_format"][ 175 | "expected_matrix_format" 176 | ] 177 | assert H.getformat() == expected_matrix_format 178 | assert not useCholmod, "Solver: cholesky decomp - performance optimal ..." 179 | expected_max_x = loaded_data["expected_outcomes"]["test_Geodesics_format"][ 180 | "max_distance" 181 | ] 182 | expected_sqrt_2_div_2 = loaded_data["expected_outcomes"]["test_Geodesics_format"][ 183 | "expected_sqrt_2_div_2" 184 | ] 185 | assert np.isclose(max(x), expected_max_x) 186 | computed_sqrt_2_div_2 = np.sqrt(2) / 2 187 | assert np.isclose(computed_sqrt_2_div_2, expected_sqrt_2_div_2) 188 | expected_max_abs_diff = loaded_data["expected_outcomes"]["test_Geodesics_format"][ 189 | "expected_max_abs_diff" 190 | ] 191 | computed_max_abs_diff = max(abs(gf - x)) 192 | assert np.allclose(computed_max_abs_diff, expected_max_abs_diff) 193 | -------------------------------------------------------------------------------- /lapy/heat.py: -------------------------------------------------------------------------------- 1 | """Functions for computing heat kernel and diffusion. 2 | 3 | Inputs are eigenvalues and eigenvectors (for heat kernel) and the 4 | mesh geometries (tet or tria mesh) for heat diffusion. 5 | """ 6 | 7 | import importlib 8 | import logging 9 | from typing import Optional, Union 10 | 11 | import numpy as np 12 | 13 | from .utils._imports import import_optional_dependency 14 | 15 | logger = logging.getLogger(__name__) 16 | 17 | 18 | def diagonal( 19 | t: Union[float, np.ndarray], 20 | x: np.ndarray, 21 | evecs: np.ndarray, 22 | evals: np.ndarray, 23 | n: int, 24 | ) -> np.ndarray: 25 | """Compute heat kernel diagonal K(t,x,x). 26 | 27 | For a given time t (can be a vector) using only the first n smallest 28 | eigenvalues and eigenvectors. 29 | 30 | Parameters 31 | ---------- 32 | t : float or np.ndarray 33 | Time or array of time values, shape (n_times,). 34 | x : np.ndarray 35 | Vertex indices for the positions of K(t,x,x), shape (n_vertices,). 36 | evecs : np.ndarray 37 | Eigenvectors matrix, shape (n_vertices, n_eigenvectors). 38 | evals : np.ndarray 39 | Vector of eigenvalues, shape (n_eigenvalues,). 40 | n : int 41 | Number of eigenvectors and eigenvalues to use (smaller or equal to length). 
42 | 43 | Returns 44 | ------- 45 | np.ndarray 46 | Heat kernel diagonal values. Shape (n_vertices, n_times) if t is array, 47 | or (n_vertices, 1) if t is scalar. Rows correspond to vertices selected 48 | in x, columns to times in t. 49 | 50 | Raises 51 | ------ 52 | ValueError 53 | If n exceeds the number of available eigenpairs. 54 | """ 55 | if n > evecs.shape[1] or n > evals.shape[0]: 56 | raise ValueError("n exceeds the number of available eigenpairs") 57 | # maybe add code to check dimensions of input and flip axis if necessary 58 | h = np.matmul(evecs[x, 0:n] * evecs[x, 0:n], np.exp(-np.matmul(evals[0:n], t))) 59 | return h 60 | 61 | 62 | def kernel( 63 | t: Union[float, np.ndarray], 64 | vfix: int, 65 | evecs: np.ndarray, 66 | evals: np.ndarray, 67 | n: int, 68 | ) -> np.ndarray: 69 | r"""Compute heat kernel from all points to a fixed point. 70 | 71 | For a given time t, computes K_t(p,q) using only the first n smallest 72 | eigenvalues and eigenvectors: 73 | 74 | .. math:: 75 | K_t (p,q) = \sum_j \exp(-\lambda_j t) \phi_j(p) \phi_j(q) 76 | 77 | where :math:`\lambda_j` are eigenvalues and :math:`\phi_j` are eigenvectors. 78 | 79 | Parameters 80 | ---------- 81 | t : float or np.ndarray 82 | Time (can also be array, if passing multiple times), shape (n_times,). 83 | vfix : int 84 | Fixed vertex index. 85 | evecs : np.ndarray 86 | Matrix of eigenvectors, shape (n_vertices, n_eigenvectors). 87 | evals : np.ndarray 88 | Column vector of eigenvalues, shape (n_eigenvalues,). 89 | n : int 90 | Number of eigenvalues/vectors used in heat kernel (n <= n_eigenvectors). 91 | 92 | Returns 93 | ------- 94 | np.ndarray 95 | Heat kernel values. Shape (n_vertices, n_times) if t is array, 96 | or (n_vertices, 1) if t is scalar. Rows correspond to all vertices, 97 | columns to times in t. 98 | 99 | Raises 100 | ------ 101 | ValueError 102 | If n exceeds the number of available eigenpairs. 103 | """ 104 | # h = evecs * ( exp(-evals * t) .* repmat(evecs(vfix,:)',1,length(t)) ) 105 | if n > evecs.shape[1] or n > evals.shape[0]: 106 | raise ValueError("n exceeds the number of available eigenpairs") 107 | h = np.matmul( 108 | evecs[:, 0:n], (np.exp(np.matmul(-evals[0:n], t)) * evecs[vfix, 0:n]) 109 | ) 110 | return h 111 | 112 | 113 | def diffusion( 114 | geometry: object, 115 | vids: Union[int, np.ndarray], 116 | m: float = 1.0, 117 | aniso: Optional[int] = None, 118 | use_cholmod: bool = False, 119 | ) -> np.ndarray: 120 | """Compute the heat diffusion from initial vertices in vids. 121 | 122 | Uses the backward Euler solution with time :math:`t = m l^2`, where l 123 | describes the average edge length. 124 | 125 | Parameters 126 | ---------- 127 | geometry : TriaMesh or TetMesh 128 | Geometric object on which to run diffusion. 129 | vids : int or np.ndarray 130 | Vertex index or indices where initial heat is applied. 131 | m : float, default=1.0 132 | Factor to compute time of heat evolution. 133 | aniso : int, default=None 134 | Number of smoothing iterations for curvature computation on vertices. 135 | use_cholmod : bool, default=False 136 | Which solver to use. If True, use Cholesky decomposition from 137 | scikit-sparse cholmod. If False, use spsolve (LU decomposition). 138 | 139 | Returns 140 | ------- 141 | np.ndarray 142 | Heat diffusion values at vertices, shape (n_vertices,). 143 | 144 | Raises 145 | ------ 146 | ValueError 147 | If vids contains out-of-range vertex indices. 148 | ImportError 149 | If use_cholmod is True but scikit-sparse is not installed. 
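
    Examples
    --------
    A minimal sketch; the square example mesh below is the one used by the
    unit tests, but any TriaMesh or TetMesh works::

        from lapy import TriaMesh
        from lapy.heat import diffusion
        mesh = TriaMesh.read_off("data/square-mesh.off")
        bvert = mesh.boundary_loops()    # apply initial heat on the boundary
        u = diffusion(mesh, bvert, m=1)  # one value per vertex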
150 | """ 151 | if use_cholmod: 152 | sksparse = import_optional_dependency("sksparse", raise_error=True) 153 | importlib.import_module(".cholmod", sksparse.__name__) 154 | else: 155 | sksparse = None 156 | from . import Solver 157 | 158 | nv = len(geometry.v) 159 | vids = np.asarray(vids, dtype=int) 160 | if np.any(vids < 0) or np.any(vids >= nv): 161 | raise ValueError("vids contains out-of-range vertex indices") 162 | fem = Solver(geometry, lump=True, aniso=aniso) 163 | # time of heat evolution: 164 | t = m * geometry.avg_edge_length() ** 2 165 | # backward Euler matrix: 166 | hmat = fem.mass + t * fem.stiffness 167 | # set initial heat 168 | b0 = np.zeros((nv,)) 169 | b0[vids] = 1.0 170 | # solve H x = b0 171 | logger.debug("Matrix Format: %s", hmat.getformat()) 172 | if use_cholmod: 173 | logger.info("Solver: Cholesky decomposition from scikit-sparse cholmod") 174 | chol = sksparse.cholmod.cholesky(hmat) 175 | vfunc = chol(b0) 176 | else: 177 | from scipy.sparse.linalg import splu 178 | 179 | logger.info("Solver: LU decomposition via splu") 180 | lu = splu(hmat) 181 | vfunc = lu.solve(np.float32(b0)) 182 | return vfunc 183 | -------------------------------------------------------------------------------- /lapy/utils/tests/test_tet_mesh.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | from ...tet_mesh import TetMesh 7 | 8 | 9 | @pytest.fixture 10 | def tet_mesh_fixture(): 11 | points = np.array( 12 | [ 13 | [0, 0, 0], 14 | [1, 0, 0], 15 | [1, 1, 0], 16 | [0, 1, 0], 17 | [0, 0, 1], 18 | [1, 0, 1], 19 | [1, 1, 1], 20 | [0, 1, 1], 21 | [0.5, 0.5, 0.5], 22 | ] 23 | ) 24 | tets = np.array( 25 | [ 26 | [0, 5, 8, 1], 27 | [0, 4, 5, 8], 28 | [2, 5, 6, 8], 29 | [1, 5, 2, 8], 30 | [6, 7, 3, 8], 31 | [6, 3, 2, 8], 32 | [0, 3, 4, 8], 33 | [3, 7, 4, 8], 34 | [0, 1, 2, 8], 35 | [0, 2, 3, 8], 36 | [4, 6, 5, 8], 37 | [4, 7, 6, 8], 38 | ] 39 | ) 40 | 41 | return TetMesh(points, tets) 42 | 43 | 44 | @pytest.fixture 45 | def loaded_data(): 46 | """ 47 | Load expected outcomes data from a JSON file as a dictionary. 
48 | """ 49 | with open("lapy/utils/tests/expected_outcomes.json") as f: 50 | expected_outcomes = json.load(f) 51 | return expected_outcomes 52 | 53 | 54 | def test_has_free_vertices(tet_mesh_fixture): 55 | """ 56 | Testing tet mesh has free vertices or not 57 | """ 58 | mesh = tet_mesh_fixture 59 | result = mesh.has_free_vertices() 60 | expected_result = False 61 | assert result == expected_result 62 | 63 | 64 | def test_rm_free_vertices(tet_mesh_fixture, loaded_data): 65 | """ 66 | Testing removing free vertices from tet mesh 67 | """ 68 | mesh = tet_mesh_fixture 69 | updated_vertices, deleted_vertices = mesh.rm_free_vertices_() 70 | expected_vertices = np.array( 71 | loaded_data["expected_outcomes"]["test_tet_mesh"]["expected_vertices"] 72 | ) 73 | expected_removed_vertices = np.array([]) 74 | assert np.array_equal( 75 | updated_vertices, expected_vertices 76 | ), f"{updated_vertices}, {deleted_vertices}" 77 | assert np.array_equal(deleted_vertices, expected_removed_vertices) 78 | 79 | 80 | def test_is_oriented(tet_mesh_fixture): 81 | """ 82 | Testing whether test mesh orientations are consistent 83 | """ 84 | mesh = tet_mesh_fixture 85 | result = mesh.is_oriented() 86 | expected_result = False 87 | assert ( 88 | result == expected_result 89 | ), f"Expected is_oriented result {expected_result}, but got {result}" 90 | 91 | 92 | def test_boundary_tria(tet_mesh_fixture): 93 | """ 94 | Test computation of boundary triangles from tet mesh. 95 | 96 | `BT.t` represents the array of boundary triangles. 97 | `.shape[0]` counts the number of boundary triangles. 98 | """ 99 | mesh = tet_mesh_fixture 100 | boundary_tria_mesh = mesh.boundary_tria() 101 | 102 | expected_num_traingles = 12 103 | assert boundary_tria_mesh.t.shape[0] == expected_num_traingles 104 | 105 | # Check if the boundary triangle mesh is not oriented (this should fail) 106 | result = boundary_tria_mesh.is_oriented() 107 | expected_result = False 108 | assert ( 109 | result == expected_result 110 | ), f"Expected is_oriented result {expected_result}, but got {result}" 111 | 112 | 113 | def test_avg_edge_length(tet_mesh_fixture): 114 | """ 115 | Testing the computatoin of average edge length for tetrahedral mesh 116 | """ 117 | mesh = tet_mesh_fixture 118 | result = mesh.avg_edge_length() 119 | 120 | expected_avg_edge_length = 1.0543647924813107 121 | 122 | assert np.isclose(result, expected_avg_edge_length) 123 | 124 | 125 | def test_boundary_is_oriented(tet_mesh_fixture): 126 | """ 127 | Test orientation consistency in boundary of tetrahedral mesh. 128 | """ 129 | mesh = tet_mesh_fixture 130 | 131 | # Get the boundary triangle mesh 132 | boundary_mesh = mesh.boundary_tria() 133 | 134 | # Check if the boundary triangle mesh has consistent orientations 135 | result = boundary_mesh.is_oriented() 136 | 137 | expected_result = False 138 | 139 | assert result == expected_result 140 | 141 | 142 | def test_orient_and_check_oriented(tet_mesh_fixture): 143 | """ 144 | Test orienting the tetrahedral mesh for consistency. 
145 | """ 146 | mesh = tet_mesh_fixture 147 | 148 | # Correct the orientation of the tetrahedral mesh 149 | flipped_tetrahedra = mesh.orient_() 150 | 151 | # Check if the orientations of the tetrahedra are consistent 152 | result = mesh.is_oriented() 153 | 154 | expected_flipped_tetrahedra = 1 155 | expected_oriented_result = True 156 | 157 | # print(f"{flipped_tetrahedra}") 158 | 159 | assert flipped_tetrahedra == expected_flipped_tetrahedra 160 | assert result == expected_oriented_result 161 | 162 | 163 | def test_correct_orientations_and_boundary(tet_mesh_fixture): 164 | """ 165 | Testing correcting orientation and checking boundary surface orientation 166 | """ 167 | mesh = tet_mesh_fixture 168 | 169 | # Correct the orientation of the tetrahedral mesh 170 | mesh.orient_() 171 | 172 | # Check if the orientations of the tetrahedra are consistent 173 | result_oriented = mesh.is_oriented() 174 | expected_oriented_result = True 175 | assert result_oriented == expected_oriented_result 176 | 177 | # Extract the boundary surface 178 | boundary_surface = mesh.boundary_tria() 179 | print(f"{boundary_surface}") 180 | 181 | # Check if the orientations of the boundary surface are consistent 182 | result_boundary_oriented = boundary_surface.is_oriented() 183 | print(f"{result_boundary_oriented}") 184 | expected_boundary_oriented_result = True 185 | assert result_boundary_oriented == expected_boundary_oriented_result 186 | 187 | 188 | def test_boundary_surface_volume(tet_mesh_fixture): 189 | """ 190 | Testing computation of volume for the boundary surface mesh 191 | """ 192 | mesh = tet_mesh_fixture 193 | 194 | # Correct the orientation of the tetrahedral mesh 195 | mesh.orient_() 196 | 197 | # Extract the boundary surface 198 | boundary_surface = mesh.boundary_tria() 199 | 200 | # Compute the volume of the boundary surface 201 | result_volume = boundary_surface.volume() 202 | expected_volume = 1.0 203 | 204 | assert np.isclose(result_volume, expected_volume) 205 | -------------------------------------------------------------------------------- /lapy/_read_geometry.py: -------------------------------------------------------------------------------- 1 | """Read FreeSurfer geometry (fix for dev, ll 151-153). 2 | 3 | Code was taken from nibabel.freesurfer package 4 | (https://github.com/nipy/nibabel/blob/master/nibabel/freesurfer/io.py). 5 | This software is licensed under the following license: 6 | 7 | The MIT License 8 | 9 | Copyright (c) 2009-2019 Matthew Brett 10 | Copyright (c) 2010-2013 Stephan Gerhard 11 | Copyright (c) 2006-2014 Michael Hanke 12 | Copyright (c) 2011 Christian Haselgrove 13 | Copyright (c) 2010-2011 Jarrod Millman 14 | Copyright (c) 2011-2019 Yaroslav Halchenko 15 | Copyright (c) 2015-2019 Chris Markiewicz 16 | 17 | Permission is hereby granted, free of charge, to any person obtaining a copy 18 | of this software and associated documentation files (the "Software"), to deal 19 | in the Software without restriction, including without limitation the rights 20 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 21 | copies of the Software, and to permit persons to whom the Software is 22 | furnished to do so, subject to the following conditions: 23 | 24 | The above copyright notice and this permission notice shall be included in 25 | all copies or substantial portions of the Software. 
26 | 27 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 28 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 29 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 30 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 31 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 32 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 33 | THE SOFTWARE. 34 | """ 35 | 36 | import warnings 37 | from collections import OrderedDict 38 | 39 | import numpy as np 40 | 41 | 42 | def _fread3(fobj): 43 | """Read a 3-byte int from an open binary file object. 44 | 45 | Parameters 46 | ---------- 47 | fobj : file 48 | File descriptor 49 | 50 | Returns 51 | ------- 52 | n : int 53 | A 3 byte int 54 | """ 55 | b1, b2, b3 = np.fromfile(fobj, ">u1", 3) 56 | # the bit-shifting operator does not return 57 | # identical results on all platforms, therefore 58 | # we disable it and return / compare the first 59 | # three bytes separately 60 | # return (b1 << 16) + (b2 << 8) + b3 61 | return b1, b2, b3 62 | 63 | 64 | def _read_volume_info(fobj): 65 | """Read the footer from a surface file. 66 | 67 | Parameters 68 | ---------- 69 | fobj : file 70 | File descriptor 71 | 72 | Returns 73 | ------- 74 | volume_info : array 75 | Key-value pairs found in the file. 76 | """ 77 | volume_info = OrderedDict() 78 | head = np.fromfile(fobj, ">i4", 1) 79 | if not np.array_equal(head, [20]): # Read two bytes more 80 | head = np.concatenate([head, np.fromfile(fobj, ">i4", 2)]) 81 | if not np.array_equal(head, [2, 0, 20]) and not np.array_equal( 82 | head, [2, 1, 20] 83 | ): 84 | warnings.warn("Unknown extension code.", stacklevel=2) 85 | return volume_info 86 | head = [2, 0, 20] 87 | 88 | volume_info["head"] = head 89 | for key in [ 90 | "valid", 91 | "filename", 92 | "volume", 93 | "voxelsize", 94 | "xras", 95 | "yras", 96 | "zras", 97 | "cras", 98 | ]: 99 | pair = fobj.readline().decode("utf-8").split("=") 100 | if pair[0].strip() != key or len(pair) != 2: 101 | raise OSError("Error parsing volume info.") 102 | if key in ("valid", "filename"): 103 | volume_info[key] = pair[1].strip() 104 | elif key == "volume": 105 | volume_info[key] = np.array(pair[1].split()).astype(int) 106 | else: 107 | volume_info[key] = np.array(pair[1].split()).astype(float) 108 | # Ignore the rest 109 | return volume_info 110 | 111 | 112 | def read_geometry(filepath, read_metadata=False, read_stamp=False): 113 | """Read a triangular format Freesurfer surface mesh. 114 | 115 | Parameters 116 | ---------- 117 | filepath : str 118 | Path to surface file. 119 | read_metadata : bool, optional 120 | If True, read and return metadata as key-value pairs. 121 | Valid keys: 122 | * 'head' : array of int 123 | * 'valid' : str 124 | * 'filename' : str 125 | * 'volume' : array of int, shape (3,) 126 | * 'voxelsize' : array of float, shape (3,) 127 | * 'xras' : array of float, shape (3,) 128 | * 'yras' : array of float, shape (3,) 129 | * 'zras' : array of float, shape (3,) 130 | * 'cras' : array of float, shape (3,) 131 | read_stamp : bool, optional 132 | Return the comment from the file 133 | 134 | Returns 135 | ------- 136 | coords : numpy array 137 | nvtx x 3 array of vertex (x, y, z) coordinates. 138 | faces : numpy array 139 | nfaces x 3 array of defining mesh triangles. 140 | volume_info : OrderedDict 141 | Returned only if `read_metadata` is True. Key-value pairs found in the 142 | geometry file. 
143 | create_stamp : str 144 | Returned only if `read_stamp` is True. The comment added by the 145 | program that saved the file. 146 | """ 147 | volume_info = OrderedDict() 148 | 149 | # See comment in _fread3() on why we have changed the 150 | # comparison 151 | # TRIANGLE_MAGIC = 16777214 152 | TRIANGLE_MAGIC = (np.uint8(255), np.uint8(255), np.uint8(254)) 153 | 154 | with open(filepath, "rb") as fobj: 155 | magic = _fread3(fobj) 156 | 157 | if magic == TRIANGLE_MAGIC: # Triangle file 158 | create_stamp = fobj.readline().rstrip(b"\n").decode("utf-8") 159 | test_dev = fobj.peek(1)[:1] 160 | if test_dev == b"\n": 161 | fobj.readline() 162 | vnum = np.fromfile(fobj, ">i4", 1)[0] 163 | fnum = np.fromfile(fobj, ">i4", 1)[0] 164 | coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3) 165 | faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3) 166 | 167 | if read_metadata: 168 | volume_info = _read_volume_info(fobj) 169 | else: 170 | raise ValueError( 171 | "File does not appear to be a Freesurfer surface (triangle file)" 172 | ) 173 | 174 | coords = coords.astype(float) # XXX: due to mayavi bug on mac 32bits 175 | 176 | ret = (coords, faces) 177 | if read_metadata: 178 | if len(volume_info) == 0: 179 | warnings.warn("No volume information contained in the file", stacklevel=2) 180 | ret += (volume_info,) 181 | if read_stamp: 182 | ret += (create_stamp,) 183 | 184 | return ret 185 | -------------------------------------------------------------------------------- /lapy/utils/tests/test_TetMesh_Geodesics.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import numpy as np 4 | import pytest 5 | from scipy.sparse.linalg import splu 6 | 7 | from ...diffgeo import compute_divergence, compute_gradient 8 | from ...heat import diffusion 9 | from ...solver import Solver 10 | from ...tet_mesh import TetMesh 11 | 12 | 13 | # Fixture to load the TetMesh 14 | @pytest.fixture 15 | def load_tet_mesh(): 16 | T = TetMesh.read_vtk("data/cubeTetra.vtk") 17 | return T 18 | 19 | 20 | @pytest.fixture 21 | def loaded_data(): 22 | """ 23 | Load and provide the expected outcomes data from a JSON file. 24 | 25 | Returns: 26 | dict: Dictionary containing the expected outcomes data. 
27 | """ 28 | with open("lapy/utils/tests/expected_outcomes.json") as f: 29 | expected_outcomes = json.load(f) 30 | return expected_outcomes 31 | 32 | 33 | # Test if the mesh is oriented 34 | def test_is_oriented(load_tet_mesh): 35 | T = load_tet_mesh 36 | assert not T.is_oriented(), "Mesh is already oriented" 37 | 38 | 39 | # Test orienting the mesh 40 | def test_orient_mesh(load_tet_mesh): 41 | T = load_tet_mesh 42 | T.orient_() 43 | assert T.is_oriented(), "Mesh is not oriented" 44 | 45 | 46 | # Test solving the Laplace eigenvalue problem 47 | def test_solve_eigenvalue_problem(load_tet_mesh): 48 | T = load_tet_mesh 49 | fem = Solver(T, lump=True) 50 | 51 | num_eigenvalues = 10 52 | evals, evecs = fem.eigs(num_eigenvalues) 53 | 54 | assert len(evals) == num_eigenvalues 55 | assert evecs.shape == (len(T.v), num_eigenvalues) 56 | 57 | 58 | def test_evals_evec_dimension(load_tet_mesh, loaded_data): 59 | T = load_tet_mesh 60 | 61 | expected_evals_len = loaded_data["expected_outcomes"]["test_TetMesh_Geodesics"][ 62 | "expected_evals_len" 63 | ] 64 | 65 | fem = Solver(T, lump=True) 66 | evals, evecs = fem.eigs(expected_evals_len) 67 | assert len(evals) == expected_evals_len 68 | assert np.shape(evecs) == (9261, 10) 69 | 70 | 71 | # Geodesics 72 | 73 | 74 | def test_gradients_normalization_and_divergence(load_tet_mesh, loaded_data): 75 | """ 76 | Test the computation of gradients, normalization, and divergence for a TetMesh. 77 | 78 | Parameters: 79 | load_tet_mesh (fixture): Fixture to load a TetMesh for testing. 80 | loaded_data (dict): Dictionary containing loaded test data. 81 | 82 | Raises: 83 | AssertionError: If any test condition is not met. 84 | """ 85 | T = load_tet_mesh 86 | tria = T.boundary_tria() 87 | bvert = np.unique(tria.t) 88 | u = diffusion(T, bvert, m=1) 89 | 90 | # Compute gradients 91 | tfunc = compute_gradient(T, u) 92 | 93 | # Define the expected shape of tfunc (gradient) 94 | expected_tfunc_shape = (48000, 3) 95 | 96 | # Assert that the shape of tfunc matches the expected shape 97 | assert tfunc.shape == expected_tfunc_shape 98 | 99 | # Flip and normalize 100 | X = -tfunc / np.sqrt((tfunc**2).sum(1))[:, np.newaxis] 101 | 102 | # Define the expected shape of X (normalized gradient) 103 | expected_X_shape = (48000, 3) 104 | 105 | # Assert that the shape of X matches the expected shape 106 | assert X.shape == expected_X_shape 107 | 108 | # Load the expected maximum and minimum values for each column of X 109 | expected_max_col_values = loaded_data["expected_outcomes"][ 110 | "test_TetMesh_Geodesics" 111 | ]["expected_max_col_values"] 112 | expected_min_col_values = loaded_data["expected_outcomes"][ 113 | "test_TetMesh_Geodesics" 114 | ]["expected_min_col_values"] 115 | 116 | # Assert maximum and minimum values of each column of X match the expected values 117 | for col in range(X.shape[1]): 118 | assert np.allclose(np.max(X[:, col]), expected_max_col_values[col], atol=1e-6) 119 | assert np.allclose(np.min(X[:, col]), expected_min_col_values[col], atol=1e-6) 120 | 121 | # Compute divergence 122 | divx = compute_divergence(T, X) 123 | 124 | # Define the expected shape of divx (divergence) 125 | expected_divx_shape = (9261,) 126 | 127 | # Assert that the shape of divx matches the expected shape 128 | assert divx.shape == expected_divx_shape 129 | 130 | 131 | def test_tetMesh_Geodesics_format(load_tet_mesh, loaded_data): 132 | """ 133 | Test if matrix format, solver settings, max distance, 134 | and computed values match the expected outcomes. 
135 | 136 | Parameters: 137 | - loaded_data (dict): Dictionary containing loaded test data. 138 | 139 | Raises: 140 | - AssertionError: If any test condition is not met. 141 | """ 142 | 143 | T = load_tet_mesh 144 | tria = T.boundary_tria() 145 | bvert = np.unique(tria.t) 146 | u = diffusion(T, bvert, m=1) 147 | 148 | # get gradients 149 | tfunc = compute_gradient(T, u) 150 | # flip and normalize 151 | X = -tfunc / np.sqrt((tfunc**2).sum(1))[:, np.newaxis] 152 | X = np.nan_to_num(X) 153 | # compute divergence 154 | divx = compute_divergence(T, X) 155 | 156 | # compute distance 157 | useCholmod = True 158 | try: 159 | from sksparse.cholmod import cholesky 160 | except ImportError: 161 | useCholmod = False 162 | 163 | fem = Solver(T, lump=True) 164 | A, B = fem.stiffness, fem.mass # computed above when creating Solver 165 | 166 | H = A 167 | b0 = -divx 168 | 169 | # solve H x = b0 170 | if useCholmod: 171 | print("Solver: cholesky decomp - performance optimal ...") 172 | chol = cholesky(H) 173 | x = chol(b0) 174 | else: 175 | print("Solver: spsolve (LU decomp) - performance not optimal ...") 176 | lu = splu(H) 177 | x = lu.solve(b0) 178 | 179 | x = x - np.min(x) 180 | 181 | # get heat diffusion 182 | 183 | v1func = T.v[:, 0] * T.v[:, 0] + T.v[:, 1] * T.v[:, 1] + T.v[:, 2] * T.v[:, 2] 184 | grad = compute_gradient(T, v1func) 185 | glength = np.sqrt(np.sum(grad * grad, axis=1)) 186 | A, B = fem.stiffness, fem.mass 187 | Bi = B.copy() 188 | Bi.data **= -1 189 | divx2 = Bi * divx 190 | 191 | expected_matrix_format = loaded_data["expected_outcomes"]["test_TetMesh_Geodesics"][ 192 | "expected_matrix_format" 193 | ] 194 | assert H.getformat() == expected_matrix_format 195 | assert np.shape(x) == (9261,) 196 | assert not useCholmod, "Solver: cholesky decomp - performance optimal ..." 197 | expected_max_x = loaded_data["expected_outcomes"]["test_TetMesh_Geodesics"][ 198 | "max_distance" 199 | ] 200 | expected_sqrt_3 = loaded_data["expected_outcomes"]["test_TetMesh_Geodesics"][ 201 | "expected_sqrt" 202 | ] 203 | assert np.isclose(max(x), expected_max_x) 204 | computed_sqrt_3 = 0.5 * np.sqrt(3.0) 205 | assert np.isclose(computed_sqrt_3, expected_sqrt_3) 206 | assert np.shape(glength) == (48000,) 207 | expected_divx = loaded_data["expected_outcomes"]["test_TetMesh_Geodesics"][ 208 | "expected_divx" 209 | ] 210 | assert len(divx2[5000:5010]) == len(expected_divx) 211 | assert not np.all(divx2[5000:5010] == expected_divx), "divergence is equal" 212 | -------------------------------------------------------------------------------- /lapy/utils/tests/test_shape_DNA.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | from ...shapedna import compute_distance, compute_shapedna, normalize_ev, reweight_ev 7 | from ...tet_mesh import TetMesh 8 | from ...tria_mesh import TriaMesh 9 | 10 | tria = TriaMesh.read_vtk("data/cubeTria.vtk") 11 | tet = TetMesh.read_vtk("data/cubeTetra.vtk") 12 | 13 | 14 | @pytest.fixture 15 | def loaded_data(): 16 | """ 17 | Load expected outcomes data from a JSON file as a dictionary. 18 | """ 19 | with open("lapy/utils/tests/expected_outcomes.json") as f: 20 | expected_outcomes = json.load(f) 21 | return expected_outcomes 22 | 23 | 24 | def test_compute_shapedna(loaded_data): 25 | """ 26 | Test compute_shapedna function for triangular mesh. 27 | 28 | Args: 29 | loaded_data (dict): Expected outcomes loaded from a JSON file. 
30 | 31 | Raises: 32 | AssertionError: If computed eigenvalues don't match within tolerance. 33 | """ 34 | ev = compute_shapedna(tria, k=3) 35 | 36 | expected_Eigenvalues = np.array( 37 | loaded_data["expected_outcomes"]["test_compute_shapedna"][ 38 | "expected_eigenvalues" 39 | ] 40 | ) 41 | tolerance = loaded_data["expected_outcomes"]["test_compute_shapedna"]["tolerance"] 42 | assert np.allclose(ev["Eigenvalues"], expected_Eigenvalues, atol=tolerance) 43 | 44 | 45 | def test_normalize_ev_geometry(loaded_data): 46 | """ 47 | Test normalize_ev() using 'geometry' method for a triangular mesh. 48 | 49 | Args: 50 | loaded_data (dict): Expected outcomes from a JSON file. 51 | 52 | Raises: 53 | AssertionError: If normalized eigenvalues don't match within tolerance. 54 | """ 55 | ev = compute_shapedna(tria, k=3) 56 | 57 | expected_normalized_values = np.array( 58 | loaded_data["expected_outcomes"]["test_normalize_ev_geometry"][ 59 | "expected_normalized_values" 60 | ] 61 | ) 62 | tolerance = loaded_data["expected_outcomes"]["test_normalize_ev_geometry"][ 63 | "tolerance" 64 | ] 65 | normalized_eigenvalues = normalize_ev(tria, ev["Eigenvalues"], method="geometry") 66 | assert np.allclose( 67 | normalized_eigenvalues, expected_normalized_values, atol=tolerance 68 | ) 69 | 70 | 71 | def test_reweight_ev(loaded_data): 72 | """ 73 | Test reweighted_ev() and validate reweighted eigenvalues' data type. 74 | 75 | Args: 76 | loaded_data (dict): Expected outcomes from a JSON file. 77 | 78 | Raises: 79 | AssertionError: If reweighted eigenvalues don't match within tolerance. 80 | """ 81 | ev = compute_shapedna(tria, k=3) 82 | 83 | expected_reweighted_values = np.array( 84 | loaded_data["expected_outcomes"]["test_reweight_ev"][ 85 | "expected_reweighted_values" 86 | ] 87 | ) 88 | tolerance = loaded_data["expected_outcomes"]["test_reweight_ev"]["tolerance"] 89 | reweighted_eigenvalues = reweight_ev(ev["Eigenvalues"]) 90 | tolerance = 1e-4 91 | assert np.allclose( 92 | reweighted_eigenvalues, expected_reweighted_values, atol=tolerance 93 | ) 94 | 95 | 96 | def test_compute_distance(loaded_data): 97 | """ 98 | Test compute_distance() for eigenvalues and validate the computed distance. 99 | 100 | Args: 101 | loaded_data (dict): Expected outcomes from a JSON file. 102 | 103 | Raises: 104 | AssertionError: If computed distance doesn't match the expected value. 105 | """ 106 | ev = compute_shapedna(tria, k=3) 107 | 108 | expected_compute_distance = loaded_data["expected_outcomes"][ 109 | "test_compute_distance" 110 | ]["expected_compute_distance"] 111 | # compute distance for tria eigenvalues (trivial case) 112 | computed_distance = compute_distance(ev["Eigenvalues"], ev["Eigenvalues"]) 113 | assert computed_distance == expected_compute_distance 114 | 115 | 116 | # Repeating test steps for a tetrahedral mesh 117 | 118 | 119 | def test_compute_shapedna_tet(loaded_data): 120 | """ 121 | Test compute_shapedna for a tetrahedral mesh. 122 | 123 | Args: 124 | loaded_data (dict): Expected outcomes from a JSON file. 125 | 126 | Raises: 127 | AssertionError: If computed eigenvalues don't match within tolerance. 
128 | """ 129 | evTet = compute_shapedna(tet, k=3) 130 | 131 | expected_eigen_values = np.array( 132 | loaded_data["expected_outcomes"]["test_compute_shapedna_tet"][ 133 | "expected_eigen_values" 134 | ] 135 | ) 136 | tolerance = loaded_data["expected_outcomes"]["test_compute_shapedna_tet"][ 137 | "tolerance" 138 | ] 139 | evTet = compute_shapedna(tet, k=3) 140 | assert np.allclose(evTet["Eigenvalues"], expected_eigen_values, atol=tolerance) 141 | 142 | 143 | def test_normalize_ev_geometry_tet(loaded_data): 144 | """ 145 | Test normalize_ev() using 'geometry' method for a tetrahedral mesh. 146 | 147 | Args: 148 | loaded_data (dict): Expected outcomes from a JSON file. 149 | 150 | Raises: 151 | AssertionError: If normalized eigenvalues don't match within tolerance. 152 | """ 153 | evTet = compute_shapedna(tet, k=3) 154 | 155 | expected_normalized_values = np.array( 156 | loaded_data["expected_outcomes"]["test_normalize_ev_geometry_tet"][ 157 | "expected_normalized_values" 158 | ] 159 | ) 160 | tolerance = loaded_data["expected_outcomes"]["test_normalize_ev_geometry_tet"][ 161 | "tolerance" 162 | ] 163 | # volume / surface / geometry normalization of tet eigenvalues 164 | normalized_eigenvalues = normalize_ev(tet, evTet["Eigenvalues"], method="geometry") 165 | 166 | assert np.allclose( 167 | normalized_eigenvalues, expected_normalized_values, atol=tolerance 168 | ) 169 | 170 | 171 | def test_reweight_ev_tet(loaded_data): 172 | """ 173 | Test reweighted_ev() for tetrahedral meshes and validate data type. 174 | 175 | Args: 176 | loaded_data (dict): Expected outcomes from a JSON file. 177 | 178 | Raises: 179 | AssertionError: If reweighted eigenvalues don't match within tolerance. 180 | """ 181 | evTet = compute_shapedna(tet, k=3) 182 | 183 | expected_reweighted_values = np.array( 184 | loaded_data["expected_outcomes"]["test_reweight_ev_tet"][ 185 | "expected_reweighted_values" 186 | ] 187 | ) 188 | tolerance = loaded_data["expected_outcomes"]["test_reweight_ev_tet"]["tolerance"] 189 | # Linear reweighting of tet eigenvalues 190 | reweighted_eigenvalues = reweight_ev(evTet["Eigenvalues"]) 191 | assert np.allclose( 192 | reweighted_eigenvalues, expected_reweighted_values, atol=tolerance 193 | ) 194 | 195 | 196 | def test_compute_distance_tet(loaded_data): 197 | """ 198 | Test compute_distance() for eigenvalues of tetrahedral meshes. 199 | 200 | Args: 201 | loaded_data (dict): Expected outcomes from a JSON file. 202 | 203 | Raises: 204 | AssertionError: If computed distance doesn't match the expected value. 
205 | """ 206 | evTet = compute_shapedna(tet, k=3) 207 | 208 | # compute distance for tria eigenvalues (trivial case) 209 | computed_distance = compute_distance(evTet["Eigenvalues"], evTet["Eigenvalues"]) 210 | expected_compute_distance = loaded_data["expected_outcomes"][ 211 | "test_compute_distance_tet" 212 | ]["exp_compute_distance"] 213 | 214 | # Compare the computed distance with the expected distance using a tolerance 215 | assert computed_distance == expected_compute_distance 216 | -------------------------------------------------------------------------------- /examples/Test_TetMesh.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# Tetrahedral Mesh" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "from lapy import TetMesh" 17 | ] 18 | }, 19 | { 20 | "cell_type": "markdown", 21 | "metadata": {}, 22 | "source": [ 23 | "First, instead of loading, we define a small tetrahedral mesh representing a cube with a center vertex and twelve tetrahedra." 24 | ] 25 | }, 26 | { 27 | "cell_type": "code", 28 | "execution_count": 2, 29 | "metadata": {}, 30 | "outputs": [], 31 | "source": [ 32 | "# cube9 (cube with center node)\n", 33 | "points = [\n", 34 | " [0, 0, 0],\n", 35 | " [1, 0, 0],\n", 36 | " [1, 1, 0],\n", 37 | " [0, 1, 0],\n", 38 | " [0, 0, 1],\n", 39 | " [1, 0, 1],\n", 40 | " [1, 1, 1],\n", 41 | " [0, 1, 1],\n", 42 | " [0.5, 0.5, 0.5],\n", 43 | "]\n", 44 | "tets = [\n", 45 | " [0, 5, 8, 1],\n", 46 | " [0, 4, 5, 8],\n", 47 | " [2, 5, 6, 8],\n", 48 | " [1, 5, 2, 8],\n", 49 | " [6, 7, 3, 8],\n", 50 | " [6, 3, 2, 8],\n", 51 | " [0, 3, 4, 8],\n", 52 | " [3, 7, 4, 8],\n", 53 | " [0, 1, 2, 8],\n", 54 | " [0, 2, 3, 8],\n", 55 | " [4, 6, 5, 8],\n", 56 | " [4, 7, 6, 8],\n", 57 | "]\n", 58 | "T = TetMesh(points, tets)" 59 | ] 60 | }, 61 | { 62 | "cell_type": "markdown", 63 | "metadata": {}, 64 | "source": [ 65 | "Note, that we flipped the first tetrahedron vertex order on purpose (it should be 0,5,1,8) to test and correct orientation below." 66 | ] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "metadata": {}, 71 | "source": [ 72 | "We can check if our tet mesh has free vertices (these are vertices that are not used in any tetrahedron)." 73 | ] 74 | }, 75 | { 76 | "cell_type": "code", 77 | "execution_count": 3, 78 | "metadata": {}, 79 | "outputs": [ 80 | { 81 | "data": { 82 | "text/plain": [ 83 | "False" 84 | ] 85 | }, 86 | "execution_count": 3, 87 | "metadata": {}, 88 | "output_type": "execute_result" 89 | } 90 | ], 91 | "source": [ 92 | "T.has_free_vertices()" 93 | ] 94 | }, 95 | { 96 | "cell_type": "markdown", 97 | "metadata": {}, 98 | "source": [ 99 | "No free vertices are found, so we cannot remove any and the attempt will keep all 9." 100 | ] 101 | }, 102 | { 103 | "cell_type": "code", 104 | "execution_count": 4, 105 | "metadata": {}, 106 | "outputs": [ 107 | { 108 | "data": { 109 | "text/plain": [ 110 | "(array([0, 1, 2, 3, 4, 5, 6, 7, 8]), [])" 111 | ] 112 | }, 113 | "execution_count": 4, 114 | "metadata": {}, 115 | "output_type": "execute_result" 116 | } 117 | ], 118 | "source": [ 119 | "T.rm_free_vertices_()" 120 | ] 121 | }, 122 | { 123 | "cell_type": "markdown", 124 | "metadata": {}, 125 | "source": [ 126 | "Let's see next, if we have consistent orientations (this should fail)." 
127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": 5, 132 | "metadata": {}, 133 | "outputs": [ 134 | { 135 | "name": "stdout", 136 | "output_type": "stream", 137 | "text": [ 138 | "Orientations are not uniform\n" 139 | ] 140 | }, 141 | { 142 | "data": { 143 | "text/plain": [ 144 | "False" 145 | ] 146 | }, 147 | "execution_count": 5, 148 | "metadata": {}, 149 | "output_type": "execute_result" 150 | } 151 | ], 152 | "source": [ 153 | "T.is_oriented()" 154 | ] 155 | }, 156 | { 157 | "cell_type": "markdown", 158 | "metadata": {}, 159 | "source": [ 160 | "Some functions don't care about the orientation, for example the average edge length computation." 161 | ] 162 | }, 163 | { 164 | "cell_type": "code", 165 | "execution_count": 6, 166 | "metadata": {}, 167 | "outputs": [ 168 | { 169 | "data": { 170 | "text/plain": [ 171 | "1.0543647924813107" 172 | ] 173 | }, 174 | "execution_count": 6, 175 | "metadata": {}, 176 | "output_type": "execute_result" 177 | } 178 | ], 179 | "source": [ 180 | "T.avg_edge_length()" 181 | ] 182 | }, 183 | { 184 | "cell_type": "markdown", 185 | "metadata": {}, 186 | "source": [ 187 | "We can also get the boundary of the tet mesh as a triangle mesh." 188 | ] 189 | }, 190 | { 191 | "cell_type": "code", 192 | "execution_count": 7, 193 | "metadata": {}, 194 | "outputs": [ 195 | { 196 | "name": "stdout", 197 | "output_type": "stream", 198 | "text": [ 199 | "Found 12 triangles on boundary.\n" 200 | ] 201 | } 202 | ], 203 | "source": [ 204 | "BT = T.boundary_tria()" 205 | ] 206 | }, 207 | { 208 | "cell_type": "markdown", 209 | "metadata": {}, 210 | "source": [ 211 | "But also the boundary is not oriented consistently (triangle normals of neighboring triangles point in opposite directions)." 212 | ] 213 | }, 214 | { 215 | "cell_type": "code", 216 | "execution_count": 8, 217 | "metadata": {}, 218 | "outputs": [ 219 | { 220 | "data": { 221 | "text/plain": [ 222 | "False" 223 | ] 224 | }, 225 | "execution_count": 8, 226 | "metadata": {}, 227 | "output_type": "execute_result" 228 | } 229 | ], 230 | "source": [ 231 | "BT.is_oriented()" 232 | ] 233 | }, 234 | { 235 | "cell_type": "markdown", 236 | "metadata": {}, 237 | "source": [ 238 | "Let's repeat those steps after correcting the orientation in the tet mesh." 239 | ] 240 | }, 241 | { 242 | "cell_type": "code", 243 | "execution_count": 9, 244 | "metadata": {}, 245 | "outputs": [ 246 | { 247 | "name": "stdout", 248 | "output_type": "stream", 249 | "text": [ 250 | "Flipped 1 tetrahedra\n", 251 | "All tet orientations are correct\n" 252 | ] 253 | }, 254 | { 255 | "data": { 256 | "text/plain": [ 257 | "True" 258 | ] 259 | }, 260 | "execution_count": 9, 261 | "metadata": {}, 262 | "output_type": "execute_result" 263 | } 264 | ], 265 | "source": [ 266 | "T.orient_()\n", 267 | "T.is_oriented()" 268 | ] 269 | }, 270 | { 271 | "cell_type": "markdown", 272 | "metadata": {}, 273 | "source": [ 274 | "When we extract the boundary surface now, we see it is also consistently oriented." 
275 | ] 276 | }, 277 | { 278 | "cell_type": "code", 279 | "execution_count": 10, 280 | "metadata": {}, 281 | "outputs": [ 282 | { 283 | "name": "stdout", 284 | "output_type": "stream", 285 | "text": [ 286 | "Found 12 triangles on boundary.\n" 287 | ] 288 | }, 289 | { 290 | "data": { 291 | "text/plain": [ 292 | "True" 293 | ] 294 | }, 295 | "execution_count": 10, 296 | "metadata": {}, 297 | "output_type": "execute_result" 298 | } 299 | ], 300 | "source": [ 301 | "BT = T.boundary_tria()\n", 302 | "BT.is_oriented()" 303 | ] 304 | }, 305 | { 306 | "cell_type": "markdown", 307 | "metadata": {}, 308 | "source": [ 309 | "Correct orientation is needed, e.g., to compute the volume of a surface mesh." 310 | ] 311 | }, 312 | { 313 | "cell_type": "code", 314 | "execution_count": 11, 315 | "metadata": {}, 316 | "outputs": [ 317 | { 318 | "data": { 319 | "text/plain": [ 320 | "1.0" 321 | ] 322 | }, 323 | "execution_count": 11, 324 | "metadata": {}, 325 | "output_type": "execute_result" 326 | } 327 | ], 328 | "source": [ 329 | "BT.volume()" 330 | ] 331 | } 332 | ], 333 | "metadata": { 334 | "kernelspec": { 335 | "display_name": "Python3", 336 | "language": "python", 337 | "name": "python3" 338 | }, 339 | "language_info": { 340 | "codemirror_mode": { 341 | "name": "ipython", 342 | "version": 3 343 | }, 344 | "file_extension": ".py", 345 | "mimetype": "text/x-python", 346 | "name": "python", 347 | "nbconvert_exporter": "python", 348 | "pygments_lexer": "ipython3", 349 | "version": "3" 350 | } 351 | }, 352 | "nbformat": 4, 353 | "nbformat_minor": 4 354 | } 355 | -------------------------------------------------------------------------------- /lapy/_tet_io.py: -------------------------------------------------------------------------------- 1 | """Functions for IO of Tetrahedra Meshes. 2 | 3 | Should be called via the TetMesh member functions. 4 | """ 5 | 6 | import logging 7 | import os.path 8 | from typing import TYPE_CHECKING 9 | 10 | import numpy as np 11 | 12 | if TYPE_CHECKING: 13 | from .tet_mesh import TetMesh 14 | 15 | logger = logging.getLogger(__name__) 16 | 17 | def read_gmsh(filename: str) -> "TetMesh": 18 | """Load GMSH tetrahedron mesh. 19 | 20 | Parameters 21 | ---------- 22 | filename : str 23 | Filename to load. 24 | 25 | Returns 26 | ------- 27 | TetMesh 28 | Object of loaded GMSH tetrahedron mesh. 29 | 30 | Raises 31 | ------ 32 | OSError 33 | If file is not found or not readable. 34 | ValueError 35 | If file format is invalid or binary format is encountered. 36 | """ 37 | extension = os.path.splitext(filename)[1] 38 | verbose = 1 39 | if verbose > 0: 40 | logger.info("--> GMSH format ... 
") 41 | if extension != ".msh": 42 | msg = "[no .msh file] --> FAILED\n" 43 | logger.error(msg) 44 | raise ValueError(msg) 45 | try: 46 | f = open(filename) 47 | except OSError: 48 | logger.error("[file not found or not readable]") 49 | raise 50 | line = f.readline() 51 | if not line.startswith("$MeshFormat"): 52 | msg = "[$MeshFormat keyword not found] --> FAILED\n" 53 | logger.error(msg) 54 | f.close() 55 | raise ValueError(msg) 56 | line = f.readline() 57 | larr = line.split() 58 | ver = float(larr[0]) 59 | ftype = int(larr[1]) 60 | datatype = int(larr[2]) 61 | logger.debug( 62 | "Msh file ver %s, ftype %s, datatype %s", 63 | ver, 64 | ftype, 65 | datatype, 66 | ) 67 | if ftype != 0: 68 | msg = "[binary format not implemented] --> FAILED\n" 69 | logger.error(msg) 70 | f.close() 71 | raise ValueError(msg) 72 | line = f.readline() 73 | if not line.startswith("$EndMeshFormat"): 74 | msg = "[$EndMeshFormat keyword not found] --> FAILED\n" 75 | logger.error(msg) 76 | f.close() 77 | raise ValueError(msg) 78 | line = f.readline() 79 | if not line.startswith("$Nodes"): 80 | msg = "[$Nodes keyword not found] --> FAILED\n" 81 | logger.error(msg) 82 | f.close() 83 | raise ValueError(msg) 84 | pnum = int(f.readline()) 85 | # read (nodes X 4) matrix as chunk 86 | # drop first column 87 | v = np.fromfile(f, "float32", 4 * pnum, " ") 88 | v.shape = (pnum, 4) 89 | v = np.delete(v, 0, 1) 90 | line = f.readline() 91 | if not line.startswith("$EndNodes"): 92 | msg = "[$EndNodes keyword not found] --> FAILED\n" 93 | logger.error(msg) 94 | f.close() 95 | raise ValueError(msg) 96 | line = f.readline() 97 | if not line.startswith("$Elements"): 98 | msg = "[$Elements keyword not found] --> FAILED\n" 99 | logger.error(msg) 100 | f.close() 101 | raise ValueError(msg) 102 | tnum = int(f.readline()) 103 | pos = f.tell() 104 | line = f.readline() 105 | f.seek(pos) 106 | larr = line.split() 107 | if int(larr[1]) != 4: 108 | logger.debug("larr: %s", larr) 109 | msg = "[can only read tetras] --> FAILED\n" 110 | logger.error(msg) 111 | f.close() 112 | raise ValueError(msg) 113 | # read (nodes X ?) matrix 114 | t = np.fromfile(f, "int", tnum * len(larr), " ") 115 | t.shape = (tnum, len(larr)) 116 | t = np.delete(t, np.s_[0 : len(larr) - 4], 1) 117 | line = f.readline() 118 | if not line.startswith("$EndElements"): 119 | logger.debug("Line: %s", line) 120 | msg = "[$EndElements keyword not found] --> FAILED\n" 121 | logger.error(msg) 122 | f.close() 123 | raise ValueError(msg) 124 | f.close() 125 | logger.info(" --> DONE ( V: %d , T: %d )", v.shape[0], t.shape[0]) 126 | from . import TetMesh 127 | 128 | return TetMesh(v, t) 129 | 130 | 131 | def read_vtk(filename: str) -> "TetMesh": 132 | """Load VTK tetrahedron mesh. 133 | 134 | Parameters 135 | ---------- 136 | filename : str 137 | Filename to load. 138 | 139 | Returns 140 | ------- 141 | TetMesh 142 | Object of loaded VTK tetrahedron mesh. 143 | 144 | Raises 145 | ------ 146 | OSError 147 | If file is not found or not readable. 148 | ValueError 149 | If ASCII keyword is not found. 150 | If DATASET POLYDATA or DATASET UNSTRUCTURED_GRID is not found. 151 | If POINTS keyword is malformed. 152 | If file does not contain tetrahedra data. 153 | """ 154 | verbose = 1 155 | if verbose > 0: 156 | logger.info("--> VTK format ... 
") 157 | try: 158 | f = open(filename) 159 | except OSError: 160 | logger.error("[file not found or not readable]") 161 | raise 162 | # skip comments 163 | line = f.readline() 164 | while line[0] == "#": 165 | line = f.readline() 166 | # search for ASCII keyword in first 5 lines: 167 | count = 0 168 | while count < 5 and not line.startswith("ASCII"): 169 | line = f.readline() 170 | # print line 171 | count = count + 1 172 | if not line.startswith("ASCII"): 173 | msg = "[ASCII keyword not found] --> FAILED\n" 174 | logger.error(msg) 175 | raise ValueError(msg) 176 | # expect Dataset Polydata line after ASCII: 177 | line = f.readline() 178 | if not line.startswith("DATASET POLYDATA") and not line.startswith( 179 | "DATASET UNSTRUCTURED_GRID" 180 | ): 181 | msg = ( 182 | f"[read: {line} expected DATASET POLYDATA or DATASET UNSTRUCTURED_GRID]" 183 | f" --> FAILED\n" 184 | ) 185 | logger.error(msg) 186 | raise ValueError(msg) 187 | # read number of points 188 | line = f.readline() 189 | larr = line.split() 190 | if larr[0] != "POINTS" or (larr[2] != "float" and larr[2] != "double"): 191 | msg = f"[read: {line} expected POINTS # float or POINTS # double ] --> FAILED\n" 192 | logger.error(msg) 193 | raise ValueError(msg) 194 | pnum = int(larr[1]) 195 | # read points as chunk 196 | v = np.fromfile(f, "float32", 3 * pnum, " ") 197 | v.shape = (pnum, 3) 198 | # expect polygon or tria_strip line 199 | line = f.readline() 200 | larr = line.split() 201 | if larr[0] == "POLYGONS" or larr[0] == "CELLS": 202 | tnum = int(larr[1]) 203 | ttnum = int(larr[2]) 204 | npt = float(ttnum) / tnum 205 | if npt != 5.0: 206 | msg = f"[having: {npt} data per tetra, expected 4+1] --> FAILED\n" 207 | logger.error(msg) 208 | raise ValueError(msg) 209 | t = np.fromfile(f, "int", ttnum, " ") 210 | t.shape = (tnum, 5) 211 | if t[tnum - 1][0] != 4: 212 | msg = "[can only read tetras] --> FAILED\n" 213 | logger.error(msg) 214 | raise ValueError(msg) 215 | t = np.delete(t, 0, 1) 216 | else: 217 | msg = f"[read: {line} expected POLYGONS or CELLS] --> FAILED\n" 218 | logger.error(msg) 219 | raise ValueError(msg) 220 | f.close() 221 | logger.info(" --> DONE ( V: %d , T: %d )", v.shape[0], t.shape[0]) 222 | from . import TetMesh 223 | 224 | return TetMesh(v, t) 225 | 226 | 227 | def write_vtk(tet: "TetMesh", filename: str) -> None: 228 | """Save VTK file. 229 | 230 | Parameters 231 | ---------- 232 | tet : TetMesh 233 | Tetrahedron mesh to save. 234 | filename : str 235 | Filename to save to. 236 | 237 | Raises 238 | ------ 239 | OSError 240 | If file is not writable. 241 | """ 242 | # open file 243 | try: 244 | f = open(filename, "w") 245 | except OSError: 246 | logger.error("[File %s not writable]", filename) 247 | raise 248 | # check data structure 249 | # ... 
250 | # Write 251 | f.write("# vtk DataFile Version 1.0\n") 252 | f.write("vtk output\n") 253 | f.write("ASCII\n") 254 | f.write("DATASET POLYDATA\n") 255 | f.write("POINTS " + str(np.shape(tet.v)[0]) + " float\n") 256 | for i in range(np.shape(tet.v)[0]): 257 | f.write(" ".join(map(str, tet.v[i, :]))) 258 | f.write("\n") 259 | f.write( 260 | "POLYGONS " + str(np.shape(tet.t)[0]) + " " + str(5 * np.shape(tet.t)[0]) + "\n" 261 | ) 262 | for i in range(np.shape(tet.t)[0]): 263 | f.write(" ".join(map(str, np.append(4, tet.t[i, :])))) 264 | f.write("\n") 265 | f.close() 266 | -------------------------------------------------------------------------------- /lapy/shapedna.py: -------------------------------------------------------------------------------- 1 | """Functions for computing and comparing Laplace spectra. 2 | 3 | Includes code for solving the anisotropic Laplace-Beltrami eigenvalue 4 | problem as well as functions for normalization and comparison of 5 | Laplace spectra (shapeDNA descriptors). 6 | 7 | The shapeDNA is a descriptor based on the eigenvalues and eigenvectors 8 | of the Laplace-Beltrami operator and can be used for shape analysis 9 | and comparison. 10 | """ 11 | import logging 12 | from typing import TYPE_CHECKING, Optional, Union 13 | 14 | import numpy as np 15 | import scipy.spatial.distance as di 16 | 17 | from . import Solver 18 | 19 | if TYPE_CHECKING: 20 | from .tet_mesh import TetMesh 21 | from .tria_mesh import TriaMesh 22 | 23 | logger = logging.getLogger(__name__) 24 | 25 | def _positive_measure(value: float, name: str) -> float: 26 | """Validate that a measure is positive. 27 | 28 | Parameters 29 | ---------- 30 | value : float 31 | The measure value to validate. 32 | name : str 33 | Name of the measure for error messages. 34 | 35 | Returns 36 | ------- 37 | float 38 | The validated positive measure value. 39 | 40 | Raises 41 | ------ 42 | ValueError 43 | If value is not positive (<=0). 44 | """ 45 | if value <= 0: 46 | raise ValueError(f"{name} must be positive for normalization") 47 | return value 48 | 49 | 50 | def _boundary_volume(geom: "TetMesh") -> float: 51 | """Compute the volume enclosed by the boundary of a tetrahedral mesh. 52 | 53 | Parameters 54 | ---------- 55 | geom : TetMesh 56 | Tetrahedral mesh geometry. 57 | 58 | Returns 59 | ------- 60 | float 61 | Volume enclosed by the oriented boundary surface. 62 | 63 | Raises 64 | ------ 65 | ValueError 66 | If boundary volume is not positive. 67 | """ 68 | bnd = geom.boundary_tria() 69 | bnd.orient_() 70 | return _positive_measure(bnd.volume(), "boundary volume") 71 | 72 | 73 | def _surface_measure(geom: "TriaMesh") -> float: 74 | """Compute the surface area of a triangle mesh. 75 | 76 | Parameters 77 | ---------- 78 | geom : TriaMesh 79 | Triangle mesh geometry. 80 | 81 | Returns 82 | ------- 83 | float 84 | Surface area of the mesh. 85 | 86 | Raises 87 | ------ 88 | ValueError 89 | If area is not positive. 90 | """ 91 | area = _positive_measure(geom.area(), "area") 92 | return area 93 | 94 | 95 | 96 | def compute_shapedna( 97 | geom: Union["TriaMesh", "TetMesh"], 98 | k: int = 50, 99 | lump: bool = False, 100 | aniso: Optional[Union[float, tuple[float, float]]] = None, 101 | aniso_smooth: int = 10, 102 | use_cholmod: bool = False, 103 | ) -> dict: 104 | """Compute the shapeDNA descriptor for triangle or tetrahedral meshes. 105 | 106 | The shapeDNA descriptor consists of the eigenvalues and eigenvectors of 107 | the Laplace-Beltrami operator and can be used for shape analysis and 108 | comparison. 
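Eigenvalues are returned in ascending order; for the default problem
(no Dirichlet boundary conditions) the first eigenvalue is analytically
zero, so tiny positive or negative values there are numerical noise.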
109 | 110 | Parameters 111 | ---------- 112 | geom : TriaMesh | TetMesh 113 | Mesh geometry. 114 | k : int, default=50 115 | Number of eigenvalues/eigenvectors to compute. 116 | lump : bool, default=False 117 | If True, lump the mass matrix (diagonal). See `lapy.Solver` class. 118 | aniso : float | tuple of shape (2,) | None, default=None 119 | Anisotropy for curvature-based anisotropic Laplace. See `lapy.Solver` 120 | class. 121 | aniso_smooth : int, default=10 122 | Number of smoothing iterations for curvature computation on vertices. 123 | See `lapy.Solver` class. 124 | use_cholmod : bool, default=False 125 | If True, attempts to use the Cholesky decomposition for improved 126 | execution speed. Requires the ``scikit-sparse`` library. If it cannot 127 | be found, an error will be thrown. If False, will use slower LU 128 | decomposition. 129 | 130 | Returns 131 | ------- 132 | dict 133 | A dictionary with the following keys: 134 | 135 | - 'Refine' : int - Refinement level (0) 136 | - 'Degree' : int - Polynomial degree (1) 137 | - 'Dimension' : int - Mesh dimension (2 for TriaMesh, 3 for TetMesh) 138 | - 'Elements' : int - Number of mesh elements 139 | - 'DoF' : int - Degrees of freedom (number of vertices) 140 | - 'NumEW' : int - Number of eigenvalues computed 141 | - 'Eigenvalues' : np.ndarray - Array of eigenvalues, shape (k,) 142 | - 'Eigenvectors' : np.ndarray - Array of eigenvectors, shape (n_vertices, k) 143 | """ 144 | # get fem, evals, evecs 145 | 146 | fem = Solver( 147 | geom, lump=lump, aniso=aniso, aniso_smooth=aniso_smooth, use_cholmod=use_cholmod 148 | ) 149 | evals, evecs = fem.eigs(k=k) 150 | 151 | # write ev 152 | 153 | evDict = dict() 154 | evDict["Refine"] = 0 155 | evDict["Degree"] = 1 156 | if type(geom).__name__ == "TriaMesh": 157 | evDict["Dimension"] = 2 158 | elif type(geom).__name__ == "TetMesh": 159 | evDict["Dimension"] = 3 160 | evDict["Elements"] = len(geom.t) 161 | evDict["DoF"] = len(geom.v) 162 | evDict["NumEW"] = k 163 | evDict["Eigenvalues"] = evals 164 | evDict["Eigenvectors"] = evecs 165 | 166 | return evDict 167 | 168 | 169 | def normalize_ev( 170 | geom: Union["TriaMesh", "TetMesh"], 171 | evals: np.ndarray, 172 | method: str = "geometry", 173 | ) -> np.ndarray: 174 | """Normalize eigenvalues for a surface or a volume. 175 | 176 | Normalizes eigenvalues to account for different mesh sizes and dimensions, 177 | enabling meaningful comparison between different shapes. 178 | 179 | Parameters 180 | ---------- 181 | geom : TriaMesh or TetMesh 182 | Mesh geometry. 183 | evals : np.ndarray 184 | Set of sorted eigenvalues, shape (k,). 185 | method : {'surface', 'volume', 'geometry'}, default='geometry' 186 | Normalization method: 187 | 188 | - 'surface': Normalize by surface area (for 2D surfaces) 189 | - 'volume': Normalize by enclosed volume (for 3D objects) 190 | - 'geometry': Automatically choose surface for TriaMesh, volume for TetMesh 191 | 192 | Returns 193 | ------- 194 | np.ndarray 195 | Vector of re-weighted eigenvalues, shape (k,). 196 | 197 | Raises 198 | ------ 199 | ValueError 200 | If method is not one of 'surface', 'volume', or 'geometry'. 201 | If geometry type is unsupported for the chosen normalization. 202 | If the measure (area/volume) is not positive. 
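
Examples
--------
A minimal sketch following the ShapeDNA tutorial data:

>>> from lapy import TriaMesh, shapedna
>>> tria = TriaMesh.read_vtk("../data/cubeTria.vtk")  # doctest: +SKIP
>>> ev = shapedna.compute_shapedna(tria, k=3)  # doctest: +SKIP
>>> shapedna.normalize_ev(tria, ev["Eigenvalues"], method="geometry")  # doctest: +SKIP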
203 | """ 204 | geom_type = type(geom).__name__ 205 | if method == "surface": 206 | return evals * _surface_measure(geom) ** (2.0 / 2.0) 207 | 208 | if method == "volume": 209 | if geom_type == "TriaMesh": 210 | return evals * _positive_measure(geom.volume(), "volume") ** (2.0 / 3.0) 211 | if geom_type == "TetMesh": 212 | return evals * _boundary_volume(geom) ** (2.0 / 3.0) 213 | raise ValueError("Unsupported geometry type for volume normalization") 214 | 215 | if method == "geometry": 216 | if geom_type == "TriaMesh": 217 | return evals * _surface_measure(geom) ** (2.0 / 2.0) 218 | if geom_type == "TetMesh": 219 | return evals * _boundary_volume(geom) ** (2.0 / 3.0) 220 | raise ValueError("Unsupported geometry type for geometry normalization") 221 | 222 | raise ValueError(f"Unknown normalization method: {method}") 223 | 224 | 225 | def reweight_ev(evals: np.ndarray) -> np.ndarray: 226 | """Apply linear re-weighting to eigenvalues. 227 | 228 | Divides each eigenvalue by its index to reduce the influence of higher 229 | eigenvalues, which tend to be less stable. 230 | 231 | Parameters 232 | ---------- 233 | evals : np.ndarray 234 | Set of sorted eigenvalues, shape (k,). 235 | 236 | Returns 237 | ------- 238 | np.ndarray 239 | Vector of re-weighted eigenvalues, shape (k,). Each eigenvalue is 240 | divided by its 1-based index: ``evals[i] / (i+1)``. 241 | 242 | Notes 243 | ----- 244 | This reweighting scheme gives less importance to higher eigenvalues, which 245 | are typically more sensitive to discretization and numerical errors. 246 | """ 247 | # evals[1:] = evals[1:] / np.arange(1, len(evals)) 248 | evals = evals / np.arange(1, len(evals) + 1) 249 | 250 | return evals 251 | 252 | 253 | def compute_distance( 254 | ev1: np.ndarray, ev2: np.ndarray, dist: str = "euc" 255 | ) -> float: 256 | """Compute the shape dissimilarity from two shapeDNA descriptors. 257 | 258 | Computes a distance metric between two sets of eigenvalues to quantify 259 | the dissimilarity between two shapes. 260 | 261 | Parameters 262 | ---------- 263 | ev1 : np.ndarray 264 | First set of sorted eigenvalues, shape (k,). 265 | ev2 : np.ndarray 266 | Second set of sorted eigenvalues, shape (k,). 267 | dist : {'euc'}, default='euc' 268 | Distance measure. Currently only 'euc' (Euclidean) is implemented. 269 | 270 | Returns 271 | ------- 272 | float 273 | Distance between the eigenvalue arrays. 274 | 275 | Raises 276 | ------ 277 | ValueError 278 | If dist is not 'euc' (other distance metrics not yet implemented). 279 | 280 | Notes 281 | ----- 282 | The eigenvalue arrays should have the same length and be normalized and 283 | reweighted in the same way for meaningful comparison. 284 | """ 285 | if dist == "euc": 286 | return di.euclidean(ev1, ev2) 287 | else: 288 | logger.warning( 289 | "Only Euclidean distance is currently implemented; received %s", dist 290 | ) 291 | raise ValueError(f"Distance metric {dist} is not implemented.") -------------------------------------------------------------------------------- /examples/Test_ShapeDNA.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "# ShapeDNA" 8 | ] 9 | }, 10 | { 11 | "cell_type": "markdown", 12 | "metadata": {}, 13 | "source": [ 14 | "ShapeDNA is an n-dimensional intrinsic shape descriptor (see Reuter et al., CAD Journal, 2006). 
It can be used to compare two geometric objects independently of their pose or posture, since the ShapeDNA is not affected by (near-)isometric deformations. This tutorial shows how to compute, normalize, and re-weight Laplace-Beltrami spectra to obtain the ShapeDNA."
15 | ]
16 | },
17 | {
18 | "cell_type": "code",
19 | "execution_count": 1,
20 | "metadata": {},
21 | "outputs": [],
22 | "source": [
23 | "# imports\n",
24 | "from lapy import TetMesh, TriaMesh, shapedna"
25 | ]
26 | },
27 | {
28 | "cell_type": "markdown",
29 | "metadata": {},
30 | "source": [
31 | "First we load some data: a tria mesh representing the boundary of a cube and a tetrahedral mesh representing the full cube."
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": 2,
37 | "metadata": {},
38 | "outputs": [
39 | {
40 | "name": "stdout",
41 | "output_type": "stream",
42 | "text": [
43 | "--> VTK format ... \n",
44 | " --> DONE ( V: 2402 , T: 4800 )\n",
45 | "\n",
46 | "--> VTK format ... \n",
47 | " --> DONE ( V: 9261 , T: 48000 )\n",
48 | "\n"
49 | ]
50 | }
51 | ],
52 | "source": [
53 | "# load data\n",
54 | "tria = TriaMesh.read_vtk(\"../data/cubeTria.vtk\")\n",
55 | "tet = TetMesh.read_vtk(\"../data/cubeTetra.vtk\")"
56 | ]
57 | },
58 | {
59 | "cell_type": "markdown",
60 | "metadata": {},
61 | "source": [
62 | "Let's compute the first three eigenvalues and eigenvectors of the triangle mesh..."
63 | ]
64 | },
65 | {
66 | "cell_type": "code",
67 | "execution_count": 3,
68 | "metadata": {},
69 | "outputs": [
70 | {
71 | "name": "stdout",
72 | "output_type": "stream",
73 | "text": [
74 | "TriaMesh with regular Laplace-Beltrami\n",
75 | "Solver: spsolve (LU decomposition) ...\n"
76 | ]
77 | },
78 | {
79 | "data": {
80 | "text/plain": [
81 | "array([-4.0165149e-05, 4.1696410e+00, 4.1704664e+00], dtype=float32)"
82 | ]
83 | },
84 | "execution_count": 3,
85 | "metadata": {},
86 | "output_type": "execute_result"
87 | }
88 | ],
89 | "source": [
90 | "# compute eigenvalues and eigenvectors for tria mesh\n",
91 | "ev = shapedna.compute_shapedna(tria, k=3)\n",
92 | "ev[\"Eigenvectors\"]\n",
93 | "ev[\"Eigenvalues\"]"
94 | ]
95 | },
96 | {
97 | "cell_type": "markdown",
98 | "metadata": {},
99 | "source": [
100 | "Now we perform a normalization of the eigenvalues using the method \"geometry\", which for 2D meshes equals surface-area normalization. The resulting eigenvalues are the same as those computed on the same shape scaled to unit surface area (=1)."
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": 4,
106 | "metadata": {},
107 | "outputs": [
108 | {
109 | "data": {
110 | "text/plain": [
111 | "array([-2.4099089e-04, 2.5017845e+01, 2.5022799e+01], dtype=float32)"
112 | ]
113 | },
114 | "execution_count": 4,
115 | "metadata": {},
116 | "output_type": "execute_result"
117 | }
118 | ],
119 | "source": [
120 | "# volume / surface / geometry normalization of tria eigenvalues\n",
121 | "shapedna.normalize_ev(tria, ev[\"Eigenvalues\"], method=\"geometry\")"
122 | ]
123 | },
124 | {
125 | "cell_type": "markdown",
126 | "metadata": {},
127 | "source": [
128 | "For surfaces, eigenvalues increase linearly with their ordering. To reduce the influence of higher (and typically more noise-affected) eigenvalues, it is common practice to perform a linear re-weighting."
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": 5,
134 | "metadata": {},
135 | "outputs": [
136 | {
137 | "data": {
138 | "text/plain": [
139 | "array([-4.01651487e-05, 2.08482051e+00, 1.39015547e+00])"
140 | ]
141 | },
142 | "execution_count": 5,
143 | "metadata": {},
144 | "output_type": "execute_result"
145 | }
146 | ],
147 | "source": [
148 | "# linear reweighting of tria eigenvalues\n",
149 | "shapedna.reweight_ev(ev[\"Eigenvalues\"])"
150 | ]
151 | },
152 | {
153 | "cell_type": "markdown",
154 | "metadata": {},
155 | "source": [
156 | "The normalized and re-weighted eigenvalues are called the ShapeDNA. We can now compute the distance between two shapes by comparing their ShapeDNA. The default is the Euclidean distance between two ShapeDNA vectors."
157 | ]
158 | },
159 | {
160 | "cell_type": "code",
161 | "execution_count": 6,
162 | "metadata": {},
163 | "outputs": [
164 | {
165 | "data": {
166 | "text/plain": [
167 | "0.0"
168 | ]
169 | },
170 | "execution_count": 6,
171 | "metadata": {},
172 | "output_type": "execute_result"
173 | }
174 | ],
175 | "source": [
176 | "# compute distance for tria eigenvalues (trivial case)\n",
177 | "shapedna.compute_distance(ev[\"Eigenvalues\"], ev[\"Eigenvalues\"])"
178 | ]
179 | },
180 | {
181 | "cell_type": "markdown",
182 | "metadata": {},
183 | "source": [
184 | "Note that usually more eigenvalues are used (on the order of 15 to 50) for shape comparison. You can also run other analyses, e.g., find clusters in this shape space or project it via PCA for visualization."
185 | ]
186 | },
187 | {
188 | "cell_type": "markdown",
189 | "metadata": {},
190 | "source": [
191 | "We now repeat the above steps for a tetrahedral mesh, again computing the first three eigenvalues and -vectors."
192 | ]
193 | },
194 | {
195 | "cell_type": "code",
196 | "execution_count": 7,
197 | "metadata": {},
198 | "outputs": [
199 | {
200 | "name": "stdout",
201 | "output_type": "stream",
202 | "text": [
203 | "TetMesh with regular Laplace\n",
204 | "Solver: spsolve (LU decomposition) ...\n"
205 | ]
206 | },
207 | {
208 | "data": {
209 | "text/plain": [
210 | "array([8.4440224e-05, 9.8897915e+00, 9.8898811e+00], dtype=float32)"
211 | ]
212 | },
213 | "execution_count": 7,
214 | "metadata": {},
215 | "output_type": "execute_result"
216 | }
217 | ],
218 | "source": [
219 | "# compute eigenvalues and eigenvectors for tet mesh\n",
220 | "evTet = shapedna.compute_shapedna(tet, k=3)\n",
221 | "evTet[\"Eigenvectors\"]\n",
222 | "evTet[\"Eigenvalues\"]"
223 | ]
224 | },
225 | {
226 | "cell_type": "markdown",
227 | "metadata": {},
228 | "source": [
229 | "For 3D meshes the \"geometry\" normalization defaults to unit-volume normalization. Since the cube already has unit volume, nothing happens."
230 | ]
231 | },
232 | {
233 | "cell_type": "code",
234 | "execution_count": 8,
235 | "metadata": {},
236 | "outputs": [
237 | {
238 | "name": "stdout",
239 | "output_type": "stream",
240 | "text": [
241 | "Found 4800 triangles on boundary.\n",
242 | "Searched mesh after 79 flooding iterations (0.012834310531616211 sec).\n"
243 | ]
244 | },
245 | {
246 | "data": {
247 | "text/plain": [
248 | "array([8.4440224e-05, 9.8897915e+00, 9.8898811e+00], dtype=float32)"
249 | ]
250 | },
251 | "execution_count": 8,
252 | "metadata": {},
253 | "output_type": "execute_result"
254 | }
255 | ],
256 | "source": [
257 | "# volume / surface / geometry normalization of tet eigenvalues\n",
258 | "shapedna.normalize_ev(tet, evTet[\"Eigenvalues\"], method=\"geometry\")"
259 | ]
260 | },
261 | {
262 | "cell_type": "markdown",
263 | "metadata": {},
264 | "source": [
265 | "Again we perform linear re-weighting. This is only meaningful for small eigenvalues, as the asymptotic trend of eigenvalues of 3D solids is not linear."
266 | ]
267 | },
268 | {
269 | "cell_type": "code",
270 | "execution_count": 9,
271 | "metadata": {},
272 | "outputs": [
273 | {
274 | "data": {
275 | "text/plain": [
276 | "array([8.44402239e-05, 4.94489574e+00, 3.29662704e+00])"
277 | ]
278 | },
279 | "execution_count": 9,
280 | "metadata": {},
281 | "output_type": "execute_result"
282 | }
283 | ],
284 | "source": [
285 | "# linear reweighting of tet eigenvalues\n",
286 | "shapedna.reweight_ev(evTet[\"Eigenvalues\"])"
287 | ]
288 | },
289 | {
290 | "cell_type": "markdown",
291 | "metadata": {},
292 | "source": [
293 | "Now that we have the ShapeDNA of the 3D solid cube, we can compare it to other ShapeDNA (or to itself, which of course yields zero)."
294 | ]
295 | },
296 | {
297 | "cell_type": "code",
298 | "execution_count": 10,
299 | "metadata": {},
300 | "outputs": [
301 | {
302 | "data": {
303 | "text/plain": [
304 | "0.0"
305 | ]
306 | },
307 | "execution_count": 10,
308 | "metadata": {},
309 | "output_type": "execute_result"
310 | }
311 | ],
312 | "source": [
313 | "# compute distance for tet eigenvalues (trivial case)\n",
314 | "shapedna.compute_distance(evTet[\"Eigenvalues\"], evTet[\"Eigenvalues\"])"
315 | ]
316 | }
317 | ],
318 | "metadata": {
319 | "kernelspec": {
320 | "display_name": "Python3",
321 | "language": "python",
322 | "name": "python3"
323 | },
324 | "language_info": {
325 | "codemirror_mode": {
326 | "name": "ipython",
327 | "version": 3
328 | },
329 | "file_extension": ".py",
330 | "mimetype": "text/x-python",
331 | "name": "python",
332 | "nbconvert_exporter": "python",
333 | "pygments_lexer": "ipython3",
334 | "version": "3"
335 | }
336 | },
337 | "nbformat": 4,
338 | "nbformat_minor": 4
339 | }
340 | 
--------------------------------------------------------------------------------
/doc/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | # 3 | # For the full list of built-in configuration values, see the documentation: 4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 5 | 6 | 7 | import inspect 8 | from datetime import date 9 | from importlib import import_module 10 | from typing import Dict, Optional 11 | 12 | from sphinx_gallery.sorting import FileNameSortKey 13 | 14 | import lapy 15 | 16 | # -- project information ----------------------------------------------------- 17 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information 18 | 19 | project = "LaPy" 20 | author = "Martin Reuter" 21 | copyright = f"{date.today().year}, {author}" 22 | release = lapy.__version__ 23 | package = lapy.__name__ 24 | gh_url = "https://github.com/Deep-MI/LaPy" 25 | 26 | # -- general configuration --------------------------------------------------- 27 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration 28 | 29 | # If your documentation needs a minimal Sphinx version, state it here. 30 | needs_sphinx = "5.0" 31 | 32 | # The document name of the “root” document, that is, the document that contains 33 | # the root toctree directive. 34 | root_doc = "index" 35 | 36 | # Add any Sphinx extension module names here, as strings. They can be 37 | # extensions coming with Sphinx (named "sphinx.ext.*") or your custom 38 | # ones. 39 | extensions = [ 40 | "sphinx.ext.autodoc", 41 | "sphinx.ext.autosectionlabel", 42 | "sphinx.ext.autosummary", 43 | "sphinx.ext.intersphinx", 44 | "sphinx.ext.linkcode", 45 | "numpydoc", 46 | "sphinxcontrib.bibtex", 47 | "sphinx_copybutton", 48 | "sphinx_design", 49 | "sphinx_gallery.gen_gallery", 50 | "sphinx_issues", 51 | "nbsphinx", 52 | "IPython.sphinxext.ipython_console_highlighting", 53 | ] 54 | 55 | templates_path = ["_templates"] 56 | exclude_patterns = [ 57 | "_build", 58 | "Thumbs.db", 59 | ".DS_Store", 60 | "**.ipynb_checkpoints", 61 | "tutorials/examples/README.rst", 62 | ] 63 | 64 | # Sphinx will warn about all references where the target cannot be found. 65 | nitpicky = True 66 | nitpick_ignore = [] 67 | 68 | show_warning_types = True 69 | suppress_warnings = [ 70 | # Ignore new warning in Sphinx 7.3.0 while pickling environment: 71 | # WARNING: cannot cache unpickable configuration value: 'sphinx_gallery_conf' 72 | "config.cache", 73 | ] 74 | 75 | # A list of ignored prefixes for module index sorting. 76 | modindex_common_prefix = [f"{package}."] 77 | 78 | # The name of a reST role (builtin or Sphinx extension) to use as the default 79 | # role, that is, for text marked up `like this`. This can be set to 'py:obj' to 80 | # make `filter` a cross-reference to the Python function “filter”. 
81 | default_role = "py:obj" 82 | 83 | # -- options for HTML output ------------------------------------------------- 84 | html_theme = "furo" 85 | html_static_path = ["_static"] 86 | html_css_files = [ 87 | "css/style.css", 88 | ] 89 | html_title = project 90 | html_show_sphinx = False 91 | 92 | # Documentation to change footer icons: 93 | # https://pradyunsg.me/furo/customisation/footer/#changing-footer-icons 94 | html_theme_options = { 95 | "footer_icons": [ 96 | { 97 | "name": "GitHub", 98 | "url": gh_url, 99 | "html": """ 100 | 101 | 102 | 103 | """, 104 | "class": "", 105 | }, 106 | ], 107 | } 108 | 109 | # -- autosummary ------------------------------------------------------------- 110 | autosummary_generate = True 111 | 112 | # -- autodoc ----------------------------------------------------------------- 113 | autodoc_typehints = "none" 114 | autodoc_member_order = "groupwise" 115 | autodoc_warningiserror = True 116 | autoclass_content = "class" 117 | 118 | # -- intersphinx ------------------------------------------------------------- 119 | intersphinx_mapping = { 120 | "matplotlib": ("https://matplotlib.org/stable", None), 121 | "mne": ("https://mne.tools/stable/", None), 122 | "numpy": ("https://numpy.org/doc/stable", None), 123 | "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None), 124 | "python": ("https://docs.python.org/3", None), 125 | "scipy": ("https://docs.scipy.org/doc/scipy", None), 126 | "sklearn": ("https://scikit-learn.org/stable/", None), 127 | } 128 | intersphinx_timeout = 5 129 | 130 | # -- sphinx-issues ----------------------------------------------------------- 131 | issues_github_path = gh_url.split("https://github.com/")[-1] 132 | 133 | # -- autosectionlabels ------------------------------------------------------- 134 | autosectionlabel_prefix_document = True 135 | 136 | # -- numpydoc ---------------------------------------------------------------- 137 | numpydoc_class_members_toctree = False 138 | numpydoc_attributes_as_param_list = False 139 | 140 | # x-ref 141 | numpydoc_xref_param_type = True 142 | numpydoc_xref_aliases = { 143 | # LaPy 144 | "TetMesh": "lapy.TetMesh", 145 | "TriaMesh": "lapy.TriaMesh", 146 | # Matplotlib 147 | "Axes": "matplotlib.axes.Axes", 148 | "Figure": "matplotlib.figure.Figure", 149 | # Python 150 | "bool": ":class:`python:bool`", 151 | "Path": "pathlib.Path", 152 | "TextIO": "io.TextIOBase", 153 | # Scipy 154 | "csc_matrix": "scipy.sparse.csc_matrix", 155 | } 156 | numpydoc_xref_ignore = { 157 | "k", # Solver, bad variable name 158 | "n", # Solver, bad variable name 159 | "N", # Solver, bad variable name 160 | "n_triangles", # TriaMesh 161 | "of", 162 | "shape", 163 | "vnum", # TriaMesh 164 | } 165 | 166 | # validation 167 | # https://numpydoc.readthedocs.io/en/latest/validation.html#validation-checks 168 | error_ignores = { 169 | "GL01", # docstring should start in the line immediately after the quotes 170 | "EX01", # section 'Examples' not found 171 | "ES01", # no extended summary found 172 | "SA01", # section 'See Also' not found 173 | "RT02", # The first line of the Returns section should contain only the type, unless multiple values are being returned # noqa 174 | } 175 | numpydoc_validate = True 176 | numpydoc_validation_checks = {"all"} | set(error_ignores) 177 | numpydoc_validation_exclude = { # regex to ignore during docstring check 178 | r"\.__getitem__", 179 | r"\.__contains__", 180 | r"\.__hash__", 181 | r"\.__mul__", 182 | r"\.__sub__", 183 | r"\.__add__", 184 | r"\.__iter__", 185 | r"\.__div__", 186 | 
r"\.__neg__", 187 | } 188 | 189 | # -- sphinxcontrib-bibtex ---------------------------------------------------- 190 | bibtex_bibfiles = ["./references.bib"] 191 | 192 | # -- sphinx.ext.linkcode ----------------------------------------------------- 193 | # https://www.sphinx-doc.org/en/master/usage/extensions/linkcode.html 194 | 195 | 196 | def linkcode_resolve(domain: str, info: Dict[str, str]) -> Optional[str]: 197 | """Determine the URL corresponding to a Python object. 198 | 199 | Parameters 200 | ---------- 201 | domain : str 202 | One of 'py', 'c', 'cpp', 'javascript'. 203 | info : dict 204 | With keys "module" and "fullname". 205 | 206 | Returns 207 | ------- 208 | url : str | None 209 | The code URL. If None, no link is added. 210 | """ 211 | if domain != "py": 212 | return None # only document python objects 213 | 214 | # retrieve pyobject and file 215 | try: 216 | module = import_module(info["module"]) 217 | pyobject = module 218 | for elt in info["fullname"].split("."): 219 | pyobject = getattr(pyobject, elt) 220 | fname = inspect.getsourcefile(pyobject).replace("\\", "/") 221 | except Exception: 222 | # Either the object could not be loaded or the file was not found. 223 | # For instance, properties will raise. 224 | return None 225 | 226 | # retrieve start/stop lines 227 | source, start_line = inspect.getsourcelines(pyobject) 228 | lines = "L%d-L%d" % (start_line, start_line + len(source) - 1) 229 | 230 | # create URL 231 | if "dev" in release: 232 | branch = "main" 233 | else: 234 | return None # alternatively, link to a maint/version branch 235 | fname = fname.rsplit(f"/{package}/")[1] 236 | url = f"{gh_url}/blob/{branch}/{package}/{fname}#{lines}" 237 | return url 238 | 239 | 240 | # -- sphinx-gallery ---------------------------------------------------------- 241 | sphinx_gallery_conf = { 242 | "backreferences_dir": "generated/backreferences", 243 | "doc_module": (f"{package}",), 244 | "examples_dirs": ["../examples"], 245 | "exclude_implicit_doc": {}, # set 246 | "filename_pattern": r"\d{2}_", 247 | "gallery_dirs": ["generated/examples"], 248 | "line_numbers": False, 249 | "plot_gallery": True, 250 | "reference_url": {f"{package}": None}, 251 | "remove_config_comments": True, 252 | "show_memory": True, 253 | "within_subsection_order": FileNameSortKey, 254 | } 255 | 256 | # -- make sure pandoc gets installed ----------------------------------------- 257 | from inspect import getsourcefile 258 | import os 259 | 260 | # Get path to directory containing this file, conf.py. 261 | DOCS_DIRECTORY = os.path.dirname(os.path.abspath(getsourcefile(lambda: 0))) 262 | 263 | def ensure_pandoc_installed(_): 264 | import pypandoc 265 | 266 | # Download pandoc if necessary. If pandoc is already installed and on 267 | # the PATH, the installed version will be used. Otherwise, we will 268 | # download a copy of pandoc into docs/bin/ and add that to our PATH. 
269 | pandoc_dir = os.path.join(DOCS_DIRECTORY, "bin") 270 | # Add dir containing pandoc binary to the PATH environment variable 271 | if pandoc_dir not in os.environ["PATH"].split(os.pathsep): 272 | os.environ["PATH"] += os.pathsep + pandoc_dir 273 | pypandoc.ensure_pandoc_installed( 274 | targetfolder=pandoc_dir, 275 | delete_installer=True, 276 | ) 277 | 278 | def setup(app): 279 | app.connect("builder-inited", ensure_pandoc_installed) 280 | -------------------------------------------------------------------------------- /lapy/io.py: -------------------------------------------------------------------------------- 1 | """Functions to read and write spectra and vertex functions.""" 2 | 3 | import logging 4 | 5 | import numpy as np 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | def read_vfunc(filename: str) -> list[float]: 11 | """Import vertex functions from txt file. 12 | 13 | Values can be separated by ``;`` or ``,`` and surrounded by ``{}`` or ``()`` 14 | brackets. Also first line can have the keyword "Solution:", i.e. the PSOL format 15 | from ShapeDNA. 16 | 17 | Parameters 18 | ---------- 19 | filename : str 20 | Filename of input. 21 | 22 | Returns 23 | ------- 24 | list of float 25 | List of vfunc parameters. 26 | 27 | Raises 28 | ------ 29 | OSError 30 | If file is not found or not readable. 31 | ValueError 32 | If 'Solution:' marker is not found or no vertex function data exists. 33 | """ 34 | import re 35 | 36 | try: 37 | with open(filename) as f: 38 | txt = [x.strip() for x in f] 39 | except OSError: 40 | logger.error("File %s not found or not readable", filename) 41 | raise 42 | 43 | if "Solution:" not in txt: 44 | raise ValueError(f"Expected 'Solution:' marker in {filename}") 45 | txt.remove("Solution:") 46 | txt = [re.sub("[{()}]", "", x) for x in txt if x] 47 | if not txt: 48 | raise ValueError(f"No vertex function data found in {filename}") 49 | 50 | if len(txt) == 1: 51 | txt = [re.split("[,;]", x) for x in txt][0] 52 | return [float(x) for x in txt] 53 | 54 | 55 | def read_ev(filename: str) -> dict: 56 | """Load EV file. 57 | 58 | Parameters 59 | ---------- 60 | filename : str 61 | Filename of input. 62 | 63 | Returns 64 | ------- 65 | dict 66 | Dictionary of eigenvalues, eigenvectors (optional), and associated 67 | information. Contains keys like 'Eigenvalues' (np.ndarray), 'Eigenvectors' 68 | (np.ndarray, optional), and metadata fields. 69 | 70 | Raises 71 | ------ 72 | OSError 73 | If file is not readable. 
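
Examples
--------
A minimal sketch; ``cube.ev`` is a placeholder for a file produced by
``write_ev`` or the original ShapeDNA tools:

>>> d = read_ev("cube.ev")  # doctest: +SKIP
>>> d["Eigenvalues"][:3]  # doctest: +SKIP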
74 | """ 75 | # open file 76 | try: 77 | with open(filename) as f: 78 | # read file (and get rid of all \n) 79 | ll = f.read().splitlines() 80 | except OSError: 81 | logger.error("File %s not readable", filename) 82 | raise 83 | 84 | # define data structure 85 | d = {} 86 | # go through each line and parse it 87 | i = 0 88 | 89 | def _parse_field(label, cast=int): 90 | nonlocal i 91 | d[label] = cast(ll[i].split(":", 1)[1].strip()) 92 | i += 1 93 | 94 | while i < len(ll): 95 | line = ll[i].lstrip() 96 | if ":" in line: 97 | key, _ = line.split(":", 1) 98 | if key in { 99 | "Creator", 100 | "File", 101 | "User", 102 | "Refine", 103 | "Degree", 104 | "Dimension", 105 | "Elements", 106 | "DoF", 107 | "NumEW", 108 | "Area", 109 | "Volume", 110 | "BLength", 111 | "EulerChar", 112 | "Time(pre)", 113 | "Time(calcAB)", 114 | "Time(calcEW)", 115 | }: 116 | label = { 117 | "Time(pre)": "TimePre", 118 | "Time(calcAB)": "TimeCalcAB", 119 | "Time(calcEW)": "TimeCalcEW", 120 | }.get(key, key) 121 | _parse_field(label, 122 | float if key in {"Area", "Volume", "BLength"} else int) 123 | continue 124 | 125 | if line.startswith("Eigenvalues"): 126 | i = i + 1 127 | while ll[i].find("{") < 0: # possibly introduce termination criterion 128 | i = i + 1 129 | if ll[i].find("}") >= 0: # '{' and '}' on the same line 130 | evals = ll[i].strip().replace("{", "").replace("}", "") 131 | else: 132 | evals = "" 133 | while ll[i].find("}") < 0: 134 | evals = evals + ll[i].strip().replace("{", "").replace("}", "") 135 | i = i + 1 136 | evals = evals + ll[i].strip().replace("{", "").replace("}", "") 137 | evals = np.array(evals.split(";")).astype(float) 138 | d.update({"Eigenvalues": evals}) 139 | i = i + 1 140 | elif line.startswith("Eigenvectors"): 141 | i = i + 1 142 | while not (ll[i].strip().startswith("sizes")): 143 | i = i + 1 144 | d.update( 145 | {"EigenvectorsSize": np.array(ll[i].strip().split()[1:]).astype(int)} 146 | ) 147 | i = i + 1 148 | while ll[i].find("{") < 0: # possibly introduce termination criterion 149 | i = i + 1 150 | if ll[i].find("}") >= 0: # '{' and '}' on the same line 151 | evecs = ll[i].strip().replace("{", "").replace("}", "") 152 | else: 153 | evecs = "" 154 | while ll[i].find("}") < 0: 155 | evecs = evecs + ll[i].strip().replace("{", "").replace( 156 | "}", "" 157 | ).replace("(", "").replace(")", "") 158 | i = i + 1 159 | evecs = evecs + ll[i].strip().replace("{", "").replace("}", "").replace( 160 | "(", "" 161 | ).replace(")", "") 162 | evecs = np.array( 163 | evecs.replace(";", " ").replace(",", " ").strip().split() 164 | ).astype(float) 165 | if len(evecs) == (d["EigenvectorsSize"][0] * d["EigenvectorsSize"][1]): 166 | evecs = np.transpose(np.reshape(evecs, d["EigenvectorsSize"][1::-1])) 167 | d.update({"Eigenvectors": evecs}) 168 | else: 169 | print( 170 | "[Length of eigenvectors is not " 171 | + str(d["EigenvectorsSize"][0]) 172 | + " times " 173 | + str(d["EigenvectorsSize"][1]) 174 | + "." 175 | ) 176 | i = i + 1 177 | else: 178 | i = i + 1 179 | # close file 180 | f.close() 181 | # return dict 182 | return d 183 | 184 | 185 | def write_ev(filename: str, d: dict) -> None: 186 | """Save EV data structures as txt file (format from ShapeDNA). 187 | 188 | Parameters 189 | ---------- 190 | filename : str 191 | Filename to save to. 192 | d : dict 193 | Dictionary of eigenvalues, eigenvectors (optional), and associated 194 | information. Must contain key 'Eigenvalues' (np.ndarray). Optional keys 195 | include 'Eigenvectors' (np.ndarray) and various metadata fields. 
196 | 
197 | Raises
198 | ------
199 | OSError
200 | If file is not writable.
201 | ValueError
202 | If 'Eigenvalues' key is missing from dictionary.
203 | """
204 | # open file
205 | try:
206 | f = open(filename, "w")
207 | except OSError:
208 | logger.error("File %s not writable", filename)
209 | raise
210 | # check data structure
211 | if "Eigenvalues" not in d:
212 | raise ValueError("No Eigenvalues specified")
213 | # ...
214 | # Write
215 | if "Creator" in d:
216 | f.write(" Creator: " + d["Creator"] + "\n")
217 | if "File" in d:
218 | f.write(" File: " + d["File"] + "\n")
219 | if "User" in d:
220 | f.write(" User: " + d["User"] + "\n")
221 | if "Refine" in d:
222 | f.write(" Refine: " + str(d["Refine"]) + "\n")
223 | if "Degree" in d:
224 | f.write(" Degree: " + str(d["Degree"]) + "\n")
225 | if "Dimension" in d:
226 | f.write(" Dimension: " + str(d["Dimension"]) + "\n")
227 | if "Elements" in d:
228 | f.write(" Elements: " + str(d["Elements"]) + "\n")
229 | if "DoF" in d:
230 | f.write(" DoF: " + str(d["DoF"]) + "\n")
231 | if "NumEW" in d:
232 | f.write(" NumEW: " + str(d["NumEW"]) + "\n")
233 | f.write("\n")
234 | if "Area" in d:
235 | f.write(" Area: " + str(d["Area"]) + "\n")
236 | if "Volume" in d:
237 | f.write(" Volume: " + str(d["Volume"]) + "\n")
238 | if "BLength" in d:
239 | f.write(" BLength: " + str(d["BLength"]) + "\n")
240 | if "EulerChar" in d:
241 | f.write(" EulerChar: " + str(d["EulerChar"]) + "\n")
242 | f.write("\n")
243 | if "TimePre" in d:
244 | f.write(" Time(pre) : " + str(d["TimePre"]) + "\n")
245 | if "TimeCalcAB" in d:
246 | f.write(" Time(calcAB) : " + str(d["TimeCalcAB"]) + "\n")
247 | if "TimeCalcEW" in d:
248 | f.write(" Time(calcEW) : " + str(d["TimeCalcEW"]) + "\n")
249 | if "TimePre" in d and "TimeCalcAB" in d and "TimeCalcEW" in d:
250 | f.write(
251 | " Time(total ) : "
252 | + str(d["TimePre"] + d["TimeCalcAB"] + d["TimeCalcEW"])
253 | + "\n"
254 | )
255 | f.write("\n")
256 | f.write("Eigenvalues:\n")
257 | f.write(
258 | "{ " + " ; ".join(map(str, d["Eigenvalues"])) + " }\n"
259 | ) # consider precision
260 | f.write("\n")
261 | if "Eigenvectors" in d:
262 | f.write("Eigenvectors:\n")
263 | # f.write('sizes: '+' '.join(map(str,d['EigenvectorsSize']))+'\n')
264 | # better compute real sizes from eigenvector array?
265 | f.write("sizes: " + " ".join(map(str, d["Eigenvectors"].shape)) + "\n")
266 | f.write("\n")
267 | f.write("{ ")
268 | for i in range(np.shape(d["Eigenvectors"])[1] - 1):
269 | f.write("(")
270 | f.write(",".join(map(str, d["Eigenvectors"][:, i])))
271 | f.write(") ;\n")
272 | f.write("(")
273 | f.write(
274 | ",".join(
275 | map(
276 | str,
277 | d["Eigenvectors"][:, np.shape(d["Eigenvectors"])[1] - 1],
278 | )
279 | )
280 | )
281 | f.write(") }\n")
282 | # close file
283 | f.close()
284 | 
285 | 
286 | def write_vfunc(filename: str, vfunc: np.ndarray) -> None:
287 | """Save vertex function in PSOL txt file.
288 | 
289 | The first line contains "Solution:"; the values follow, comma-separated and
290 | enclosed in parentheses.
291 | 
292 | Parameters
293 | ----------
294 | filename : str
295 | Filename to save to.
296 | vfunc : np.ndarray
297 | Array of vfunc parameters, shape (n_vertices,).
298 | 
299 | Raises
300 | ------
301 | OSError
302 | If file is not writable.
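
Examples
--------
A minimal sketch with a placeholder output path:

>>> import numpy as np
>>> write_vfunc("solution.psol", np.zeros(10))  # doctest: +SKIP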
302 | """ 303 | try: 304 | f = open(filename, "w") 305 | except OSError: 306 | logger.error("File %s not writable", filename) 307 | raise 308 | f.write("Solution:\n") 309 | f.write("(" + ",".join(vfunc.astype(str)) + ")") 310 | f.close() 311 | -------------------------------------------------------------------------------- /lapy/utils/tests/test_polygon.py: -------------------------------------------------------------------------------- 1 | """Tests for the polygon module.""" 2 | 3 | import numpy as np 4 | import pytest 5 | 6 | from ... import Polygon 7 | 8 | 9 | class TestPolygonClass: 10 | """Test cases for the Polygon class.""" 11 | 12 | def test_init_2d(self): 13 | """Test initialization with 2D points.""" 14 | points = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]) 15 | poly = Polygon(points, closed=True) 16 | 17 | assert poly.is_2d(), "Should be 2D polygon" 18 | assert poly.is_closed(), "Should be closed" 19 | assert poly.n_points() == 4, "Should have 4 points" 20 | 21 | def test_init_3d(self): 22 | """Test initialization with 3D points.""" 23 | points = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0]]) 24 | poly = Polygon(points, closed=False) 25 | 26 | assert not poly.is_2d(), "Should be 3D polygon" 27 | assert not poly.is_closed(), "Should be open" 28 | assert poly.n_points() == 3, "Should have 3 points" 29 | 30 | def test_init_empty_raises(self): 31 | """Test that empty points raise ValueError.""" 32 | with pytest.raises(ValueError, match="empty"): 33 | Polygon(np.array([])) 34 | 35 | def test_init_invalid_dimensions_raises(self): 36 | """Test that invalid dimensions raise ValueError.""" 37 | with pytest.raises(ValueError, match="2 or 3 coordinates"): 38 | Polygon(np.array([[0.0, 1.0, 2.0, 3.0]])) 39 | 40 | def test_length_open(self): 41 | """Test length computation for open polygon.""" 42 | points = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]]) 43 | poly = Polygon(points, closed=False) 44 | length = poly.length() 45 | 46 | expected = 2.0 # 1.0 + 1.0 47 | assert np.isclose(length, expected), f"Expected {expected}, got {length}" 48 | 49 | def test_length_closed(self): 50 | """Test length computation for closed polygon (square).""" 51 | square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]) 52 | poly = Polygon(square, closed=True) 53 | length = poly.length() 54 | 55 | expected = 4.0 56 | assert np.isclose(length, expected), f"Expected {expected}, got {length}" 57 | 58 | def test_centroid_open(self): 59 | """Test centroid for open polygon.""" 60 | points = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]) 61 | poly = Polygon(points, closed=False) 62 | centroid = poly.centroid() 63 | 64 | expected = np.array([0.5, 0.5]) 65 | assert np.allclose(centroid, expected), f"Expected {expected}, got {centroid}" 66 | 67 | def test_centroid_closed_2d(self): 68 | """Test area-weighted centroid for closed 2D polygon.""" 69 | # Unit square centered at origin 70 | square = np.array([[-0.5, -0.5], [0.5, -0.5], [0.5, 0.5], [-0.5, 0.5]]) 71 | poly = Polygon(square, closed=True) 72 | centroid = poly.centroid() 73 | 74 | expected = np.array([0.0, 0.0]) 75 | assert np.allclose(centroid, expected, atol=1e-10), \ 76 | f"Expected {expected}, got {centroid}" 77 | 78 | def test_centroid_closed_3d(self): 79 | """Test centroid for closed 3D polygon (simple average).""" 80 | points = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0]]) 81 | poly = Polygon(points, closed=True) 82 | centroid = poly.centroid() 83 | 84 | expected = np.mean(points, axis=0) 85 | 
assert np.allclose(centroid, expected), f"Expected {expected}, got {centroid}" 86 | 87 | def test_area_closed_2d(self): 88 | """Test area computation for closed 2D polygon.""" 89 | square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]) 90 | poly = Polygon(square, closed=True) 91 | area = poly.area() 92 | 93 | expected = 1.0 94 | assert np.isclose(area, expected), f"Expected {expected}, got {area}" 95 | 96 | def test_area_open_raises(self): 97 | """Test that area computation raises for open polygon.""" 98 | points = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]]) 99 | poly = Polygon(points, closed=False) 100 | 101 | with pytest.raises(ValueError, match="closed polygon"): 102 | poly.area() 103 | 104 | def test_area_3d_raises(self): 105 | """Test that area computation raises for 3D polygon.""" 106 | points = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0]]) 107 | poly = Polygon(points, closed=True) 108 | 109 | with pytest.raises(ValueError, match="2D polygons"): 110 | poly.area() 111 | 112 | def test_resample_open(self): 113 | """Test resampling an open polygon.""" 114 | points = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]]) 115 | poly = Polygon(points, closed=False) 116 | resampled = poly.resample(n_points=10, inplace=False) 117 | 118 | assert resampled.n_points() == 10, "Should have 10 points" 119 | assert not resampled.is_closed(), "Should remain open" 120 | assert np.allclose(resampled.get_points()[0], [0.0, 0.0]), \ 121 | "First point should match" 122 | assert np.allclose(resampled.get_points()[-1], [1.0, 1.0]), \ 123 | "Last point should match" 124 | 125 | def test_resample_closed(self): 126 | """Test resampling a closed polygon.""" 127 | square = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]]) 128 | poly = Polygon(square, closed=True) 129 | resampled = poly.resample(n_points=12, inplace=False) 130 | 131 | assert resampled.n_points() == 12, "Should have 12 points" 132 | assert resampled.is_closed(), "Should remain closed" 133 | # Check perimeter 134 | length = resampled.length() 135 | assert np.isclose(length, 4.0, atol=1e-10), \ 136 | f"Perimeter should be 4.0, got {length}" 137 | 138 | def test_resample_inplace(self): 139 | """Test in-place resampling.""" 140 | points = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]]) 141 | poly = Polygon(points, closed=False) 142 | result = poly.resample(n_points=10, inplace=True) 143 | 144 | assert result is poly, "Should return self when inplace=True" 145 | assert poly.n_points() == 10, "Should have 10 points after in-place resample" 146 | 147 | def test_resample_iterative(self): 148 | """Test that iterative resampling improves uniformity.""" 149 | # Create path with non-uniform spacing 150 | points = np.array([[0.0, 0.0], [0.1, 0.0], [1.0, 0.0], [1.0, 1.0]]) 151 | poly = Polygon(points, closed=False) 152 | 153 | result1 = poly.resample(n_points=20, n_iter=1, inplace=False) 154 | result5 = poly.resample(n_points=20, n_iter=5, inplace=False) 155 | 156 | # Calculate spacing uniformity 157 | pts1 = result1.get_points() 158 | pts5 = result5.get_points() 159 | dists1 = np.sqrt(np.sum(np.diff(pts1, axis=0)**2, axis=1)) 160 | dists5 = np.sqrt(np.sum(np.diff(pts5, axis=0)**2, axis=1)) 161 | 162 | assert np.std(dists5) <= np.std(dists1), \ 163 | "More iterations should improve uniformity" 164 | 165 | def test_smooth_laplace_open(self): 166 | """Test Laplace smoothing on open polygon.""" 167 | # Create a jagged path 168 | points = np.array([[0.0, 0.0], [0.5, 0.5], [1.0, 0.0], [1.5, 0.5], [2.0, 0.0]]) 169 | poly = 
Polygon(points, closed=False) 170 | smoothed = poly.smooth_laplace(n=3, lambda_=0.5, inplace=False) 171 | 172 | assert smoothed.n_points() == poly.n_points(), \ 173 | "Should preserve number of points" 174 | # First and last points should remain unchanged for open polygon 175 | assert np.allclose(smoothed.get_points()[0], points[0]), \ 176 | "First point should remain unchanged (up to numerical precision)" 177 | assert np.allclose(smoothed.get_points()[-1], points[-1]), \ 178 | "Last point should remain unchanged (up to numerical precision)" 179 | 180 | def test_smooth_laplace_closed(self): 181 | """Test Laplace smoothing on closed polygon.""" 182 | # Create a slightly irregular square 183 | square = np.array([ 184 | [0.0, 0.0], [1.0, 0.1], [1.0, 1.0], [0.1, 1.0] 185 | ]) 186 | poly = Polygon(square, closed=True) 187 | smoothed = poly.smooth_laplace(n=5, lambda_=0.5, inplace=False) 188 | 189 | assert smoothed.n_points() == 4, "Should preserve number of points" 190 | assert smoothed.is_closed(), "Should remain closed" 191 | 192 | def test_smooth_laplace_inplace(self): 193 | """Test in-place Laplace smoothing.""" 194 | points = np.array([[0.0, 0.0], [0.5, 0.5], [1.0, 0.0]]) 195 | poly = Polygon(points, closed=False) 196 | original_points = poly.get_points().copy() 197 | result = poly.smooth_laplace(n=1, lambda_=0.5, inplace=True) 198 | 199 | assert result is poly, "Should return self when inplace=True" 200 | assert not np.allclose(poly.get_points(), original_points), \ 201 | "Points should be modified in-place" 202 | 203 | def test_smooth_taubin(self): 204 | """Test Taubin smoothing on polygon.""" 205 | # Create a jagged path 206 | points = np.array([[0.0, 0.0], [0.5, 0.5], [1.0, 0.0], [1.5, 0.5], [2.0, 0.0]]) 207 | poly = Polygon(points, closed=False) 208 | smoothed = poly.smooth_taubin(n=3, lambda_=0.5, mu=-0.53, inplace=False) 209 | 210 | assert smoothed.n_points() == poly.n_points(), \ 211 | "Should preserve number of points" 212 | # Taubin should preserve overall shape better than pure Laplace 213 | assert not np.allclose(smoothed.get_points(), points), \ 214 | "Points should be smoothed" 215 | 216 | def test_smooth_taubin_inplace(self): 217 | """Test in-place Taubin smoothing.""" 218 | points = np.array([[0.0, 0.0], [0.5, 0.5], [1.0, 0.0]]) 219 | poly = Polygon(points, closed=False) 220 | result = poly.smooth_taubin(n=1, inplace=True) 221 | 222 | assert result is poly, "Should return self when inplace=True" 223 | 224 | def test_smooth_two_point_open(self): 225 | """Test smoothing on two-point open polygon.""" 226 | points = np.array([[0.0, 0.0], [1.0, 1.0]]) 227 | poly = Polygon(points, closed=False) 228 | smoothed = poly.smooth_laplace(n=5, lambda_=0.5, inplace=False) 229 | 230 | # Both points should remain unchanged (boundary points are fixed) 231 | assert np.allclose(smoothed.get_points(), points), \ 232 | "Two-point open polygon should not change when smoothed" 233 | 234 | def test_smooth_two_point_closed(self): 235 | """Test smoothing on two-point closed polygon.""" 236 | points = np.array([[0.0, 0.0], [2.0, 2.0]]) 237 | poly = Polygon(points, closed=True) 238 | smoothed = poly.smooth_laplace(n=5, lambda_=0.5, inplace=False) 239 | 240 | # Points should converge to their midpoint 241 | midpoint = np.mean(points, axis=0) 242 | assert np.allclose(smoothed.get_points(), midpoint, atol=1e-10), \ 243 | "Two-point closed polygon should converge to midpoint" 244 | 245 | def test_smooth_three_point_open(self): 246 | """Test smoothing on three-point open polygon.""" 247 | points = 
np.array([[0.0, 0.0], [0.5, 2.0], [1.0, 0.0]]) 248 | poly = Polygon(points, closed=False) 249 | smoothed = poly.smooth_laplace(n=5, lambda_=0.5, inplace=False) 250 | 251 | # First and last points should remain fixed 252 | assert np.allclose(smoothed.get_points()[0], points[0]), \ 253 | "First point should remain fixed in open polygon" 254 | assert np.allclose(smoothed.get_points()[-1], points[-1]), \ 255 | "Last point should remain fixed in open polygon" 256 | # Middle point should be smoothed (moved toward average of neighbors) 257 | assert smoothed.get_points()[1, 1] < points[1, 1], \ 258 | "Middle point should be smoothed downward" 259 | 260 | 261 | if __name__ == "__main__": 262 | pytest.main([__file__, "-v"]) 263 | -------------------------------------------------------------------------------- /lapy/tet_mesh.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import TYPE_CHECKING, Optional, Union 3 | 4 | import numpy as np 5 | from scipy import sparse 6 | 7 | from . import _tet_io as io 8 | 9 | if TYPE_CHECKING: 10 | from .tria_mesh import TriaMesh 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | class TetMesh: 15 | """Class representing a tetrahedral mesh. 16 | 17 | This is an efficient implementation of a tetrahedral mesh data structure 18 | with core functionality using sparse matrices internally (Scipy). 19 | 20 | Parameters 21 | ---------- 22 | v : array_like 23 | List of lists of 3 float coordinates, shape (n_vertices, 3). 24 | t : array_like 25 | List of lists of 4 int of indices (>=0) into ``v`` array, 26 | shape (n_tetrahedra, 4). Ordering is important: so that t0, t1, t2 27 | are oriented counterclockwise when looking from above, and t3 is 28 | on top of that triangle. 29 | 30 | Attributes 31 | ---------- 32 | v : np.ndarray 33 | Vertex coordinates, shape (n_vertices, 3). 34 | t : np.ndarray 35 | Tetrahedron vertex indices, shape (n_tetrahedra, 4). 36 | adj_sym : scipy.sparse.csc_matrix 37 | Symmetric adjacency matrix as csc sparse matrix. 38 | 39 | Raises 40 | ------ 41 | ValueError 42 | If max index in t exceeds number of vertices. 43 | 44 | Notes 45 | ----- 46 | The class has static class methods to read tetrahedra meshes from 47 | `GMSH `_ 48 | and `VTK `_ files. 49 | """ 50 | 51 | def __init__(self, v, t): 52 | self.v = np.array(v) 53 | self.t = np.array(t) 54 | vnum = self.v.shape[0] 55 | if np.max(self.t) >= vnum: 56 | raise ValueError("Max index exceeds number of vertices") 57 | # put more checks here (e.g. the dim 3 conditions on columns) 58 | # self.orient_() 59 | self.adj_sym = self.construct_adj_sym() 60 | 61 | @classmethod 62 | def read_gmsh(cls, filename: str) -> "TetMesh": 63 | """Load GMSH tetrahedron mesh. 64 | 65 | Parameters 66 | ---------- 67 | filename : str 68 | Filename to load. 69 | 70 | Returns 71 | ------- 72 | TetMesh 73 | Object of loaded GMSH tetrahedron mesh. 74 | """ 75 | return io.read_gmsh(filename) 76 | 77 | @classmethod 78 | def read_vtk(cls, filename: str) -> "TetMesh": 79 | """Load VTK tetrahedron mesh. 80 | 81 | Parameters 82 | ---------- 83 | filename : str 84 | Filename to load. 85 | 86 | Returns 87 | ------- 88 | TetMesh 89 | Object of loaded VTK tetrahedron mesh. 90 | """ 91 | return io.read_vtk(filename) 92 | 93 | def write_vtk(self, filename: str) -> None: 94 | """Save as VTK file. 95 | 96 | Parameters 97 | ---------- 98 | filename : str 99 | Filename to save to. 
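
Examples
--------
A minimal sketch; the file names are placeholders:

>>> T = TetMesh.read_vtk("cube.vtk")  # doctest: +SKIP
>>> T.write_vtk("cube_out.vtk")  # doctest: +SKIP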
100 | """ 101 | io.write_vtk(self, filename) 102 | 103 | def construct_adj_sym(self) -> sparse.csc_matrix: 104 | """Create adjacency symmetric matrix. 105 | 106 | The adjacency matrix will be symmetric. Each inner edge will get the 107 | number of tetrahedra that contain this edge. Inner edges are usually 108 | 3 or larger, boundary edges are 2 or 1. Works on tetras only. 109 | 110 | Returns 111 | ------- 112 | scipy.sparse.csc_matrix 113 | Symmetric adjacency matrix as csc sparse matrix. 114 | """ 115 | t1 = self.t[:, 0] 116 | t2 = self.t[:, 1] 117 | t3 = self.t[:, 2] 118 | t4 = self.t[:, 3] 119 | i = np.hstack((t1, t2, t2, t3, t3, t1, t1, t2, t3, t4, t4, t4)) 120 | j = np.hstack((t2, t1, t3, t2, t1, t3, t4, t4, t4, t1, t2, t3)) 121 | adj = sparse.csc_matrix((np.ones(i.shape, dtype=int), (i, j))) 122 | return adj 123 | 124 | def has_free_vertices(self) -> bool: 125 | """Check if the vertex list has more vertices than what is used in tetra. 126 | 127 | (same implementation as in `~lapy.TriaMesh`) 128 | 129 | Returns 130 | ------- 131 | bool 132 | Whether vertex list has more vertices than tetrahedra use or not. 133 | """ 134 | vnum = len(self.v) 135 | vnumt = len(np.unique(self.t.reshape(-1))) 136 | return vnum != vnumt 137 | 138 | def is_oriented(self) -> bool: 139 | """Check if tet mesh is oriented. 140 | 141 | True if all tetrahedra are oriented so that v0, v1, v2 are oriented 142 | counterclockwise when looking from above, and v3 is on top of that 143 | triangle. 144 | 145 | Returns 146 | ------- 147 | bool 148 | True if all tet volumes are positive, False if some or all are 149 | negative. 150 | 151 | Raises 152 | ------ 153 | ValueError 154 | If degenerate (zero-volume) tets are found. 155 | """ 156 | # Compute vertex coordinates and a difference vector for each triangle: 157 | t0 = self.t[:, 0] 158 | t1 = self.t[:, 1] 159 | t2 = self.t[:, 2] 160 | t3 = self.t[:, 3] 161 | v0 = self.v[t0, :] 162 | v1 = self.v[t1, :] 163 | v2 = self.v[t2, :] 164 | v3 = self.v[t3, :] 165 | e0 = v1 - v0 166 | e2 = v2 - v0 167 | e3 = v3 - v0 168 | # Compute cross product and 6 * vol for each triangle: 169 | cr = np.cross(e0, e2) 170 | vol = np.sum(e3 * cr, axis=1) 171 | if np.any(vol == 0): 172 | raise ValueError("Degenerate (zero-volume) tetrahedra detected") 173 | if np.max(vol) < 0.0: 174 | #print("All tet orientations are flipped") 175 | return False 176 | elif np.min(vol) > 0.0: 177 | #print("All tet orientations are correct") 178 | return True 179 | #print("Orientations are not uniform") 180 | return False 181 | 182 | def avg_edge_length(self) -> float: 183 | """Get average edge lengths in tet mesh. 184 | 185 | Returns 186 | ------- 187 | float 188 | Average edge length. 189 | """ 190 | # get only upper off-diag elements from symmetric adj matrix 191 | triadj = sparse.triu(self.adj_sym, 1, format="coo") 192 | edgelens = np.sqrt( 193 | ((self.v[triadj.row, :] - self.v[triadj.col, :]) ** 2).sum(1) 194 | ) 195 | return edgelens.mean() 196 | 197 | def boundary_tria( 198 | self, tetfunc: Optional[np.ndarray] = None 199 | ) -> Union["TriaMesh", tuple["TriaMesh", np.ndarray]]: 200 | """Get boundary triangle mesh of tetrahedra. 201 | 202 | It can have multiple connected components. Tria will have same vertices 203 | (including free vertices), so that the tria indices agree with the 204 | tet-mesh, in case we want to transfer information back, e.g. a FEM 205 | boundary condition, or to access a TetMesh vertex function with 206 | TriaMesh.t indices. 207 | 208 | .. 
warning::
209 | 
210 |             The returned triangle mesh may not be consistently oriented;
211 |             until this is fixed, call ``tria.orient_()`` on the result.
212 | 
213 |         Parameters
214 |         ----------
215 |         tetfunc : np.ndarray or None, default=None
216 |             List of tetra function values, shape (n_tetrahedra,). Optional.
217 | 
218 |         Returns
219 |         -------
220 |         TriaMesh
221 |             TriaMesh of boundary (potentially >1 components).
222 |         triafunc : np.ndarray
223 |             List of tria function values, shape (n_boundary_triangles,).
224 |             Only returned if ``tetfunc`` is provided.
225 |         """
226 |         from . import TriaMesh
227 | 
228 |         # get all triangles
229 |         allt = np.vstack(
230 |             (
231 |                 self.t[:, np.array([3, 1, 2])],
232 |                 self.t[:, np.array([2, 0, 3])],
233 |                 self.t[:, np.array([1, 3, 0])],
234 |                 self.t[:, np.array([0, 2, 1])],
235 |             )
236 |         )
237 |         # sort rows so that each face's vertex indices are in ascending order
238 |         allts = np.sort(allt, axis=1)
239 |         # find unique trias without a neighbor
240 |         tria, indices, count = np.unique(
241 |             allts, axis=0, return_index=True, return_counts=True
242 |         )
243 |         tria = allt[indices[count == 1]]
244 |         logger.info("Found %d triangles on boundary.", np.size(tria, 0))
245 |         # if we have a tetra function, map it to the boundary triangles
246 |         if tetfunc is not None:
247 |             alltidx = np.tile(np.arange(self.t.shape[0]), 4)
248 |             tidx = alltidx[indices[count == 1]]
249 |             triafunc = tetfunc[tidx]
250 |             return TriaMesh(self.v, tria), triafunc
251 |         return TriaMesh(self.v, tria)
252 | 
253 |     def rm_free_vertices_(self) -> tuple[np.ndarray, np.ndarray]:
254 |         """Remove unused (free) vertices from v and t.
255 | 
256 |         These are vertices that are not used in any tetrahedron. They can
257 |         produce problems when constructing, e.g., Laplace matrices.
258 | 
259 |         Will update v and t in the mesh.
260 |         Same implementation as in `~lapy.TriaMesh`.
261 | 
262 |         Returns
263 |         -------
264 |         vkeep : np.ndarray
265 |             Indices (from original list) of kept vertices.
266 |         vdel : np.ndarray
267 |             Indices of deleted (unused) vertices.
268 | 
269 |         Raises
270 |         ------
271 |         ValueError
272 |             If max index in t exceeds number of vertices.
273 |         """
274 |         tflat = self.t.reshape(-1)
275 |         vnum = len(self.v)
276 |         if np.max(tflat) >= vnum:
277 |             raise ValueError("Max index exceeds number of vertices")
278 |         # determine which vertices to keep
279 |         vkeep = np.full(vnum, False, dtype=bool)
280 |         vkeep[tflat] = True
281 |         # list of deleted vertices (old indices)
282 |         vdel = np.nonzero(~vkeep)[0]
283 |         # if nothing to delete return
284 |         if len(vdel) == 0:
285 |             return np.arange(vnum), np.array([], dtype=int)
286 |         # delete unused vertices
287 |         vnew = self.v[vkeep, :]
288 |         # create lookup table mapping old vertex indices to new ones
289 |         tlookup = np.cumsum(vkeep) - 1
290 |         # reindex tetrahedra
291 |         tnew = tlookup[self.t]
292 |         # convert vkeep to index list
293 |         vkeep = np.nonzero(vkeep)[0]
294 |         self.v = vnew
295 |         self.t = tnew
296 |         return vkeep, vdel
297 | 
298 |     def orient_(self) -> int:
299 |         """Ensure that tet mesh is oriented.
300 | 
301 |         Re-orient tetras so that v0, v1, v2 are oriented counterclockwise when
302 |         looking from above, and v3 is on top of that triangle.
303 | 
304 |         Returns
305 |         -------
306 |         int
307 |             Number of re-oriented tetras.
308 | 
309 |         Raises
310 |         ------
311 |         ValueError
312 |             If degenerate (zero-volume) tetrahedra are detected.
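
        Examples
        --------
        A minimal sketch with one deliberately flipped tetrahedron::

            >>> from lapy import TetMesh
            >>> v = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]]
            >>> mesh = TetMesh(v, [[0, 2, 1, 3]])  # negative volume
            >>> int(mesh.orient_())
            1
            >>> bool(mesh.is_oriented())
            True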
313 |         """
314 |         # Compute vertex coordinates and a difference vector for each tetra:
315 |         t0 = self.t[:, 0]
316 |         t1 = self.t[:, 1]
317 |         t2 = self.t[:, 2]
318 |         t3 = self.t[:, 3]
319 |         v0 = self.v[t0, :]
320 |         v1 = self.v[t1, :]
321 |         v2 = self.v[t2, :]
322 |         v3 = self.v[t3, :]
323 |         e0 = v1 - v0
324 |         e2 = v2 - v0
325 |         e3 = v3 - v0
326 |         # Compute cross product and 6 * vol for each tetra:
327 |         cr = np.cross(e0, e2)
328 |         vol = np.sum(e3 * cr, axis=1)
329 |         if np.any(vol == 0):
330 |             raise ValueError("Degenerate (zero-volume) tetrahedra detected")
331 |         negtet = vol < 0.0
332 |         negnum = np.sum(negtet)
333 |         if negnum == 0:
334 |             logger.info("Mesh is oriented, nothing to do")
335 |             return 0
336 |         tnew = self.t.copy()
337 |         temp = tnew[negtet, 1].copy()
338 |         tnew[negtet, 1] = tnew[negtet, 2]
339 |         tnew[negtet, 2] = temp
340 |         self.t = tnew
341 |         self.adj_sym = self.construct_adj_sym()
342 |         logger.info("Flipped %d tetrahedra", negnum)
343 |         #self.__init__(self.v, tnew)
344 |         return negnum
345 | 
--------------------------------------------------------------------------------
/examples/Test_TetMesh_Geodesics.ipynb:
--------------------------------------------------------------------------------
1 | {
2 |  "cells": [
3 |   {
4 |    "cell_type": "markdown",
5 |    "metadata": {},
6 |    "source": [
7 |     "# TetMesh Geodesics"
8 |    ]
9 |   },
10 |   {
11 |    "cell_type": "code",
12 |    "execution_count": 1,
13 |    "metadata": {},
14 |    "outputs": [
15 |     {
16 |      "data": {
17 |       "text/html": [
18 |        " \n",
33 |        " "
34 |       ]
35 |      },
36 |      "metadata": {},
37 |      "output_type": "display_data"
38 |     }
39 |    ],
40 |    "source": [
41 |     "import numpy as np\n",
42 |     "\n",
43 |     "# import plotly\n",
44 |     "# plotly.offline.init_notebook_mode(connected=True)\n",
45 |     "import plotly.io as pio\n",
46 |     "\n",
47 |     "from lapy import TetMesh\n",
48 |     "from lapy.plot import plot_tet_mesh\n",
49 |     "\n",
50 |     "pio.renderers.default = \"sphinx_gallery\""
51 |    ]
52 |   },
53 |   {
54 |    "cell_type": "markdown",
55 |    "metadata": {},
56 |    "source": [
57 |     "First we need a TetMesh, so let's open a cube with 48K tetrahedra and make sure it is oriented consistently."
58 |    ]
59 |   },
60 |   {
61 |    "cell_type": "code",
62 |    "execution_count": 2,
63 |    "metadata": {},
64 |    "outputs": [
65 |     {
66 |      "name": "stdout",
67 |      "output_type": "stream",
68 |      "text": [
69 |       "--> VTK format ... \n",
70 |       " --> DONE ( V: 9261 , T: 48000 )\n",
71 |       "\n",
72 |       "Flipped 24000 tetrahedra\n"
73 |      ]
74 |     },
75 |     {
76 |      "data": {
77 |       "text/plain": [
78 |        "24000"
79 |       ]
80 |      },
81 |      "execution_count": 2,
82 |      "metadata": {},
83 |      "output_type": "execute_result"
84 |     }
85 |    ],
86 |    "source": [
87 |     "T = TetMesh.read_vtk(\"../data/cubeTetra.vtk\")\n",
88 |     "# T.is_oriented()\n",
89 |     "T.orient_()"
90 |    ]
91 |   },
92 |   {
93 |    "cell_type": "markdown",
94 |    "metadata": {},
95 |    "source": [
96 |     "## Laplace"
97 |    ]
98 |   },
99 |   {
100 |    "cell_type": "markdown",
101 |    "metadata": {},
102 |    "source": [
103 |     "Next we solve the Laplace eigenvalue problem to get 10 eigenvalues and eigenvectors (eigenfunctions)."
104 |    ]
105 |   },
106 |   {
107 |    "cell_type": "code",
108 |    "execution_count": 3,
109 |    "metadata": {},
110 |    "outputs": [
111 |     {
112 |      "name": "stdout",
113 |      "output_type": "stream",
114 |      "text": [
115 |       "TetMesh with regular Laplace\n",
116 |       "Solver: spsolve (LU decomposition) ...\n"
117 |      ]
118 |     }
119 |    ],
120 |    "source": [
121 |     "from lapy import Solver\n",
122 |     "\n",
123 |     "fem = Solver(T, lump=True)\n",
124 |     "\n",
125 |     "evals, evec = fem.eigs(10)"
126 |    ]
127 |   },
128 |   {
129 |    "cell_type": "markdown",
130 |    "metadata": {},
131 |    "source": [
132 |     "To better see the first non-constant function in the interior as well, we slice the cube at x<0.5."
133 |    ]
134 |   },
135 |   {
136 |    "cell_type": "code",
137 |    "execution_count": 4,
138 |    "metadata": {},
139 |    "outputs": [],
140 |    "source": [
141 |     "# also get A,B (lumped), and inverse of B (easy as it is diagonal)\n",
142 |     "A, B = fem.stiffness, fem.mass\n",
143 |     "Bi = B.copy()\n",
144 |     "Bi.data **= -1"
145 |    ]
146 |   },
147 |   {
148 |    "cell_type": "code",
149 |    "execution_count": 5,
150 |    "metadata": {
151 |     "scrolled": true
152 |    },
153 |    "outputs": [],
154 |    "source": [
155 |     "evnum = 1\n",
156 |     "cutting = [\"x<0.5\"]\n",
157 |     "# also here we comment all plots to reduce file size\n",
158 |     "# uncomment and take a look\n",
159 |     "plot_tet_mesh(\n",
160 |     "    T,\n",
161 |     "    vfunc=evals[evnum] * evec[:, evnum],\n",
162 |     "    plot_edges=None,\n",
163 |     "    plot_levels=False,\n",
164 |     "    cutting=cutting,\n",
165 |     "    edge_color=\"rgb(50,50,50)\",\n",
166 |     "    html_output=False,\n",
167 |     "    flatshading=True,\n",
168 |     ")"
169 |    ]
170 |   },
171 |   {
172 |    "cell_type": "markdown",
173 |    "metadata": {},
174 |    "source": [
175 |     "Similar to the triangle case, computing the negative divergence of the gradient of an eigenfunction (and multiplying with inv(B)) yields a scaled version of that function."
176 |    ]
177 |   },
178 |   {
179 |    "cell_type": "code",
180 |    "execution_count": 6,
181 |    "metadata": {},
182 |    "outputs": [],
183 |    "source": [
184 |     "from lapy.diffgeo import compute_divergence, compute_gradient\n",
185 |     "\n",
186 |     "grad = compute_gradient(T, evec[:, evnum])\n",
187 |     "divx = -compute_divergence(T, grad)\n",
188 |     "vfunc = Bi * divx"
189 |    ]
190 |   },
191 |   {
192 |    "cell_type": "code",
193 |    "execution_count": 7,
194 |    "metadata": {},
195 |    "outputs": [],
196 |    "source": [
197 |     "cutting = [\"x<0.5\"]\n",
198 |     "plot_tet_mesh(\n",
199 |     "    T,\n",
200 |     "    vfunc=vfunc,\n",
201 |     "    plot_edges=None,\n",
202 |     "    plot_levels=False,\n",
203 |     "    cutting=cutting,\n",
204 |     "    edge_color=\"rgb(50,50,50)\",\n",
205 |     "    html_output=False,\n",
206 |     "    flatshading=True,\n",
207 |     ")"
208 |    ]
209 |   },
210 |   {
211 |    "cell_type": "markdown",
212 |    "metadata": {},
213 |    "source": [
214 |     "In fact, it is scaled by the eigenvalue."
215 |    ]
216 |   },
217 |   {
218 |    "cell_type": "code",
219 |    "execution_count": 8,
220 |    "metadata": {},
221 |    "outputs": [
222 |     {
223 |      "data": {
224 |       "text/plain": [
225 |        "0.0059814453"
226 |       ]
227 |      },
228 |      "execution_count": 8,
229 |      "metadata": {},
230 |      "output_type": "execute_result"
231 |     }
232 |    ],
233 |    "source": [
234 |     "np.max(np.abs(vfunc - (evals[evnum] * evec[:, evnum])))"
235 |    ]
236 |   },
237 |   {
238 |    "cell_type": "markdown",
239 |    "metadata": {},
240 |    "source": [
241 |     "## Geodesics"
242 |    ]
243 |   },
244 |   {
245 |    "cell_type": "markdown",
246 |    "metadata": {},
247 |    "source": [
248 |     "Now we run a heat diffusion, applying initial heat to the boundary of the cube."
249 | ] 250 | }, 251 | { 252 | "cell_type": "code", 253 | "execution_count": 9, 254 | "metadata": {}, 255 | "outputs": [ 256 | { 257 | "name": "stdout", 258 | "output_type": "stream", 259 | "text": [ 260 | "Found 4800 triangles on boundary.\n", 261 | "TetMesh with regular Laplace\n", 262 | "Matrix Format now: csc\n", 263 | "Solver: spsolve (LU decomposition) ...\n" 264 | ] 265 | } 266 | ], 267 | "source": [ 268 | "from lapy import heat\n", 269 | "\n", 270 | "tria = T.boundary_tria()\n", 271 | "bvert = np.unique(tria.t)\n", 272 | "\n", 273 | "u = heat.diffusion(T, bvert, m=1)\n", 274 | "cutting = [\"x<0.5\"]\n", 275 | "plot_tet_mesh(\n", 276 | " T,\n", 277 | " vfunc=u,\n", 278 | " plot_edges=None,\n", 279 | " plot_levels=True,\n", 280 | " cutting=cutting,\n", 281 | " edge_color=\"rgb(50,50,50)\",\n", 282 | " html_output=False,\n", 283 | " flatshading=True,\n", 284 | ")" 285 | ] 286 | }, 287 | { 288 | "cell_type": "markdown", 289 | "metadata": {}, 290 | "source": [ 291 | "You can see that we get level sets that are not evenly spaced and dense along the boundary. Next we compute the gradient of this heat diffusion, normalize it, and compute the divergence of this normalized gradient." 292 | ] 293 | }, 294 | { 295 | "cell_type": "code", 296 | "execution_count": 10, 297 | "metadata": {}, 298 | "outputs": [], 299 | "source": [ 300 | "# get gradients\n", 301 | "tfunc = compute_gradient(T, u)\n", 302 | "# flip and normalize\n", 303 | "X = -tfunc / np.sqrt((tfunc**2).sum(1))[:, np.newaxis]\n", 304 | "X = np.nan_to_num(X)\n", 305 | "# compute divergence\n", 306 | "divx = compute_divergence(T, X)" 307 | ] 308 | }, 309 | { 310 | "cell_type": "markdown", 311 | "metadata": {}, 312 | "source": [ 313 | "Finally, we need to solve a Poisson equation to obtain a function that has these normalized gradients (and remove the remaining shift)." 
314 | ] 315 | }, 316 | { 317 | "cell_type": "code", 318 | "execution_count": 11, 319 | "metadata": {}, 320 | "outputs": [ 321 | { 322 | "name": "stdout", 323 | "output_type": "stream", 324 | "text": [ 325 | "Matrix Format now: csc\n", 326 | "Solver: cholesky decomp - performance optimal ...\n" 327 | ] 328 | } 329 | ], 330 | "source": [ 331 | "# compute distance\n", 332 | "from scipy.sparse.linalg import splu\n", 333 | "\n", 334 | "useCholmod = True\n", 335 | "try:\n", 336 | " from sksparse.cholmod import cholesky\n", 337 | "except ImportError:\n", 338 | " useCholmod = False\n", 339 | "\n", 340 | "A, B = fem.stiffness, fem.mass # computed above when creating Solver\n", 341 | "\n", 342 | "H = A\n", 343 | "b0 = -divx\n", 344 | "\n", 345 | "# solve H x = b0\n", 346 | "print(\"Matrix Format now: \" + H.getformat())\n", 347 | "if useCholmod:\n", 348 | " print(\"Solver: cholesky decomp - performance optimal ...\")\n", 349 | " chol = cholesky(H)\n", 350 | " x = chol(b0)\n", 351 | "else:\n", 352 | " print(\"Solver: spsolve (LU decomp) - performance not optimal ...\")\n", 353 | " lu = splu(H)\n", 354 | " x = lu.solve(b0)\n", 355 | "\n", 356 | "x = x - np.min(x)" 357 | ] 358 | }, 359 | { 360 | "cell_type": "code", 361 | "execution_count": 12, 362 | "metadata": {}, 363 | "outputs": [ 364 | { 365 | "data": { 366 | "text/plain": [ 367 | "(0.6993174268615026, 0.8660254037844386)" 368 | ] 369 | }, 370 | "execution_count": 12, 371 | "metadata": {}, 372 | "output_type": "execute_result" 373 | } 374 | ], 375 | "source": [ 376 | "cutting = [\"x<0.5\"]\n", 377 | "plot_tet_mesh(\n", 378 | " T,\n", 379 | " vfunc=x,\n", 380 | " plot_edges=None,\n", 381 | " plot_levels=True,\n", 382 | " cutting=cutting,\n", 383 | " edge_color=\"rgb(50,50,50)\",\n", 384 | " html_output=False,\n", 385 | " flatshading=True,\n", 386 | ")\n", 387 | "max(x), 0.5 * np.sqrt(3.0)" 388 | ] 389 | }, 390 | { 391 | "cell_type": "markdown", 392 | "metadata": {}, 393 | "source": [ 394 | "This results in equally spaced level sets. Instead of solving this manually, we can get the same by simply computing the heat diffusion and the distance function directly." 395 | ] 396 | }, 397 | { 398 | "cell_type": "code", 399 | "execution_count": 13, 400 | "metadata": {}, 401 | "outputs": [ 402 | { 403 | "name": "stdout", 404 | "output_type": "stream", 405 | "text": [ 406 | "Found 4800 triangles on boundary.\n", 407 | "TetMesh with regular Laplace\n", 408 | "Matrix Format now: csc\n", 409 | "Solver: spsolve (LU decomposition) ...\n", 410 | "TetMesh with regular Laplace\n", 411 | "Matrix Format now: csc\n", 412 | "Solver: spsolve (LU decomposition) ...\n" 413 | ] 414 | } 415 | ], 416 | "source": [ 417 | "from lapy import heat\n", 418 | "from lapy.diffgeo import compute_geodesic_f\n", 419 | "\n", 420 | "tria = T.boundary_tria()\n", 421 | "bvert = np.unique(tria.t)\n", 422 | "\n", 423 | "# get heat diffusion\n", 424 | "u = heat.diffusion(T, bvert, m=1)\n", 425 | "\n", 426 | "gu = compute_geodesic_f(T, u)\n", 427 | "\n", 428 | "cutting = [\"x<0.5\"]\n", 429 | "plot_tet_mesh(\n", 430 | " T,\n", 431 | " vfunc=gu,\n", 432 | " plot_edges=None,\n", 433 | " plot_levels=True,\n", 434 | " cutting=cutting,\n", 435 | " edge_color=\"rgb(50,50,50)\",\n", 436 | " html_output=False,\n", 437 | " flatshading=True,\n", 438 | ")" 439 | ] 440 | }, 441 | { 442 | "cell_type": "markdown", 443 | "metadata": {}, 444 | "source": [ 445 | "Finally, we want to explore the gradient and divergence functions a little more. 
Here we construct the gradient of a function that computes the squared distance to each vertex (x^2+y^2+z^2). As the color of each tetrahedron we set the z component of the gradient, which should be 2z (or you could try another quantity, such as the gradient length)."
446 |    ]
447 |   },
448 |   {
449 |    "cell_type": "code",
450 |    "execution_count": 14,
451 |    "metadata": {},
452 |    "outputs": [],
453 |    "source": [
454 |     "# test function is squared distance to each vertex\n",
455 |     "v1func = T.v[:, 0] * T.v[:, 0] + T.v[:, 1] * T.v[:, 1] + T.v[:, 2] * T.v[:, 2]\n",
456 |     "\n",
457 |     "grad = compute_gradient(T, v1func)\n",
458 |     "# glength = np.sqrt(np.sum(grad * grad, axis=1))\n",
459 |     "# fcols=glength\n",
460 |     "fcols = grad[:, 2]\n",
461 |     "# cutting = ['x<0.5']\n",
462 |     "cutting = None\n",
463 |     "plot_tet_mesh(\n",
464 |     "    T,\n",
465 |     "    vfunc=None,\n",
466 |     "    tfunc=fcols,\n",
467 |     "    plot_edges=None,\n",
468 |     "    plot_levels=False,\n",
469 |     "    cutting=cutting,\n",
470 |     "    edge_color=\"rgb(50,50,50)\",\n",
471 |     "    html_output=False,\n",
472 |     ")"
473 |    ]
474 |   },
475 |   {
476 |    "cell_type": "markdown",
477 |    "metadata": {},
478 |    "source": [
479 |     "Now let's look at the divergence. While the gradient is constant on each tetrahedron, the divergence is again a scalar function on the vertices, summing up the partial derivatives of the gradient components. In our case it should be 2+2+2=6."
480 |    ]
481 |   },
482 |   {
483 |    "cell_type": "code",
484 |    "execution_count": 15,
485 |    "metadata": {},
486 |    "outputs": [],
487 |    "source": [
488 |     "divx = compute_divergence(T, grad)\n",
489 |     "divx2 = Bi * divx\n",
490 |     "cutting = [\"z<0.5\"]\n",
491 |     "plot_tet_mesh(\n",
492 |     "    T,\n",
493 |     "    vfunc=divx2,\n",
494 |     "    plot_edges=True,\n",
495 |     "    plot_levels=False,\n",
496 |     "    cutting=cutting,\n",
497 |     "    edge_color=\"rgb(50,50,50)\",\n",
498 |     "    html_output=False,\n",
499 |     "    flatshading=True,\n",
500 |     "    caxis=[0, 8],\n",
501 |     ")"
502 |    ]
503 |   },
504 |   {
505 |    "cell_type": "code",
506 |    "execution_count": 16,
507 |    "metadata": {},
508 |    "outputs": [
509 |     {
510 |      "data": {
511 |       "text/plain": [
512 |        "array([5.9999948, 6.0000215, 6.0000215, 5.999988 , 6.000053 , 5.999975 ,\n",
513 |        "       5.9999676, 6.000024 , 6.000013 , 6.000008 ], dtype=float32)"
514 |       ]
515 |      },
516 |      "execution_count": 16,
517 |      "metadata": {},
518 |      "output_type": "execute_result"
519 |     }
520 |    ],
521 |    "source": [
522 |     "divx2[5000:5010]"
523 |    ]
524 |   }
525 |  ],
526 |  "metadata": {
527 |   "kernelspec": {
528 |    "display_name": "Python3",
529 |    "language": "python",
530 |    "name": "python3"
531 |   },
532 |   "language_info": {
533 |    "codemirror_mode": {
534 |     "name": "ipython",
535 |     "version": 3
536 |    },
537 |    "file_extension": ".py",
538 |    "mimetype": "text/x-python",
539 |    "name": "python",
540 |    "nbconvert_exporter": "python",
541 |    "pygments_lexer": "ipython3",
542 |    "version": "3"
543 |   },
544 |   "nbsphinx": {
545 |    "execute": "always"
546 |   }
547 |  },
548 |  "nbformat": 4,
549 |  "nbformat_minor": 4
550 | }
551 | 
--------------------------------------------------------------------------------
/lapy/polygon.py:
--------------------------------------------------------------------------------
1 | """Polygon class for open and closed polygon paths.
2 | 
3 | This module provides a Polygon class for processing 2D and 3D polygon paths with
4 | various geometric operations including resampling, smoothing, and metric computations.
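
Example (a minimal sketch of a typical workflow)::

    >>> import numpy as np
    >>> from lapy.polygon import Polygon
    >>> square = Polygon(np.array([[0, 0], [1, 0], [1, 1], [0, 1]]), closed=True)
    >>> resampled = square.resample(n_points=16, n_iter=3)
    >>> smoothed = resampled.smooth_taubin(n=5)
    >>> smoothed.n_points()
    16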
5 | """ 6 | import logging 7 | import sys 8 | 9 | import numpy as np 10 | from scipy import sparse 11 | 12 | logger = logging.getLogger(__name__) 13 | 14 | class Polygon: 15 | """Class representing a polygon path (open or closed). 16 | 17 | This class handles 2D and 3D polygon paths with operations for resampling, 18 | smoothing, and computing geometric properties like length, centroid, and area. 19 | 20 | Parameters 21 | ---------- 22 | points : np.ndarray 23 | Array of shape (n, d) containing coordinates of polygon vertices 24 | in order, where d is 2 or 3 for 2D (x, y) or 3D (x, y, z) paths. 25 | For closed polygons, the last point should not duplicate the first point. 26 | closed : bool, default=False 27 | If True, treats the path as a closed polygon. If False, treats it as 28 | an open polyline. 29 | 30 | Attributes 31 | ---------- 32 | points : np.ndarray 33 | Polygon vertex coordinates, shape (n_points, d). 34 | closed : bool 35 | Whether the polygon is closed or open. 36 | _is_2d : bool 37 | Internal flag indicating if polygon is 2D (True) or 3D (False). 38 | 39 | Raises 40 | ------ 41 | ValueError 42 | If points array is empty. 43 | If points don't have 2 or 3 coordinates. 44 | 45 | Examples 46 | -------- 47 | >>> import numpy as np 48 | >>> from lapy.polygon import Polygon 49 | >>> # Create a 2D closed polygon (square) 50 | >>> square = np.array([[0, 0], [1, 0], [1, 1], [0, 1]]) 51 | >>> poly = Polygon(square, closed=True) 52 | >>> poly.is_2d() 53 | True 54 | >>> poly.length() 55 | 4.0 56 | >>> # Create a 3D open path 57 | >>> path_3d = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1]]) 58 | >>> poly3d = Polygon(path_3d, closed=False) 59 | >>> poly3d.is_2d() 60 | False 61 | """ 62 | 63 | def __init__(self, points: np.ndarray, closed: bool = False): 64 | self.points = np.array(points) 65 | self.closed = closed 66 | 67 | # Validate non-empty polygon 68 | if self.points.size == 0: 69 | raise ValueError("Polygon has no points (empty)") 70 | 71 | # Ensure points array is 2-dimensional 72 | if self.points.ndim != 2: 73 | raise ValueError("Points array must be 2-dimensional") 74 | 75 | n_rows, n_cols = self.points.shape 76 | 77 | # Support both (n_points, dim) and (dim, n_points) where dim is 2 or 3. 78 | # Only transpose when it is unambiguous that the first dimension is dim. 79 | if n_cols not in (2, 3) and n_rows in (2, 3): 80 | logger.warning( 81 | "Transposing points array from shape %s to %s; expected shape (n_points, dim).", 82 | self.points.shape, 83 | self.points.T.shape, 84 | ) 85 | self.points = self.points.T 86 | n_rows, n_cols = self.points.shape 87 | 88 | # Support both 2D and 3D points 89 | if n_cols == 2: 90 | self._is_2d = True 91 | elif n_cols == 3: 92 | self._is_2d = False 93 | else: 94 | raise ValueError("Points should have 2 or 3 coordinates") 95 | 96 | def is_2d(self) -> bool: 97 | """Check if the polygon is 2D. 98 | 99 | Returns 100 | ------- 101 | bool 102 | True if polygon is 2D, False if 3D. 103 | """ 104 | return self._is_2d 105 | 106 | def is_closed(self) -> bool: 107 | """Check if the polygon is closed. 108 | 109 | Returns 110 | ------- 111 | bool 112 | True if polygon is closed, False if open. 113 | """ 114 | return self.closed 115 | 116 | def n_points(self) -> int: 117 | """Get number of points in polygon. 118 | 119 | Returns 120 | ------- 121 | int 122 | Number of points. 123 | """ 124 | return self.points.shape[0] 125 | 126 | def get_points(self) -> np.ndarray: 127 | """Get polygon points. 
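
        Note that this returns the internal array itself (not a copy), so
        modifying the returned array in place will modify the polygon.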
128 | 129 | Returns 130 | ------- 131 | np.ndarray 132 | Point array of shape (n, 2) or (n, 3). 133 | """ 134 | return self.points 135 | 136 | def length(self) -> float: 137 | """Compute total length of polygon path. 138 | 139 | For closed polygons, includes the segment from last to first point. 140 | 141 | Returns 142 | ------- 143 | float 144 | Total path length. 145 | """ 146 | if self.closed: 147 | points_closed = np.vstack([self.points, self.points[0]]) 148 | edge_vecs = np.diff(points_closed, axis=0) 149 | else: 150 | edge_vecs = np.diff(self.points, axis=0) 151 | 152 | edge_lens = np.sqrt((edge_vecs**2).sum(axis=1)) 153 | return edge_lens.sum() 154 | 155 | def centroid(self) -> np.ndarray: 156 | """Compute centroid of polygon. 157 | 158 | For open polygons or closed 3D polygons, returns the simple arithmetic mean 159 | of all vertex coordinates. 160 | 161 | For closed 2D polygons, returns the area-weighted centroid (geometric center 162 | of mass). The area weighting accounts for the shape's geometry, ensuring the 163 | centroid lies at the balance point of the polygon as if it were a uniform 164 | plate. This differs from the simple average of vertices, which would not 165 | account for how vertices are distributed around the polygon's boundary. 166 | 167 | Returns 168 | ------- 169 | np.ndarray 170 | Centroid coordinates, shape (2,) or (3,). 171 | 172 | Notes 173 | ----- 174 | For closed 2D polygons, uses the standard formula: 175 | C_x = (1 / (6*A)) * sum((x_i + x_{i+1}) * (x_i * y_{i+1} - x_{i+1} * y_i)) 176 | C_y = (1 / (6*A)) * sum((y_i + y_{i+1}) * (x_i * y_{i+1} - x_{i+1} * y_i)) 177 | where A is the polygon area. 178 | """ 179 | if not self.closed or not self._is_2d: 180 | # Simple average for open polygons or 3D closed polygons 181 | return np.mean(self.points, axis=0) 182 | 183 | # Area-weighted centroid for closed 2D polygons 184 | x = self.points[:, 0] 185 | y = self.points[:, 1] 186 | # Append first point to close polygon 187 | x_closed = np.append(x, x[0]) 188 | y_closed = np.append(y, y[0]) 189 | # Shoelace formula components 190 | cross = x_closed[:-1] * y_closed[1:] - x_closed[1:] * y_closed[:-1] 191 | signed_area = 0.5 * cross.sum() 192 | 193 | if abs(signed_area) < sys.float_info.epsilon: 194 | # Degenerate case: zero or near-zero area 195 | return np.mean(self.points, axis=0) 196 | 197 | cx = np.sum((x_closed[:-1] + x_closed[1:]) * cross) / (6.0 * signed_area) 198 | cy = np.sum((y_closed[:-1] + y_closed[1:]) * cross) / (6.0 * signed_area) 199 | return np.array([cx, cy]) 200 | 201 | def area(self) -> float: 202 | """Compute area enclosed by closed 2D polygon. 203 | 204 | Uses the shoelace formula. Only valid for closed 2D polygons. 205 | 206 | Returns 207 | ------- 208 | float 209 | Enclosed area (always positive). 210 | 211 | Raises 212 | ------ 213 | ValueError 214 | If polygon is not closed or not 2D. 
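
        Examples
        --------
        A unit square has area 1 (minimal sketch)::

            >>> import numpy as np
            >>> from lapy.polygon import Polygon
            >>> poly = Polygon(np.array([[0, 0], [1, 0], [1, 1], [0, 1]]), closed=True)
            >>> float(poly.area())
            1.0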
215 | """ 216 | if not self.closed: 217 | raise ValueError("Area computation requires closed polygon.") 218 | if not self._is_2d: 219 | raise ValueError("Area computation only valid for 2D polygons.") 220 | 221 | x = self.points[:, 0] 222 | y = self.points[:, 1] 223 | # Append first point to close polygon 224 | x_closed = np.append(x, x[0]) 225 | y_closed = np.append(y, y[0]) 226 | # Shoelace formula 227 | area = 0.5 * np.abs( 228 | np.sum(x_closed[:-1] * y_closed[1:] - x_closed[1:] * y_closed[:-1]) 229 | ) 230 | return area 231 | 232 | def resample( 233 | self, n_points: int = 100, n_iter: int = 1, inplace: bool = False 234 | ) -> "Polygon": 235 | """Resample polygon to have equidistant points. 236 | 237 | Creates n_points that are approximately equidistantly spaced along 238 | the cumulative Euclidean distance. Uses linear interpolation. 239 | 240 | Parameters 241 | ---------- 242 | n_points : int, default=100 243 | Number of points in resampled polygon. Must be at least 2. 244 | n_iter : int, default=1 245 | Number of resampling iterations. Higher values (e.g., 3-5) provide 246 | better equidistant spacing. Must be at least 1. 247 | inplace : bool, default=False 248 | If True, modify this polygon in-place. If False, return new polygon. 249 | 250 | Returns 251 | ------- 252 | Polygon 253 | Resampled polygon. Returns self if inplace=True, new instance otherwise. 254 | """ 255 | if n_points < 2: 256 | raise ValueError("n_points must be at least 2") 257 | if n_iter < 1: 258 | raise ValueError("n_iter must be at least 1") 259 | def _resample_once(p: np.ndarray, n: int, is_closed: bool) -> np.ndarray: 260 | """Single resampling pass.""" 261 | if is_closed: 262 | p_closed = np.vstack([p, p[0]]) 263 | d = np.cumsum( 264 | np.r_[0, np.sqrt((np.diff(p_closed, axis=0) ** 2).sum(axis=1))] 265 | ) 266 | d_sampled = np.linspace(0, d.max(), n + 1)[:-1] 267 | else: 268 | d = np.cumsum(np.r_[0, np.sqrt((np.diff(p, axis=0) ** 2).sum(axis=1))]) 269 | d_sampled = np.linspace(0, d.max(), n) 270 | p_closed = p 271 | 272 | n_dims = p.shape[1] 273 | return np.column_stack( 274 | [np.interp(d_sampled, d, p_closed[:, i]) for i in range(n_dims)] 275 | ) 276 | 277 | # Perform resampling n_iter times 278 | points_resampled = _resample_once(self.points, n_points, self.closed) 279 | for _ in range(n_iter - 1): 280 | points_resampled = _resample_once(points_resampled, n_points, self.closed) 281 | 282 | if inplace: 283 | self.points = points_resampled 284 | return self 285 | else: 286 | return Polygon(points_resampled, closed=self.closed) 287 | 288 | def _construct_smoothing_matrix(self) -> sparse.csc_matrix: 289 | """Construct smoothing matrix for Laplace smoothing. 290 | 291 | Creates a row-stochastic matrix where each point is connected to 292 | its neighbors (previous and next point). For open polygons, boundary 293 | points (first and last) are kept fixed. 294 | 295 | The method handles polygons of any size: 296 | 297 | - For open polygons with 2 points: Both boundary points remain fixed 298 | (identity matrix), so smoothing has no effect. 299 | - For open polygons with 3+ points: Boundary points are fixed, interior 300 | points are averaged with their neighbors. 301 | - For closed polygons with 2 points: Each point is averaged with its 302 | neighbor, causing them to converge to their midpoint. 303 | - For closed polygons with 3+ points: All points are averaged with 304 | their neighbors in a circular manner. 305 | 306 | Returns 307 | ------- 308 | scipy.sparse.csc_matrix 309 | Sparse smoothing matrix. 
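
        Examples
        --------
        For a closed triangle, every point is replaced by the average of
        its two neighbors (minimal sketch)::

            >>> import numpy as np
            >>> from lapy.polygon import Polygon
            >>> poly = Polygon(np.array([[0, 0], [1, 0], [0, 1]]), closed=True)
            >>> poly._construct_smoothing_matrix().toarray()
            array([[0. , 0.5, 0.5],
                   [0.5, 0. , 0.5],
                   [0.5, 0.5, 0. ]])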
310 |         """
311 |         n = self.points.shape[0]
312 | 
313 |         if self.closed:
314 |             # For closed polygons, connect last to first
315 |             i = np.arange(n)
316 |             j_prev = np.roll(np.arange(n), 1)
317 |             j_next = np.roll(np.arange(n), -1)
318 | 
319 |             # Create adjacency with neighbors
320 |             i_all = np.concatenate([i, i])
321 |             j_all = np.concatenate([j_prev, j_next])
322 |             data = np.ones(len(i_all))
323 | 
324 |             adj = sparse.csc_matrix((data, (i_all, j_all)), shape=(n, n))
325 | 
326 |             # Normalize rows to create a stochastic matrix (keep CSC format)
327 |             row_sum = np.array(adj.sum(axis=1)).ravel()
328 |             row_sum[row_sum == 0] = 1.0  # Avoid division by zero
329 |             adj = sparse.csc_matrix(adj.multiply(1.0 / row_sum[:, np.newaxis]))
330 |         else:
331 |             # For open polygons, use LIL format for easier construction
332 |             adj = sparse.lil_matrix((n, n))
333 | 
334 |             # Set identity for boundary points (they stay fixed)
335 |             adj[0, 0] = 1.0
336 |             adj[n - 1, n - 1] = 1.0
337 | 
338 |             # For interior points, connect to neighbors
339 |             for i in range(1, n - 1):
340 |                 adj[i, i - 1] = 0.5
341 |                 adj[i, i + 1] = 0.5
342 | 
343 |             # Convert to CSC for efficient operations
344 |             adj = adj.tocsc()
345 | 
346 |         return adj
347 | 
348 |     def smooth_laplace(
349 |         self,
350 |         n: int = 1,
351 |         lambda_: float = 0.5,
352 |         inplace: bool = False,
353 |     ) -> "Polygon":
354 |         """Smooth polygon using Laplace smoothing.
355 | 
356 |         Applies iterative smoothing: p_new = (1-lambda)*p + lambda * M*p,
357 |         where M is the neighbor-averaging matrix.
358 | 
359 |         Parameters
360 |         ----------
361 |         n : int, default=1
362 |             Number of smoothing iterations.
363 |         lambda_ : float, default=0.5
364 |             Diffusion speed parameter in range [0, 1].
365 |         inplace : bool, default=False
366 |             If True, modify this polygon in-place. If False, return new polygon.
367 | 
368 |         Returns
369 |         -------
370 |         Polygon
371 |             Smoothed polygon. Returns self if inplace=True, new instance otherwise.
372 |         """
373 |         # Input validation to enforce documented parameter ranges
374 |         if not isinstance(n, int) or n <= 0:
375 |             raise ValueError(f"n must be a positive integer, got {n!r}")
376 |         if not (0.0 <= lambda_ <= 1.0):
377 |             raise ValueError(f"lambda_ must be in the range [0, 1], got {lambda_!r}")
378 |         mat = self._construct_smoothing_matrix()
379 |         points_smooth = self.points.copy()
380 | 
381 |         for _ in range(n):
382 |             points_smooth = (1.0 - lambda_) * points_smooth + lambda_ * mat.dot(
383 |                 points_smooth
384 |             )
385 | 
386 |         if inplace:
387 |             self.points = points_smooth
388 |             return self
389 |         else:
390 |             return Polygon(points_smooth, closed=self.closed)
391 | 
392 |     def smooth_taubin(
393 |         self,
394 |         n: int = 1,
395 |         lambda_: float = 0.5,
396 |         mu: float = -0.53,
397 |         inplace: bool = False,
398 |     ) -> "Polygon":
399 |         """Smooth polygon using Taubin smoothing.
400 | 
401 |         Alternates between shrinking (positive lambda) and expanding (negative mu)
402 |         steps to reduce shrinkage while smoothing.
403 | 
404 |         Parameters
405 |         ----------
406 |         n : int, default=1
407 |             Number of smoothing iterations.
408 |         lambda_ : float, default=0.5
409 |             Positive diffusion parameter for shrinking step.
410 |         mu : float, default=-0.53
411 |             Negative diffusion parameter for expanding step.
412 |         inplace : bool, default=False
413 |             If True, modify this polygon in-place. If False, return new polygon.
414 | 
415 |         Returns
416 |         -------
417 |         Polygon
418 |             Smoothed polygon. Returns self if inplace=True, new instance otherwise.
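
        Examples
        --------
        A minimal sketch: compared with plain Laplace smoothing, Taubin
        smoothing shrinks this square far less::

            >>> import numpy as np
            >>> from lapy.polygon import Polygon
            >>> poly = Polygon(np.array([[0, 0], [1, 0], [1, 1], [0, 1]]), closed=True)
            >>> taubin = poly.smooth_taubin(n=10)
            >>> laplace = poly.smooth_laplace(n=10)
            >>> bool(taubin.length() > laplace.length())
            True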
419 | """ 420 | if n <= 0: 421 | raise ValueError("n must be a positive integer") 422 | if lambda_ <= 0: 423 | raise ValueError("lambda_ must be positive") 424 | if mu >= 0: 425 | raise ValueError("mu must be negative") 426 | mat = self._construct_smoothing_matrix() 427 | points_smooth = self.points.copy() 428 | 429 | for _ in range(n): 430 | # Lambda step (shrinking) 431 | points_smooth = (1.0 - lambda_) * points_smooth + lambda_ * mat.dot( 432 | points_smooth 433 | ) 434 | # Mu step (expanding) 435 | points_smooth = (1.0 - mu) * points_smooth + mu * mat.dot(points_smooth) 436 | 437 | if inplace: 438 | self.points = points_smooth 439 | return self 440 | else: 441 | return Polygon(points_smooth, closed=self.closed) 442 | 443 | --------------------------------------------------------------------------------