├── .codecov.yml
├── .codespellignore
├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug-report.md
│   │   ├── documentation.md
│   │   ├── feature-request.md
│   │   └── questions-help-support.md
│   ├── dependabot.yml
│   └── workflows
│       ├── build.yml
│       ├── code-style.yml
│       ├── doc.yml
│       ├── publish.yml
│       └── pytest.yml
├── .gitignore
├── LICENSE
├── README.md
├── data
│   ├── cubeTetra.ev
│   ├── cubeTetra.vtk
│   ├── cubeTria.ev
│   ├── cubeTria.vtk
│   ├── icosahedron.off
│   └── square-mesh.off
├── doc
│   ├── Makefile
│   ├── _static
│   │   └── css
│   │       └── style.css
│   ├── _templates
│   │   └── autosummary
│   │       ├── class.rst
│   │       ├── function.rst
│   │       └── module.rst
│   ├── api
│   │   ├── index.rst
│   │   ├── lapy.meshes.rst
│   │   ├── lapy.modules.rst
│   │   └── lapy.solver.rst
│   ├── changes
│   │   ├── authors.inc
│   │   ├── index.rst
│   │   ├── latest.rst
│   │   └── latest.rst.template
│   ├── conf.py
│   ├── index.rst
│   ├── links.inc
│   ├── make.bat
│   ├── references.bib
│   └── tutorials
│       ├── examples
│       └── index.rst
├── examples
│   ├── README.rst
│   ├── Test_Plot.ipynb
│   ├── Test_ShapeDNA.ipynb
│   ├── Test_TetMesh.ipynb
│   ├── Test_TetMesh_Geodesics.ipynb
│   ├── Test_TriaMesh.ipynb
│   └── Test_TriaMesh_Geodesics.ipynb
├── lapy
│   ├── __init__.py
│   ├── _read_geometry.py
│   ├── _tet_io.py
│   ├── _tria_io.py
│   ├── _version.py
│   ├── commands
│   │   ├── __init__.py
│   │   └── sys_info.py
│   ├── conformal.py
│   ├── diffgeo.py
│   ├── heat.py
│   ├── io.py
│   ├── plot.py
│   ├── shapedna.py
│   ├── solver.py
│   ├── tet_mesh.py
│   ├── tria_mesh.py
│   └── utils
│       ├── __init__.py
│       ├── _config.py
│       ├── _imports.py
│       └── tests
│           ├── __init__.py
│           ├── expected_outcomes.json
│           ├── test_TetMesh_Geodesics.py
│           ├── test_TriaMesh_Geodesics.py
│           ├── test_config.py
│           ├── test_imports.py
│           ├── test_shape_DNA.py
│           ├── test_tet_mesh.py
│           ├── test_tria_mesh.py
│           └── test_visualization_meshes.py
├── pyproject.toml
├── setup.py
└── tutorials
    └── README.rst
/.codecov.yml:
--------------------------------------------------------------------------------
1 | github_checks:
2 | annotations: false
3 |
4 | coverage:
5 | status:
6 | project:
7 | default:
8 | informational: true
9 | patch:
10 | default:
11 | informational: true
12 |
--------------------------------------------------------------------------------
/.codespellignore:
--------------------------------------------------------------------------------
1 | coo
2 | daty
3 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug-report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug Report
3 | about: Create a bug report to help us improve LaPy
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## Description
11 |
12 | ....
13 |
14 | ## Steps to Reproduce
15 |
22 |
23 |
24 | ...
25 |
26 | ## Expected Behavior
27 |
28 | ...
29 |
30 | ## Screenshots
31 |
32 | ...
33 |
34 | ## Environment
35 | - LaPy Version: ...
36 | - OS: ...
37 | - CPU: ...
38 |
39 |
40 |
41 |
42 |
43 | ## Additional Context
44 |
45 | ...
46 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/documentation.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Documentation
3 | about: Report an issue or make a suggestion related to LaPy documentation
4 | title: ''
5 | labels: documentation
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## Documentation
11 |
12 | ...
13 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature-request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature Request
3 | about: Submit a proposal/request for a new LaPy feature
4 | title: ''
5 | labels: enhancement
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## Feature Description
11 |
12 | ...
13 |
14 | ## Motivation
15 |
16 | ...
17 |
18 | ## Alternatives
19 |
20 | ...
21 |
22 | ## Additional Context
23 |
24 | ...
25 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/questions-help-support.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Questions/Help/Support
3 | about: Submit a request for support or a question
4 | title: ''
5 | labels: question
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## Question/Support Request
11 |
12 | ...
13 |
14 | ## Screenshots
15 |
16 | ...
17 |
18 |
19 |
20 | ## Environment
21 | - LaPy Version: ...
22 | - OS: ...
23 | - CPU: ...
24 |
25 |
26 |
27 |
28 |
29 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 | - package-ecosystem: "github-actions"
4 | directory: "/"
5 | schedule:
6 | interval: "weekly"
7 |
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: build
2 | concurrency:
3 | group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }}
4 | cancel-in-progress: true
5 | on:
6 | pull_request:
7 | push:
8 | branches: [main]
9 | workflow_dispatch:
10 |
11 | jobs:
12 | build:
13 | timeout-minutes: 10
14 | strategy:
15 | fail-fast: false
16 | matrix:
17 | os: [ubuntu, macos, windows]
18 | python-version: ["3.9", "3.10", "3.11", "3.12"]
19 | name: ${{ matrix.os }} - py${{ matrix.python-version }}
20 | runs-on: ${{ matrix.os }}-latest
21 | defaults:
22 | run:
23 | shell: bash
24 | steps:
25 | - name: Checkout repository
26 | uses: actions/checkout@v4
27 | - name: Setup Python ${{ matrix.python-version }}
28 | uses: actions/setup-python@v5
29 | with:
30 | python-version: ${{ matrix.python-version }}
31 | - name: Install dependencies
32 | run: |
33 | python -m pip install --progress-bar off --upgrade pip setuptools wheel
34 | python -m pip install --progress-bar off .[build]
35 | - name: Test package install
36 | run: lapy-sys_info
37 | - name: Remove package install
38 | run: python -m pip uninstall -yq lapy
39 | - name: Build package
40 | run: python -m build
41 | - name: Install sdist
42 | run: pip install ./dist/*.tar.gz
43 | - name: Test sdist install
44 | run: lapy-sys_info
45 | - name: Remove sdist install
46 | run: python -m pip uninstall -yq lapy
47 | - name: Install wheel
48 | run: pip install ./dist/*.whl
49 | - name: Test wheel install
50 | run: lapy-sys_info
51 | - name: Remove wheel install
52 | run: python -m pip uninstall -yq lapy
53 |
--------------------------------------------------------------------------------
/.github/workflows/code-style.yml:
--------------------------------------------------------------------------------
1 | name: code-style
2 | concurrency:
3 | group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }}
4 | cancel-in-progress: true
5 | on:
6 | pull_request:
7 | push:
8 | branches: [main]
9 | workflow_dispatch:
10 |
11 | jobs:
12 | style:
13 | timeout-minutes: 10
14 | runs-on: ubuntu-latest
15 | steps:
16 | - name: Checkout repository
17 | uses: actions/checkout@v4
18 | - name: Setup Python 3.10
19 | uses: actions/setup-python@v5
20 | with:
21 | python-version: '3.10'
22 | - name: Install dependencies
23 | run: |
24 | python -m pip install --progress-bar off --upgrade pip setuptools wheel
25 | python -m pip install --progress-bar off .[style]
26 | - name: Run Ruff
27 | run: ruff check .
28 | - name: Run codespell
29 | uses: codespell-project/actions-codespell@master
30 | with:
31 | check_filenames: true
32 | check_hidden: true
33 | skip: './.git,./build,./.mypy_cache,./.pytest_cache'
34 | ignore_words_file: ./.codespellignore
35 | - name: Run pydocstyle
36 | run: pydocstyle .
37 | - name: Run bibclean
38 | run: bibclean-check doc/references.bib
39 |
--------------------------------------------------------------------------------
/.github/workflows/doc.yml:
--------------------------------------------------------------------------------
1 | name: doc
2 | concurrency:
3 | group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }}
4 | cancel-in-progress: true
5 | on:
6 | pull_request:
7 | push:
8 | branches: [main]
9 | workflow_dispatch:
10 |
11 | jobs:
12 | build:
13 | timeout-minutes: 10
14 | runs-on: ubuntu-latest
15 | defaults:
16 | run:
17 | shell: bash
18 | steps:
19 | - name: Checkout repository
20 | uses: actions/checkout@v4
21 | with:
22 | path: ./main
23 | - name: Setup Python 3.10
24 | uses: actions/setup-python@v5
25 | with:
26 | python-version: '3.10'
27 | - name: Install package
28 | run: |
29 | python -m pip install --progress-bar off --upgrade pip setuptools wheel
30 | python -m pip install --progress-bar off main/.[doc]
31 | - name: Display system information
32 | run: lapy-sys_info --developer
33 | - name: Build doc
34 | run: TZ=UTC sphinx-build ./main/doc ./doc-build/dev -W --keep-going
35 | - name: Upload documentation
36 | uses: actions/upload-artifact@v4
37 | with:
38 | name: doc-dev
39 | path: ./doc-build/dev
40 |
41 | deploy:
42 | if: github.event_name == 'push'
43 | needs: build
44 | timeout-minutes: 10
45 | runs-on: ubuntu-latest
46 | permissions:
47 | contents: write
48 | defaults:
49 | run:
50 | shell: bash
51 | steps:
52 | - name: Download documentation
53 | uses: actions/download-artifact@v4
54 | with:
55 | name: doc-dev
56 | path: ./doc-dev
57 | - name: Deploy dev documentation
58 | uses: peaceiris/actions-gh-pages@v4
59 | with:
60 | github_token: ${{ secrets.GITHUB_TOKEN }}
61 | publish_dir: ./doc-dev
62 | destination_dir: ./dev
63 | user_name: 'github-actions[bot]'
64 | user_email: 'github-actions[bot]@users.noreply.github.com'
65 |
--------------------------------------------------------------------------------
/.github/workflows/publish.yml:
--------------------------------------------------------------------------------
1 | name: publish
2 | on:
3 | workflow_dispatch:
4 | release:
5 | types: [published]
6 |
7 | jobs:
8 | pypi:
9 | timeout-minutes: 10
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Checkout repository
13 | uses: actions/checkout@v4
14 | - name: Setup Python 3.10
15 | uses: actions/setup-python@v5
16 | with:
17 | python-version: '3.10'
18 | - name: Install dependencies
19 | run: |
20 | python -m pip install --progress-bar off --upgrade pip setuptools wheel
21 | python -m pip install --progress-bar off .[build]
22 | - name: Display system information
23 | run: lapy-sys_info --developer
24 | - name: Build and publish
25 | env:
26 | TWINE_USERNAME: __token__
27 | TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
28 | run: |
29 | python -m build
30 | twine upload dist/*
31 |
--------------------------------------------------------------------------------
/.github/workflows/pytest.yml:
--------------------------------------------------------------------------------
1 | name: pytest
2 | concurrency:
3 | group: ${{ github.workflow }}-${{ github.event.number }}-${{ github.event.ref }}
4 | cancel-in-progress: true
5 | on:
6 | pull_request:
7 | paths:
8 | - '**.py'
9 | push:
10 | branches: [main]
11 | paths:
12 | - '**.py'
13 | workflow_dispatch:
14 |
15 | jobs:
16 | pytest:
17 | timeout-minutes: 30
18 | strategy:
19 | fail-fast: false
20 | matrix:
21 | os: [ubuntu, macos, windows]
22 | python-version: ["3.9", "3.10", "3.11", "3.12"]
23 | # some tests fail (numerical issues) in older python on mac, so we exclude macOS on Python 3.9
24 | exclude:
25 | - os: macos
26 | python-version: '3.9'
27 | name: ${{ matrix.os }} - py${{ matrix.python-version }}
28 | runs-on: ${{ matrix.os }}-latest
29 | defaults:
30 | run:
31 | shell: bash
32 | steps:
33 | - name: Checkout repository
34 | uses: actions/checkout@v4
35 | - name: Setup Python ${{ matrix.python-version }}
36 | uses: actions/setup-python@v5
37 | with:
38 | python-version: ${{ matrix.python-version }}
39 | - name: Install package
40 | run: |
41 | python -m pip install --progress-bar off --upgrade pip setuptools wheel
42 | python -m pip install --progress-bar off .[test]
43 | - name: Display system information
44 | run: lapy-sys_info --developer
45 | - name: Run pytest
46 | run: pytest lapy --cov=lapy --cov-report=xml --cov-config=pyproject.toml
47 | - name: Upload to codecov
48 | if: ${{ matrix.os == 'ubuntu' && matrix.python-version == '3.10' }}
49 | uses: codecov/codecov-action@v5
50 | with:
51 | files: ./coverage.xml
52 | flags: unittests # optional
53 | name: codecov-umbrella # optional
54 | fail_ci_if_error: true # optional (default = false)
55 | verbose: true # optional (default = false)
56 | token: ${{ secrets.CODECOV_TOKEN }}
57 | slug: deep-mi/LaPy
58 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # macOS
2 | .DS_Store
3 |
4 | # Byte-compiled / optimized / DLL files
5 | __pycache__/
6 | *.py[cod]
7 | *$py.class
8 |
9 | # C extensions
10 | *.so
11 |
12 | # Distribution / packaging
13 | .Python
14 | build/
15 | develop-eggs/
16 | dist/
17 | downloads/
18 | eggs/
19 | .eggs/
20 | lib/
21 | lib64/
22 | parts/
23 | sdist/
24 | var/
25 | wheels/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 | MANIFEST
30 |
31 | # PyInstaller
32 | # Usually these files are written by a python script from a template
33 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
34 | *.manifest
35 | *.spec
36 |
37 | # Installer logs
38 | pip-log.txt
39 | pip-delete-this-directory.txt
40 |
41 | # Unit test / coverage reports
42 | htmlcov/
43 | .tox/
44 | .nox/
45 | .coverage
46 | .coverage.*
47 | .cache
48 | nosetests.xml
49 | coverage.xml
50 | *.cover
51 | .hypothesis/
52 | .pytest_cache/
53 | junit-results.xml
54 |
55 | # Translations
56 | *.mo
57 | *.pot
58 |
59 | # Django stuff:
60 | *.log
61 | local_settings.py
62 | db.sqlite3
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | doc/bin
73 | doc/_build/
74 | doc/generated/
75 | doc/api/generated/
76 |
77 | # PyBuilder
78 | target/
79 |
80 | # Jupyter Notebook
81 | .ipynb_checkpoints
82 |
83 | # IPython
84 | profile_default/
85 | ipython_config.py
86 |
87 | # pyenv
88 | .python-version
89 |
90 | # celery beat schedule file
91 | celerybeat-schedule
92 |
93 | # SageMath parsed files
94 | *.sage.py
95 |
96 | # Environments
97 | .env
98 | .venv
99 | env/
100 | venv/
101 | ENV/
102 | env.bak/
103 | venv.bak/
104 |
105 | # Spyder project settings
106 | .spyderproject
107 | .spyproject
108 |
109 | # Rope project settings
110 | .ropeproject
111 |
112 | # mkdocs documentation
113 | /site
114 |
115 | # mypy
116 | .mypy_cache/
117 | .dmypy.json
118 | dmypy.json
119 |
120 | # Pyre type checker
121 | .pyre/
122 |
123 | # PyCharm
124 | **/.idea/
125 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Deep Medical Imaging Lab (PI Reuter)
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [PyPI](https://pypi.org/project/lapy/)
2 | # LaPy
3 |
4 | LaPy is an open-source Python package for differential geometry on triangle
5 | and tetrahedral meshes. It includes an FEM solver for the Laplace,
6 | Poisson, and heat equations. Further functionality includes computation
7 | of gradients, divergence, mean-curvature flow, conformal mappings,
8 | geodesics, and ShapeDNA (Laplace spectra), as well as IO and plotting methods.
9 |
10 | LaPy is written purely in Python 3 without sacrificing speed, as almost all
11 | loops are vectorized, drawing upon efficient and sparse mesh data structures.
12 |
13 | ## Contents:
14 |
15 | - **TriaMesh**: a class for triangle meshes offering various operations, such as
16 | fixing orientation, smoothing, and computing curvature, boundaries, quality, and
17 | normals, along with efficient mesh data structures (edges, adjacency matrices)
18 | and IO from OFF, VTK, and other formats.
19 | - **TetMesh**: a class for tetrahedral meshes (orientation, boundary, IO ...)
20 | - **Solver**: a class for linear FEM computation (Laplace stiffness and mass
21 | matrix, fast and sparse eigenvalue solver, anisotropic Laplace, Poisson)
22 | - **io**: module for IO of vertex functions and eigenvector files
23 | - **diffgeo**: module for gradients, divergence, mean curvature flow, etc.
24 | - **heat**: module for heat kernel and diffusion
25 | - **shapedna**: module for the ShapeDNA descriptor of surfaces and solids
26 | - **plot**: module for interactive visualizations (wrapping plotly)
27 |
28 | ## Usage:
29 |
30 | The LaPy package covers a broad range of functionality, so we refer to the
31 | 'help' function and the docstring of each module / function / class for usage
32 | info. For example:
33 |
34 | ```
35 | import lapy as lp
36 | help(lp.TriaMesh)
37 | help(lp.Solver)
38 | ```
39 |
40 | In the `examples` subdirectory, we provide several Jupyter notebooks that
41 | illustrate prototypical use cases of the toolbox.
42 |
43 | ## Installation:
44 |
45 | Use the following code to install the latest release of LaPy into your local
46 | Python package directory:
47 |
48 | `python3 -m pip install lapy`
49 |
50 | Use the following command to install the development version in editable mode
51 | to a location of your choice:
52 |
53 | `python3 -m pip install --user --src /my/preferred/location --editable git+https://github.com/Deep-MI/Lapy.git#egg=lapy`
54 |
55 | Several functions, e.g. the Solver, require a sparse matrix decomposition, for which either the LU decomposition (from SciPy sparse, default) or the faster Cholesky decomposition (from scikit-sparse cholmod, recommended) can be used. If the parameter `use_cholmod` is True, the code will try to import cholmod from the scikit-sparse package and raise an error if that fails. If you would like to use cholmod, you need to install scikit-sparse separately, as pip currently cannot install it (conda can); scikit-sparse also requires numpy and scipy to be installed beforehand.
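
For example, a minimal sketch of computing a few Laplace eigenpairs with the
default LU backend (the mesh path assumes the `data` directory of this
repository):

```
from lapy import TriaMesh, Solver

tria = TriaMesh.read_vtk("data/cubeTria.vtk")  # load a triangle mesh
fem = Solver(tria, use_cholmod=False)          # default sparse LU solver from scipy
evals, evecs = fem.eigs(k=3)                   # first three eigenvalues and eigenvectors
```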
56 |
57 | ## API Documentation
58 |
59 | The API Documentation can be found at https://deep-mi.org/LaPy .
60 |
61 | ## References:
62 |
63 | If you use this software in a publication, please cite both of these papers:
64 |
65 | **[1]** Laplace-Beltrami spectra as 'Shape-DNA' of surfaces and solids. Reuter M, Wolter F-E, Peinecke N. Computer-Aided Design. 2006;38(4):342-366. http://dx.doi.org/10.1016/j.cad.2005.10.011
66 |
67 | **[2]** BrainPrint: a discriminative characterization of brain morphology. Wachinger C, Golland P, Kremen W, Fischl B, Reuter M. Neuroimage. 2015;109:232-48. http://dx.doi.org/10.1016/j.neuroimage.2015.01.032 http://www.ncbi.nlm.nih.gov/pubmed/25613439
68 |
69 | Shape-DNA [1] introduces the FEM methods and the Laplace spectra for shape analysis, while BrainPrint [2] focuses on medical applications.
70 |
71 | For geodesics, please also cite:
72 |
73 | [3] Crane K, Weischedel C, Wardetzky M. Geodesics in heat: A new approach to computing distance based on heat flow. ACM Transactions on Graphics. https://doi.org/10.1145/2516971.2516977
74 |
75 | For non-singular mean curvature flow please cite:
76 |
77 | [4] Kazhdan M, Solomon J, Ben-Chen M. 2012. Can Mean-Curvature Flow be Modified to be Non-singular? Comput. Graph. Forum 31, 5, 1745–1754.
78 | https://doi.org/10.1111/j.1467-8659.2012.03179.x
79 |
80 | For conformal mapping please cite:
81 |
82 | [5] Choi PT, Lam KC, Lui LM. FLASH: Fast Landmark Aligned Spherical Harmonic Parameterization for Genus-0 Closed Brain Surfaces. SIAM Journal on Imaging Sciences, vol. 8, no. 1, pp. 67-94, 2015. https://doi.org/10.1137/130950008
83 |
84 | We invite you to check out our lab webpage at https://deep-mi.org
85 |
--------------------------------------------------------------------------------
/data/icosahedron.off:
--------------------------------------------------------------------------------
1 | OFF
2 | 12 20 0
3 | 0.0 0.0 2.0
4 | 1.788854 0.000000 0.894427
5 | 0.552786 1.701302 0.894427
6 | -1.447214 1.051462 0.894427
7 | -1.447214 -1.051462 0.894427
8 | 0.552786 -1.701302 0.894427
9 | 1.447214 1.051462 -0.894427
10 | -0.552786 1.701302 -0.894427
11 | -1.788854 0.000000 -0.894427
12 | -0.552786 -1.701302 -0.894427
13 | 1.447214 -1.051462 -0.894427
14 | 0.0 0.0 -2.0
15 | 3 2 0 1
16 | 3 3 0 2
17 | 3 4 0 3
18 | 3 5 0 4
19 | 3 1 0 5
20 | 3 2 1 6
21 | 3 7 2 6
22 | 3 3 2 7
23 | 3 8 3 7
24 | 3 4 3 8
25 | 3 9 4 8
26 | 3 5 4 9
27 | 3 10 5 9
28 | 3 6 1 10
29 | 3 1 5 10
30 | 3 6 11 7
31 | 3 7 11 8
32 | 3 8 11 9
33 | 3 9 11 10
34 | 3 10 11 6
35 |
--------------------------------------------------------------------------------
/doc/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/doc/_static/css/style.css:
--------------------------------------------------------------------------------
1 | div.sphx-glr-download-link-note {
2 | height: 0px;
3 | visibility: hidden;
4 | }
5 |
--------------------------------------------------------------------------------
/doc/_templates/autosummary/class.rst:
--------------------------------------------------------------------------------
1 | {{ fullname | escape | underline }}
2 |
3 | .. currentmodule:: {{ module }}
4 |
5 | .. autoclass:: {{ objname }}
6 | :members:
7 | :inherited-members:
8 |
9 | .. minigallery:: {{ fullname }}
10 | :add-heading:
11 |
--------------------------------------------------------------------------------
/doc/_templates/autosummary/function.rst:
--------------------------------------------------------------------------------
1 | {{ fullname | escape | underline }}
2 |
3 | .. currentmodule:: {{ module }}
4 |
5 | .. autofunction:: {{ objname }}
6 |
7 | .. minigallery:: {{ fullname }}
8 | :add-heading:
9 |
--------------------------------------------------------------------------------
/doc/_templates/autosummary/module.rst:
--------------------------------------------------------------------------------
1 | {{ fullname }}
2 | {{ underline }}
3 |
4 | .. automodule:: {{ fullname }}
5 | :members:
6 |
7 |
--------------------------------------------------------------------------------
/doc/api/index.rst:
--------------------------------------------------------------------------------
1 | API References
2 | ==============
3 |
4 | This is the reference for classes (``CamelCase`` names) and functions
5 | (``underscore_case`` names) of ``lapy`` grouped thematically.
6 |
7 | .. toctree::
8 | :maxdepth: 2
9 |
10 | lapy.meshes.rst
11 | lapy.solver.rst
12 | lapy.modules.rst
13 |
--------------------------------------------------------------------------------
/doc/api/lapy.meshes.rst:
--------------------------------------------------------------------------------
1 | Meshes
2 | ======
3 |
4 | .. currentmodule:: lapy
5 |
6 | .. autosummary::
7 | :toctree: generated/
8 |
9 | TriaMesh
10 | TetMesh
11 |
--------------------------------------------------------------------------------
/doc/api/lapy.modules.rst:
--------------------------------------------------------------------------------
1 | Modules
2 | =======
3 |
4 | .. currentmodule:: lapy
5 |
6 | .. autosummary::
7 | :toctree: generated/
8 |
9 | io
10 | shapedna
11 | heat
12 | diffgeo
13 | conformal
14 | plot
15 |
--------------------------------------------------------------------------------
/doc/api/lapy.solver.rst:
--------------------------------------------------------------------------------
1 | Solver
2 | ======
3 |
4 | .. currentmodule:: lapy
5 |
6 | .. autosummary::
7 | :toctree: generated/
8 |
9 | Solver
10 |
--------------------------------------------------------------------------------
/doc/changes/authors.inc:
--------------------------------------------------------------------------------
1 | .. _Andreas Girodi: https://github.com/agirodi
2 | .. _Kersten Diers: https://github.com/kdiers
3 | .. _Martin Reuter: https://github.com/m-reuter
4 | .. _Mathieu Scheltienne: https://github.com/mscheltienne
5 |
--------------------------------------------------------------------------------
/doc/changes/index.rst:
--------------------------------------------------------------------------------
1 | Changelog
2 | =========
3 |
4 | .. toctree::
5 | :titlesonly:
6 |
7 | latest.rst
8 |
--------------------------------------------------------------------------------
/doc/changes/latest.rst:
--------------------------------------------------------------------------------
1 | .. NOTE: we use cross-references to highlight new functions and classes.
2 | Please follow the examples below, so the changelog page will have a link to
3 | the function/class documentation.
4 |
5 | .. NOTE: there are 3 separate sections for changes, based on type:
6 | - "Enhancements" for new features
7 | - "Bugs" for bug fixes
8 | - "API changes" for backward-incompatible changes
9 |
10 | .. NOTE: You can use the :pr:`xx` and :issue:`xx` role to x-ref to a GitHub PR
11 | or issue from this project.
12 |
13 | .. include:: ./authors.inc
14 |
15 | .. _latest:
16 |
17 | Version 1.0
18 | ===========
19 |
20 | API changes
21 | -----------
22 |
23 | - Classes: TriaMesh, TetMesh, and Solver are still available at the top level and can be imported directly from lapy.
24 | - Mesh IO: the mesh classes have been extended with IO member functions, and TriaIO and TetIO have been deprecated. Use the read\_* and write\_* class members to load and write meshes, for example, TriaMesh.read_vtk() to import a VTK triangle mesh file (see the sketch below). This simplifies IO greatly.
25 | - Module names have been changed to comply with PEP 8 conventions (lower case), for example, DiffGeo to diffgeo, FuncIO to io, and Plot to plot.
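
A minimal sketch of the new IO members (file names are placeholders):

.. code-block:: python

    from lapy import TriaMesh

    mesh = TriaMesh.read_vtk("cube.vtk")  # read a VTK triangle mesh
    mesh.write_vtk("cube_copy.vtk")       # write it back out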
26 |
27 | Bugs
28 | ----
29 |
30 | - Fixed numpy deprecation issue in import_vfunc and import_ev functions.
31 |
32 | Enhancements
33 | ------------
34 |
35 | - Comply with the numpy convention for docstrings (by `Andreas Girodi`_, `Kersten Diers`_ and `Martin Reuter`_ in :pr:`19` and :pr:`21`)
36 | - Add initial documentation build (by `Mathieu Scheltienne`_ in :pr:`22`)
37 |
38 |
39 | Authors
40 | -------
41 |
42 | * `Andreas Girodi`_
43 | * `Kersten Diers`_
44 | * `Martin Reuter`_
45 | * `Mathieu Scheltienne`_
46 |
--------------------------------------------------------------------------------
/doc/changes/latest.rst.template:
--------------------------------------------------------------------------------
1 | .. NOTE: we use cross-references to highlight new functions and classes.
2 | Please follow the examples below, so the changelog page will have a link to
3 | the function/class documentation.
4 |
5 | .. NOTE: there are 3 separate sections for changes, based on type:
6 | - "Enhancements" for new features
7 | - "Bugs" for bug fixes
8 | - "API changes" for backward-incompatible changes
9 |
10 | .. NOTE: You can use the :pr:`xx` and :issue:`xx` role to x-ref to a GitHub PR
11 | or issue from this project.
12 |
13 | .. include:: ./authors.inc
14 |
15 | .. _latest:
16 |
17 | Version x.x
18 | ===========
19 |
20 | Enhancements
21 | ------------
22 |
23 | - xxx
24 |
25 | Bugs
26 | ----
27 |
28 | - xxx
29 |
30 | API and behavior changes
31 | ------------------------
32 |
33 | - xxx
34 |
35 | Authors
36 | -------
37 |
38 | * `Mathieu Scheltienne`_
39 |
--------------------------------------------------------------------------------
/doc/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # For the full list of built-in configuration values, see the documentation:
4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
5 |
6 |
7 | import inspect
8 | from datetime import date
9 | from importlib import import_module
10 | from typing import Dict, Optional
11 |
12 | from sphinx_gallery.sorting import FileNameSortKey
13 |
14 | import lapy
15 |
16 | # -- project information -----------------------------------------------------
17 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
18 |
19 | project = "LaPy"
20 | author = "Martin Reuter"
21 | copyright = f"{date.today().year}, {author}"
22 | release = lapy.__version__
23 | package = lapy.__name__
24 | gh_url = "https://github.com/Deep-MI/LaPy"
25 |
26 | # -- general configuration ---------------------------------------------------
27 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
28 |
29 | # If your documentation needs a minimal Sphinx version, state it here.
30 | needs_sphinx = "5.0"
31 |
32 | # The document name of the “root” document, that is, the document that contains
33 | # the root toctree directive.
34 | root_doc = "index"
35 |
36 | # Add any Sphinx extension module names here, as strings. They can be
37 | # extensions coming with Sphinx (named "sphinx.ext.*") or your custom
38 | # ones.
39 | extensions = [
40 | "sphinx.ext.autodoc",
41 | "sphinx.ext.autosectionlabel",
42 | "sphinx.ext.autosummary",
43 | "sphinx.ext.intersphinx",
44 | "sphinx.ext.linkcode",
45 | "numpydoc",
46 | "sphinxcontrib.bibtex",
47 | "sphinx_copybutton",
48 | "sphinx_design",
49 | "sphinx_gallery.gen_gallery",
50 | "sphinx_issues",
51 | "nbsphinx",
52 | "IPython.sphinxext.ipython_console_highlighting",
53 | ]
54 |
55 | templates_path = ["_templates"]
56 | exclude_patterns = [
57 | "_build",
58 | "Thumbs.db",
59 | ".DS_Store",
60 | "**.ipynb_checkpoints",
61 | "tutorials/examples/README.rst",
62 | ]
63 |
64 | # Sphinx will warn about all references where the target cannot be found.
65 | nitpicky = True
66 | nitpick_ignore = []
67 |
68 | show_warning_types = True
69 | suppress_warnings = [
70 | # Ignore new warning in Sphinx 7.3.0 while pickling environment:
71 | # WARNING: cannot cache unpickable configuration value: 'sphinx_gallery_conf'
72 | "config.cache",
73 | ]
74 |
75 | # A list of ignored prefixes for module index sorting.
76 | modindex_common_prefix = [f"{package}."]
77 |
78 | # The name of a reST role (builtin or Sphinx extension) to use as the default
79 | # role, that is, for text marked up `like this`. This can be set to 'py:obj' to
80 | # make `filter` a cross-reference to the Python function “filter”.
81 | default_role = "py:obj"
82 |
83 | # -- options for HTML output -------------------------------------------------
84 | html_theme = "furo"
85 | html_static_path = ["_static"]
86 | html_css_files = [
87 | "css/style.css",
88 | ]
89 | html_title = project
90 | html_show_sphinx = False
91 |
92 | # Documentation to change footer icons:
93 | # https://pradyunsg.me/furo/customisation/footer/#changing-footer-icons
94 | html_theme_options = {
95 | "footer_icons": [
96 | {
97 | "name": "GitHub",
98 | "url": gh_url,
99 | "html": """
100 |
103 | """,
104 | "class": "",
105 | },
106 | ],
107 | }
108 |
109 | # -- autosummary -------------------------------------------------------------
110 | autosummary_generate = True
111 |
112 | # -- autodoc -----------------------------------------------------------------
113 | autodoc_typehints = "none"
114 | autodoc_member_order = "groupwise"
115 | autodoc_warningiserror = True
116 | autoclass_content = "class"
117 |
118 | # -- intersphinx -------------------------------------------------------------
119 | intersphinx_mapping = {
120 | "matplotlib": ("https://matplotlib.org/stable", None),
121 | "mne": ("https://mne.tools/stable/", None),
122 | "numpy": ("https://numpy.org/doc/stable", None),
123 | "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
124 | "python": ("https://docs.python.org/3", None),
125 | "scipy": ("https://docs.scipy.org/doc/scipy", None),
126 | "sklearn": ("https://scikit-learn.org/stable/", None),
127 | }
128 | intersphinx_timeout = 5
129 |
130 | # -- sphinx-issues -----------------------------------------------------------
131 | issues_github_path = gh_url.split("https://github.com/")[-1]
132 |
133 | # -- autosectionlabels -------------------------------------------------------
134 | autosectionlabel_prefix_document = True
135 |
136 | # -- numpydoc ----------------------------------------------------------------
137 | numpydoc_class_members_toctree = False
138 | numpydoc_attributes_as_param_list = False
139 |
140 | # x-ref
141 | numpydoc_xref_param_type = True
142 | numpydoc_xref_aliases = {
143 | # LaPy
144 | "TetMesh": "lapy.TetMesh",
145 | "TriaMesh": "lapy.TriaMesh",
146 | # Matplotlib
147 | "Axes": "matplotlib.axes.Axes",
148 | "Figure": "matplotlib.figure.Figure",
149 | # Python
150 | "bool": ":class:`python:bool`",
151 | "Path": "pathlib.Path",
152 | "TextIO": "io.TextIOBase",
153 | # Scipy
154 | "csc_matrix": "scipy.sparse.csc_matrix",
155 | }
156 | numpydoc_xref_ignore = {
157 | "k", # Solver, bad variable name
158 | "n", # Solver, bad variable name
159 | "N", # Solver, bad variable name
160 | "n_triangles", # TriaMesh
161 | "of",
162 | "shape",
163 | "vnum", # TriaMesh
164 | }
165 |
166 | # validation
167 | # https://numpydoc.readthedocs.io/en/latest/validation.html#validation-checks
168 | error_ignores = {
169 | "GL01", # docstring should start in the line immediately after the quotes
170 | "EX01", # section 'Examples' not found
171 | "ES01", # no extended summary found
172 | "SA01", # section 'See Also' not found
173 | "RT02", # The first line of the Returns section should contain only the type, unless multiple values are being returned # noqa
174 | }
175 | numpydoc_validate = True
176 | numpydoc_validation_checks = {"all"} | set(error_ignores)
177 | numpydoc_validation_exclude = { # regex to ignore during docstring check
178 | r"\.__getitem__",
179 | r"\.__contains__",
180 | r"\.__hash__",
181 | r"\.__mul__",
182 | r"\.__sub__",
183 | r"\.__add__",
184 | r"\.__iter__",
185 | r"\.__div__",
186 | r"\.__neg__",
187 | }
188 |
189 | # -- sphinxcontrib-bibtex ----------------------------------------------------
190 | bibtex_bibfiles = ["./references.bib"]
191 |
192 | # -- sphinx.ext.linkcode -----------------------------------------------------
193 | # https://www.sphinx-doc.org/en/master/usage/extensions/linkcode.html
194 |
195 |
196 | def linkcode_resolve(domain: str, info: Dict[str, str]) -> Optional[str]:
197 | """Determine the URL corresponding to a Python object.
198 |
199 | Parameters
200 | ----------
201 | domain : str
202 | One of 'py', 'c', 'cpp', 'javascript'.
203 | info : dict
204 | With keys "module" and "fullname".
205 |
206 | Returns
207 | -------
208 | url : str | None
209 | The code URL. If None, no link is added.
210 | """
211 | if domain != "py":
212 | return None # only document python objects
213 |
214 | # retrieve pyobject and file
215 | try:
216 | module = import_module(info["module"])
217 | pyobject = module
218 | for elt in info["fullname"].split("."):
219 | pyobject = getattr(pyobject, elt)
220 | fname = inspect.getsourcefile(pyobject).replace("\\", "/")
221 | except Exception:
222 | # Either the object could not be loaded or the file was not found.
223 | # For instance, properties will raise.
224 | return None
225 |
226 | # retrieve start/stop lines
227 | source, start_line = inspect.getsourcelines(pyobject)
228 | lines = "L%d-L%d" % (start_line, start_line + len(source) - 1)
229 |
230 | # create URL
231 | if "dev" in release:
232 | branch = "main"
233 | else:
234 | return None # alternatively, link to a maint/version branch
235 | fname = fname.rsplit(f"/{package}/")[1]
236 | url = f"{gh_url}/blob/{branch}/{package}/{fname}#{lines}"
237 | return url
238 |
239 |
240 | # -- sphinx-gallery ----------------------------------------------------------
241 | sphinx_gallery_conf = {
242 | "backreferences_dir": "generated/backreferences",
243 | "doc_module": (f"{package}",),
244 | "examples_dirs": ["../examples"],
245 | "exclude_implicit_doc": {}, # set
246 | "filename_pattern": r"\d{2}_",
247 | "gallery_dirs": ["generated/examples"],
248 | "line_numbers": False,
249 | "plot_gallery": True,
250 | "reference_url": {f"{package}": None},
251 | "remove_config_comments": True,
252 | "show_memory": True,
253 | "within_subsection_order": FileNameSortKey,
254 | }
255 |
256 | # -- make sure pandoc gets installed -----------------------------------------
257 | from inspect import getsourcefile
258 | import os
259 |
260 | # Get path to directory containing this file, conf.py.
261 | DOCS_DIRECTORY = os.path.dirname(os.path.abspath(getsourcefile(lambda: 0)))
262 |
263 | def ensure_pandoc_installed(_):
264 | import pypandoc
265 |
266 | # Download pandoc if necessary. If pandoc is already installed and on
267 | # the PATH, the installed version will be used. Otherwise, we will
268 | # download a copy of pandoc into docs/bin/ and add that to our PATH.
269 | pandoc_dir = os.path.join(DOCS_DIRECTORY, "bin")
270 | # Add dir containing pandoc binary to the PATH environment variable
271 | if pandoc_dir not in os.environ["PATH"].split(os.pathsep):
272 | os.environ["PATH"] += os.pathsep + pandoc_dir
273 | pypandoc.ensure_pandoc_installed(
274 | targetfolder=pandoc_dir,
275 | delete_installer=True,
276 | )
277 |
278 | def setup(app):
279 | app.connect("builder-inited", ensure_pandoc_installed)
280 |
--------------------------------------------------------------------------------
/doc/index.rst:
--------------------------------------------------------------------------------
1 | .. include:: ./links.inc
2 |
3 | **LaPy**
4 | ========
5 |
6 | .. toctree::
7 | :hidden:
8 |
9 | api/index
10 | tutorials/index
11 | changes/index
12 |
13 | LaPy is an `open-source Python package <project github_>`_ for differential
14 | geometry on triangle and tetrahedral meshes. It includes an FEM solver for
15 | the Laplace, Poisson, and heat equations. Further functionality
16 | includes computation of gradients, divergence, mean-curvature flow,
17 | conformal mappings, geodesics, and ShapeDNA (Laplace spectra), as well as
18 | IO and plotting methods.
19 |
20 | LaPy is written purely in Python 3 without sacrificing speed, as almost all
21 | loops are vectorized, drawing upon efficient and sparse mesh data structures.
22 |
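A minimal usage sketch (the mesh file name is a placeholder):

.. code-block:: python

    from lapy import TriaMesh, Solver

    mesh = TriaMesh.read_vtk("cube.vtk")   # load a triangle mesh
    evals, evecs = Solver(mesh).eigs(k=3)  # first three Laplace eigenpairs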
23 |
24 | Install
25 | -------
26 |
27 | LaPy is available on `PyPI <project pypi_>`_ and on
28 | `conda-forge <project conda_>`_.
29 |
30 | .. tab-set::
31 |
32 | .. tab-item:: PyPI
33 |
34 | .. code-block:: bash
35 |
36 | pip install lapy
37 |
38 | .. tab-item:: Conda
39 |
40 | .. code-block:: bash
41 |
42 | conda install -c conda-forge lapy
43 |
44 | .. tab-item:: Source
45 |
46 | .. code-block:: bash
47 |
48 | pip install git+https://github.com/Deep-MI/LaPy
49 |
50 | License
51 | -------
52 |
53 | ``lapy`` is licensed under the `MIT license`_.
54 | A full copy of the license can be found `on GitHub <project license_>`_.
55 |
--------------------------------------------------------------------------------
/doc/links.inc:
--------------------------------------------------------------------------------
1 | .. This (-*- rst -*-) format file contains commonly used link targets and name
2 | substitutions. It may be included in many files, therefore it should only
3 | contain link targets and name substitutions. Try grepping for "^\.\. _" to
4 | find plausible candidates for this list.
5 |
6 | .. NOTE: reST targets are
7 | __not_case_sensitive__, so only one target definition is needed for:
8 | nipy, NIPY, Nipy, etc...
9 |
10 |
11 | .. project
12 |
13 | .. _project pypi: https://pypi.org/project/lapy/
14 | .. _project conda: https://anaconda.org/conda-forge/lapy
15 | .. _project github: https://github.com/Deep-MI/LaPy
16 | .. _project license: https://github.com/Deep-MI/LaPy/blob/master/LICENSE
17 |
18 |
19 | .. license
20 |
21 | .. _MIT license: https://opensource.org/licenses/MIT
22 |
23 |
24 | .. numpy
25 |
26 | .. _numpy: https://numpy.org/
27 |
28 |
29 | .. sklearn
30 |
31 | .. _scikit-learn: https://scikit-learn.org/stable/
32 |
33 |
34 | .. scipy
35 |
36 | .. _scipy: https://scipy.org/
37 |
--------------------------------------------------------------------------------
/doc/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | %SPHINXBUILD% >NUL 2>NUL
14 | if errorlevel 9009 (
15 | echo.
16 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
17 | echo.installed, then set the SPHINXBUILD environment variable to point
18 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
19 | echo.may add the Sphinx directory to PATH.
20 | echo.
21 | echo.If you don't have Sphinx installed, grab it from
22 | echo.https://www.sphinx-doc.org/
23 | exit /b 1
24 | )
25 |
26 | if "%1" == "" goto help
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/doc/references.bib:
--------------------------------------------------------------------------------
1 | @article{conformal_parameterization_2020,
2 | author = {Choi, Gary P. T. and Leung-Liu, Yusan and Gu, Xianfeng and Lui, Lok Ming},
3 | doi = {10.1137/19M125337X},
4 | journal = {SIAM Journal on Imaging Sciences},
5 | number = {3},
6 | pages = {1049-1083},
7 | title = {Parallelizable Global Conformal Parameterization of Simply-Connected Surfaces via Partial Welding},
8 | volume = {13},
9 | year = {2020}
10 | }
11 |
12 | @article{numpy_2020,
13 | author = {Harris, Charles R. and Millman, K. Jarrod and van der Walt, Stéfan J. and Gommers, Ralf and Virtanen, Pauli and Cournapeau, David and Wieser, Eric and Taylor, Julian and Berg, Sebastian and Smith, Nathaniel J. and Kern, Robert and Picus, Matti and Hoyer, Stephan and van Kerkwijk, Marten H. and Brett, Matthew and Haldane, Allan and del Río, Jaime Fernández and Wiebe, Mark and Peterson, Pearu and Gérard-Marchant, Pierre and Sheppard, Kevin and Reddy, Tyler and Weckesser, Warren and Abbasi, Hameer and Gohlke, Christoph and Oliphant, Travis E.},
14 | doi = {10.1038/s41586-020-2649-2},
15 | journal = {Nature},
16 | month = {September},
17 | number = {7825},
18 | pages = {357--362},
19 | title = {Array programming with {NumPy}},
20 | volume = {585},
21 | year = {2020}
22 | }
23 |
24 | @article{scipy_2020,
25 | author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and Haberland, Matt and Reddy, Tyler and Cournapeau, David and Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and Bright, Jonathan and van der Walt, Stéfan J. and Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and Kern, Robert and Larson, Eric and Carey, C J and Polat, İlhan and Feng, Yu and Moore, Eric W. and VanderPlas, Jake and Laxalde, Denis and Perktold, Josef and Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and Harris, Charles R. and Archibald, Anne M. and Ribeiro, Antônio H. and Pedregosa, Fabian and van Mulbregt, Paul and {SciPy 1.0 Contributors} and Vijaykumar, Aditya and Bardelli, Alessandro Pietro and Rothberg, Alex and Hilboll, Andreas and Kloeckner, Andreas and Scopatz, Anthony and Lee, Antony and Rokem, Ariel and Woods, C. Nathan and Fulton, Chad and Masson, Charles and Häggström, Christian and Fitzgerald, Clark and Nicholson, David A. and Hagen, David R. and Pasechnik, Dmitrii V. and Olivetti, Emanuele and Martin, Eric and Wieser, Eric and Silva, Fabrice and Lenders, Felix and Wilhelm, Florian and Young, G. and Price, Gavin A. and Ingold, Gert-Ludwig and Allen, Gregory E. and Lee, Gregory R. and Audren, Hervé and Probst, Irvin and Dietrich, Jörg P. and Silterra, Jacob and Webber, James T and Slavič, Janko and Nothman, Joel and Buchner, Johannes and Kulick, Johannes and Schönberger, Johannes L. and de Miranda Cardoso, José Vinícius and Reimer, Joscha and Harrington, Joseph and Rodríguez, Juan Luis Cano and Nunez-Iglesias, Juan and Kuczynski, Justin and Tritz, Kevin and Thoma, Martin and Newville, Matthew and Kümmerer, Matthias and Bolingbroke, Maximilian and Tartre, Michael and Pak, Mikhail and Smith, Nathaniel J. and Nowaczyk, Nikolai and Shebanov, Nikolay and Pavlyk, Oleksandr and Brodtkorb, Per A. and Lee, Perry and McGibbon, Robert T. and Feldbauer, Roman and Lewis, Sam and Tygier, Sam and Sievert, Scott and Vigna, Sebastiano and Peterson, Stefan and More, Surhud and Pudlik, Tadeusz and Oshima, Takuya and Pingel, Thomas J. and Robitaille, Thomas P. and Spura, Thomas and Jones, Thouis R. and Cera, Tim and Leslie, Tim and Zito, Tiziano and Krauss, Tom and Upadhyay, Utkarsh and Halchenko, Yaroslav O. and Vázquez-Baeza, Yoshiki},
26 | doi = {10.1038/s41592-019-0686-2},
27 | journal = {Nature Methods},
28 | month = {March},
29 | number = {3},
30 | pages = {261--272},
31 | title = {{SciPy} 1.0: fundamental algorithms for scientific computing in {Python}},
32 | volume = {17},
33 | year = {2020}
34 | }
35 |
--------------------------------------------------------------------------------
/doc/tutorials/examples:
--------------------------------------------------------------------------------
1 | ../../examples
--------------------------------------------------------------------------------
/doc/tutorials/index.rst:
--------------------------------------------------------------------------------
1 | Tutorials
2 | =========
3 |
4 | Here you can find notebooks with examples highlighting some of LaPy's functionality.
5 |
6 | .. toctree::
7 | :maxdepth: 1
8 |
9 | examples/Test_TriaMesh.ipynb
10 | examples/Test_TetMesh.ipynb
11 | examples/Test_Plot.ipynb
12 | examples/Test_ShapeDNA.ipynb
13 | examples/Test_TriaMesh_Geodesics.ipynb
14 | examples/Test_TetMesh_Geodesics.ipynb
15 |
--------------------------------------------------------------------------------
/examples/README.rst:
--------------------------------------------------------------------------------
1 | Tutorials
2 | =========
3 |
--------------------------------------------------------------------------------
/examples/Test_Plot.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Visualization"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "## Triangle Mesh"
15 | ]
16 | },
17 | {
18 | "cell_type": "code",
19 | "execution_count": null,
20 | "metadata": {},
21 | "outputs": [],
22 | "source": [
23 | "import plotly.io as pio\n",
24 | "\n",
25 | "from lapy import Solver, TetMesh, TriaMesh, io, plot\n",
26 | "\n",
27 | "pio.renderers.default = \"sphinx_gallery\""
28 | ]
29 | },
30 | {
31 | "cell_type": "markdown",
32 | "metadata": {},
33 | "source": [
34 | "This tutorial will show you some of our visualization functionality. For that we load a larger mesh of the cube and compute the first three eigenvalues and eigenvectors. We also show how to save the eigenfunctions to disk."
35 | ]
36 | },
37 | {
38 | "cell_type": "code",
39 | "execution_count": null,
40 | "metadata": {},
41 | "outputs": [],
42 | "source": [
43 | "tria = TriaMesh.read_vtk(\"../data/cubeTria.vtk\")\n",
44 | "fem = Solver(tria)\n",
45 | "evals, evecs = fem.eigs(k=3)\n",
46 | "evDict = dict()\n",
47 | "evDict[\"Refine\"] = 0\n",
48 | "evDict[\"Degree\"] = 1\n",
49 | "evDict[\"Dimension\"] = 2\n",
50 | "evDict[\"Elements\"] = len(tria.t)\n",
51 | "evDict[\"DoF\"] = len(tria.v)\n",
52 | "evDict[\"NumEW\"] = 3\n",
53 | "evDict[\"Eigenvalues\"] = evals\n",
54 | "evDict[\"Eigenvectors\"] = evecs\n",
55 | "io.write_ev(\"../data/cubeTria.ev\", evDict)"
56 | ]
57 | },
58 | {
59 | "cell_type": "markdown",
60 | "metadata": {},
61 | "source": [
62 | "Let's look at the result by visualizing the first non-constant eigenfunction on top of the cube mesh. You can see that the extrema localize in two diametrically opposed corners."
63 | ]
64 | },
65 | {
66 | "cell_type": "code",
67 | "execution_count": null,
68 | "metadata": {},
69 | "outputs": [],
70 | "source": [
71 | "plot.plot_tria_mesh(\n",
72 | " tria,\n",
73 | " vfunc=evecs[:, 1],\n",
74 | " xrange=None,\n",
75 | " yrange=None,\n",
76 | " zrange=None,\n",
77 | " showcaxis=False,\n",
78 | " caxis=None,\n",
79 | ")"
80 | ]
81 | },
82 | {
83 | "cell_type": "markdown",
84 | "metadata": {},
85 | "source": [
86 | "We can also adjust the axes and add a color scale."
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": null,
92 | "metadata": {},
93 | "outputs": [],
94 | "source": [
95 | "plot.plot_tria_mesh(\n",
96 | " tria,\n",
97 | " vfunc=evecs[:, 1],\n",
98 | " xrange=[-2, 2],\n",
99 | " yrange=[-2, 2],\n",
100 | " zrange=[-2, 2],\n",
101 | " showcaxis=True,\n",
102 | " caxis=[-0.3, 0.5],\n",
103 | ")"
104 | ]
105 | },
106 | {
107 | "cell_type": "markdown",
108 | "metadata": {},
109 | "source": [
110 | "## Tetrahedral Mesh"
111 | ]
112 | },
113 | {
114 | "cell_type": "markdown",
115 | "metadata": {},
116 | "source": [
117 | "Next we load a tetrahedral mesh and again compute the first 3 eigenvectors."
118 | ]
119 | },
120 | {
121 | "cell_type": "code",
122 | "execution_count": null,
123 | "metadata": {},
124 | "outputs": [],
125 | "source": [
126 | "tetra = TetMesh.read_vtk(\"../data/cubeTetra.vtk\")\n",
127 | "fem = Solver(tetra)\n",
128 | "evals, evecs = fem.eigs(k=3)\n",
129 | "evDict = dict()\n",
130 | "evDict[\"Refine\"] = 0\n",
131 | "evDict[\"Degree\"] = 1\n",
132 | "evDict[\"Dimension\"] = 2\n",
133 | "evDict[\"Elements\"] = len(tetra.t)\n",
134 | "evDict[\"DoF\"] = len(tetra.v)\n",
135 | "evDict[\"NumEW\"] = 3\n",
136 | "evDict[\"Eigenvalues\"] = evals\n",
137 | "evDict[\"Eigenvectors\"] = evecs\n",
138 | "io.write_ev(\"../data/cubeTetra.ev\", evDict)"
139 | ]
140 | },
141 | {
142 | "cell_type": "markdown",
143 | "metadata": {},
144 | "source": [
145 | "The eigenvector defines a function on all vertices, also inside the cube. Here we can see it as a color overlay on the boundary."
146 | ]
147 | },
148 | {
149 | "cell_type": "code",
150 | "execution_count": null,
151 | "metadata": {},
152 | "outputs": [],
153 | "source": [
154 | "plot.plot_tet_mesh(\n",
155 | " tetra,\n",
156 | " vfunc=evecs[:, 1],\n",
157 | " xrange=None,\n",
158 | " yrange=None,\n",
159 | " zrange=None,\n",
160 | " showcaxis=False,\n",
161 | " caxis=None,\n",
162 | ")"
163 | ]
164 | },
165 | {
166 | "cell_type": "markdown",
167 | "metadata": {},
168 | "source": [
169 | "The plot function allows cutting the solid object open (here we keep every vertex where the function is larger than 0)."
170 | ]
171 | },
172 | {
173 | "cell_type": "code",
174 | "execution_count": null,
175 | "metadata": {},
176 | "outputs": [],
177 | "source": [
178 | "plot.plot_tet_mesh(\n",
179 | " tetra,\n",
180 | " cutting=(\"f>0\"),\n",
181 | " vfunc=evecs[:, 1],\n",
182 | " xrange=[-2, 2],\n",
183 | " yrange=[-2, 2],\n",
184 | " zrange=[-2, 2],\n",
185 | " showcaxis=True,\n",
186 | " caxis=[-0.3, 0.5],\n",
187 | ")"
188 | ]
189 | }
190 | ],
191 | "metadata": {
192 | "kernelspec": {
193 | "display_name": "Python3",
194 | "language": "python",
195 | "name": "python3"
196 | },
197 | "language_info": {
198 | "codemirror_mode": {
199 | "name": "ipython",
200 | "version": 3
201 | },
202 | "file_extension": ".py",
203 | "mimetype": "text/x-python",
204 | "name": "python",
205 | "nbconvert_exporter": "python",
206 | "pygments_lexer": "ipython3",
207 | "version": "3"
208 | }
209 | },
210 | "nbformat": 4,
211 | "nbformat_minor": 4
212 | }
213 |
--------------------------------------------------------------------------------
/examples/Test_ShapeDNA.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# ShapeDNA"
8 | ]
9 | },
10 | {
11 | "cell_type": "markdown",
12 | "metadata": {},
13 | "source": [
14 | "ShapeDNA is an n-dimensional intrinsic shape descriptor (see Reuter et al., CAD Journal, 2006). It can be used to compare two geometric objects independent of their pose or posture as the ShapeDNA is not affected by (near)-isometric deformations. This tutorial shows how you compute, normalize and re-weight Laplace-Beltrami spectra to obtain the ShapeDNA."
15 | ]
16 | },
17 | {
18 | "cell_type": "code",
19 | "execution_count": 1,
20 | "metadata": {},
21 | "outputs": [],
22 | "source": [
23 | "# imports\n",
24 | "from lapy import TetMesh, TriaMesh, shapedna"
25 | ]
26 | },
27 | {
28 | "cell_type": "markdown",
29 | "metadata": {},
30 | "source": [
31 | "First we load some data: a tria mesh representing the boundary of a cube and a tetrahedral mesh representing the full cube."
32 | ]
33 | },
34 | {
35 | "cell_type": "code",
36 | "execution_count": 2,
37 | "metadata": {},
38 | "outputs": [
39 | {
40 | "name": "stdout",
41 | "output_type": "stream",
42 | "text": [
43 | "--> VTK format ... \n",
44 | " --> DONE ( V: 2402 , T: 4800 )\n",
45 | "\n",
46 | "--> VTK format ... \n",
47 | " --> DONE ( V: 9261 , T: 48000 )\n",
48 | "\n"
49 | ]
50 | }
51 | ],
52 | "source": [
53 | "# load data\n",
54 | "tria = TriaMesh.read_vtk(\"../data/cubeTria.vtk\")\n",
55 | "tet = TetMesh.read_vtk(\"../data/cubeTetra.vtk\")"
56 | ]
57 | },
58 | {
59 | "cell_type": "markdown",
60 | "metadata": {},
61 | "source": [
62 | "Let's compute the first three eigenvalues and eigenvectors of the triangle mesh..."
63 | ]
64 | },
65 | {
66 | "cell_type": "code",
67 | "execution_count": 3,
68 | "metadata": {},
69 | "outputs": [
70 | {
71 | "name": "stdout",
72 | "output_type": "stream",
73 | "text": [
74 | "TriaMesh with regular Laplace-Beltrami\n",
75 | "Solver: spsolve (LU decomposition) ...\n"
76 | ]
77 | },
78 | {
79 | "data": {
80 | "text/plain": [
81 | "array([-4.0165149e-05, 4.1696410e+00, 4.1704664e+00], dtype=float32)"
82 | ]
83 | },
84 | "execution_count": 3,
85 | "metadata": {},
86 | "output_type": "execute_result"
87 | }
88 | ],
89 | "source": [
90 | "# compute eigenvalues and eigenvectors for tria mesh\n",
91 | "ev = shapedna.compute_shapedna(tria, k=3)\n",
92 | "ev[\"Eigenvectors\"]\n",
93 | "ev[\"Eigenvalues\"]"
94 | ]
95 | },
96 | {
97 | "cell_type": "markdown",
98 | "metadata": {},
99 | "source": [
100 | "Now we perform a normalization of the eigenvalues using the method \"geometry\" which is equal to surface area normalization for 2d meshes. The resulting eigenvalues are the same as when computing them on the same shape with unit surface area (=1)."
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": 4,
106 | "metadata": {},
107 | "outputs": [
108 | {
109 | "data": {
110 | "text/plain": [
111 | "array([-2.4099089e-04, 2.5017845e+01, 2.5022799e+01], dtype=float32)"
112 | ]
113 | },
114 | "execution_count": 4,
115 | "metadata": {},
116 | "output_type": "execute_result"
117 | }
118 | ],
119 | "source": [
120 | "# volume / surface / geometry normalization of tria eigenvalues\n",
121 | "shapedna.normalize_ev(tria, ev[\"Eigenvalues\"], method=\"geometry\")"
122 | ]
123 | },
124 | {
125 | "cell_type": "markdown",
126 | "metadata": {},
127 | "source": [
128 | "For surfaces, eigenvalues increase linearly with their ordering. In order to reduce the influence of higher (and probably more noise affected) eigenvalues it is common practice to perform a linear re-weighting."
129 | ]
130 | },
131 | {
132 | "cell_type": "code",
133 | "execution_count": 5,
134 | "metadata": {},
135 | "outputs": [
136 | {
137 | "data": {
138 | "text/plain": [
139 | "array([-4.01651487e-05, 2.08482051e+00, 1.39015547e+00])"
140 | ]
141 | },
142 | "execution_count": 5,
143 | "metadata": {},
144 | "output_type": "execute_result"
145 | }
146 | ],
147 | "source": [
148 | "# linear reweighting of tria eigenvalues\n",
149 | "shapedna.reweight_ev(ev[\"Eigenvalues\"])"
150 | ]
151 | },
152 | {
153 | "cell_type": "markdown",
154 | "metadata": {},
155 | "source": [
156 | "The normalized and re-weighted eigenvalues are called the ShapeDNA. We can now compute the distance between two shapes by comparing their ShapeDNA. The default is the Euclidean distance between two ShapeDNA vectors."
157 | ]
158 | },
159 | {
160 | "cell_type": "code",
161 | "execution_count": 6,
162 | "metadata": {},
163 | "outputs": [
164 | {
165 | "data": {
166 | "text/plain": [
167 | "0.0"
168 | ]
169 | },
170 | "execution_count": 6,
171 | "metadata": {},
172 | "output_type": "execute_result"
173 | }
174 | ],
175 | "source": [
176 | "# compute distance for tria eigenvalues (trivial case)\n",
177 | "shapedna.compute_distance(ev[\"Eigenvalues\"], ev[\"Eigenvalues\"])"
178 | ]
179 | },
180 | {
181 | "cell_type": "markdown",
182 | "metadata": {},
183 | "source": [
184 | "Note, that usually more eigenvalues are used (in the order of 15 to 50) for shape comparison. Also you can do other analyses, e.g. find clusters in this shape space or project it via PCA for visualization."
185 | ]
186 | },
187 | {
188 | "cell_type": "markdown",
189 | "metadata": {},
190 | "source": [
191 | "We now repeat the above steps for a tetrahedral mesh, again computing the first three eigenvalues and -vectors."
192 | ]
193 | },
194 | {
195 | "cell_type": "code",
196 | "execution_count": 7,
197 | "metadata": {},
198 | "outputs": [
199 | {
200 | "name": "stdout",
201 | "output_type": "stream",
202 | "text": [
203 | "TetMesh with regular Laplace\n",
204 | "Solver: spsolve (LU decomposition) ...\n"
205 | ]
206 | },
207 | {
208 | "data": {
209 | "text/plain": [
210 | "array([8.4440224e-05, 9.8897915e+00, 9.8898811e+00], dtype=float32)"
211 | ]
212 | },
213 | "execution_count": 7,
214 | "metadata": {},
215 | "output_type": "execute_result"
216 | }
217 | ],
218 | "source": [
219 | "# compute eigenvalues and eigenvectors for tet mesh\n",
220 | "evTet = shapedna.compute_shapedna(tet, k=3)\n",
221 | "evTet[\"Eigenvectors\"]\n",
222 | "evTet[\"Eigenvalues\"]"
223 | ]
224 | },
225 | {
226 | "cell_type": "markdown",
227 | "metadata": {},
228 | "source": [
229 | "For 3d meshes the \"geometry\" normalization defaults to unit volume normalization. Since the cube is already unit volume, nothing happens."
230 | ]
231 | },
232 | {
233 | "cell_type": "code",
234 | "execution_count": 8,
235 | "metadata": {},
236 | "outputs": [
237 | {
238 | "name": "stdout",
239 | "output_type": "stream",
240 | "text": [
241 | "Found 4800 triangles on boundary.\n",
242 | "Searched mesh after 79 flooding iterations (0.012834310531616211 sec).\n"
243 | ]
244 | },
245 | {
246 | "data": {
247 | "text/plain": [
248 | "array([8.4440224e-05, 9.8897915e+00, 9.8898811e+00], dtype=float32)"
249 | ]
250 | },
251 | "execution_count": 8,
252 | "metadata": {},
253 | "output_type": "execute_result"
254 | }
255 | ],
256 | "source": [
257 | "# volume / surface / geometry normalization of tet eigenvalues\n",
258 | "shapedna.normalize_ev(tet, evTet[\"Eigenvalues\"], method=\"geometry\")"
259 | ]
260 | },
261 | {
262 | "cell_type": "markdown",
263 | "metadata": {},
264 | "source": [
265 | "Again we perform linear re-weighting. This is only meaningful for small eigenvalues as the asymtotic trend of eigenvalues of 3d solids is not linear."
266 | ]
267 | },
268 | {
269 | "cell_type": "code",
270 | "execution_count": 9,
271 | "metadata": {},
272 | "outputs": [
273 | {
274 | "data": {
275 | "text/plain": [
276 | "array([8.44402239e-05, 4.94489574e+00, 3.29662704e+00])"
277 | ]
278 | },
279 | "execution_count": 9,
280 | "metadata": {},
281 | "output_type": "execute_result"
282 | }
283 | ],
284 | "source": [
285 | "# linear reweighting of tet eigenvalues\n",
286 | "shapedna.reweight_ev(evTet[\"Eigenvalues\"])"
287 | ]
288 | },
289 | {
290 | "cell_type": "markdown",
291 | "metadata": {},
292 | "source": [
293 | "Now that we have the ShapeDNA of the 3D solid cube, we can compare it to other ShapeDNA (or to itself, which of course yields zero)."
294 | ]
295 | },
296 | {
297 | "cell_type": "code",
298 | "execution_count": 10,
299 | "metadata": {},
300 | "outputs": [
301 | {
302 | "data": {
303 | "text/plain": [
304 | "0.0"
305 | ]
306 | },
307 | "execution_count": 10,
308 | "metadata": {},
309 | "output_type": "execute_result"
310 | }
311 | ],
312 | "source": [
313 | "# compute distance for tria eigenvalues (trivial case)\n",
314 | "shapedna.compute_distance(evTet[\"Eigenvalues\"], evTet[\"Eigenvalues\"])"
315 | ]
316 | }
317 | ],
318 | "metadata": {
319 | "kernelspec": {
320 | "display_name": "Python3",
321 | "language": "python",
322 | "name": "python3"
323 | },
324 | "language_info": {
325 | "codemirror_mode": {
326 | "name": "ipython",
327 | "version": 3
328 | },
329 | "file_extension": ".py",
330 | "mimetype": "text/x-python",
331 | "name": "python",
332 | "nbconvert_exporter": "python",
333 | "pygments_lexer": "ipython3",
334 | "version": "3"
335 | }
336 | },
337 | "nbformat": 4,
338 | "nbformat_minor": 4
339 | }
340 |
--------------------------------------------------------------------------------
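A compact way to use the pipeline demonstrated above is to wrap the compute / normalize / re-weight steps into a helper. The sketch below uses only the calls shown in this notebook; the helper name `shapedna_descriptor` is our own, not part of LaPy.

```python
from lapy import TriaMesh, shapedna


def shapedna_descriptor(mesh, k=15):
    """Return normalized, linearly re-weighted eigenvalues (the ShapeDNA)."""
    ev = shapedna.compute_shapedna(mesh, k=k)  # Laplace-Beltrami spectrum
    vals = shapedna.normalize_ev(mesh, ev["Eigenvalues"], method="geometry")
    return shapedna.reweight_ev(vals)  # damp higher, noisier eigenvalues


tria = TriaMesh.read_vtk("../data/cubeTria.vtk")
# comparing a shape with itself yields distance zero
print(shapedna.compute_distance(shapedna_descriptor(tria), shapedna_descriptor(tria)))
```

--------------------------------------------------------------------------------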
/examples/Test_TetMesh.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# Tetrahedral Mesh"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "from lapy import TetMesh"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "First, instead of loading, we define a small tetrahedral mesh representing a cube with a center vertex and twelve tetrahedra."
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": 2,
29 | "metadata": {},
30 | "outputs": [],
31 | "source": [
32 | "# cube9 (cube with center node)\n",
33 | "points = [\n",
34 | " [0, 0, 0],\n",
35 | " [1, 0, 0],\n",
36 | " [1, 1, 0],\n",
37 | " [0, 1, 0],\n",
38 | " [0, 0, 1],\n",
39 | " [1, 0, 1],\n",
40 | " [1, 1, 1],\n",
41 | " [0, 1, 1],\n",
42 | " [0.5, 0.5, 0.5],\n",
43 | "]\n",
44 | "tets = [\n",
45 | " [0, 5, 8, 1],\n",
46 | " [0, 4, 5, 8],\n",
47 | " [2, 5, 6, 8],\n",
48 | " [1, 5, 2, 8],\n",
49 | " [6, 7, 3, 8],\n",
50 | " [6, 3, 2, 8],\n",
51 | " [0, 3, 4, 8],\n",
52 | " [3, 7, 4, 8],\n",
53 | " [0, 1, 2, 8],\n",
54 | " [0, 2, 3, 8],\n",
55 | " [4, 6, 5, 8],\n",
56 | " [4, 7, 6, 8],\n",
57 | "]\n",
58 | "T = TetMesh(points, tets)"
59 | ]
60 | },
61 | {
62 | "cell_type": "markdown",
63 | "metadata": {},
64 | "source": [
65 | "Note, that we flipped the first tetrahedron vertex order on purpose (it should be 0,5,1,8) to test and correct orientation below."
66 | ]
67 | },
68 | {
69 | "cell_type": "markdown",
70 | "metadata": {},
71 | "source": [
72 | "We can check if our tet mesh has free vertices (these are vertices that are not used in any tetrahedron)."
73 | ]
74 | },
75 | {
76 | "cell_type": "code",
77 | "execution_count": 3,
78 | "metadata": {},
79 | "outputs": [
80 | {
81 | "data": {
82 | "text/plain": [
83 | "False"
84 | ]
85 | },
86 | "execution_count": 3,
87 | "metadata": {},
88 | "output_type": "execute_result"
89 | }
90 | ],
91 | "source": [
92 | "T.has_free_vertices()"
93 | ]
94 | },
95 | {
96 | "cell_type": "markdown",
97 | "metadata": {},
98 | "source": [
99 | "No free vertices are found, so we cannot remove any and the attempt will keep all 9."
100 | ]
101 | },
102 | {
103 | "cell_type": "code",
104 | "execution_count": 4,
105 | "metadata": {},
106 | "outputs": [
107 | {
108 | "data": {
109 | "text/plain": [
110 | "(array([0, 1, 2, 3, 4, 5, 6, 7, 8]), [])"
111 | ]
112 | },
113 | "execution_count": 4,
114 | "metadata": {},
115 | "output_type": "execute_result"
116 | }
117 | ],
118 | "source": [
119 | "T.rm_free_vertices_()"
120 | ]
121 | },
122 | {
123 | "cell_type": "markdown",
124 | "metadata": {},
125 | "source": [
126 | "Let's see next, if we have consistent orientations (this should fail)."
127 | ]
128 | },
129 | {
130 | "cell_type": "code",
131 | "execution_count": 5,
132 | "metadata": {},
133 | "outputs": [
134 | {
135 | "name": "stdout",
136 | "output_type": "stream",
137 | "text": [
138 | "Orientations are not uniform\n"
139 | ]
140 | },
141 | {
142 | "data": {
143 | "text/plain": [
144 | "False"
145 | ]
146 | },
147 | "execution_count": 5,
148 | "metadata": {},
149 | "output_type": "execute_result"
150 | }
151 | ],
152 | "source": [
153 | "T.is_oriented()"
154 | ]
155 | },
156 | {
157 | "cell_type": "markdown",
158 | "metadata": {},
159 | "source": [
160 | "Some functions don't care about the orientation, for example the average edge length computation."
161 | ]
162 | },
163 | {
164 | "cell_type": "code",
165 | "execution_count": 6,
166 | "metadata": {},
167 | "outputs": [
168 | {
169 | "data": {
170 | "text/plain": [
171 | "1.0543647924813107"
172 | ]
173 | },
174 | "execution_count": 6,
175 | "metadata": {},
176 | "output_type": "execute_result"
177 | }
178 | ],
179 | "source": [
180 | "T.avg_edge_length()"
181 | ]
182 | },
183 | {
184 | "cell_type": "markdown",
185 | "metadata": {},
186 | "source": [
187 | "We can also get the boundary of the tet mesh as a triangle mesh."
188 | ]
189 | },
190 | {
191 | "cell_type": "code",
192 | "execution_count": 7,
193 | "metadata": {},
194 | "outputs": [
195 | {
196 | "name": "stdout",
197 | "output_type": "stream",
198 | "text": [
199 | "Found 12 triangles on boundary.\n"
200 | ]
201 | }
202 | ],
203 | "source": [
204 | "BT = T.boundary_tria()"
205 | ]
206 | },
207 | {
208 | "cell_type": "markdown",
209 | "metadata": {},
210 | "source": [
211 | "But also the boundary is not oriented consistently (triangle normals of neighboring triangles point in opposite directions)."
212 | ]
213 | },
214 | {
215 | "cell_type": "code",
216 | "execution_count": 8,
217 | "metadata": {},
218 | "outputs": [
219 | {
220 | "data": {
221 | "text/plain": [
222 | "False"
223 | ]
224 | },
225 | "execution_count": 8,
226 | "metadata": {},
227 | "output_type": "execute_result"
228 | }
229 | ],
230 | "source": [
231 | "BT.is_oriented()"
232 | ]
233 | },
234 | {
235 | "cell_type": "markdown",
236 | "metadata": {},
237 | "source": [
238 | "Let's repeat those steps after correcting the orientation in the tet mesh."
239 | ]
240 | },
241 | {
242 | "cell_type": "code",
243 | "execution_count": 9,
244 | "metadata": {},
245 | "outputs": [
246 | {
247 | "name": "stdout",
248 | "output_type": "stream",
249 | "text": [
250 | "Flipped 1 tetrahedra\n",
251 | "All tet orientations are correct\n"
252 | ]
253 | },
254 | {
255 | "data": {
256 | "text/plain": [
257 | "True"
258 | ]
259 | },
260 | "execution_count": 9,
261 | "metadata": {},
262 | "output_type": "execute_result"
263 | }
264 | ],
265 | "source": [
266 | "T.orient_()\n",
267 | "T.is_oriented()"
268 | ]
269 | },
270 | {
271 | "cell_type": "markdown",
272 | "metadata": {},
273 | "source": [
274 | "When we extract the boundary surface now, we see it is also consistently oriented."
275 | ]
276 | },
277 | {
278 | "cell_type": "code",
279 | "execution_count": 10,
280 | "metadata": {},
281 | "outputs": [
282 | {
283 | "name": "stdout",
284 | "output_type": "stream",
285 | "text": [
286 | "Found 12 triangles on boundary.\n"
287 | ]
288 | },
289 | {
290 | "data": {
291 | "text/plain": [
292 | "True"
293 | ]
294 | },
295 | "execution_count": 10,
296 | "metadata": {},
297 | "output_type": "execute_result"
298 | }
299 | ],
300 | "source": [
301 | "BT = T.boundary_tria()\n",
302 | "BT.is_oriented()"
303 | ]
304 | },
305 | {
306 | "cell_type": "markdown",
307 | "metadata": {},
308 | "source": [
309 | "Correct orientation is needed, e.g., to compute the volume of a surface mesh."
310 | ]
311 | },
312 | {
313 | "cell_type": "code",
314 | "execution_count": 11,
315 | "metadata": {},
316 | "outputs": [
317 | {
318 | "data": {
319 | "text/plain": [
320 | "1.0"
321 | ]
322 | },
323 | "execution_count": 11,
324 | "metadata": {},
325 | "output_type": "execute_result"
326 | }
327 | ],
328 | "source": [
329 | "BT.volume()"
330 | ]
331 | }
332 | ],
333 | "metadata": {
334 | "kernelspec": {
335 | "display_name": "Python3",
336 | "language": "python",
337 | "name": "python3"
338 | },
339 | "language_info": {
340 | "codemirror_mode": {
341 | "name": "ipython",
342 | "version": 3
343 | },
344 | "file_extension": ".py",
345 | "mimetype": "text/x-python",
346 | "name": "python",
347 | "nbconvert_exporter": "python",
348 | "pygments_lexer": "ipython3",
349 | "version": "3"
350 | }
351 | },
352 | "nbformat": 4,
353 | "nbformat_minor": 4
354 | }
355 |
--------------------------------------------------------------------------------
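The orientation workflow above (remove free vertices, check and fix the orientation, extract the boundary, measure the enclosed volume) condenses to a few lines. A minimal sketch using only the methods shown in this notebook; the helper name `enclosed_volume` is our own.

```python
from lapy import TetMesh


def enclosed_volume(points, tets):
    """Orient a tet mesh consistently and return the volume enclosed by its boundary."""
    T = TetMesh(points, tets)
    T.rm_free_vertices_()   # drop vertices not used by any tetrahedron
    if not T.is_oriented():
        T.orient_()         # flip mis-oriented tetrahedra in place
    BT = T.boundary_tria()  # boundary surface as a TriaMesh
    return BT.volume()      # requires consistently oriented triangles
```

--------------------------------------------------------------------------------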
/examples/Test_TetMesh_Geodesics.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# TetMesh Geodesics"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {},
14 | "outputs": [
15 | {
16 | "data": {
17 | "text/html": [
18 | " \n",
33 | " "
34 | ]
35 | },
36 | "metadata": {},
37 | "output_type": "display_data"
38 | }
39 | ],
40 | "source": [
41 | "import numpy as np\n",
42 | "\n",
43 | "# import plotly\n",
44 | "# plotly.offline.init_notebook_mode(connected=True)\n",
45 | "import plotly.io as pio\n",
46 | "\n",
47 | "from lapy import TetMesh\n",
48 | "from lapy.plot import plot_tet_mesh\n",
49 | "\n",
50 | "pio.renderers.default = \"sphinx_gallery\""
51 | ]
52 | },
53 | {
54 | "cell_type": "markdown",
55 | "metadata": {},
56 | "source": [
57 | "First we need a TetMesh, so lets open a cube with 48K tetrahedra and make sure it is oriented consistently."
58 | ]
59 | },
60 | {
61 | "cell_type": "code",
62 | "execution_count": 2,
63 | "metadata": {},
64 | "outputs": [
65 | {
66 | "name": "stdout",
67 | "output_type": "stream",
68 | "text": [
69 | "--> VTK format ... \n",
70 | " --> DONE ( V: 9261 , T: 48000 )\n",
71 | "\n",
72 | "Flipped 24000 tetrahedra\n"
73 | ]
74 | },
75 | {
76 | "data": {
77 | "text/plain": [
78 | "24000"
79 | ]
80 | },
81 | "execution_count": 2,
82 | "metadata": {},
83 | "output_type": "execute_result"
84 | }
85 | ],
86 | "source": [
87 | "T = TetMesh.read_vtk(\"../data/cubeTetra.vtk\")\n",
88 | "# T.is_oriented()\n",
89 | "T.orient_()"
90 | ]
91 | },
92 | {
93 | "cell_type": "markdown",
94 | "metadata": {},
95 | "source": [
96 | "## Laplace"
97 | ]
98 | },
99 | {
100 | "cell_type": "markdown",
101 | "metadata": {},
102 | "source": [
103 | "Next we solve the Laplace eigenvalue problem to get 10 eigenvalues and -vectors/functions."
104 | ]
105 | },
106 | {
107 | "cell_type": "code",
108 | "execution_count": 3,
109 | "metadata": {},
110 | "outputs": [
111 | {
112 | "name": "stdout",
113 | "output_type": "stream",
114 | "text": [
115 | "TetMesh with regular Laplace\n",
116 | "Solver: spsolve (LU decomposition) ...\n"
117 | ]
118 | }
119 | ],
120 | "source": [
121 | "from lapy import Solver\n",
122 | "\n",
123 | "fem = Solver(T, lump=True)\n",
124 | "\n",
125 | "evals, evec = fem.eigs(10)"
126 | ]
127 | },
128 | {
129 | "cell_type": "markdown",
130 | "metadata": {},
131 | "source": [
132 | "To better see the first non-constant function also in the interior we slice the cube at x<0.5."
133 | ]
134 | },
135 | {
136 | "cell_type": "code",
137 | "execution_count": 4,
138 | "metadata": {},
139 | "outputs": [],
140 | "source": [
141 | "# also get A,B (lumped), and inverse of B (easy as it is diagonal)\n",
142 | "A, B = fem.stiffness, fem.mass\n",
143 | "Bi = B.copy()\n",
144 | "Bi.data **= -1"
145 | ]
146 | },
147 | {
148 | "cell_type": "code",
149 | "execution_count": 5,
150 | "metadata": {
151 | "scrolled": true
152 | },
153 | "outputs": [],
154 | "source": [
155 | "evnum = 1\n",
156 | "cutting = [\"x<0.5\"]\n",
157 | "# also here we comment all plots to reduce file size\n",
158 | "# uncomment and take a look\n",
159 | "plot_tet_mesh(\n",
160 | " T,\n",
161 | " vfunc=evals[evnum] * evec[:, evnum],\n",
162 | " plot_edges=None,\n",
163 | " plot_levels=False,\n",
164 | " cutting=cutting,\n",
165 | " edge_color=\"rgb(50,50,50)\",\n",
166 | " html_output=False,\n",
167 | " flatshading=True,\n",
168 | ")"
169 | ]
170 | },
171 | {
172 | "cell_type": "markdown",
173 | "metadata": {},
174 | "source": [
175 | "Similar to the triangle case, computing the - divergence of the gradient of an eigenfunctions (and multiplying with inv(B)) yields a scaled version of that function."
176 | ]
177 | },
178 | {
179 | "cell_type": "code",
180 | "execution_count": 6,
181 | "metadata": {},
182 | "outputs": [],
183 | "source": [
184 | "from lapy.diffgeo import compute_divergence, compute_gradient\n",
185 | "\n",
186 | "grad = compute_gradient(T, evec[:, evnum])\n",
187 | "divx = -compute_divergence(T, grad)\n",
188 | "vfunc = Bi * divx"
189 | ]
190 | },
191 | {
192 | "cell_type": "code",
193 | "execution_count": 7,
194 | "metadata": {},
195 | "outputs": [],
196 | "source": [
197 | "cutting = [\"x<0.5\"]\n",
198 | "plot_tet_mesh(\n",
199 | " T,\n",
200 | " vfunc=vfunc,\n",
201 | " plot_edges=None,\n",
202 | " plot_levels=False,\n",
203 | " cutting=cutting,\n",
204 | " edge_color=\"rgb(50,50,50)\",\n",
205 | " html_output=False,\n",
206 | " flatshading=True,\n",
207 | ")"
208 | ]
209 | },
210 | {
211 | "cell_type": "markdown",
212 | "metadata": {},
213 | "source": [
214 | "In fact, it is scaled by the eigenvalue."
215 | ]
216 | },
217 | {
218 | "cell_type": "code",
219 | "execution_count": 8,
220 | "metadata": {},
221 | "outputs": [
222 | {
223 | "data": {
224 | "text/plain": [
225 | "0.0059814453"
226 | ]
227 | },
228 | "execution_count": 8,
229 | "metadata": {},
230 | "output_type": "execute_result"
231 | }
232 | ],
233 | "source": [
234 | "np.max(np.abs(vfunc - (evals[evnum] * evec[:, evnum])))"
235 | ]
236 | },
237 | {
238 | "cell_type": "markdown",
239 | "metadata": {},
240 | "source": [
241 | "## Geodesics"
242 | ]
243 | },
244 | {
245 | "cell_type": "markdown",
246 | "metadata": {},
247 | "source": [
248 | "Now we run a heat diffusion, applying initial heat to the boundary of the cube."
249 | ]
250 | },
251 | {
252 | "cell_type": "code",
253 | "execution_count": 9,
254 | "metadata": {},
255 | "outputs": [
256 | {
257 | "name": "stdout",
258 | "output_type": "stream",
259 | "text": [
260 | "Found 4800 triangles on boundary.\n",
261 | "TetMesh with regular Laplace\n",
262 | "Matrix Format now: csc\n",
263 | "Solver: spsolve (LU decomposition) ...\n"
264 | ]
265 | }
266 | ],
267 | "source": [
268 | "from lapy import heat\n",
269 | "\n",
270 | "tria = T.boundary_tria()\n",
271 | "bvert = np.unique(tria.t)\n",
272 | "\n",
273 | "u = heat.diffusion(T, bvert, m=1)\n",
274 | "cutting = [\"x<0.5\"]\n",
275 | "plot_tet_mesh(\n",
276 | " T,\n",
277 | " vfunc=u,\n",
278 | " plot_edges=None,\n",
279 | " plot_levels=True,\n",
280 | " cutting=cutting,\n",
281 | " edge_color=\"rgb(50,50,50)\",\n",
282 | " html_output=False,\n",
283 | " flatshading=True,\n",
284 | ")"
285 | ]
286 | },
287 | {
288 | "cell_type": "markdown",
289 | "metadata": {},
290 | "source": [
291 | "You can see that we get level sets that are not evenly spaced and dense along the boundary. Next we compute the gradient of this heat diffusion, normalize it, and compute the divergence of this normalized gradient."
292 | ]
293 | },
294 | {
295 | "cell_type": "code",
296 | "execution_count": 10,
297 | "metadata": {},
298 | "outputs": [],
299 | "source": [
300 | "# get gradients\n",
301 | "tfunc = compute_gradient(T, u)\n",
302 | "# flip and normalize\n",
303 | "X = -tfunc / np.sqrt((tfunc**2).sum(1))[:, np.newaxis]\n",
304 | "X = np.nan_to_num(X)\n",
305 | "# compute divergence\n",
306 | "divx = compute_divergence(T, X)"
307 | ]
308 | },
309 | {
310 | "cell_type": "markdown",
311 | "metadata": {},
312 | "source": [
313 | "Finally, we need to solve a Poisson equation to obtain a function that has these normalized gradients (and remove the remaining shift)."
314 | ]
315 | },
316 | {
317 | "cell_type": "code",
318 | "execution_count": 11,
319 | "metadata": {},
320 | "outputs": [
321 | {
322 | "name": "stdout",
323 | "output_type": "stream",
324 | "text": [
325 | "Matrix Format now: csc\n",
326 | "Solver: cholesky decomp - performance optimal ...\n"
327 | ]
328 | }
329 | ],
330 | "source": [
331 | "# compute distance\n",
332 | "from scipy.sparse.linalg import splu\n",
333 | "\n",
334 | "useCholmod = True\n",
335 | "try:\n",
336 | " from sksparse.cholmod import cholesky\n",
337 | "except ImportError:\n",
338 | " useCholmod = False\n",
339 | "\n",
340 | "A, B = fem.stiffness, fem.mass # computed above when creating Solver\n",
341 | "\n",
342 | "H = A\n",
343 | "b0 = -divx\n",
344 | "\n",
345 | "# solve H x = b0\n",
346 | "print(\"Matrix Format now: \" + H.getformat())\n",
347 | "if useCholmod:\n",
348 | " print(\"Solver: cholesky decomp - performance optimal ...\")\n",
349 | " chol = cholesky(H)\n",
350 | " x = chol(b0)\n",
351 | "else:\n",
352 | " print(\"Solver: spsolve (LU decomp) - performance not optimal ...\")\n",
353 | " lu = splu(H)\n",
354 | " x = lu.solve(b0)\n",
355 | "\n",
356 | "x = x - np.min(x)"
357 | ]
358 | },
359 | {
360 | "cell_type": "code",
361 | "execution_count": 12,
362 | "metadata": {},
363 | "outputs": [
364 | {
365 | "data": {
366 | "text/plain": [
367 | "(0.6993174268615026, 0.8660254037844386)"
368 | ]
369 | },
370 | "execution_count": 12,
371 | "metadata": {},
372 | "output_type": "execute_result"
373 | }
374 | ],
375 | "source": [
376 | "cutting = [\"x<0.5\"]\n",
377 | "plot_tet_mesh(\n",
378 | " T,\n",
379 | " vfunc=x,\n",
380 | " plot_edges=None,\n",
381 | " plot_levels=True,\n",
382 | " cutting=cutting,\n",
383 | " edge_color=\"rgb(50,50,50)\",\n",
384 | " html_output=False,\n",
385 | " flatshading=True,\n",
386 | ")\n",
387 | "max(x), 0.5 * np.sqrt(3.0)"
388 | ]
389 | },
390 | {
391 | "cell_type": "markdown",
392 | "metadata": {},
393 | "source": [
394 | "This results in equally spaced level sets. Instead of solving this manually, we can get the same by simply computing the heat diffusion and the distance function directly."
395 | ]
396 | },
397 | {
398 | "cell_type": "code",
399 | "execution_count": 13,
400 | "metadata": {},
401 | "outputs": [
402 | {
403 | "name": "stdout",
404 | "output_type": "stream",
405 | "text": [
406 | "Found 4800 triangles on boundary.\n",
407 | "TetMesh with regular Laplace\n",
408 | "Matrix Format now: csc\n",
409 | "Solver: spsolve (LU decomposition) ...\n",
410 | "TetMesh with regular Laplace\n",
411 | "Matrix Format now: csc\n",
412 | "Solver: spsolve (LU decomposition) ...\n"
413 | ]
414 | }
415 | ],
416 | "source": [
417 | "from lapy import heat\n",
418 | "from lapy.diffgeo import compute_geodesic_f\n",
419 | "\n",
420 | "tria = T.boundary_tria()\n",
421 | "bvert = np.unique(tria.t)\n",
422 | "\n",
423 | "# get heat diffusion\n",
424 | "u = heat.diffusion(T, bvert, m=1)\n",
425 | "\n",
426 | "gu = compute_geodesic_f(T, u)\n",
427 | "\n",
428 | "cutting = [\"x<0.5\"]\n",
429 | "plot_tet_mesh(\n",
430 | " T,\n",
431 | " vfunc=gu,\n",
432 | " plot_edges=None,\n",
433 | " plot_levels=True,\n",
434 | " cutting=cutting,\n",
435 | " edge_color=\"rgb(50,50,50)\",\n",
436 | " html_output=False,\n",
437 | " flatshading=True,\n",
438 | ")"
439 | ]
440 | },
441 | {
442 | "cell_type": "markdown",
443 | "metadata": {},
444 | "source": [
445 | "Finally, we want to explore the gradient and divergence functions a little more. Here we construct the gradient of a function that computes the squared distance to each vertex (x^2+y^2+z^2). As the color of each tetrahedon we set the z component of the gradient which should be 2z (or you could try any other value, such as the gradient length)."
446 | ]
447 | },
448 | {
449 | "cell_type": "code",
450 | "execution_count": 14,
451 | "metadata": {},
452 | "outputs": [],
453 | "source": [
454 | "# test function is squared distance to each vertex\n",
455 | "v1func = T.v[:, 0] * T.v[:, 0] + T.v[:, 1] * T.v[:, 1] + T.v[:, 2] * T.v[:, 2]\n",
456 | "\n",
457 | "grad = compute_gradient(T, v1func)\n",
458 | "# glength = np.sqrt(np.sum(grad * grad, axis=1))\n",
459 | "# fcols=glength\n",
460 | "fcols = grad[:, 2]\n",
461 | "# cutting = ['x<0.5']\n",
462 | "cutting = None\n",
463 | "plot_tet_mesh(\n",
464 | " T,\n",
465 | " vfunc=None,\n",
466 | " tfunc=fcols,\n",
467 | " plot_edges=None,\n",
468 | " plot_levels=False,\n",
469 | " cutting=cutting,\n",
470 | " edge_color=\"rgb(50,50,50)\",\n",
471 | " html_output=False,\n",
472 | ")"
473 | ]
474 | },
475 | {
476 | "cell_type": "markdown",
477 | "metadata": {},
478 | "source": [
479 | "Now let's look at the divergence. While the gradient is constant for each tetrahedron, the divergence is a scalar function again, summing up the partial derivatives of the gradient components. In our case it should be 2+2+2=6."
480 | ]
481 | },
482 | {
483 | "cell_type": "code",
484 | "execution_count": 15,
485 | "metadata": {},
486 | "outputs": [],
487 | "source": [
488 | "divx = compute_divergence(T, grad)\n",
489 | "divx2 = Bi * divx\n",
490 | "cutting = [\"z<0.5\"]\n",
491 | "plot_tet_mesh(\n",
492 | " T,\n",
493 | " vfunc=divx2,\n",
494 | " plot_edges=True,\n",
495 | " plot_levels=False,\n",
496 | " cutting=cutting,\n",
497 | " edge_color=\"rgb(50,50,50)\",\n",
498 | " html_output=False,\n",
499 | " flatshading=True,\n",
500 | " caxis=[0, 8],\n",
501 | ")"
502 | ]
503 | },
504 | {
505 | "cell_type": "code",
506 | "execution_count": 16,
507 | "metadata": {},
508 | "outputs": [
509 | {
510 | "data": {
511 | "text/plain": [
512 | "array([5.9999948, 6.0000215, 6.0000215, 5.999988 , 6.000053 , 5.999975 ,\n",
513 | " 5.9999676, 6.000024 , 6.000013 , 6.000008 ], dtype=float32)"
514 | ]
515 | },
516 | "execution_count": 16,
517 | "metadata": {},
518 | "output_type": "execute_result"
519 | }
520 | ],
521 | "source": [
522 | "divx2[5000:5010]"
523 | ]
524 | }
525 | ],
526 | "metadata": {
527 | "kernelspec": {
528 | "display_name": "Python3",
529 | "language": "python",
530 | "name": "python3"
531 | },
532 | "language_info": {
533 | "codemirror_mode": {
534 | "name": "ipython",
535 | "version": 3
536 | },
537 | "file_extension": ".py",
538 | "mimetype": "text/x-python",
539 | "name": "python",
540 | "nbconvert_exporter": "python",
541 | "pygments_lexer": "ipython3",
542 | "version": "3"
543 | },
544 | "nbsphinx": {
545 | "execute": "always"
546 | }
547 | },
548 | "nbformat": 4,
549 | "nbformat_minor": 4
550 | }
551 |
--------------------------------------------------------------------------------
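The manual pipeline above (heat diffusion, gradient normalization, divergence, Poisson solve) and the `compute_geodesic_f` shortcut produce the same distance function. A minimal sketch of the shortcut path, using only functions from this notebook; the helper name `distance_to_boundary` is our own.

```python
import numpy as np

from lapy import TetMesh, heat
from lapy.diffgeo import compute_geodesic_f


def distance_to_boundary(T, m=1):
    """Approximate geodesic distance from the boundary of a tet mesh."""
    bvert = np.unique(T.boundary_tria().t)  # boundary vertex ids
    u = heat.diffusion(T, bvert, m=m)       # heat flow from the boundary
    return compute_geodesic_f(T, u)         # normalize gradients, solve Poisson


T = TetMesh.read_vtk("../data/cubeTetra.vtk")
T.orient_()
d = distance_to_boundary(T)
print(d.max())  # compare with max(x) in the notebook above
```

--------------------------------------------------------------------------------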
/examples/Test_TriaMesh_Geodesics.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "# TriaMesh Geodesics"
8 | ]
9 | },
10 | {
11 | "cell_type": "code",
12 | "execution_count": 1,
13 | "metadata": {},
14 | "outputs": [],
15 | "source": [
16 | "from lapy import TriaMesh"
17 | ]
18 | },
19 | {
20 | "cell_type": "markdown",
21 | "metadata": {},
22 | "source": [
23 | "Load a triangle mesh of a flat square (OFF file)."
24 | ]
25 | },
26 | {
27 | "cell_type": "code",
28 | "execution_count": 2,
29 | "metadata": {},
30 | "outputs": [
31 | {
32 | "name": "stdout",
33 | "output_type": "stream",
34 | "text": [
35 | "--> OFF format ... \n",
36 | " --> DONE ( V: 415 , T: 768 )\n"
37 | ]
38 | },
39 | {
40 | "data": {
41 | "text/plain": [
42 | "'TriaMesh'"
43 | ]
44 | },
45 | "execution_count": 2,
46 | "metadata": {},
47 | "output_type": "execute_result"
48 | }
49 | ],
50 | "source": [
51 | "T = TriaMesh.read_off(\"../data/square-mesh.off\")\n",
52 | "type(T).__name__"
53 | ]
54 | },
55 | {
56 | "cell_type": "code",
57 | "execution_count": 3,
58 | "metadata": {},
59 | "outputs": [
60 | {
61 | "data": {
62 | "text/html": [
63 | " \n",
78 | " "
79 | ]
80 | },
81 | "metadata": {},
82 | "output_type": "display_data"
83 | }
84 | ],
85 | "source": [
86 | "# import plotting functions\n",
87 | "# import plotly\n",
88 | "# plotly.offline.init_notebook_mode(connected=True)\n",
89 | "import plotly.io as pio\n",
90 | "\n",
91 | "from lapy.plot import plot_tria_mesh\n",
92 | "\n",
93 | "pio.renderers.default = \"sphinx_gallery\""
94 | ]
95 | },
96 | {
97 | "cell_type": "markdown",
98 | "metadata": {},
99 | "source": [
100 | "We now plot the triangle mesh with a function overlay of the triangle quality. Note, that this is a function attached to the triangles, not the vertices, so it is piecewise flat."
101 | ]
102 | },
103 | {
104 | "cell_type": "code",
105 | "execution_count": 4,
106 | "metadata": {},
107 | "outputs": [],
108 | "source": [
109 | "q = T.tria_qualities()\n",
110 | "plot_tria_mesh(T, plot_edges=True, tfunc=q)"
111 | ]
112 | },
113 | {
114 | "cell_type": "markdown",
115 | "metadata": {},
116 | "source": [
117 | "## Laplace"
118 | ]
119 | },
120 | {
121 | "cell_type": "markdown",
122 | "metadata": {},
123 | "source": [
124 | "Next, we check a few properties of eigenfunctions. For this we get the first few and also the stiffness matrix (A) and the lumped mass matrix (B, reduced to a diagonal), of which we can easily compute the inverse."
125 | ]
126 | },
127 | {
128 | "cell_type": "code",
129 | "execution_count": 5,
130 | "metadata": {},
131 | "outputs": [
132 | {
133 | "name": "stdout",
134 | "output_type": "stream",
135 | "text": [
136 | "TriaMesh with regular Laplace-Beltrami\n",
137 | "Solver: spsolve (LU decomposition) ...\n"
138 | ]
139 | }
140 | ],
141 | "source": [
142 | "# compute first eigenfunction\n",
143 | "from lapy import Solver\n",
144 | "\n",
145 | "fem = Solver(T, lump=True)\n",
146 | "eval, evec = fem.eigs()\n",
147 | "vfunc = evec[:, 1]\n",
148 | "\n",
149 | "# also get A,B (lumped), and inverse of B (easy as it is diagonal due to lumping)\n",
150 | "A, B = fem.stiffness, fem.mass\n",
151 | "Bi = B.copy()\n",
152 | "Bi.data **= -1"
153 | ]
154 | },
155 | {
156 | "cell_type": "markdown",
157 | "metadata": {},
158 | "source": [
159 | "The mass matrix B represents the inner product so that the integral of the product of two functions x and y over the whole mesh is x B y'. The lumped mass matrix that we use here is a simplified version, as all off-diagonal elements are added to the diagonal. The entries on the diagonal represent the vertex areas and their sum is the total area of the mesh. For our unit square it will be 1."
160 | ]
161 | },
162 | {
163 | "cell_type": "code",
164 | "execution_count": 6,
165 | "metadata": {},
166 | "outputs": [
167 | {
168 | "data": {
169 | "text/plain": [
170 | "1.0"
171 | ]
172 | },
173 | "execution_count": 6,
174 | "metadata": {},
175 | "output_type": "execute_result"
176 | }
177 | ],
178 | "source": [
179 | "B.sum()"
180 | ]
181 | },
182 | {
183 | "cell_type": "markdown",
184 | "metadata": {},
185 | "source": [
186 | "Let's see what happens when we apply the Laplace operator to an eigenfunction. Eigenfunctions are solutions to Delta f = - lambda f, so we should obtain a scaled version of f."
187 | ]
188 | },
189 | {
190 | "cell_type": "code",
191 | "execution_count": 7,
192 | "metadata": {},
193 | "outputs": [],
194 | "source": [
195 | "plot_tria_mesh(T, Bi * (A * vfunc), plot_edges=True)"
196 | ]
197 | },
198 | {
199 | "cell_type": "markdown",
200 | "metadata": {},
201 | "source": [
202 | "This is the same as the corresponding eigenvalue times the eigenfunction."
203 | ]
204 | },
205 | {
206 | "cell_type": "code",
207 | "execution_count": 8,
208 | "metadata": {},
209 | "outputs": [],
210 | "source": [
211 | "plot_tria_mesh(T, eval[1] * vfunc, plot_edges=True)"
212 | ]
213 | },
214 | {
215 | "cell_type": "markdown",
216 | "metadata": {},
217 | "source": [
218 | "Laplace is also defined as the -div(grad(f)). So first applying the gradient and then the divergence to an eigenfunction and then multiplying with inv(B) should yield the same result as above again. Note, that multiplying with inv(B) is necessary to get back from the integrated divergence to the original function."
219 | ]
220 | },
221 | {
222 | "cell_type": "code",
223 | "execution_count": 9,
224 | "metadata": {},
225 | "outputs": [],
226 | "source": [
227 | "from lapy.diffgeo import compute_divergence, compute_gradient\n",
228 | "\n",
229 | "grad = compute_gradient(T, vfunc)\n",
230 | "divx = -compute_divergence(T, grad)\n",
231 | "plot_tria_mesh(T, Bi * divx, plot_edges=True)"
232 | ]
233 | },
234 | {
235 | "cell_type": "markdown",
236 | "metadata": {},
237 | "source": [
238 | "## Geodesics\n",
239 | "\n",
240 | "Now we will replicate the idea of geodesics in heat, where first a heat diffusion is solved and massaged in the right way to yield an approximation of geodesics on the mesh. This also works on curved meshes, but for simplicity we keep using the square here. So let's start with computing the heat diffusion from the boundary (with default time factor m=1)."
241 | ]
242 | },
243 | {
244 | "cell_type": "code",
245 | "execution_count": 10,
246 | "metadata": {},
247 | "outputs": [
248 | {
249 | "name": "stdout",
250 | "output_type": "stream",
251 | "text": [
252 | "TriaMesh with regular Laplace-Beltrami\n",
253 | "Matrix Format now: csc\n",
254 | "Solver: spsolve (LU decomposition) ...\n"
255 | ]
256 | }
257 | ],
258 | "source": [
259 | "from lapy import heat\n",
260 | "\n",
261 | "bvert = T.boundary_loops()\n",
262 | "u = heat.diffusion(T, bvert, m=1)"
263 | ]
264 | },
265 | {
266 | "cell_type": "markdown",
267 | "metadata": {},
268 | "source": [
269 | "We show some of the level sets. Note, that they are not evenly spaced and get steeper closer to the boundary."
270 | ]
271 | },
272 | {
273 | "cell_type": "code",
274 | "execution_count": 11,
275 | "metadata": {},
276 | "outputs": [],
277 | "source": [
278 | "plot_tria_mesh(T, u, plot_edges=True, plot_levels=True)"
279 | ]
280 | },
281 | {
282 | "cell_type": "markdown",
283 | "metadata": {},
284 | "source": [
285 | "Next step is to compute the gradient (vector field) of the heat diffusion function and normalize all vectors to unit length."
286 | ]
287 | },
288 | {
289 | "cell_type": "code",
290 | "execution_count": 12,
291 | "metadata": {},
292 | "outputs": [],
293 | "source": [
294 | "import numpy as np\n",
295 | "\n",
296 | "# compute gradient of heat diffusion\n",
297 | "tfunc = compute_gradient(T, u)\n",
298 | "\n",
299 | "# normalize gradient\n",
300 | "X = -tfunc / np.sqrt((tfunc**2).sum(1))[:, np.newaxis]\n",
301 | "X = np.nan_to_num(X)"
302 | ]
303 | },
304 | {
305 | "cell_type": "markdown",
306 | "metadata": {},
307 | "source": [
308 | "Then we get the integrated divergence of the normalized gradients."
309 | ]
310 | },
311 | {
312 | "cell_type": "code",
313 | "execution_count": 13,
314 | "metadata": {},
315 | "outputs": [],
316 | "source": [
317 | "divx = compute_divergence(T, X)"
318 | ]
319 | },
320 | {
321 | "cell_type": "markdown",
322 | "metadata": {},
323 | "source": [
324 | "Finally, to obtain the distance function, we need to solve a Poisson equation. The solution can be shifted arbitrary, so we need to subtract the minimum, which should be along the boundary of the mesh."
325 | ]
326 | },
327 | {
328 | "cell_type": "code",
329 | "execution_count": 14,
330 | "metadata": {},
331 | "outputs": [
332 | {
333 | "name": "stdout",
334 | "output_type": "stream",
335 | "text": [
336 | "TriaMesh with regular Laplace-Beltrami\n",
337 | "Matrix Format now: csc\n",
338 | "Solver: cholesky decomp - performance optimal ...\n"
339 | ]
340 | }
341 | ],
342 | "source": [
343 | "# compute distance\n",
344 | "from scipy.sparse.linalg import splu\n",
345 | "\n",
346 | "useCholmod = True\n",
347 | "try:\n",
348 | " from sksparse.cholmod import cholesky\n",
349 | "except ImportError:\n",
350 | " useCholmod = False\n",
351 | "\n",
352 | "fem = Solver(T, lump=True)\n",
353 | "A, B = fem.stiffness, fem.mass\n",
354 | "\n",
355 | "H = -A\n",
356 | "b0 = divx\n",
357 | "\n",
358 | "# solve H x = b0\n",
359 | "# we don't need the B matrix here, as divx is the integrated divergence\n",
360 | "print(\"Matrix Format now: \" + H.getformat())\n",
361 | "if useCholmod:\n",
362 | " print(\"Solver: cholesky decomp - performance optimal ...\")\n",
363 | " chol = cholesky(H)\n",
364 | " x = chol(b0)\n",
365 | "else:\n",
366 | " print(\"Solver: spsolve (LU decomp) - performance not optimal ...\")\n",
367 | " lu = splu(H)\n",
368 | " x = lu.solve(b0)\n",
369 | "\n",
370 | "# remove shift\n",
371 | "x = x - min(x)"
372 | ]
373 | },
374 | {
375 | "cell_type": "markdown",
376 | "metadata": {},
377 | "source": [
378 | "In short, the idea is to first get a function (heat diffusion) that flows from a point or from a set of points (boundary) through the mesh, then normalize its gradient, compute the divergence and finally step backward through the Laplace operator to find a function that has this normalized gradient. If we look at it, we actually notice that level sets are equally spaced now."
379 | ]
380 | },
381 | {
382 | "cell_type": "code",
383 | "execution_count": 15,
384 | "metadata": {},
385 | "outputs": [],
386 | "source": [
387 | "plot_tria_mesh(T, x, plot_edges=True, plot_levels=True)"
388 | ]
389 | },
390 | {
391 | "cell_type": "markdown",
392 | "metadata": {},
393 | "source": [
394 | "Nice, but only an approximation."
395 | ]
396 | },
397 | {
398 | "cell_type": "code",
399 | "execution_count": 16,
400 | "metadata": {},
401 | "outputs": [
402 | {
403 | "data": {
404 | "text/plain": [
405 | "(0.6049783117351546, 0.7071067811865476)"
406 | ]
407 | },
408 | "execution_count": 16,
409 | "metadata": {},
410 | "output_type": "execute_result"
411 | }
412 | ],
413 | "source": [
414 | "# max distance (smoothed)\n",
415 | "(max(x), np.sqrt(2) / 2)"
416 | ]
417 | },
418 | {
419 | "cell_type": "markdown",
420 | "metadata": {},
421 | "source": [
422 | "Instead of computing the solution ourselves, we could simply employ the standard Poisson solver on inv(B) times divx."
423 | ]
424 | },
425 | {
426 | "cell_type": "code",
427 | "execution_count": 17,
428 | "metadata": {},
429 | "outputs": [
430 | {
431 | "name": "stdout",
432 | "output_type": "stream",
433 | "text": [
434 | "Matrix Format now: csc\n",
435 | "Solver: spsolve (LU decomposition) ...\n"
436 | ]
437 | }
438 | ],
439 | "source": [
440 | "vf = fem.poisson(-Bi * divx)\n",
441 | "vf = vf - min(vf)\n",
442 | "plot_tria_mesh(T, vf, plot_edges=True, plot_levels=True)"
443 | ]
444 | },
445 | {
446 | "cell_type": "markdown",
447 | "metadata": {},
448 | "source": [
449 | "This should give us the same result as what we had earlier."
450 | ]
451 | },
452 | {
453 | "cell_type": "code",
454 | "execution_count": 18,
455 | "metadata": {},
456 | "outputs": [
457 | {
458 | "data": {
459 | "text/plain": [
460 | "5.30470728232757e-07"
461 | ]
462 | },
463 | "execution_count": 18,
464 | "metadata": {},
465 | "output_type": "execute_result"
466 | }
467 | ],
468 | "source": [
469 | "max(abs(vf - x))"
470 | ]
471 | },
472 | {
473 | "cell_type": "markdown",
474 | "metadata": {},
475 | "source": [
476 | "Or we can just call compute_geodesic_f which does all the work for us."
477 | ]
478 | },
479 | {
480 | "cell_type": "code",
481 | "execution_count": 19,
482 | "metadata": {},
483 | "outputs": [
484 | {
485 | "name": "stdout",
486 | "output_type": "stream",
487 | "text": [
488 | "TriaMesh with regular Laplace-Beltrami\n",
489 | "Matrix Format now: csc\n",
490 | "Solver: spsolve (LU decomposition) ...\n"
491 | ]
492 | }
493 | ],
494 | "source": [
495 | "from lapy.diffgeo import compute_geodesic_f\n",
496 | "\n",
497 | "gf = compute_geodesic_f(T, u)\n",
498 | "plot_tria_mesh(T, gf, plot_edges=True, plot_levels=True)"
499 | ]
500 | },
501 | {
502 | "cell_type": "markdown",
503 | "metadata": {},
504 | "source": [
505 | "And verify it is still the same result."
506 | ]
507 | },
508 | {
509 | "cell_type": "code",
510 | "execution_count": 20,
511 | "metadata": {},
512 | "outputs": [
513 | {
514 | "data": {
515 | "text/plain": [
516 | "3.2569845126451114e-07"
517 | ]
518 | },
519 | "execution_count": 20,
520 | "metadata": {},
521 | "output_type": "execute_result"
522 | }
523 | ],
524 | "source": [
525 | "# again should be the same\n",
526 | "max(abs(gf - x))"
527 | ]
528 | },
529 | {
530 | "cell_type": "markdown",
531 | "metadata": {},
532 | "source": [
533 | "Similar to normalizing the gradients of a function, we can see if we can rotate it."
534 | ]
535 | },
536 | {
537 | "cell_type": "code",
538 | "execution_count": 21,
539 | "metadata": {},
540 | "outputs": [
541 | {
542 | "name": "stdout",
543 | "output_type": "stream",
544 | "text": [
545 | "TriaMesh with regular Laplace-Beltrami\n",
546 | "Matrix Format now: csc\n",
547 | "Solver: spsolve (LU decomposition) ...\n"
548 | ]
549 | }
550 | ],
551 | "source": [
552 | "# testing if we can rotate the function\n",
553 | "from lapy.diffgeo import compute_rotated_f\n",
554 | "\n",
555 | "gf = compute_rotated_f(T, vf)\n",
556 | "plot_tria_mesh(T, gf, plot_edges=True, plot_levels=True)"
557 | ]
558 | },
559 | {
560 | "cell_type": "markdown",
561 | "metadata": {},
562 | "source": [
563 | "## Mean Curvature Mapping\n",
564 | "\n",
565 | "To demonstrate the mean curvature mapping to a sphere, we need to have a closed mesh. It should not have too many flat regions (so not the cube) as there is no cuvature."
566 | ]
567 | },
568 | {
569 | "cell_type": "code",
570 | "execution_count": 22,
571 | "metadata": {},
572 | "outputs": [],
573 | "source": [
574 | "# Load your mesh here and uncomment. The mesh should have not too many flat regions (not a cube)\n",
575 | "# from lapy.diffgeo import tria_mean_curvature_flow\n",
576 | "# from lapy.plot import plot_tria_mesh\n",
577 | "# T = TriaIO.import_off(\"../data/???\")\n",
578 | "# T2 = tria_mean_curvature_flow(T)\n",
579 | "# plot_tria_mesh(T2,plot_edges=True,plot_levels=True)"
580 | ]
581 | }
582 | ],
583 | "metadata": {
584 | "kernelspec": {
585 | "display_name": "Python3",
586 | "language": "python",
587 | "name": "python3"
588 | },
589 | "language_info": {
590 | "codemirror_mode": {
591 | "name": "ipython",
592 | "version": 3
593 | },
594 | "file_extension": ".py",
595 | "mimetype": "text/x-python",
596 | "name": "python",
597 | "nbconvert_exporter": "python",
598 | "pygments_lexer": "ipython3",
599 | "version": "3"
600 | },
601 | "nbsphinx": {
602 | "execute": "always"
603 | }
604 | },
605 | "nbformat": 4,
606 | "nbformat_minor": 4
607 | }
608 |
--------------------------------------------------------------------------------
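The eigenfunction identities above are verified by visual comparison of plots; the residual of inv(B) A v_k = lambda_k v_k can also be checked numerically. A small sketch with the same mesh and calls as in the notebook:

```python
import numpy as np

from lapy import Solver, TriaMesh

T = TriaMesh.read_off("../data/square-mesh.off")
fem = Solver(T, lump=True)
evals, evecs = fem.eigs()
A, B = fem.stiffness, fem.mass
Bi = B.copy()
Bi.data **= -1  # B is diagonal due to lumping, so inversion is elementwise

k = 1
residual = Bi * (A * evecs[:, k]) - evals[k] * evecs[:, k]
print(np.max(np.abs(residual)))  # small but nonzero: a discrete approximation
```

--------------------------------------------------------------------------------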
/examples/lapy:
--------------------------------------------------------------------------------
1 | ../lapy
--------------------------------------------------------------------------------
/lapy/__init__.py:
--------------------------------------------------------------------------------
1 | from ._version import __version__ # noqa: F401
2 | from .solver import Solver # noqa: F401
3 | from .tet_mesh import TetMesh # noqa: F401
4 | from .tria_mesh import TriaMesh # noqa: F401
5 | from .utils._config import sys_info # noqa: F401
6 |
--------------------------------------------------------------------------------
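A quick sketch of the public names this `__init__` re-exports:

```python
import lapy
from lapy import Solver, TetMesh, TriaMesh, sys_info

print(lapy.__version__)  # resolved from package metadata in _version.py
```

--------------------------------------------------------------------------------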
/lapy/_read_geometry.py:
--------------------------------------------------------------------------------
1 | """Read FreeSurfer geometry (fix for dev, ll 151-153).
2 |
3 | Code was taken from nibabel.freesurfer package
4 | (https://github.com/nipy/nibabel/blob/master/nibabel/freesurfer/io.py).
5 | This software is licensed under the following license:
6 |
7 | The MIT License
8 |
9 | Copyright (c) 2009-2019 Matthew Brett
10 | Copyright (c) 2010-2013 Stephan Gerhard
11 | Copyright (c) 2006-2014 Michael Hanke
12 | Copyright (c) 2011 Christian Haselgrove
13 | Copyright (c) 2010-2011 Jarrod Millman
14 | Copyright (c) 2011-2019 Yaroslav Halchenko
15 | Copyright (c) 2015-2019 Chris Markiewicz
16 |
17 | Permission is hereby granted, free of charge, to any person obtaining a copy
18 | of this software and associated documentation files (the "Software"), to deal
19 | in the Software without restriction, including without limitation the rights
20 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
21 | copies of the Software, and to permit persons to whom the Software is
22 | furnished to do so, subject to the following conditions:
23 |
24 | The above copyright notice and this permission notice shall be included in
25 | all copies or substantial portions of the Software.
26 |
27 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
30 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
31 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
32 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
33 | THE SOFTWARE.
34 | """
35 |
36 | import warnings
37 | from collections import OrderedDict
38 |
39 | import numpy as np
40 |
41 |
42 | def _fread3(fobj):
43 | """Read a 3-byte int from an open binary file object.
44 |
45 | Parameters
46 | ----------
47 | fobj : file
48 | File descriptor
49 |
50 | Returns
51 | -------
52 | n : int
53 | A 3 byte int
54 | """
55 | b1, b2, b3 = np.fromfile(fobj, ">u1", 3)
56 | # the bit-shifting operator does not return
57 | # identical results on all platforms, therefore
58 | # we disable it and return / compare the first
59 | # three bytes separately
60 | # return (b1 << 16) + (b2 << 8) + b3
61 | return b1, b2, b3
62 |
63 |
64 | def _read_volume_info(fobj):
65 | """Read the footer from a surface file.
66 |
67 | Parameters
68 | ----------
69 | fobj : file
70 | File descriptor
71 |
72 | Returns
73 | -------
74 | volume_info : array
75 | Key-value pairs found in the file.
76 | """
77 | volume_info = OrderedDict()
78 | head = np.fromfile(fobj, ">i4", 1)
79 | if not np.array_equal(head, [20]): # Read two bytes more
80 | head = np.concatenate([head, np.fromfile(fobj, ">i4", 2)])
81 | if not np.array_equal(head, [2, 0, 20]) and not np.array_equal(
82 | head, [2, 1, 20]
83 | ):
84 | warnings.warn("Unknown extension code.", stacklevel=2)
85 | return volume_info
86 | head = [2, 0, 20]
87 |
88 | volume_info["head"] = head
89 | for key in [
90 | "valid",
91 | "filename",
92 | "volume",
93 | "voxelsize",
94 | "xras",
95 | "yras",
96 | "zras",
97 | "cras",
98 | ]:
99 | pair = fobj.readline().decode("utf-8").split("=")
100 | if pair[0].strip() != key or len(pair) != 2:
101 | raise OSError("Error parsing volume info.")
102 | if key in ("valid", "filename"):
103 | volume_info[key] = pair[1].strip()
104 | elif key == "volume":
105 | volume_info[key] = np.array(pair[1].split()).astype(int)
106 | else:
107 | volume_info[key] = np.array(pair[1].split()).astype(float)
108 | # Ignore the rest
109 | return volume_info
110 |
111 |
112 | def read_geometry(filepath, read_metadata=False, read_stamp=False):
113 | """Read a triangular format Freesurfer surface mesh.
114 |
115 | Parameters
116 | ----------
117 | filepath : str
118 | Path to surface file.
119 | read_metadata : bool, optional
120 | If True, read and return metadata as key-value pairs.
121 | Valid keys:
122 | * 'head' : array of int
123 | * 'valid' : str
124 | * 'filename' : str
125 | * 'volume' : array of int, shape (3,)
126 | * 'voxelsize' : array of float, shape (3,)
127 | * 'xras' : array of float, shape (3,)
128 | * 'yras' : array of float, shape (3,)
129 | * 'zras' : array of float, shape (3,)
130 | * 'cras' : array of float, shape (3,)
131 | read_stamp : bool, optional
132 | Return the comment from the file
133 |
134 | Returns
135 | -------
136 | coords : numpy array
137 | nvtx x 3 array of vertex (x, y, z) coordinates.
138 | faces : numpy array
139 | nfaces x 3 array defining mesh triangles.
140 | volume_info : OrderedDict
141 | Returned only if `read_metadata` is True. Key-value pairs found in the
142 | geometry file.
143 | create_stamp : str
144 | Returned only if `read_stamp` is True. The comment added by the
145 | program that saved the file.
146 | """
147 | volume_info = OrderedDict()
148 |
149 | # See comment in _fread3() on why we have changed the
150 | # comparison
151 | # TRIANGLE_MAGIC = 16777214
152 | TRIANGLE_MAGIC = (np.uint8(255), np.uint8(255), np.uint8(254))
153 |
154 | with open(filepath, "rb") as fobj:
155 | magic = _fread3(fobj)
156 |
157 | if magic == TRIANGLE_MAGIC: # Triangle file
158 | create_stamp = fobj.readline().rstrip(b"\n").decode("utf-8")
159 | test_dev = fobj.peek(1)[:1]
160 | if test_dev == b"\n":
161 | fobj.readline()
162 | vnum = np.fromfile(fobj, ">i4", 1)[0]
163 | fnum = np.fromfile(fobj, ">i4", 1)[0]
164 | coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3)
165 | faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3)
166 |
167 | if read_metadata:
168 | volume_info = _read_volume_info(fobj)
169 | else:
170 | raise ValueError(
171 | "File does not appear to be a Freesurfer surface (triangle file)"
172 | )
173 |
174 | coords = coords.astype(float) # XXX: due to mayavi bug on mac 32bits
175 |
176 | ret = (coords, faces)
177 | if read_metadata:
178 | if len(volume_info) == 0:
179 | warnings.warn("No volume information contained in the file", stacklevel=2)
180 | ret += (volume_info,)
181 | if read_stamp:
182 | ret += (create_stamp,)
183 |
184 | return ret
185 |
--------------------------------------------------------------------------------
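A usage sketch for `read_geometry`; the surface path below is a hypothetical FreeSurfer file, not part of this repository.

```python
from lapy._read_geometry import read_geometry

# "lh.white" is a placeholder path to a FreeSurfer triangle surface
coords, faces, volume_info = read_geometry("lh.white", read_metadata=True)
print(coords.shape)             # (nvtx, 3) vertex coordinates
print(faces.shape)              # (nfaces, 3) triangle indices
print(volume_info.get("cras"))  # footer entry, present in newer files
```

--------------------------------------------------------------------------------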
/lapy/_tet_io.py:
--------------------------------------------------------------------------------
1 | """Functions for IO of Tetrahedra Meshes.
2 |
3 | Should be called via the TetMesh member functions.
4 | """
5 |
6 | import os.path
7 |
8 | import numpy as np
9 |
10 |
11 | def read_gmsh(filename):
12 | """Load GMSH tetrahedron mesh.
13 |
14 | Parameters
15 | ----------
16 | filename : str
17 | Filename to load.
18 |
19 | Returns
20 | -------
21 | tet : TetMesh
22 | Object of loaded GMSH tetrahedron mesh.
23 | """
24 | extension = os.path.splitext(filename)[1]
25 | verbose = 1
26 | if verbose > 0:
27 | print("--> GMSH format ... ")
28 | if extension != ".msh":
29 | print("[no .msh file] --> FAILED\n")
30 | return
31 | try:
32 | f = open(filename)
33 | except OSError:
34 | print("[file not found or not readable]\n")
35 | return
36 | line = f.readline()
37 | if not line.startswith("$MeshFormat"):
38 | print("[$MeshFormat keyword not found] --> FAILED\n")
39 | f.close()
40 | return
41 | line = f.readline()
42 | larr = line.split()
43 | ver = float(larr[0])
44 | ftype = int(larr[1])
45 | datatype = int(larr[2])
46 | print(
47 | "Msh file ver ",
48 | ver,
49 | " , ftype ",
50 | ftype,
51 | " , datatype ",
52 | datatype,
53 | "\n",
54 | )
55 | if ftype != 0:
56 | print("[binary format not implemented] --> FAILED\n")
57 | f.close()
58 | return
59 | line = f.readline()
60 | if not line.startswith("$EndMeshFormat"):
61 | print("[$EndMeshFormat keyword not found] --> FAILED\n")
62 | f.close()
63 | return
64 | line = f.readline()
65 | if not line.startswith("$Nodes"):
66 | print("[$Nodes keyword not found] --> FAILED\n")
67 | f.close()
68 | return
69 | pnum = int(f.readline())
70 | # read (nodes X 4) matrix as chunk
71 | # drop first column
72 | v = np.fromfile(f, "float32", 4 * pnum, " ")
73 | v.shape = (pnum, 4)
74 | v = np.delete(v, 0, 1)
75 | line = f.readline()
76 | if not line.startswith("$EndNodes"):
77 | print("[$EndNodes keyword not found] --> FAILED\n")
78 | f.close()
79 | return
80 | line = f.readline()
81 | if not line.startswith("$Elements"):
82 | print("[$Elements keyword not found] --> FAILED\n")
83 | f.close()
84 | return
85 | tnum = int(f.readline())
86 | pos = f.tell()
87 | line = f.readline()
88 | f.seek(pos)
89 | larr = line.split()
90 | if int(larr[1]) != 4:
91 | print("larr: ", larr, "\n")
92 | print("[can only read tetras] --> FAILED\n")
93 | f.close()
94 | return
95 | # read (elements X ?) matrix as chunk
96 | t = np.fromfile(f, "int", tnum * len(larr), " ")
97 | t.shape = (tnum, len(larr))
98 | t = np.delete(t, np.s_[0 : len(larr) - 4], 1)
99 | line = f.readline()
100 | if not line.startswith("$EndElements"):
101 | print("Line: ", line, " \n")
102 | print("[$EndElements keyword not found] --> FAILED\n")
103 | f.close()
104 | return
105 | f.close()
106 | print(" --> DONE ( V: " + str(v.shape[0]) + " , T: " + str(t.shape[0]) + " )\n")
107 | from . import TetMesh
108 |
109 | return TetMesh(v, t)
110 |
111 |
112 | def read_vtk(filename):
113 | """Load VTK tetrahedron mesh.
114 |
115 | Parameters
116 | ----------
117 | filename : str
118 | Filename to load.
119 |
120 | Returns
121 | -------
122 | tet : TetMesh
123 | Object of loaded VTK tetrahedron mesh.
124 | """
125 | verbose = 1
126 | if verbose > 0:
127 | print("--> VTK format ... ")
128 | try:
129 | f = open(filename)
130 | except OSError:
131 | print("[file not found or not readable]\n")
132 | return
133 | # skip comments
134 | line = f.readline()
135 | while line[0] == "#":
136 | line = f.readline()
137 | # search for ASCII keyword in first 5 lines:
138 | count = 0
139 | while count < 5 and not line.startswith("ASCII"):
140 | line = f.readline()
141 | # print line
142 | count = count + 1
143 | if not line.startswith("ASCII"):
144 | print("[ASCII keyword not found] --> FAILED\n")
145 | return
146 | # expect Dataset Polydata line after ASCII:
147 | line = f.readline()
148 | if not line.startswith("DATASET POLYDATA") and not line.startswith(
149 | "DATASET UNSTRUCTURED_GRID"
150 | ):
151 | print(
152 | "[read: "
153 | + line
154 | + " expected DATASET POLYDATA or DATASET UNSTRUCTURED_GRID] --> FAILED\n"
155 | )
156 | return
157 | # read number of points
158 | line = f.readline()
159 | larr = line.split()
160 | if larr[0] != "POINTS" or (larr[2] != "float" and larr[2] != "double"):
161 | print(
162 | "[read: "
163 | + line
164 | + " expected POINTS # float or POINTS # double ] --> FAILED\n"
165 | )
166 | return
167 | pnum = int(larr[1])
168 | # read points as chunk
169 | v = np.fromfile(f, "float32", 3 * pnum, " ")
170 | v.shape = (pnum, 3)
171 | # expect polygon or tria_strip line
172 | line = f.readline()
173 | larr = line.split()
174 | if larr[0] == "POLYGONS" or larr[0] == "CELLS":
175 | tnum = int(larr[1])
176 | ttnum = int(larr[2])
177 | npt = float(ttnum) / tnum
178 | if npt != 5.0:
179 | print(
180 | "[having: " + str(npt) + " data per tetra, expected 4+1] --> FAILED\n"
181 | )
182 | return
183 | t = np.fromfile(f, "int", ttnum, " ")
184 | t.shape = (tnum, 5)
185 | if t[tnum - 1][0] != 4:
186 | print("[can only read tetras] --> FAILED\n")
187 | return
188 | t = np.delete(t, 0, 1)
189 | else:
190 | print("[read: " + line + " expected POLYGONS or CELLS] --> FAILED\n")
191 | return
192 | f.close()
193 | print(" --> DONE ( V: " + str(v.shape[0]) + " , T: " + str(t.shape[0]) + " )\n")
194 | from . import TetMesh
195 |
196 | return TetMesh(v, t)
197 |
198 |
199 | def write_vtk(tet, filename):
200 | """Save VTK file.
201 |
202 | Parameters
203 | ----------
204 | tet : TetMesh
205 | Tetrahedron mesh to save.
206 | filename : str
207 | Filename to save to.
208 | """
209 | # open file
210 | try:
211 | f = open(filename, "w")
212 | except OSError:
213 | print("[File " + filename + " not writable]")
214 | return
215 | # check data structure
216 | # ...
217 | # Write
218 | f.write("# vtk DataFile Version 1.0\n")
219 | f.write("vtk output\n")
220 | f.write("ASCII\n")
221 | f.write("DATASET POLYDATA\n")
222 | f.write("POINTS " + str(np.shape(tet.v)[0]) + " float\n")
223 | for i in range(np.shape(tet.v)[0]):
224 | f.write(" ".join(map(str, tet.v[i, :])))
225 | f.write("\n")
226 | f.write(
227 | "POLYGONS " + str(np.shape(tet.t)[0]) + " " + str(5 * np.shape(tet.t)[0]) + "\n"
228 | )
229 | for i in range(np.shape(tet.t)[0]):
230 | f.write(" ".join(map(str, np.append(4, tet.t[i, :]))))
231 | f.write("\n")
232 | f.close()
233 |
--------------------------------------------------------------------------------
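A round-trip sketch for the reader/writer above. These functions are normally called via `TetMesh` member functions, but the module-level signatures shown here also work directly; the output filename is hypothetical.

```python
from lapy import _tet_io

T = _tet_io.read_vtk("../data/cubeTetra.vtk")  # ASCII VTK, 4+1 ints per tetra
_tet_io.write_vtk(T, "cube_copy.vtk")          # hypothetical output path
T2 = _tet_io.read_vtk("cube_copy.vtk")
assert (T.t == T2.t).all()                     # connectivity survives the round trip
```

--------------------------------------------------------------------------------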
/lapy/_version.py:
--------------------------------------------------------------------------------
1 | """Version number."""
2 |
3 | from importlib.metadata import version
4 |
5 | __version__ = version(__package__)
6 |
--------------------------------------------------------------------------------
/lapy/commands/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Deep-MI/LaPy/d55eca505a071e97e14d513915efb6af664c346a/lapy/commands/__init__.py
--------------------------------------------------------------------------------
/lapy/commands/sys_info.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | from .. import sys_info
4 |
5 |
6 | def run():
7 | """Run sys_info() command."""
8 | parser = argparse.ArgumentParser(
9 | prog=f"{__package__.split('.')[0]}-sys_info", description="sys_info"
10 | )
11 | parser.add_argument(
12 | "--developer",
13 | help="display information for optional dependencies",
14 | action="store_true",
15 | )
16 | args = parser.parse_args()
17 |
18 | sys_info(developer=args.developer)
19 |
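
The entry point above only adds a ``--developer`` flag on top of the library call; a sketch of the equivalent direct invocation (the CLI name ``lapy-sys_info`` follows from the ``prog`` string above):

    from lapy import sys_info

    sys_info(developer=True)  # same output as `lapy-sys_info --developer`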
--------------------------------------------------------------------------------
/lapy/heat.py:
--------------------------------------------------------------------------------
1 | """Functions for computing heat kernel and diffusion.
2 |
3 | Inputs are eigenvalues and eigenvectors (for heat kernel) and the
4 | mesh geometries (tet or tria mesh) for heat diffusion.
5 | """
6 |
7 | import importlib
8 | from typing import Optional
9 |
10 | import numpy as np
11 |
12 | from .utils._imports import import_optional_dependency
13 |
14 |
15 | def diagonal(t, x, evecs, evals, n):
16 | """Compute heat kernel diagonal ( K(t,x,x,) ).
17 |
18 | For a given time t (can be a vector)
19 | using only the first n smallest eigenvalues and eigenvectors.
20 |
21 | Parameters
22 | ----------
23 | t : float | array
24 | Time or a row vector of time values.
25 | x : array
26 | Vertex ids for the positions of K(t,x,x).
27 | evecs : array
28 | Eigenvectors (matrix: vnum x evecsnum).
29 | evals : array
30 | Vector of eigenvalues (col vector: evecsnum x 1).
31 | n : int
32 |         Number of evecs and vals to use (smaller or equal to their length).
33 |
34 | Returns
35 | -------
36 | h : array
37 | Matrix, rows: vertices selected in x, cols: times in t.
38 | """
39 | # maybe add code to check dimensions of input and flip axis if necessary
40 | h = np.matmul(evecs[x, 0:n] * evecs[x, 0:n], np.exp(-np.matmul(evals[0:n], t)))
41 | return h
42 |
43 |
44 | def kernel(t, vfix, evecs, evals, n):
45 | r"""Compute heat kernel from all points to a fixed point (vfix).
46 |
47 | For a given time t (using only the first n smallest eigenvalues
48 | and eigenvectors):
49 |
50 | .. math::
51 |         K_t(p,q) = \sum_j \exp(-\mathrm{eval}_j \, t) \, \mathrm{evec}_j(p) \, \mathrm{evec}_j(q)
52 |
53 | Parameters
54 | ----------
55 | t : float | array
56 | Time (can also be a row vector, if passing multiple times).
57 | vfix : array
58 | Fixed vertex index.
59 | evecs : array
60 | Matrix of eigenvectors (M x N), M=#vertices, N=#eigenvectors.
61 | evals : array
62 | Column vector of eigenvalues (N).
63 | n : int
64 | Number of eigenvalues/vectors used in heat kernel (n<=N).
65 |
66 | Returns
67 | -------
68 | h : array
69 |         Matrix, rows: all vertices, cols: times in t.
70 | """
71 | # h = evecs * ( exp(-evals * t) .* repmat(evecs(vfix,:)',1,length(t)) )
72 | h = np.matmul(evecs[:, 0:n], (np.exp(np.matmul(-evals[0:n], t)) * evecs[vfix, 0:n]))
73 | return h
74 |
75 |
76 | def diffusion(geometry, vids, m=1.0, aniso: Optional[int] = None, use_cholmod=False):
77 | """Compute the heat diffusion from initial vertices in vids.
78 |
79 |     It uses one backward Euler step at time :math:`t = m l^2`, where :math:`l`
80 |     is the average edge length.
81 |
82 | Parameters
83 | ----------
84 | geometry : TriaMesh | TetMesh
85 | Geometric object on which to run diffusion.
86 | vids : array
87 | Vertex index or indices where initial heat is applied.
88 | m : float, default=1.0
89 | Factor to compute time of heat evolution.
90 |     aniso : int
91 |         Anisotropy for curvature-based anisotropic Laplace (see `lapy.Solver`).
92 | use_cholmod : bool, default=False
93 | Which solver to use:
94 | * True : Use Cholesky decomposition from scikit-sparse cholmod.
95 | * False: Use spsolve (LU decomposition).
96 |
97 | Returns
98 | -------
99 |     vfunc : array of shape (n,)
100 | Heat diffusion at vertices.
101 | """
102 | if use_cholmod:
103 | sksparse = import_optional_dependency("sksparse", raise_error=True)
104 | importlib.import_module(".cholmod", sksparse.__name__)
105 | else:
106 | sksparse = None
107 | from . import Solver
108 |
109 | nv = len(geometry.v)
110 | fem = Solver(geometry, lump=True, aniso=aniso)
111 | # time of heat evolution:
112 | t = m * geometry.avg_edge_length() ** 2
113 | # backward Euler matrix:
114 | hmat = fem.mass + t * fem.stiffness
115 | # set initial heat
116 | b0 = np.zeros((nv,))
117 | b0[np.array(vids)] = 1.0
118 | # solve H x = b0
119 | print("Matrix Format now: " + hmat.getformat())
120 | if use_cholmod:
121 | print("Solver: Cholesky decomposition from scikit-sparse cholmod ...")
122 | chol = sksparse.cholmod.cholesky(hmat)
123 | vfunc = chol(b0)
124 | else:
125 | from scipy.sparse.linalg import splu
126 |
127 | print("Solver: spsolve (LU decomposition) ...")
128 | lu = splu(hmat)
129 | vfunc = lu.solve(np.float32(b0))
130 | return vfunc
131 |
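
A minimal sketch of ``diffusion`` on a tetrahedral mesh, condensed from the test suite (the data path is an assumption):

    import numpy as np

    from lapy import TetMesh
    from lapy.heat import diffusion

    T = TetMesh.read_vtk("data/cubeTetra.vtk")
    bvert = np.unique(T.boundary_tria().t)  # boundary vertices act as the heat source
    u = diffusion(T, bvert, m=1.0)          # one heat value per vertex
    assert u.shape == (len(T.v),)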
--------------------------------------------------------------------------------
/lapy/io.py:
--------------------------------------------------------------------------------
1 | """Functions to read and write spectra and vertex functions."""
2 |
3 | import numpy as np
4 |
5 |
6 | def read_vfunc(filename):
7 | """Import vertex functions from txt file.
8 |
9 | Values can be separated by ``;`` or ``,`` and surrounded by ``{}`` or ``()``
10 | brackets. Also first line can have the keyword "Solution:", i.e. the PSOL format
11 | from ShapeDNA.
12 |
13 | Parameters
14 | ----------
15 | filename : str
16 | Filename of input.
17 |
18 | Returns
19 | -------
20 | vals : array
21 | List of vfunc parameters.
22 | """
23 | import re
24 |
25 | try:
26 | with open(filename) as f:
27 | txt = f.readlines()
28 | except OSError:
29 | print("[File " + filename + " not found or not readable]")
30 | return
31 | txt = [x.strip() for x in txt]
32 |     if "Solution:" in txt: txt.remove("Solution:")  # PSOL header is optional
33 | txt = [re.sub("[{()}]", "", x) for x in txt]
34 | if len(txt) == 1:
35 | txt = [re.split("[,;]", x) for x in txt][0]
36 | txt = [float(x) for x in txt]
37 | # txt = np.array(txt)
38 | return txt
39 |
40 |
41 | def read_ev(filename):
42 | """Load EV file.
43 |
44 | Parameters
45 | ----------
46 | filename : str
47 | Filename of input.
48 |
49 | Returns
50 | -------
51 | d: dict
52 | Dictionary of eigenvalues, eigenvectors (optional), and associated
53 | information.
54 | """
55 | # open file
56 | try:
57 | f = open(filename)
58 | except OSError:
59 | print("[File " + filename + " not found or not readable]")
60 | return
61 | # read file (and get rid of all \n)
62 | ll = f.read().splitlines()
63 | # define data structure
64 | d = dict()
65 | # go through each line and parse it
66 | i = 0
67 | while i < len(ll):
68 | if ll[i].lstrip().startswith("Creator:"):
69 | d.update({"Creator": ll[i].split(":", 1)[1].strip()})
70 | i = i + 1
71 | elif ll[i].lstrip().startswith("File:"):
72 | d.update({"File": ll[i].split(":", 1)[1].strip()})
73 | i = i + 1
74 | elif ll[i].lstrip().startswith("User:"):
75 | d.update({"User": ll[i].split(":", 1)[1].strip()})
76 | i = i + 1
77 | elif ll[i].lstrip().startswith("Refine:"):
78 | d.update({"Refine": int(ll[i].split(":", 1)[1].strip())})
79 | i = i + 1
80 | elif ll[i].lstrip().startswith("Degree:"):
81 | d.update({"Degree": int(ll[i].split(":", 1)[1].strip())})
82 | i = i + 1
83 | elif ll[i].lstrip().startswith("Dimension:"):
84 | d.update({"Dimension": int(ll[i].split(":", 1)[1].strip())})
85 | i = i + 1
86 | elif ll[i].lstrip().startswith("Elements:"):
87 | d.update({"Elements": int(ll[i].split(":", 1)[1].strip())})
88 | i = i + 1
89 | elif ll[i].lstrip().startswith("DoF:"):
90 | d.update({"DoF": int(ll[i].split(":", 1)[1].strip())})
91 | i = i + 1
92 | elif ll[i].lstrip().startswith("NumEW:"):
93 | d.update({"NumEW": int(ll[i].split(":", 1)[1].strip())})
94 | i = i + 1
95 | elif ll[i].lstrip().startswith("Area:"):
96 | d.update({"Area": float(ll[i].split(":", 1)[1].strip())})
97 | i = i + 1
98 | elif ll[i].lstrip().startswith("Volume:"):
99 | d.update({"Volume": float(ll[i].split(":", 1)[1].strip())})
100 | i = i + 1
101 | elif ll[i].lstrip().startswith("BLength:"):
102 | d.update({"BLength": float(ll[i].split(":", 1)[1].strip())})
103 | i = i + 1
104 | elif ll[i].lstrip().startswith("EulerChar:"):
105 | d.update({"EulerChar": int(ll[i].split(":", 1)[1].strip())})
106 | i = i + 1
107 | elif ll[i].lstrip().startswith("Time(pre)"):
108 | d.update({"TimePre": int(ll[i].split(":", 1)[1].strip())})
109 | i = i + 1
110 | elif ll[i].lstrip().startswith("Time(calcAB)"):
111 | d.update({"TimeCalcAB": int(ll[i].split(":", 1)[1].strip())})
112 | i = i + 1
113 | elif ll[i].lstrip().startswith("Time(calcEW)"):
114 | d.update({"TimeCalcEW": int(ll[i].split(":", 1)[1].strip())})
115 | i = i + 1
116 | elif ll[i].lstrip().startswith("Eigenvalues"):
117 | i = i + 1
118 | while ll[i].find("{") < 0: # possibly introduce termination criterion
119 | i = i + 1
120 | if ll[i].find("}") >= 0: # '{' and '}' on the same line
121 | evals = ll[i].strip().replace("{", "").replace("}", "")
122 | else:
123 | evals = ""
124 | while ll[i].find("}") < 0:
125 | evals = evals + ll[i].strip().replace("{", "").replace("}", "")
126 | i = i + 1
127 | evals = evals + ll[i].strip().replace("{", "").replace("}", "")
128 | evals = np.array(evals.split(";")).astype(float)
129 | d.update({"Eigenvalues": evals})
130 | i = i + 1
131 | elif ll[i].lstrip().startswith("Eigenvectors"):
132 | i = i + 1
133 | while not (ll[i].strip().startswith("sizes")):
134 | i = i + 1
135 | d.update(
136 | {"EigenvectorsSize": np.array(ll[i].strip().split()[1:]).astype(int)}
137 | )
138 | i = i + 1
139 | while ll[i].find("{") < 0: # possibly introduce termination criterion
140 | i = i + 1
141 | if ll[i].find("}") >= 0: # '{' and '}' on the same line
142 | evecs = ll[i].strip().replace("{", "").replace("}", "")
143 | else:
144 | evecs = ""
145 | while ll[i].find("}") < 0:
146 | evecs = evecs + ll[i].strip().replace("{", "").replace(
147 | "}", ""
148 | ).replace("(", "").replace(")", "")
149 | i = i + 1
150 | evecs = evecs + ll[i].strip().replace("{", "").replace("}", "").replace(
151 | "(", ""
152 | ).replace(")", "")
153 | evecs = np.array(
154 | evecs.replace(";", " ").replace(",", " ").strip().split()
155 | ).astype(float)
156 | if len(evecs) == (d["EigenvectorsSize"][0] * d["EigenvectorsSize"][1]):
157 | evecs = np.transpose(np.reshape(evecs, d["EigenvectorsSize"][1::-1]))
158 | d.update({"Eigenvectors": evecs})
159 | else:
160 | print(
161 | "[Length of eigenvectors is not "
162 | + str(d["EigenvectorsSize"][0])
163 | + " times "
164 | + str(d["EigenvectorsSize"][1])
165 | + "."
166 | )
167 | i = i + 1
168 | else:
169 | i = i + 1
170 | # close file
171 | f.close()
172 | # return dict
173 | return d
174 |
175 |
176 | def write_ev(filename, d):
177 | """Save EV data structures as txt file (format from ShapeDNA).
178 |
179 | Parameters
180 | ----------
181 | filename : str
182 | Filename to save to.
183 | d : dict
184 | Dictionary of eigenvalues, eigenvectors (optional), and associated
185 | information.
186 | """
187 | # open file
188 | try:
189 | f = open(filename, "w")
190 | except OSError:
191 | print("[File " + filename + " not writable]")
192 | return
193 | # check data structure
194 | if "Eigenvalues" not in d:
195 | print("ERROR: no Eigenvalues specified")
196 |         return  # do not kill the interpreter from library code
197 | # ...
198 | # Write
199 | if "Creator" in d:
200 | f.write(" Creator: " + d["Creator"] + "\n")
201 | if "File" in d:
202 | f.write(" File: " + d["File"] + "\n")
203 | if "User" in d:
204 | f.write(" User: " + d["User"] + "\n")
205 | if "Refine" in d:
206 | f.write(" Refine: " + str(d["Refine"]) + "\n")
207 | if "Degree" in d:
208 | f.write(" Degree: " + str(d["Degree"]) + "\n")
209 | if "Dimension" in d:
210 | f.write(" Dimension: " + str(d["Dimension"]) + "\n")
211 | if "Elements" in d:
212 | f.write(" Elements: " + str(d["Elements"]) + "\n")
213 | if "DoF" in d:
214 | f.write(" DoF: " + str(d["DoF"]) + "\n")
215 | if "NumEW" in d:
216 | f.write(" NumEW: " + str(d["NumEW"]) + "\n")
217 | f.write("\n")
218 | if "Area" in d:
219 | f.write(" Area: " + str(d["Area"]) + "\n")
220 | if "Volume" in d:
221 | f.write(" Volume: " + str(d["Volume"]) + "\n")
222 | if "BLength" in d:
223 | f.write(" BLength: " + str(d["BLength"]) + "\n")
224 | if "EulerChar" in d:
225 | f.write(" EulerChar: " + str(d["EulerChar"]) + "\n")
226 | f.write("\n")
227 | if "TimePre" in d:
228 | f.write(" Time(Pre) : " + str(d["TimePre"]) + "\n")
229 | if "TimeCalcAB" in d:
230 | f.write(" Time(calcAB) : " + str(d["TimeCalcAB"]) + "\n")
231 | if "TimeCalcEW" in d:
232 | f.write(" Time(calcEW) : " + str(d["TimeCalcEW"]) + "\n")
233 | if "TimePre" in d and "TimeCalcAB" in d and "TimeCalcEW" in d:
234 | f.write(
235 | " Time(total ) : "
236 | + str(d["TimePre"] + d["TimeCalcAB"] + d["TimeCalcEW"])
237 | + "\n"
238 | )
239 | f.write("\n")
240 | f.write("Eigenvalues:\n")
241 | f.write(
242 | "{ " + " ; ".join(map(str, d["Eigenvalues"])) + " }\n"
243 | ) # consider precision
244 | f.write("\n")
245 | if "Eigenvectors" in d:
246 | f.write("Eigenvectors:\n")
247 | # f.write('sizes: '+' '.join(map(str,d['EigenvectorsSize']))+'\n')
248 | # better compute real sizes from eigenvector array?
249 | f.write("sizes: " + " ".join(map(str, d["Eigenvectors"].shape)) + "\n")
250 | f.write("\n")
251 | f.write("{ ")
252 | for i in range(np.shape(d["Eigenvectors"])[1] - 1):
253 | f.write("(")
254 | f.write(",".join(map(str, d["Eigenvectors"][:, i])))
255 | f.write(") ;\n")
256 | f.write("(")
257 | f.write(
258 | ",".join(
259 | map(
260 | str,
261 | d["Eigenvectors"][:, np.shape(d["Eigenvectors"])[1] - 1],
262 | )
263 | )
264 | )
265 | f.write(") }\n")
266 | # close file
267 | f.close()
268 |
269 |
270 | def write_vfunc(filename, vfunc):
271 | """Save vertex in PSOL txt file.
272 |
273 | First line "Solution:", "," separated values inside ()
274 |
275 | Parameters
276 | ----------
277 | filename : str
278 | Filename to save to.
279 | vfunc : array_like
280 | List of vfunc parameters.
281 | """
282 | try:
283 | f = open(filename, "w")
284 | except OSError:
285 | print("[File " + filename + " not writable]")
286 | return
287 | f.write("Solution:\n")
288 | f.write("(" + ",".join(vfunc.astype(str)) + ")")
289 | f.close()
290 |
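
A minimal round-trip sketch for both formats above; the file names are assumptions:

    import numpy as np

    from lapy.io import read_ev, read_vfunc, write_ev, write_vfunc

    vfunc = np.array([0.0, 0.5, 1.0])
    write_vfunc("example.psol", vfunc)  # "Solution:" header, values inside (...)
    vals = read_vfunc("example.psol")   # -> [0.0, 0.5, 1.0]

    d = {"Eigenvalues": np.array([0.0, 4.17, 4.17])}
    write_ev("example.ev", d)
    d2 = read_ev("example.ev")          # d2["Eigenvalues"] recovers the spectrum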
--------------------------------------------------------------------------------
/lapy/shapedna.py:
--------------------------------------------------------------------------------
1 | """Functions for computing and comparing Laplace spectra.
2 |
3 | Includes code for solving the anisotropic Laplace-Beltrami eigenvalue
4 | problem as well as functions for normalization and comparison of
5 | Laplace spectra.
6 | """
7 |
8 | import numpy as np
9 | import scipy.spatial.distance as di
10 |
11 | from . import Solver
12 |
13 |
14 | def compute_shapedna(
15 | geom, k=50, lump=False, aniso=None, aniso_smooth=10, use_cholmod=False
16 | ):
17 | """Compute the shapeDNA descriptor for triangle or tetrahedral meshes.
18 |
19 | Parameters
20 | ----------
21 | geom : TriaMesh or TetMesh
22 | Mesh geometry.
23 | k : int, default=50
24 | Number of eigenfunctions / eigenvalues.
25 | lump : bool, default=False
26 | If True, lump the mass matrix (diagonal).
27 | (See 'lapy.Solver.Solver' class).
28 | aniso : float or tuple of shape (2,)
29 |         Anisotropy for curvature-based anisotropic Laplace.
30 | (See 'lapy.Solver.Solver' class).
31 | aniso_smooth : int
32 | Number of smoothing iterations for curvature computation on vertices.
33 | (See 'lapy.Solver.Solver' class).
34 | use_cholmod : bool, default: False
35 |         If True, attempts to use the Cholesky decomposition for improved execution
36 |         speed. Requires the ``scikit-sparse`` library. If it cannot be found, an
37 |         error is raised.
38 |         If False, the slower LU decomposition is used.
39 |
40 | Returns
41 | -------
42 | ev : dict
43 | A dictionary, including 'Eigenvalues' and 'Eigenvectors' fields.
44 | """
45 | # get fem, evals, evecs
46 |
47 | fem = Solver(
48 | geom, lump=lump, aniso=aniso, aniso_smooth=aniso_smooth, use_cholmod=use_cholmod
49 | )
50 | evals, evecs = fem.eigs(k=k)
51 |
52 | # write ev
53 |
54 | evDict = dict()
55 | evDict["Refine"] = 0
56 | evDict["Degree"] = 1
57 | if type(geom).__name__ == "TriaMesh":
58 | evDict["Dimension"] = 2
59 | elif type(geom).__name__ == "TetMesh":
60 | evDict["Dimension"] = 3
61 | evDict["Elements"] = len(geom.t)
62 | evDict["DoF"] = len(geom.v)
63 | evDict["NumEW"] = k
64 | evDict["Eigenvalues"] = evals
65 | evDict["Eigenvectors"] = evecs
66 |
67 | return evDict
68 |
69 |
70 | def normalize_ev(geom, evals, method="geometry"):
71 | """Normalize a surface or a volume.
72 |
73 | Parameters
74 | ----------
75 | geom : TriaMesh or TetMesh
76 | Mesh geometry.
77 | evals : array_like
78 | Set of sorted eigenvalues.
79 | method : str
80 | Either "surface", "volume", or "geometry";
81 | "geometry" will perform surface normalization for
82 | 2D objects, and volume normalization for 3D objects.
83 |
84 | Returns
85 | -------
86 | array_like
87 | Vector of re-weighted eigenvalues.
88 | """
89 | if method == "surface":
90 | vol = geom.area()
91 |
92 | return evals * vol ** np.divide(2.0, 2.0)
93 |
94 | elif method == "volume":
95 | if type(geom).__name__ == "TriaMesh":
96 | geom.orient_()
97 |
98 | vol = geom.volume()
99 |
100 | elif type(geom).__name__ == "TetMesh":
101 | bnd = geom.boundary_tria()
102 |
103 | bnd.orient_()
104 |
105 | vol = bnd.volume()
106 |
107 | return evals * vol ** np.divide(2.0, 3.0)
108 |
109 | elif method == "geometry":
110 | if type(geom).__name__ == "TriaMesh":
111 | vol = geom.area()
112 |
113 | return evals * vol ** np.divide(2.0, 2.0)
114 |
115 | elif type(geom).__name__ == "TetMesh":
116 | bnd = geom.boundary_tria()
117 |
118 | bnd.orient_()
119 |
120 | vol = bnd.volume()
121 |
122 | return evals * vol ** np.divide(2.0, 3.0)
123 |
124 |
125 | def reweight_ev(evals):
126 | """Apply linear re-weighting.
127 |
128 | Parameters
129 | ----------
130 | evals : array_like
131 | Set of sorted eigenvalues.
132 |
133 | Returns
134 | -------
135 | evals: array_like
136 | Vector of re-weighted eigenvalues.
137 | """
138 | # evals[1:] = evals[1:] / np.arange(1, len(evals))
139 | evals = evals / np.arange(1, len(evals) + 1)
140 |
141 | return evals
142 |
143 |
144 | def compute_distance(ev1, ev2, dist="euc"):
145 | """Compute the shape dissimilarity from two shapeDNA descriptors.
146 |
147 | Parameters
148 | ----------
149 | ev1 : array_like
150 | First set of sorted eigenvalues.
151 | ev2 : array_like
152 | Second set of sorted eigenvalues.
153 | dist : str
154 | Distance measure; currently only 'euc' (Euclidean).
155 |
156 | Returns
157 | -------
158 |     distance : float
159 | Distance between the eigenvalue arrays.
160 | """
161 | if dist == "euc":
162 | return di.euclidean(ev1, ev2)
163 | else:
164 | print("Only euclidean distance is currently implemented.")
165 | return
166 |
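
A sketch of the full ShapeDNA pipeline, mirroring the test suite (the data path is an assumption):

    from lapy import TriaMesh
    from lapy.shapedna import (
        compute_distance,
        compute_shapedna,
        normalize_ev,
        reweight_ev,
    )

    tria = TriaMesh.read_vtk("data/cubeTria.vtk")
    ev = compute_shapedna(tria, k=3)  # dict with 'Eigenvalues' and 'Eigenvectors'
    evals = normalize_ev(tria, ev["Eigenvalues"], method="geometry")
    evals = reweight_ev(evals)
    print(compute_distance(evals, evals))  # 0.0 for identical spectra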
--------------------------------------------------------------------------------
/lapy/tet_mesh.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy import sparse
3 |
4 | from . import _tet_io as io
5 |
6 |
7 | class TetMesh:
8 | """Class representing a tetrahedral mesh.
9 |
10 | This is an efficient implementation of a tetrahedral mesh data structure
11 | with core functionality using sparse matrices internally (Scipy).
12 |
13 | Parameters
14 | ----------
15 | v : array_like
16 | List of lists of 3 float coordinates.
17 | t : array_like
18 | List of lists of 4 int of indices (>=0) into ``v`` array.
19 |         Ordering is important: t0, t1, t2 must be oriented
20 |         counterclockwise when looking from above, with t3
21 |         on top of that triangle.
22 |
23 | Notes
24 | -----
25 |     The class has static class methods to read tetrahedral meshes from
26 | `GMSH `_
27 | and `VTK `_ files.
28 | """
29 |
30 | def __init__(self, v, t):
31 | self.v = np.array(v)
32 | self.t = np.array(t)
33 | vnum = np.max(self.v.shape)
34 | if np.max(self.t) >= vnum:
35 | raise ValueError("Max index exceeds number of vertices")
36 | # put more checks here (e.g. the dim 3 conditions on columns)
37 | # self.orient_()
38 | self.adj_sym = self.construct_adj_sym()
39 |
40 | @classmethod
41 | def read_gmsh(cls, filename):
42 | """Load GMSH tetrahedron mesh.
43 |
44 | Parameters
45 | ----------
46 | filename : str
47 | Filename to load.
48 |
49 | Returns
50 | -------
51 | tet : TetMesh
52 | Object of loaded GMSH tetrahedron mesh.
53 | """
54 | return io.read_gmsh(filename)
55 |
56 | @classmethod
57 | def read_vtk(cls, filename):
58 | """Load VTK tetrahedron mesh.
59 |
60 | Parameters
61 | ----------
62 | filename : str
63 | Filename to load.
64 |
65 | Returns
66 | -------
67 | tet : TetMesh
68 | Object of loaded VTK tetrahedron mesh.
69 | """
70 | return io.read_vtk(filename)
71 |
72 | def write_vtk(self, filename):
73 | """Save as VTK file.
74 |
75 | Parameters
76 | ----------
77 | filename : str
78 | Filename to save to.
79 | """
80 | io.write_vtk(self, filename)
81 |
82 | def construct_adj_sym(self):
83 | """Create adjacency symmetric matrix.
84 |
85 | The adjacency matrix will be symmetric. Each inner
86 | edge will get the number of tetrahedra that contain this edge.
87 |         Inner edges usually have entries of 3 or larger; boundary edges, 2 or 1.
88 | Works on tetras only.
89 |
90 | Returns
91 | -------
92 | adj : csc_matrix
93 | Symmetric adjacency matrix as csc sparse matrix.
94 | """
95 | t1 = self.t[:, 0]
96 | t2 = self.t[:, 1]
97 | t3 = self.t[:, 2]
98 | t4 = self.t[:, 3]
99 | i = np.column_stack((t1, t2, t2, t3, t3, t1, t1, t2, t3, t4, t4, t4)).reshape(
100 | -1
101 | )
102 | j = np.column_stack((t2, t1, t3, t2, t1, t3, t4, t4, t4, t1, t2, t3)).reshape(
103 | -1
104 | )
105 | adj = sparse.csc_matrix((np.ones(i.shape), (i, j)))
106 | return adj
107 |
108 | def has_free_vertices(self):
109 | """Check if the vertex list has more vertices than what is used in tetra.
110 |
111 | (same implementation as in `~lapy.TriaMesh`)
112 |
113 | Returns
114 | -------
115 | bool
116 |             True if the vertex list contains vertices not used in any tetrahedron.
117 | """
118 | vnum = np.max(self.v.shape)
119 | vnumt = len(np.unique(self.t.reshape(-1)))
120 | return vnum != vnumt
121 |
122 | def is_oriented(self):
123 | """Check if tet mesh is oriented.
124 |
125 | True if all tetrahedra are oriented
126 | so that v0,v1,v2 are oriented counterclockwise when looking from above,
127 | and v3 is on top of that triangle.
128 |
129 | Returns
130 | -------
131 | oriented: bool
132 | True if ``max(adj_directed)=1``.
133 | """
134 |         # Compute vertex coordinates and a difference vector for each tetra:
135 | t0 = self.t[:, 0]
136 | t1 = self.t[:, 1]
137 | t2 = self.t[:, 2]
138 | t3 = self.t[:, 3]
139 | v0 = self.v[t0, :]
140 | v1 = self.v[t1, :]
141 | v2 = self.v[t2, :]
142 | v3 = self.v[t3, :]
143 | e0 = v1 - v0
144 | e2 = v2 - v0
145 | e3 = v3 - v0
146 |         # Compute cross product and 6 * vol for each tetra:
147 | cr = np.cross(e0, e2)
148 | vol = np.sum(e3 * cr, axis=1)
149 | if np.max(vol) < 0.0:
150 | print("All tet orientations are flipped")
151 | return False
152 | elif np.min(vol) > 0.0:
153 | print("All tet orientations are correct")
154 | return True
155 | elif np.count_nonzero(vol) < len(vol):
156 | print("We have degenerated zero-volume tetrahedra")
157 | return False
158 | else:
159 | print("Orientations are not uniform")
160 | return False
161 |
162 | def avg_edge_length(self):
163 | """Get average edge lengths in tet mesh.
164 |
165 | Returns
166 | -------
167 | float
168 | Average edge length.
169 | """
170 | # get only upper off-diag elements from symmetric adj matrix
171 | triadj = sparse.triu(self.adj_sym, 1, format="coo")
172 | edgelens = np.sqrt(
173 | ((self.v[triadj.row, :] - self.v[triadj.col, :]) ** 2).sum(1)
174 | )
175 | return edgelens.mean()
176 |
177 | def boundary_tria(self, tetfunc=None):
178 | """Get boundary triangle mesh of tetrahedra.
179 |
180 | It can have multiple connected components.
181 | Tria will have same vertices (including free vertices),
182 | so that the tria indices agree with the tet-mesh, in case we want to
183 | transfer information back, e.g. a FEM boundary condition, or to access
184 | a TetMesh vertex function with TriaMesh.t indices.
185 |
186 | .. warning::
187 |
188 | Note, that it seems to be returning non-oriented triangle meshes,
189 | may need some debugging, until then use tria.orient_() after this.
190 |
191 | Parameters
192 | ----------
193 | tetfunc : array | None
194 | List of tetra function values (optional).
195 |
196 | Returns
197 | -------
198 | TriaMesh
199 | TriaMesh of boundary (potentially >1 components).
200 | triafunc : array
201 | List of tria function values (only returned if ``tetfunc`` is provided).
202 | """
203 | from . import TriaMesh
204 |
205 | # get all triangles
206 | allt = np.vstack(
207 | (
208 | self.t[:, np.array([3, 1, 2])],
209 | self.t[:, np.array([2, 0, 3])],
210 | self.t[:, np.array([1, 3, 0])],
211 | self.t[:, np.array([0, 2, 1])],
212 | )
213 | )
214 | # sort rows so that faces are reorder in ascending order of indices
215 | allts = np.sort(allt, axis=1)
216 | # find unique trias without a neighbor
217 | tria, indices, count = np.unique(
218 | allts, axis=0, return_index=True, return_counts=True
219 | )
220 | tria = allt[indices[count == 1]]
221 | print("Found " + str(np.size(tria, 0)) + " triangles on boundary.")
222 | # if we have tetra function, map these to the boundary triangles
223 | if tetfunc is not None:
224 | alltidx = np.tile(np.arange(self.t.shape[0]), 4)
225 | tidx = alltidx[indices[count == 1]]
226 | triafunc = tetfunc[tidx]
227 | return TriaMesh(self.v, tria), triafunc
228 | return TriaMesh(self.v, tria)
229 |
230 | def rm_free_vertices_(self):
231 | """Remove unused (free) vertices from v and t.
232 |
233 |         These are vertices that are not used in any tetrahedron. They can produce problems
234 | when constructing, e.g., Laplace matrices.
235 |
236 | Will update v and t in mesh.
237 | Same implementation as in `~lapy.TriaMesh`.
238 |
239 | Returns
240 | -------
241 | vkeep: array
242 | Indices (from original list) of kept vertices.
243 | vdel: array
244 | Indices of deleted (unused) vertices.
245 | """
246 | tflat = self.t.reshape(-1)
247 | vnum = np.max(self.v.shape)
248 | if np.max(tflat) >= vnum:
249 | raise ValueError("Max index exceeds number of vertices")
250 | # determine which vertices to keep
251 | vkeep = np.full(vnum, False, dtype=bool)
252 | vkeep[tflat] = True
253 | # list of deleted vertices (old indices)
254 | vdel = np.nonzero(~vkeep)[0]
255 | # if nothing to delete return
256 | if len(vdel) == 0:
257 | return np.arange(vnum), []
258 | # delete unused vertices
259 | vnew = self.v[vkeep, :]
260 | # create lookup table
261 | tlookup = np.cumsum(vkeep) - 1
262 | # reindex tria
263 | tnew = tlookup[self.t]
264 | # convert vkeep to index list
265 | vkeep = np.nonzero(vkeep)[0]
266 | self.v = vnew
267 | self.t = tnew
268 | return vkeep, vdel
269 |
270 | def orient_(self):
271 | """Ensure that tet mesh is oriented.
272 |
273 | Re-orient tetras so that
274 | v0,v1,v2 are oriented counterclockwise when looking from above,
275 | and v3 is on top of that triangle.
276 |
277 | Returns
278 | -------
279 | onum : int
280 | Number of re-oriented tetras.
281 | """
282 | # Compute vertex coordinates and a difference vector for each tetra:
283 | t0 = self.t[:, 0]
284 | t1 = self.t[:, 1]
285 | t2 = self.t[:, 2]
286 | t3 = self.t[:, 3]
287 | v0 = self.v[t0, :]
288 | v1 = self.v[t1, :]
289 | v2 = self.v[t2, :]
290 | v3 = self.v[t3, :]
291 | e0 = v1 - v0
292 | e2 = v2 - v0
293 | e3 = v3 - v0
294 | # Compute cross product and 6 * vol for each tetra:
295 | cr = np.cross(e0, e2)
296 | vol = np.sum(e3 * cr, axis=1)
297 | negtet = vol < 0.0
298 | negnum = np.sum(negtet)
299 | if negnum == 0:
300 | print("Mesh is oriented, nothing to do")
301 | return 0
302 | tnew = self.t
303 | # negtet = np.where(negtet)
304 | temp = self.t[negtet, 1]
305 | tnew[negtet, 1] = self.t[negtet, 2]
306 | tnew[negtet, 2] = temp
307 | onum = np.sum(negtet)
308 | print("Flipped " + str(onum) + " tetrahedra")
309 | self.__init__(self.v, tnew)
310 | return onum
311 |
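
A minimal sketch constructing a single-tetrahedron mesh by hand and querying it; the coordinates are an illustrative assumption:

    from lapy import TetMesh

    v = [[0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1]]
    t = [[0, 1, 2, 3]]           # v0, v1, v2 counterclockwise from above, v3 on top
    mesh = TetMesh(v, t)
    mesh.is_oriented()           # True: the signed volume is positive
    bnd = mesh.boundary_tria()   # prints "Found 4 triangles on boundary."
    print(mesh.avg_edge_length())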
--------------------------------------------------------------------------------
/lapy/utils/__init__.py:
--------------------------------------------------------------------------------
1 | """Utilities module."""
2 |
--------------------------------------------------------------------------------
/lapy/utils/_config.py:
--------------------------------------------------------------------------------
1 | import platform
2 | import re
3 | import sys
4 | from functools import partial
5 | from importlib.metadata import requires, version
6 | from typing import IO, Callable, Optional
7 |
8 | import psutil
9 |
10 |
11 | def sys_info(fid: Optional[IO] = None, developer: bool = False):
12 | """Print the system information for debugging.
13 |
14 | Parameters
15 | ----------
16 | fid : file-like, default=None
17 | The file to write to, passed to :func:`print`.
18 | Can be None to use :data:`sys.stdout`.
19 | developer : bool, default=False
20 | If True, display information about optional dependencies.
21 | """
22 |
23 | ljust = 26
24 | out = partial(print, end="", file=fid)
25 | package = __package__.split(".")[0]
26 |
27 | # OS information - requires python 3.8 or above
28 | out("Platform:".ljust(ljust) + platform.platform() + "\n")
29 | # Python information
30 | out("Python:".ljust(ljust) + sys.version.replace("\n", " ") + "\n")
31 | out("Executable:".ljust(ljust) + sys.executable + "\n")
32 | # CPU information
33 | out("CPU:".ljust(ljust) + platform.processor() + "\n")
34 | out("Physical cores:".ljust(ljust) + str(psutil.cpu_count(False)) + "\n")
35 | out("Logical cores:".ljust(ljust) + str(psutil.cpu_count(True)) + "\n")
36 | # Memory information
37 | out("RAM:".ljust(ljust))
38 | out(f"{psutil.virtual_memory().total / float(2 ** 30):0.1f} GB\n")
39 | out("SWAP:".ljust(ljust))
40 | out(f"{psutil.swap_memory().total / float(2 ** 30):0.1f} GB\n")
41 |
42 | # dependencies
43 | out("\nDependencies info\n")
44 | out(f"{package}:".ljust(ljust) + version(package) + "\n")
45 | dependencies = [
46 | elt.split(";")[0].rstrip() for elt in requires(package) if "extra" not in elt
47 | ]
48 | _list_dependencies_info(out, ljust, dependencies)
49 |
50 | # extras
51 | if developer:
52 | keys = (
53 | "build",
54 | "chol",
55 | "doc",
56 | "test",
57 | "style",
58 | )
59 | for key in keys:
60 | dependencies = [
61 | elt.split(";")[0].rstrip()
62 | for elt in requires(package)
63 | if f"extra == '{key}'" in elt or f'extra == "{key}"' in elt
64 | ]
65 | if len(dependencies) == 0:
66 | continue
67 | out(f"\nOptional '{key}' info\n")
68 | _list_dependencies_info(out, ljust, dependencies)
69 |
70 |
71 | def _list_dependencies_info(out: Callable, ljust: int, dependencies: list[str]):
72 | """List dependencies names and versions.
73 |
74 | Parameters
75 | ----------
76 |     out : Callable
77 |         Output function.
78 |     ljust : int
79 |         Width used to left-justify the dependency names.
80 |     dependencies : list[str]
81 |         List of dependencies to report.
82 |
83 | """
84 |
85 | for dep in dependencies:
86 | # handle dependencies with version specifiers
87 | specifiers_pattern = r"(~=|==|!=|<=|>=|<|>|===)"
88 | specifiers = re.findall(specifiers_pattern, dep)
89 | if len(specifiers) != 0:
90 | dep, _ = dep.split(specifiers[0])
91 | while not dep[-1].isalpha():
92 | dep = dep[:-1]
93 | # handle dependencies provided with a [key], e.g. pydocstyle[toml]
94 | if "[" in dep:
95 | dep = dep.split("[")[0]
96 | try:
97 | version_ = version(dep)
98 | except Exception:
99 | version_ = "Not found."
100 |
101 | # handle special dependencies with backends, C dep, ..
102 | if dep in ("matplotlib", "seaborn") and version_ != "Not found.":
103 | try:
104 | from matplotlib import pyplot as plt
105 |
106 | backend = plt.get_backend()
107 | except Exception:
108 | backend = "Not found"
109 |
110 | out(f"{dep}:".ljust(ljust) + version_ + f" (backend: {backend})\n")
111 |
112 | else:
113 | out(f"{dep}:".ljust(ljust) + version_ + "\n")
114 |
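
A sketch capturing the report in a string instead of stdout, as done in ``test_config.py``:

    from io import StringIO

    from lapy.utils._config import sys_info

    buf = StringIO()
    sys_info(fid=buf)
    print(buf.getvalue().splitlines()[0])  # "Platform:                 ..."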
--------------------------------------------------------------------------------
/lapy/utils/_imports.py:
--------------------------------------------------------------------------------
1 | """Handle optional dependency imports.
2 |
3 | Inspired from pandas: https://pandas.pydata.org/
4 | """
5 |
6 | import importlib
7 |
8 | # A mapping from import name to package name (on PyPI) when the package name
9 | # is different.
10 | INSTALL_MAPPING = {
11 | "sksparse": "scikit-sparse",
12 | }
13 |
14 |
15 | def import_optional_dependency(
16 | name: str,
17 | extra: str = "",
18 | raise_error: bool = True,
19 | ):
20 | """Import an optional dependency.
21 |
22 | By default, if a dependency is missing an ImportError with a nice message
23 | will be raised.
24 |
25 | Parameters
26 | ----------
27 | name : str
28 | The module name.
29 | extra : str, default=""
30 | Additional text to include in the ImportError message.
31 | raise_error : bool, default=True
32 | What to do when a dependency is not found.
33 | * True : Raise an ImportError.
34 | * False: Return None.
35 |
36 | Returns
37 | -------
38 | module : Optional[ModuleType]
39 | The imported module when found.
40 | None is returned when the package is not found and raise_error is
41 | False.
42 |
43 | Raises
44 |     ------
45 |     ImportError
46 |         If the dependency is not found and ``raise_error`` is True.
47 | """
48 |
49 | package_name = INSTALL_MAPPING.get(name)
50 | install_name = package_name if package_name is not None else name
51 |
52 | try:
53 | module = importlib.import_module(name)
54 | except ImportError:
55 | if raise_error:
56 | raise ImportError(
57 | f"Missing optional dependency '{install_name}'. {extra} "
58 | f"Use pip or conda to install {install_name}."
59 | ) from None
60 | else:
61 | return None
62 |
63 | return module
64 |
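
A sketch of a soft import with a graceful fallback, the same pattern ``lapy.heat.diffusion`` uses:

    from lapy.utils._imports import import_optional_dependency

    sksparse = import_optional_dependency("sksparse", raise_error=False)
    if sksparse is None:
        print("scikit-sparse missing; falling back to LU decomposition")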
--------------------------------------------------------------------------------
/lapy/utils/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Deep-MI/LaPy/d55eca505a071e97e14d513915efb6af664c346a/lapy/utils/tests/__init__.py
--------------------------------------------------------------------------------
/lapy/utils/tests/expected_outcomes.json:
--------------------------------------------------------------------------------
1 | {
2 | "expected_outcomes": {
3 | "test_tria_mesh": {
4 | "expected_euler_value": 2,
5 | "expected_area": [
6 | 0.5,
7 | 0.5,
8 | 0.5,
9 | 0.5,
10 | 0.5,
11 | 0.5,
12 | 0.5,
13 | 0.5,
14 | 0.5,
15 | 0.5,
16 | 0.5,
17 | 0.5
18 | ],
19 | "expected_mesh_area": 5.999999999999998,
20 | "expected_vertex_degrees": [6, 4, 4, 4, 4, 4, 6, 4],
21 | "expected_vertex_area": [
22 | 1.0,
23 | 0.66666667,
24 | 0.66666667,
25 | 0.66666667,
26 | 0.66666667,
27 | 0.66666667,
28 | 1.0,
29 | 0.66666667
30 | ],
31 | "expected_edge_length": 1.1380711874576983,
32 | "expected_triangle_normals": [
33 | [0.0, 0.0, -1.0],
34 | [0.0, -0.0, -1.0],
35 | [0.0, 0.0, -1.0],
36 | [0.0, -0.0, -1.0],
37 | [0.0, 1.0, 0.0],
38 | [0.0, 1.0, 0.0],
39 | [-1.0, 0.0, 0.0],
40 | [-1.0, 0.0, -0.0],
41 | [0.0, 1.0, 0.0],
42 | [0.0, 1.0, 0.0],
43 | [-1.0, 0.0, 0.0],
44 | [-1.0, 0.0, -0.0]
45 | ],
46 | "expected_triangle": [
47 | 0.8660254,
48 | 0.8660254,
49 | 0.8660254,
50 | 0.8660254,
51 | 0.8660254,
52 | 0.8660254,
53 | 0.8660254,
54 | 0.8660254,
55 | 0.8660254,
56 | 0.8660254,
57 | 0.8660254,
58 | 0.8660254
59 | ],
60 | "expected_vertices": [0, 1, 2, 3, 4, 5, 6, 7],
61 | "expected_flips": 6,
62 | "expected_result": [
63 | [-0.57735027, -0.57735027, -0.57735027],
64 | [-0.40824829, 0.81649658, -0.40824829],
65 | [0.40824829, 0.40824829, -0.81649658],
66 | [0.81649658, -0.40824829, -0.40824829],
67 | [-0.40824829, -0.40824829, 0.81649658],
68 | [-0.81649658, 0.40824829, 0.40824829],
69 | [0.57735027, 0.57735027, 0.57735027],
70 | [0.40824829, -0.81649658, 0.40824829]
71 | ],
72 | "expected_result_offset": [
73 | [-0.57735027, -0.57735027, -0.57735027],
74 | [-0.40824829, 0.81649658, -0.40824829],
75 | [0.40824829, 0.40824829, -0.81649658],
76 | [0.81649658, -0.40824829, -0.40824829],
77 | [-0.40824829, -0.40824829, 0.81649658],
78 | [-0.81649658, 0.40824829, 0.40824829],
79 | [0.57735027, 0.57735027, 0.57735027],
80 | [0.40824829, -0.81649658, 0.40824829]
81 | ],
82 | "expected_boundary_loop": [0, 8, 1, 13, 2, 16, 3, 9]
83 | },
84 | "test_tet_mesh": {"expected_vertices": [0, 1, 2, 3, 4, 5, 6, 7, 8]},
85 | "test_compute_shapedna": {
86 | "expected_eigenvalues": [-4.0165149e-05, 4.169641, 4.1704664],
87 | "tolerance": 1e-4
88 | },
89 | "test_normalize_ev_geometry": {
90 | "expected_normalized_values": [-0.00024099089, 25.017845, 25.022799],
91 | "tolerance": 1e-4
92 | },
93 | "test_reweight_ev": {
94 | "expected_reweighted_values": [
95 | -4.01651487e-05,
96 | 2.08482051e00,
97 | 1.39015547e00
98 | ],
99 | "tolerance": 1e-4
100 | },
101 | "test_compute_distance": {"expected_compute_distance": 0.0},
102 | "test_compute_shapedna_tet": {
103 | "expected_eigen_values": [8.4440224e-05, 9.8897915e00, 9.8898811e00],
104 | "tolerance": 1e-4
105 | },
106 | "test_normalize_ev_geometry_tet": {
107 | "expected_normalized_values": [8.4440224e-05, 9.8897915e00, 9.8898811e00],
108 | "tolerance": 1e-4
109 | },
110 | "test_reweight_ev_tet": {
111 | "expected_reweighted_values": [
112 | 8.44402239e-05,
113 | 4.94489574e00,
114 | 3.29662704e00
115 | ],
116 | "tolerance": 1e-4
117 | },
118 | "test_compute_distance_tet": {
119 | "exp_compute_distance": 0.0
120 | },
121 | "test_Geodesics_format": {
122 | "expected_matrix_format": "csc",
123 | "max_distance": 0.60497826,
124 | "expected_sqrt_2_div_2": 0.7071067811865476,
125 | "expected_max_abs_diff": 0.0
126 | },
127 | "test_TetMesh_Geodesics": {
128 | "expected_evals_len": 10,
129 | "expected_max_col_values": [1.0, 1.0, 1.0],
130 | "expected_min_col_values": [-1.0, -1.0, -1.0],
131 | "expected_matrix_format": "csc",
132 | "max_distance": 0.69931495,
133 | "expected_sqrt": 0.8660254037844386,
134 | "expected_divx": [
135 | 5.9999948,
136 | 6.0000215,
137 | 6.0000215,
138 | 5.999988,
139 | 6.000053,
140 | 5.999975,
141 | 5.9999676,
142 | 6.000024,
143 | 6.000013,
144 | 6.000008
145 | ]
146 | },
147 | "test_visualization_triangle_mesh": {
148 | "expected_elements": 4800,
149 | "expected_dof": 2402,
150 | "expected_ev": [-4.1549094e-05, 4.169634, 4.170457]
151 | },
152 | "test_visualization_tetrahedral_mesh": {
153 | "expected_elements": 48000,
154 | "expected_dof": 9261,
155 | "expected_ev": [8.4652565e-05, 9.889787, 9.889887]
156 | }
157 | }
158 | }
159 |
--------------------------------------------------------------------------------
/lapy/utils/tests/test_TetMesh_Geodesics.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import numpy as np
4 | import pytest
5 | from scipy.sparse.linalg import splu
6 |
7 | from ...diffgeo import compute_divergence, compute_gradient
8 | from ...heat import diffusion
9 | from ...solver import Solver
10 | from ...tet_mesh import TetMesh
11 |
12 |
13 | # Fixture to load the TetMesh
14 | @pytest.fixture
15 | def load_tet_mesh():
16 | T = TetMesh.read_vtk("data/cubeTetra.vtk")
17 | return T
18 |
19 |
20 | @pytest.fixture
21 | def loaded_data():
22 | """
23 | Load and provide the expected outcomes data from a JSON file.
24 |
25 | Returns:
26 | dict: Dictionary containing the expected outcomes data.
27 | """
28 | with open("lapy/utils/tests/expected_outcomes.json") as f:
29 | expected_outcomes = json.load(f)
30 | return expected_outcomes
31 |
32 |
33 | # Test if the mesh is oriented
34 | def test_is_oriented(load_tet_mesh):
35 | T = load_tet_mesh
36 | assert not T.is_oriented(), "Mesh is already oriented"
37 |
38 |
39 | # Test orienting the mesh
40 | def test_orient_mesh(load_tet_mesh):
41 | T = load_tet_mesh
42 | T.orient_()
43 | assert T.is_oriented(), "Mesh is not oriented"
44 |
45 |
46 | # Test solving the Laplace eigenvalue problem
47 | def test_solve_eigenvalue_problem(load_tet_mesh):
48 | T = load_tet_mesh
49 | fem = Solver(T, lump=True)
50 |
51 | num_eigenvalues = 10
52 | evals, evecs = fem.eigs(num_eigenvalues)
53 |
54 | assert len(evals) == num_eigenvalues
55 | assert evecs.shape == (len(T.v), num_eigenvalues)
56 |
57 |
58 | def test_evals_evec_dimension(load_tet_mesh, loaded_data):
59 | T = load_tet_mesh
60 |
61 | expected_evals_len = loaded_data["expected_outcomes"]["test_TetMesh_Geodesics"][
62 | "expected_evals_len"
63 | ]
64 |
65 | fem = Solver(T, lump=True)
66 | evals, evecs = fem.eigs(expected_evals_len)
67 | assert len(evals) == expected_evals_len
68 | assert np.shape(evecs) == (9261, 10)
69 |
70 |
71 | # Geodesics
72 |
73 |
74 | def test_gradients_normalization_and_divergence(load_tet_mesh, loaded_data):
75 | """
76 | Test the computation of gradients, normalization, and divergence for a TetMesh.
77 |
78 | Parameters:
79 | load_tet_mesh (fixture): Fixture to load a TetMesh for testing.
80 | loaded_data (dict): Dictionary containing loaded test data.
81 |
82 | Raises:
83 | AssertionError: If any test condition is not met.
84 | """
85 | T = load_tet_mesh
86 | tria = T.boundary_tria()
87 | bvert = np.unique(tria.t)
88 | u = diffusion(T, bvert, m=1)
89 |
90 | # Compute gradients
91 | tfunc = compute_gradient(T, u)
92 |
93 | # Define the expected shape of tfunc (gradient)
94 | expected_tfunc_shape = (48000, 3)
95 |
96 | # Assert that the shape of tfunc matches the expected shape
97 | assert tfunc.shape == expected_tfunc_shape
98 |
99 | # Flip and normalize
100 | X = -tfunc / np.sqrt((tfunc**2).sum(1))[:, np.newaxis]
101 |
102 | # Define the expected shape of X (normalized gradient)
103 | expected_X_shape = (48000, 3)
104 |
105 | # Assert that the shape of X matches the expected shape
106 | assert X.shape == expected_X_shape
107 |
108 | # Load the expected maximum and minimum values for each column of X
109 | expected_max_col_values = loaded_data["expected_outcomes"][
110 | "test_TetMesh_Geodesics"
111 | ]["expected_max_col_values"]
112 | expected_min_col_values = loaded_data["expected_outcomes"][
113 | "test_TetMesh_Geodesics"
114 | ]["expected_min_col_values"]
115 |
116 | # Assert maximum and minimum values of each column of X match the expected values
117 | for col in range(X.shape[1]):
118 | assert np.allclose(np.max(X[:, col]), expected_max_col_values[col], atol=1e-6)
119 | assert np.allclose(np.min(X[:, col]), expected_min_col_values[col], atol=1e-6)
120 |
121 | # Compute divergence
122 | divx = compute_divergence(T, X)
123 |
124 | # Define the expected shape of divx (divergence)
125 | expected_divx_shape = (9261,)
126 |
127 | # Assert that the shape of divx matches the expected shape
128 | assert divx.shape == expected_divx_shape
129 |
130 |
131 | def test_tetMesh_Geodesics_format(load_tet_mesh, loaded_data):
132 | """
133 | Test if matrix format, solver settings, max distance,
134 | and computed values match the expected outcomes.
135 |
136 | Parameters:
137 | - loaded_data (dict): Dictionary containing loaded test data.
138 |
139 | Raises:
140 | - AssertionError: If any test condition is not met.
141 | """
142 |
143 | T = load_tet_mesh
144 | tria = T.boundary_tria()
145 | bvert = np.unique(tria.t)
146 | u = diffusion(T, bvert, m=1)
147 |
148 | # get gradients
149 | tfunc = compute_gradient(T, u)
150 | # flip and normalize
151 | X = -tfunc / np.sqrt((tfunc**2).sum(1))[:, np.newaxis]
152 | X = np.nan_to_num(X)
153 | # compute divergence
154 | divx = compute_divergence(T, X)
155 |
156 | # compute distance
157 | useCholmod = True
158 | try:
159 | from sksparse.cholmod import cholesky
160 | except ImportError:
161 | useCholmod = False
162 |
163 | fem = Solver(T, lump=True)
164 | A, B = fem.stiffness, fem.mass # computed above when creating Solver
165 |
166 | H = A
167 | b0 = -divx
168 |
169 | # solve H x = b0
170 | if useCholmod:
171 | print("Solver: cholesky decomp - performance optimal ...")
172 | chol = cholesky(H)
173 | x = chol(b0)
174 | else:
175 | print("Solver: spsolve (LU decomp) - performance not optimal ...")
176 | lu = splu(H)
177 | x = lu.solve(b0)
178 |
179 | x = x - np.min(x)
180 |
181 | # get heat diffusion
182 |
183 | v1func = T.v[:, 0] * T.v[:, 0] + T.v[:, 1] * T.v[:, 1] + T.v[:, 2] * T.v[:, 2]
184 | grad = compute_gradient(T, v1func)
185 | glength = np.sqrt(np.sum(grad * grad, axis=1))
186 | A, B = fem.stiffness, fem.mass
187 | Bi = B.copy()
188 | Bi.data **= -1
189 | divx2 = Bi * divx
190 |
191 | expected_matrix_format = loaded_data["expected_outcomes"]["test_TetMesh_Geodesics"][
192 | "expected_matrix_format"
193 | ]
194 | assert H.getformat() == expected_matrix_format
195 | assert np.shape(x) == (9261,)
196 |     assert not useCholmod, "test expects the spsolve fallback (scikit-sparse should not be installed)"
197 | expected_max_x = loaded_data["expected_outcomes"]["test_TetMesh_Geodesics"][
198 | "max_distance"
199 | ]
200 | expected_sqrt_3 = loaded_data["expected_outcomes"]["test_TetMesh_Geodesics"][
201 | "expected_sqrt"
202 | ]
203 | assert np.isclose(max(x), expected_max_x)
204 | computed_sqrt_3 = 0.5 * np.sqrt(3.0)
205 | assert np.isclose(computed_sqrt_3, expected_sqrt_3)
206 | assert np.shape(glength) == (48000,)
207 | expected_divx = loaded_data["expected_outcomes"]["test_TetMesh_Geodesics"][
208 | "expected_divx"
209 | ]
210 | assert len(divx2[5000:5010]) == len(expected_divx)
211 | assert not np.all(divx2[5000:5010] == expected_divx), "divergence is equal"
212 |
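
The gradient/divergence core of the heat method exercised above, condensed into a standalone sketch (same data-path assumption as the tests):

    import numpy as np

    from lapy import TetMesh
    from lapy.diffgeo import compute_divergence, compute_gradient
    from lapy.heat import diffusion

    T = TetMesh.read_vtk("data/cubeTetra.vtk")
    u = diffusion(T, np.unique(T.boundary_tria().t), m=1)
    X = -compute_gradient(T, u)  # one 3-vector per tetrahedron
    X = np.nan_to_num(X / np.linalg.norm(X, axis=1, keepdims=True))
    divx = compute_divergence(T, X)  # one integrated value per vertex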
--------------------------------------------------------------------------------
/lapy/utils/tests/test_TriaMesh_Geodesics.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import numpy as np
4 | import pytest
5 | from scipy.sparse.linalg import splu
6 |
7 | from ...diffgeo import compute_divergence, compute_geodesic_f, compute_gradient
8 | from ...heat import diffusion
9 | from ...solver import Solver
10 | from ...tria_mesh import TriaMesh
11 |
12 |
13 | @pytest.fixture
14 | def loaded_data():
15 | """
16 | Load and provide the expected outcomes data from a JSON file.
17 |
18 | Returns:
19 | dict: Dictionary containing the expected outcomes data.
20 | """
21 | with open("lapy/utils/tests/expected_outcomes.json") as f:
22 | expected_outcomes = json.load(f)
23 | return expected_outcomes
24 |
25 |
26 | # Fixture to load the TetMesh
27 | @pytest.fixture
28 | def load_square_mesh():
29 | T = TriaMesh.read_off("data/square-mesh.off")
30 | return T
31 |
32 |
33 | def test_tria_qualities(load_square_mesh):
34 | """
35 | Test triangle mesh quality computation.
36 | """
37 | T = load_square_mesh
38 | computed_q = T.tria_qualities()
39 | expected_q_length = 768
40 | assert len(computed_q) == expected_q_length
41 |
42 |
43 | # Laplace
44 | def test_Laplace_Geodesics(load_square_mesh):
45 | """
46 | Test Laplace solver for geodesics on a mesh.
47 | """
48 |
49 | T = load_square_mesh
50 |
51 | # compute first eigenfunction
52 | fem = Solver(T, lump=True)
53 | eval, evec = fem.eigs()
54 | # vfunc = evec[:, 1]
55 |
56 | # Get A,B (lumped), and inverse of B (as it is diagonal due to lumping)
57 | A, B = fem.stiffness, fem.mass
58 | Bi = B.copy()
59 | Bi.data **= -1
60 |
61 | assert B.sum() == 1.0
62 | assert Bi is not B
63 | # Convert A to a dense NumPy array
64 | A_dense = A.toarray()
65 |
66 | # Assert that A is symmetric
67 | assert (A_dense == A_dense.T).all()
68 |
69 | expected_eval_length = 10
70 | assert len(eval) == expected_eval_length
71 |
72 |
73 | # Geodesics
74 | def test_Laplace_Geodesics_with_Gradient_Divergence(load_square_mesh):
75 | """
76 | Test Laplace geodesics using gradient and divergence.
77 | """
78 | T = load_square_mesh
79 |
80 | # Load eigenfunction
81 | fem = Solver(T, lump=True)
82 | eval, evec = fem.eigs()
83 | vfunc = evec[:, 1]
84 |
85 | # Compute Laplacian using -div(grad(f))
86 | grad = compute_gradient(T, vfunc)
87 | divx = -compute_divergence(T, grad)
88 |
89 | # Get the lumped mass matrix B
90 | fem = Solver(T, lump=True)
91 | B = fem.mass
92 | Bi = B.copy()
93 | Bi.data **= -1
94 |
95 | # Apply Laplacian operator and then the inverse of B
96 | Laplacian_result = -divx # The Laplacian result
97 |
98 | # Apply the inverse of B to recover vfunc
99 | recovered_vfunc = Bi.dot(Laplacian_result)
100 |
101 | # Check if the original vfunc and the recovered vfunc length are equal
102 | assert len(recovered_vfunc) == len(vfunc)
103 |
104 | expected_eval_length = 10
105 | assert len(eval) == expected_eval_length
106 |
107 |
108 | def test_heat_diffusion_shape(load_square_mesh):
109 | """
110 | Test the shape of the heat diffusion result on a square mesh.
111 |
112 | Parameters:
113 | load_square_mesh: Fixture providing a loaded square mesh.
114 |
115 | This test function computes the heat diffusion and verifies that the shape
116 | of the result matches the expected shape.
117 |
118 | Returns:
119 | None
120 | """
121 | T = load_square_mesh
122 | bvert = T.boundary_loops()
123 | u = diffusion(T, bvert, m=1)
124 | expected_shape = (len(T.v),)
125 | assert u.shape == expected_shape
126 |
127 |
128 | def test_Geodesics_format(loaded_data, load_square_mesh):
129 | """
130 | Test geodesics format and accuracy.
131 | """
132 | T = load_square_mesh
133 | bvert = T.boundary_loops()
134 | u = diffusion(T, bvert, m=1)
135 | # compute gradient of heat diffusion
136 | tfunc = compute_gradient(T, u)
137 |
138 | # normalize gradient
139 | X = -tfunc / np.sqrt((tfunc**2).sum(1))[:, np.newaxis]
140 | X = np.nan_to_num(X)
141 | divx = compute_divergence(T, X)
142 | # compute distance
143 |
144 | useCholmod = True
145 | try:
146 | from sksparse.cholmod import cholesky
147 | except ImportError:
148 | useCholmod = False
149 |
150 | fem = Solver(T, lump=True)
151 | A, B = fem.stiffness, fem.mass
152 | H = -A
153 | b0 = divx
154 |
155 | # solve H x = b0
156 | # we don't need the B matrix here, as divx is the integrated divergence
157 | print("Matrix Format now: " + H.getformat())
158 | if useCholmod:
159 | print("Solver: cholesky decomp - performance optimal ...")
160 | chol = cholesky(H)
161 | x = chol(b0)
162 | else:
163 | print("Solver: spsolve (LU decomp) - performance not optimal ...")
164 | lu = splu(H)
165 | x = lu.solve(b0)
166 |
167 | # remove shift
168 | x = x - min(x)
169 |
170 | Bi = B.copy()
171 | vf = fem.poisson(-Bi * divx)
172 | vf = vf - min(vf)
173 | gf = compute_geodesic_f(T, u)
174 | expected_matrix_format = loaded_data["expected_outcomes"]["test_Geodesics_format"][
175 | "expected_matrix_format"
176 | ]
177 | assert H.getformat() == expected_matrix_format
178 |     assert not useCholmod, "test expects the spsolve fallback (scikit-sparse should not be installed)"
179 | expected_max_x = loaded_data["expected_outcomes"]["test_Geodesics_format"][
180 | "max_distance"
181 | ]
182 | expected_sqrt_2_div_2 = loaded_data["expected_outcomes"]["test_Geodesics_format"][
183 | "expected_sqrt_2_div_2"
184 | ]
185 | assert np.isclose(max(x), expected_max_x)
186 | computed_sqrt_2_div_2 = np.sqrt(2) / 2
187 | assert np.isclose(computed_sqrt_2_div_2, expected_sqrt_2_div_2)
188 | expected_max_abs_diff = loaded_data["expected_outcomes"]["test_Geodesics_format"][
189 | "expected_max_abs_diff"
190 | ]
191 | computed_max_abs_diff = max(abs(gf - x))
192 | assert np.allclose(computed_max_abs_diff, expected_max_abs_diff)
193 |
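
The manual solve above is equivalent to the one-call helper ``compute_geodesic_f``; a condensed sketch under the same data-path assumption:

    import numpy as np

    from lapy import TriaMesh
    from lapy.diffgeo import compute_geodesic_f
    from lapy.heat import diffusion

    T = TriaMesh.read_off("data/square-mesh.off")
    u = diffusion(T, T.boundary_loops(), m=1)
    gf = compute_geodesic_f(T, u)  # distance-to-boundary function
    print(np.max(gf))              # ~0.605 on this mesh (see expected_outcomes.json)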
--------------------------------------------------------------------------------
/lapy/utils/tests/test_config.py:
--------------------------------------------------------------------------------
1 | from io import StringIO
2 |
3 | from .._config import sys_info
4 |
5 |
6 | def test_sys_info():
7 | """Test info-showing utility."""
8 | out = StringIO()
9 | sys_info(fid=out)
10 | value = out.getvalue()
11 | out.close()
12 | assert "Platform:" in value
13 | assert "Executable:" in value
14 | assert "CPU:" in value
15 | assert "Physical cores:" in value
16 | assert "Logical cores" in value
17 | assert "RAM:" in value
18 | assert "SWAP:" in value
19 |
20 | assert "numpy" in value
21 | assert "psutil" in value
22 |
23 | assert "style" not in value
24 | assert "test" not in value
25 |
26 | out = StringIO()
27 | sys_info(fid=out, developer=True)
28 | value = out.getvalue()
29 | out.close()
30 |
31 | assert "build" in value
32 | assert "style" in value
33 | assert "test" in value
34 |
--------------------------------------------------------------------------------
/lapy/utils/tests/test_imports.py:
--------------------------------------------------------------------------------
1 | """Test _imports.py"""
2 |
3 | import pytest
4 |
5 | from .._imports import import_optional_dependency
6 |
7 |
8 | def test_import_optional_dependency():
9 | """Test the import of optional dependencies."""
10 | # Test import of present package
11 | numpy = import_optional_dependency("numpy")
12 | assert isinstance(numpy.__version__, str)
13 |
14 | # Test import of absent package
15 | with pytest.raises(ImportError, match="Missing optional dependency"):
16 | import_optional_dependency("non_existing_pkg", raise_error=True)
17 |
18 | # Test import of absent package without raise
19 | pkg = import_optional_dependency("non_existing_pkg", raise_error=False)
20 | assert pkg is None
21 |
22 | # Test extra
23 | with pytest.raises(ImportError, match="blabla"):
24 | import_optional_dependency("non_existing_pkg", extra="blabla")
25 |
--------------------------------------------------------------------------------
/lapy/utils/tests/test_shape_DNA.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import numpy as np
4 | import pytest
5 |
6 | from ...shapedna import compute_distance, compute_shapedna, normalize_ev, reweight_ev
7 | from ...tet_mesh import TetMesh
8 | from ...tria_mesh import TriaMesh
9 |
10 | tria = TriaMesh.read_vtk("data/cubeTria.vtk")
11 | tet = TetMesh.read_vtk("data/cubeTetra.vtk")
12 |
13 |
14 | @pytest.fixture
15 | def loaded_data():
16 | """
17 | Load expected outcomes data from a JSON file as a dictionary.
18 | """
19 | with open("lapy/utils/tests/expected_outcomes.json") as f:
20 | expected_outcomes = json.load(f)
21 | return expected_outcomes
22 |
23 |
24 | def test_compute_shapedna(loaded_data):
25 | """
26 | Test compute_shapedna function for triangular mesh.
27 |
28 | Args:
29 | loaded_data (dict): Expected outcomes loaded from a JSON file.
30 |
31 | Raises:
32 | AssertionError: If computed eigenvalues don't match within tolerance.
33 | """
34 | ev = compute_shapedna(tria, k=3)
35 |
36 | expected_Eigenvalues = np.array(
37 | loaded_data["expected_outcomes"]["test_compute_shapedna"][
38 | "expected_eigenvalues"
39 | ]
40 | )
41 | tolerance = loaded_data["expected_outcomes"]["test_compute_shapedna"]["tolerance"]
42 | assert np.allclose(ev["Eigenvalues"], expected_Eigenvalues, atol=tolerance)
43 |
44 |
45 | def test_normalize_ev_geometry(loaded_data):
46 | """
47 | Test normalize_ev() using 'geometry' method for a triangular mesh.
48 |
49 | Args:
50 | loaded_data (dict): Expected outcomes from a JSON file.
51 |
52 | Raises:
53 | AssertionError: If normalized eigenvalues don't match within tolerance.
54 | """
55 | ev = compute_shapedna(tria, k=3)
56 |
57 | expected_normalized_values = np.array(
58 | loaded_data["expected_outcomes"]["test_normalize_ev_geometry"][
59 | "expected_normalized_values"
60 | ]
61 | )
62 | tolerance = loaded_data["expected_outcomes"]["test_normalize_ev_geometry"][
63 | "tolerance"
64 | ]
65 | normalized_eigenvalues = normalize_ev(tria, ev["Eigenvalues"], method="geometry")
66 | assert np.allclose(
67 | normalized_eigenvalues, expected_normalized_values, atol=tolerance
68 | )
69 |
70 |
71 | def test_reweight_ev(loaded_data):
72 | """
73 | Test reweighted_ev() and validate reweighted eigenvalues' data type.
74 |
75 | Args:
76 | loaded_data (dict): Expected outcomes from a JSON file.
77 |
78 | Raises:
79 | AssertionError: If reweighted eigenvalues don't match within tolerance.
80 | """
81 | ev = compute_shapedna(tria, k=3)
82 |
83 | expected_reweighted_values = np.array(
84 | loaded_data["expected_outcomes"]["test_reweight_ev"][
85 | "expected_reweighted_values"
86 | ]
87 | )
88 | tolerance = loaded_data["expected_outcomes"]["test_reweight_ev"]["tolerance"]
89 | reweighted_eigenvalues = reweight_ev(ev["Eigenvalues"])
90 | 
91 | assert np.allclose(
92 | reweighted_eigenvalues, expected_reweighted_values, atol=tolerance
93 | )
94 |
95 |
96 | def test_compute_distance(loaded_data):
97 | """
98 | Test compute_distance() for eigenvalues and validate the computed distance.
99 |
100 | Args:
101 | loaded_data (dict): Expected outcomes from a JSON file.
102 |
103 | Raises:
104 | AssertionError: If computed distance doesn't match the expected value.
105 | """
106 | ev = compute_shapedna(tria, k=3)
107 |
108 | expected_compute_distance = loaded_data["expected_outcomes"][
109 | "test_compute_distance"
110 | ]["expected_compute_distance"]
111 | # compute distance for tria eigenvalues (trivial case)
112 | computed_distance = compute_distance(ev["Eigenvalues"], ev["Eigenvalues"])
113 | assert computed_distance == expected_compute_distance
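    | # For identical spectra the distance is expected to be exactly 0, so a
    | # strict equality check is safe here despite floating-point eigenvalues.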
114 |
115 |
116 | # Repeating test steps for a tetrahedral mesh
117 |
118 |
119 | def test_compute_shapedna_tet(loaded_data):
120 | """
121 | Test compute_shapedna for a tetrahedral mesh.
122 |
123 | Args:
124 | loaded_data (dict): Expected outcomes from a JSON file.
125 |
126 | Raises:
127 | AssertionError: If computed eigenvalues don't match within tolerance.
128 | """
129 | evTet = compute_shapedna(tet, k=3)
130 |
131 | expected_eigen_values = np.array(
132 | loaded_data["expected_outcomes"]["test_compute_shapedna_tet"][
133 | "expected_eigen_values"
134 | ]
135 | )
136 | tolerance = loaded_data["expected_outcomes"]["test_compute_shapedna_tet"][
137 | "tolerance"
138 | ]
139 |
140 | assert np.allclose(evTet["Eigenvalues"], expected_eigen_values, atol=tolerance)
141 |
142 |
143 | def test_normalize_ev_geometry_tet(loaded_data):
144 | """
145 | Test normalize_ev() using 'geometry' method for a tetrahedral mesh.
146 |
147 | Args:
148 | loaded_data (dict): Expected outcomes from a JSON file.
149 |
150 | Raises:
151 | AssertionError: If normalized eigenvalues don't match within tolerance.
152 | """
153 | evTet = compute_shapedna(tet, k=3)
154 |
155 | expected_normalized_values = np.array(
156 | loaded_data["expected_outcomes"]["test_normalize_ev_geometry_tet"][
157 | "expected_normalized_values"
158 | ]
159 | )
160 | tolerance = loaded_data["expected_outcomes"]["test_normalize_ev_geometry_tet"][
161 | "tolerance"
162 | ]
163 | # volume / surface / geometry normalization of tet eigenvalues
164 | normalized_eigenvalues = normalize_ev(tet, evTet["Eigenvalues"], method="geometry")
165 |
166 | assert np.allclose(
167 | normalized_eigenvalues, expected_normalized_values, atol=tolerance
168 | )
169 |
170 |
171 | def test_reweight_ev_tet(loaded_data):
172 | """
173 | Test reweight_ev() for a tetrahedral mesh and validate the reweighted eigenvalues.
174 |
175 | Args:
176 | loaded_data (dict): Expected outcomes from a JSON file.
177 |
178 | Raises:
179 | AssertionError: If reweighted eigenvalues don't match within tolerance.
180 | """
181 | evTet = compute_shapedna(tet, k=3)
182 |
183 | expected_reweighted_values = np.array(
184 | loaded_data["expected_outcomes"]["test_reweight_ev_tet"][
185 | "expected_reweighted_values"
186 | ]
187 | )
188 | tolerance = loaded_data["expected_outcomes"]["test_reweight_ev_tet"]["tolerance"]
189 | # Linear reweighting of tet eigenvalues
190 | reweighted_eigenvalues = reweight_ev(evTet["Eigenvalues"])
191 | assert np.allclose(
192 | reweighted_eigenvalues, expected_reweighted_values, atol=tolerance
193 | )
194 |
195 |
196 | def test_compute_distance_tet(loaded_data):
197 | """
198 | Test compute_distance() for eigenvalues of tetrahedral meshes.
199 |
200 | Args:
201 | loaded_data (dict): Expected outcomes from a JSON file.
202 |
203 | Raises:
204 | AssertionError: If computed distance doesn't match the expected value.
205 | """
206 | evTet = compute_shapedna(tet, k=3)
207 |
208 | # compute distance for tet eigenvalues (trivial case)
209 | computed_distance = compute_distance(evTet["Eigenvalues"], evTet["Eigenvalues"])
210 | expected_compute_distance = loaded_data["expected_outcomes"][
211 | "test_compute_distance_tet"
212 | ]["exp_compute_distance"]
213 |
214 | # Compare the computed distance with the expected distance (exact match)
215 | assert computed_distance == expected_compute_distance
216 |
--------------------------------------------------------------------------------
/lapy/utils/tests/test_tet_mesh.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import numpy as np
4 | import pytest
5 |
6 | from ...tet_mesh import TetMesh
7 |
8 |
9 | @pytest.fixture
10 | def tet_mesh_fixture():
11 | points = np.array(
12 | [
13 | [0, 0, 0],
14 | [1, 0, 0],
15 | [1, 1, 0],
16 | [0, 1, 0],
17 | [0, 0, 1],
18 | [1, 0, 1],
19 | [1, 1, 1],
20 | [0, 1, 1],
21 | [0.5, 0.5, 0.5],
22 | ]
23 | )
24 | tets = np.array(
25 | [
26 | [0, 5, 8, 1],
27 | [0, 4, 5, 8],
28 | [2, 5, 6, 8],
29 | [1, 5, 2, 8],
30 | [6, 7, 3, 8],
31 | [6, 3, 2, 8],
32 | [0, 3, 4, 8],
33 | [3, 7, 4, 8],
34 | [0, 1, 2, 8],
35 | [0, 2, 3, 8],
36 | [4, 6, 5, 8],
37 | [4, 7, 6, 8],
38 | ]
39 | )
40 |
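    | # The fixture is a unit cube: 8 corner vertices plus a center vertex
    | # (index 8). Each cube face is split into two triangles, and each
    | # triangle forms a tetrahedron with the center, giving the 12 tets.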
41 | return TetMesh(points, tets)
42 |
43 |
44 | @pytest.fixture
45 | def loaded_data():
46 | """
47 | Load expected outcomes data from a JSON file as a dictionary.
48 | """
49 | with open("lapy/utils/tests/expected_outcomes.json") as f:
50 | expected_outcomes = json.load(f)
51 | return expected_outcomes
52 |
53 |
54 | def test_has_free_vertices(tet_mesh_fixture):
55 | """
56 | Testing whether the tet mesh has free vertices
57 | """
58 | mesh = tet_mesh_fixture
59 | result = mesh.has_free_vertices()
60 | expected_result = False
61 | assert result == expected_result
62 |
63 |
64 | def test_rm_free_vertices(tet_mesh_fixture, loaded_data):
65 | """
66 | Testing removal of free vertices from the tet mesh
67 | """
68 | mesh = tet_mesh_fixture
69 | updated_vertices, deleted_vertices = mesh.rm_free_vertices_()
70 | expected_vertices = np.array(
71 | loaded_data["expected_outcomes"]["test_tet_mesh"]["expected_vertices"]
72 | )
73 | expected_removed_vertices = np.array([])
74 | assert np.array_equal(
75 | updated_vertices, expected_vertices
76 | ), f"{updated_vertices}, {deleted_vertices}"
77 | assert np.array_equal(deleted_vertices, expected_removed_vertices)
78 |
79 |
80 | def test_is_oriented(tet_mesh_fixture):
81 | """
82 | Testing whether the tet mesh orientations are consistent
83 | """
84 | mesh = tet_mesh_fixture
85 | result = mesh.is_oriented()
86 | expected_result = False
87 | assert (
88 | result == expected_result
89 | ), f"Expected is_oriented result {expected_result}, but got {result}"
90 |
91 |
92 | def test_boundary_tria(tet_mesh_fixture):
93 | """
94 | Test computation of boundary triangles from tet mesh.
95 |
96 | `BT.t` represents the array of boundary triangles.
97 | `.shape[0]` counts the number of boundary triangles.
98 | """
99 | mesh = tet_mesh_fixture
100 | boundary_tria_mesh = mesh.boundary_tria()
101 |
102 | expected_num_triangles = 12
103 | assert boundary_tria_mesh.t.shape[0] == expected_num_triangles
104 |
105 | # Check that the boundary mesh is not oriented (the input tet mesh is not consistently oriented)
106 | result = boundary_tria_mesh.is_oriented()
107 | expected_result = False
108 | assert (
109 | result == expected_result
110 | ), f"Expected is_oriented result {expected_result}, but got {result}"
111 |
112 |
113 | def test_avg_edge_length(tet_mesh_fixture):
114 | """
115 | Testing the computation of average edge length for the tetrahedral mesh
116 | """
117 | mesh = tet_mesh_fixture
118 | result = mesh.avg_edge_length()
119 |
120 | expected_avg_edge_length = 1.0543647924813107
121 |
122 | assert np.isclose(result, expected_avg_edge_length)
123 |
124 |
125 | def test_boundary_is_oriented(tet_mesh_fixture):
126 | """
127 | Test orientation consistency in boundary of tetrahedral mesh.
128 | """
129 | mesh = tet_mesh_fixture
130 |
131 | # Get the boundary triangle mesh
132 | boundary_mesh = mesh.boundary_tria()
133 |
134 | # Check if the boundary triangle mesh has consistent orientations
135 | result = boundary_mesh.is_oriented()
136 |
137 | expected_result = False
138 |
139 | assert result == expected_result
140 |
141 |
142 | def test_orient_and_check_oriented(tet_mesh_fixture):
143 | """
144 | Test orienting the tetrahedral mesh for consistency.
145 | """
146 | mesh = tet_mesh_fixture
147 |
148 | # Correct the orientation of the tetrahedral mesh
149 | flipped_tetrahedra = mesh.orient_()
150 |
151 | # Check if the orientations of the tetrahedra are consistent
152 | result = mesh.is_oriented()
153 |
154 | expected_flipped_tetrahedra = 1
155 | expected_oriented_result = True
156 |
157 | # print(f"{flipped_tetrahedra}")
158 |
159 | assert flipped_tetrahedra == expected_flipped_tetrahedra
160 | assert result == expected_oriented_result
161 |
162 |
163 | def test_correct_orientations_and_boundary(tet_mesh_fixture):
164 | """
165 | Testing correcting orientation and checking boundary surface orientation
166 | """
167 | mesh = tet_mesh_fixture
168 |
169 | # Correct the orientation of the tetrahedral mesh
170 | mesh.orient_()
171 |
172 | # Check if the orientations of the tetrahedra are consistent
173 | result_oriented = mesh.is_oriented()
174 | expected_oriented_result = True
175 | assert result_oriented == expected_oriented_result
176 |
177 | # Extract the boundary surface
178 | boundary_surface = mesh.boundary_tria()
179 | print(f"{boundary_surface}")
180 |
181 | # Check if the orientations of the boundary surface are consistent
182 | result_boundary_oriented = boundary_surface.is_oriented()
183 | print(f"{result_boundary_oriented}")
184 | expected_boundary_oriented_result = True
185 | assert result_boundary_oriented == expected_boundary_oriented_result
186 |
187 |
188 | def test_boundary_surface_volume(tet_mesh_fixture):
189 | """
190 | Testing computation of volume for the boundary surface mesh
191 | """
192 | mesh = tet_mesh_fixture
193 |
194 | # Correct the orientation of the tetrahedral mesh
195 | mesh.orient_()
196 |
197 | # Extract the boundary surface
198 | boundary_surface = mesh.boundary_tria()
199 |
200 | # Compute the volume of the boundary surface
201 | result_volume = boundary_surface.volume()
202 | expected_volume = 1.0
203 |
204 | assert np.isclose(result_volume, expected_volume)
205 |
--------------------------------------------------------------------------------
/lapy/utils/tests/test_tria_mesh.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | import numpy as np
4 | import pytest
5 |
6 | from ...tria_mesh import TriaMesh
7 |
8 |
9 | @pytest.fixture
10 | def tria_mesh_fixture():
11 | """
12 | Fixture constructing the TriaMesh once, so the mesh does not have to
13 | be rebuilt in every test case.
14 | """
15 | points = np.array(
16 | [
17 | [0.0, 0.0, 0.0],
18 | [0, 1, 0],
19 | [1, 1, 0],
20 | [1, 0, 0],
21 | [0, 0, 1],
22 | [0, 1, 1],
23 | [1, 1, 1],
24 | [1, 0, 1],
25 | ]
26 | )
27 | trias = np.array(
28 | [
29 | [0, 1, 2],
30 | [2, 3, 0],
31 | [4, 5, 6],
32 | [6, 7, 4],
33 | [0, 4, 7],
34 | [7, 3, 0],
35 | [0, 4, 5],
36 | [5, 1, 0],
37 | [1, 5, 6],
38 | [6, 2, 1],
39 | [3, 7, 6],
40 | [6, 2, 3],
41 | ]
42 | )
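    | # A unit cube surface: 8 corner vertices and 12 triangles (two per face).
    | # Note the triangle winding is not consistent, so is_oriented() returns
    | # False until orient_() is called (see the orientation tests below).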
43 | return TriaMesh(points, trias)
44 |
45 |
46 | @pytest.fixture
47 | def loaded_data():
48 | """
49 | Load and provide the expected outcomes data from a JSON file.
50 |
51 | Returns:
52 | dict: Dictionary containing the expected outcomes data.
53 | """
54 | with open("lapy/utils/tests/expected_outcomes.json") as f:
55 | expected_outcomes = json.load(f)
56 | return expected_outcomes
57 |
58 |
59 | def test_is_closed(tria_mesh_fixture):
60 | """
61 | Testing whether is_closed() returns True for the closed cube mesh
62 | """
63 |
64 | mesh = tria_mesh_fixture
65 | result = mesh.is_closed()
66 | assert result is True
67 |
68 |
69 | def test_is_manifold(tria_mesh_fixture):
70 | """
71 | Testing whether the function is_manifold() returns True
72 | """
73 | mesh = tria_mesh_fixture
74 | result = mesh.is_manifold()
75 | expected_result = True
76 | assert (
77 | result == expected_result
78 | ), f"Expected is_manifold result {expected_result}, but got {result}"
79 |
80 |
81 | def test_is_oriented(tria_mesh_fixture):
82 | """
83 | Testing that is_oriented() returns False for the inconsistently wound fixture
84 | """
85 | mesh = tria_mesh_fixture
86 | result = mesh.is_oriented()
87 | expected_result = False
88 | assert result == expected_result, f"returning {result}"
89 |
90 |
91 | def test_euler(tria_mesh_fixture, loaded_data):
92 | """
93 | Testing whether euler() returns the expected Euler characteristic (2 for a closed genus-0 surface)
94 | """
95 | mesh = tria_mesh_fixture
96 | expected_euler_value = loaded_data["expected_outcomes"]["test_tria_mesh"][
97 | "expected_euler_value"
98 | ]
99 | result = mesh.euler()
100 | assert (
101 | result == expected_euler_value
102 | ), f"Expected Euler characteristic 2, but got {result}"
103 |
104 |
105 | def test_tria_areas(tria_mesh_fixture, loaded_data):
106 | """
107 | np.testing.assert_array_almost_equal raises an AssertionError if the two
108 | arrays, i.e. tria_areas and expected_area, are not equal up to the desired precision.
109 | """
110 | expected_area = np.array(
111 | loaded_data["expected_outcomes"]["test_tria_mesh"]["expected_area"]
112 | )
113 |
114 | mesh = tria_mesh_fixture
115 | result = mesh.tria_areas()
116 | np.testing.assert_array_almost_equal(result, expected_area)
117 |
118 |
119 | def test_area(tria_mesh_fixture, loaded_data):
120 | """
121 | Testing whether area() returns a value almost equal to the expected area
122 | """
123 | mesh = tria_mesh_fixture
124 | result = mesh.area()
125 | expected_mesh_area = float(
126 | loaded_data["expected_outcomes"]["test_tria_mesh"]["expected_mesh_area"]
127 | )
128 | assert result == pytest.approx(expected_mesh_area)
129 |
130 |
131 | # Define the test case for non-oriented mesh
132 | def test_volume_oriented(tria_mesh_fixture):
133 | """
134 | Verify that T.volume() raises a ValueError with the expected message when
135 | the input TriaMesh is not correctly oriented: computing the volume
136 | enclosed by a closed mesh requires a consistent (outward) orientation,
137 | so the call is expected to fail for this fixture.
138 | """
139 | # Use the appropriate exception that T.volume() raises
140 | with pytest.raises(
141 | ValueError, match="Error: Can only compute volume for oriented triangle meshes!"
142 | ):
143 | tria_mesh_fixture.volume()
144 |
145 |
146 | def test_vertex_degrees(tria_mesh_fixture, loaded_data):
147 | """
148 | Testing the calculation of vertex degrees
149 | """
150 | mesh = tria_mesh_fixture
151 | result = mesh.vertex_degrees()
152 | expected_vertex_degrees = np.array(
153 | loaded_data["expected_outcomes"]["test_tria_mesh"]["expected_vertex_degrees"]
154 | )
155 | np.testing.assert_array_equal(result, expected_vertex_degrees)
156 |
157 |
158 | def test_vertex_areas(tria_mesh_fixture, loaded_data):
159 | """
160 | Testing the calculation of vertex areas
161 | """
162 | expected_vertex_area = np.array(
163 | loaded_data["expected_outcomes"]["test_tria_mesh"]["expected_vertex_area"]
164 | )
165 | mesh = tria_mesh_fixture
166 | result = mesh.vertex_areas()
167 | np.testing.assert_almost_equal(result, expected_vertex_area)
168 | # Verify that the sum of vertex areas is close to the total surface area
169 | vertex_areas_sum = np.sum(mesh.vertex_areas())
170 | total_surface_area = mesh.area()
171 | assert np.isclose(vertex_areas_sum, total_surface_area)
172 |
173 |
174 | def test_avg_edge_length(tria_mesh_fixture, loaded_data):
175 | """
176 | Testing the calculation of average edge length
177 | """
178 | mesh = tria_mesh_fixture
179 | expected_edge_length = float(
180 | loaded_data["expected_outcomes"]["test_tria_mesh"]["expected_edge_length"]
181 | )
182 | result = mesh.avg_edge_length()
183 | assert np.isclose(
184 | result, expected_edge_length
185 | ), f"Average edge length {result} is not equal to expected {expected_edge_length}"
186 |
187 |
188 | def test_tria_normals(tria_mesh_fixture, loaded_data):
189 | """
190 | Testing that tria_normals() returns the expected per-triangle normals,
191 | an array of shape (n_triangles, 3)
192 | """
193 | expected_triangle_normals = np.array(
194 | loaded_data["expected_outcomes"]["test_tria_mesh"]["expected_triangle_normals"]
195 | )
196 | mesh = tria_mesh_fixture
197 | result = mesh.tria_normals()
198 | np.testing.assert_allclose(result, expected_triangle_normals)
199 |
200 |
201 | def test_tria_qualities(tria_mesh_fixture, loaded_data):
202 | """
203 | Testing the calculation of triangle qualities
204 | """
205 | mesh = tria_mesh_fixture
206 | result = mesh.tria_qualities()
207 | expected_triangle_qualities = np.array(
208 | loaded_data["expected_outcomes"]["test_tria_mesh"]["expected_triangle"]
209 | )
210 | np.testing.assert_almost_equal(result, expected_triangle_qualities)
211 |
212 |
213 | def test_has_free_vertices(tria_mesh_fixture):
214 | """
215 | Testing the detection of free vertices
216 | """
217 | mesh = tria_mesh_fixture
218 | result = mesh.has_free_vertices()
219 |
220 | expected_result = False
221 | assert (
222 | result == expected_result
223 | ), f"Expected has_free_vertices {expected_result}, but got {result}"
224 |
225 |
226 | def test_rm_free_vertices(tria_mesh_fixture, loaded_data):
227 | """
228 | Testing the removal of free vertices
229 | """
230 | updated_vertices, deleted_vertices = tria_mesh_fixture.rm_free_vertices_()
231 | expected_vertices = np.array(
232 | loaded_data["expected_outcomes"]["test_tria_mesh"]["expected_vertices"]
233 | )
234 | expected_deleted_vertices = np.array([])
235 | assert np.array_equal(updated_vertices, expected_vertices)
236 | assert np.array_equal(deleted_vertices, expected_deleted_vertices)
237 |
238 |
239 | # Define the test case for orient_ function
240 | def test_orient(tria_mesh_fixture, loaded_data):
241 | """
242 | Testing the orienting of the mesh
243 | """
244 | # Call the tria_mesh_fixture.orient_() method to re-orient the mesh consistently
245 | mesh = tria_mesh_fixture
246 | flipped = mesh.orient_()
247 |
248 | # Check if the returned 'flipped' count matches the expected count, i.e., 6
249 | expected_flips = np.array(
250 | loaded_data["expected_outcomes"]["test_tria_mesh"]["expected_flips"]
251 | )
252 | assert flipped == expected_flips
253 |
254 |
255 | def test_is_oriented_(tria_mesh_fixture):
256 | """
257 | Testing the check for mesh orientation
258 | """
259 | mesh = tria_mesh_fixture
260 | "orient the mesh consistently so that all triangle normals point outwards."
261 | mesh.orient_()
262 | result = mesh.is_oriented()
263 | expected_result = True
264 | assert (
265 | result == expected_result
266 | ), f"Expected is_oriented result {expected_result}, but got {result}"
267 |
268 |
269 | # Compute volume (works only for oriented meshes)
270 |
271 |
272 | def test_volume_(tria_mesh_fixture):
273 | """
274 | Testing the computation of enclosed volume for oriented mesh
275 | """
276 | mesh = tria_mesh_fixture
277 | mesh.orient_()
278 | result = mesh.volume()
279 | expected_result = 1.0
280 | assert (
281 | result == expected_result
282 | ), f"Expected volume result {expected_result}, but got {result}"
283 |
284 |
285 | def test_vertex_normals(tria_mesh_fixture, loaded_data):
286 | """
287 | Testing the computation of vertex normals for oriented mesh.
288 | """
289 |
290 | # Calling tria_mesh_fixture.orient_() will modify the tria_mesh_fixture in-place and
291 | # return the number of flipped triangles. However, it won't return a new instance of
292 | # TriaMesh, so assigning the result to mesh like mesh = tria_mesh_fixture.orient_()
293 | # would not work as expected.
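    | # i.e. `n_flipped = tria_mesh_fixture.orient_()` yields an int, not a TriaMesh.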
294 |
295 | # Ensure the mesh is oriented before computing vertex normals
296 | tria_mesh_fixture.orient_()
297 | mesh = tria_mesh_fixture
298 | result = mesh.vertex_normals()
299 | expected_result = np.array(
300 | loaded_data["expected_outcomes"]["test_tria_mesh"]["expected_result"]
301 | )
302 | np.testing.assert_allclose(result, expected_result)
303 |
304 |
305 | def test_normal_offset(tria_mesh_fixture, loaded_data):
306 | """
307 | Testing the normal offset operation
308 | """
309 |
310 | # Orient the mesh before applying normal offset
311 | mesh = tria_mesh_fixture
312 | mesh.orient_()
313 |
314 | # Keep a copy of the initial vertex coordinates
315 | initial_vertices = mesh.v.copy()
316 |
317 | # Calculate the offset distance 'd'
318 | d = 0.2 * mesh.avg_edge_length()
319 |
320 | # Perform the 'normal_offset_' operation and check that the vertices moved
321 | mesh.normal_offset_(d)
    | assert not np.array_equal(mesh.v, initial_vertices)
322 |
323 |
324 | def test_boundary_mesh(tria_mesh_fixture):
325 | # The fixture mesh is closed (no boundary)
326 |
327 | original_mesh = tria_mesh_fixture
328 |
329 | # Create an open mesh with a boundary by dropping two triangles
330 | boundary_mesh = TriaMesh(original_mesh.v, original_mesh.t[2:, :])
331 |
332 | # Check if the boundary mesh has the correct number of vertices and triangles
333 | assert boundary_mesh.v.shape[0] == original_mesh.v.shape[0]
334 | assert boundary_mesh.t.shape[0] == original_mesh.t.shape[0] - 2
335 |
336 |
337 | def test_refine_and_boundary_loops(tria_mesh_fixture, loaded_data):
338 | """
339 | Testing boundary loops after refining the mesh.
340 | """
341 | # Orient the mesh, then create an open mesh by dropping triangles
342 | tria_mesh_fixture.orient_()
343 |
344 | original_mesh = tria_mesh_fixture
345 |
346 | boundary_mesh = TriaMesh(original_mesh.v, original_mesh.t[2:, :])
347 |
348 | # Refine the boundary mesh
349 | boundary_mesh.refine_()
350 |
351 | # Get the boundary loops of the refined mesh
352 | boundary_loops = boundary_mesh.boundary_loops()
353 |
354 | # Check if there is only one boundary loop
355 | assert len(boundary_loops) == 1
356 |
357 | # Check the vertices along the boundary loop
358 | expected_boundary_loop = loaded_data["expected_outcomes"]["test_tria_mesh"][
359 | "expected_boundary_loop"
360 | ]
361 |
362 | assert boundary_loops[0] == expected_boundary_loop
363 |
--------------------------------------------------------------------------------
/lapy/utils/tests/test_visualization_meshes.py:
--------------------------------------------------------------------------------
1 | import json
2 | from pathlib import Path
3 |
4 | import pytest
5 |
6 | from ...io import write_ev
7 | from ...solver import Solver
8 | from ...tet_mesh import TetMesh
9 | from ...tria_mesh import TriaMesh
10 |
11 |
12 | # Fixture to load the TriaMesh
13 | @pytest.fixture
14 | def load_tria_mesh():
15 | tria = TriaMesh.read_vtk("data/cubeTria.vtk")
16 | return tria
17 |
18 |
19 | @pytest.fixture
20 | def load_tet_mesh():
21 | tetra = TetMesh.read_vtk("data/cubeTetra.vtk")
22 | return tetra
23 |
24 |
25 | @pytest.fixture
26 | def loaded_data():
27 | """
28 | Load and provide the expected outcomes data from a JSON file.
29 |
30 | Returns:
31 | dict: Dictionary containing the expected outcomes data.
32 | """
33 | with open("lapy/utils/tests/expected_outcomes.json") as f:
34 | expected_outcomes = json.load(f)
35 | return expected_outcomes
36 |
37 |
38 | def test_visualization_triangle_mesh(load_tria_mesh, loaded_data):
39 | """
40 | Test visualization of a triangle mesh using expected outcomes.
41 |
42 | Parameters:
43 | - load_tria_mesh (fixture): Fixture for loading a triangle mesh.
44 | - loaded_data (fixture): Fixture for loading expected outcomes.
45 |
46 | Raises:
47 | - AssertionError: If any test assertions fail.
48 | """
49 | tria = load_tria_mesh
50 | fem = Solver(tria)
51 | evals, evecs = fem.eigs(k=3)
52 | evDict = dict()
53 | evDict["Refine"] = 0
54 | evDict["Degree"] = 1
55 | evDict["Dimension"] = 2
56 | evDict["Elements"] = len(tria.t)
57 | evDict["DoF"] = len(tria.v)
58 | evDict["NumEW"] = 3
59 | evDict["Eigenvalues"] = evals
60 | evDict["Eigenvectors"] = evecs
61 | write_ev("data/cubeTria.ev", evDict)
62 | output_file = Path("data/cubeTria.ev")
63 | assert output_file.exists() # Check if the output file exists
64 | expected_elements = loaded_data["expected_outcomes"][
65 | "test_visualization_triangle_mesh"
66 | ]["expected_elements"]
67 | expected_dof = loaded_data["expected_outcomes"]["test_visualization_triangle_mesh"][
68 | "expected_dof"
69 | ]
70 | expected_ev = loaded_data["expected_outcomes"]["test_visualization_triangle_mesh"][
71 | "expected_ev"
72 | ]
73 |
74 | expected_evec_shape = (2402, 3)
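    | # eigs(k=3) returns one eigenvector column per eigenvalue, so the
    | # expected shape is (DoF, k) = (2402, 3).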
75 | assert evDict["Elements"] == expected_elements
76 | assert evDict["DoF"] == expected_dof
77 | assert evals == pytest.approx(expected_ev, rel=1e-5, abs=1e-5)
78 | assert evecs.shape == expected_evec_shape
79 |
80 |
81 | def test_visualization_tetrahedral_mesh(load_tet_mesh, loaded_data):
82 | """
83 | Test visualization of a tetrahedral mesh using expected outcomes.
84 |
85 | Parameters:
86 | - load_tet_mesh (fixture): Fixture for loading a tetrahedral mesh.
87 | - loaded_data (fixture): Fixture for loading expected outcomes.
88 |
89 | Raises:
90 | - AssertionError: If any test assertions fail.
91 | """
92 | tetra = load_tet_mesh
93 | fem = Solver(tetra)
94 | evals, evecs = fem.eigs(k=3)
95 | evDict = dict()
96 | evDict["Refine"] = 0
97 | evDict["Degree"] = 1
98 | evDict["Dimension"] = 2
99 | evDict["Elements"] = len(tetra.t)
100 | evDict["DoF"] = len(tetra.v)
101 | evDict["NumEW"] = 3
102 | evDict["Eigenvalues"] = evals
103 | evDict["Eigenvectors"] = evecs
104 | write_ev("data/cubeTetra.ev", evDict)
105 | output_file = Path("data/cubeTetra.ev")
106 | assert output_file.exists() # Check if the output file exists
107 | expected_elements = loaded_data["expected_outcomes"][
108 | "test_visualization_tetrahedral_mesh"
109 | ]["expected_elements"]
110 | expected_dof = loaded_data["expected_outcomes"][
111 | "test_visualization_tetrahedral_mesh"
112 | ]["expected_dof"]
113 | expected_ev = loaded_data["expected_outcomes"][
114 | "test_visualization_tetrahedral_mesh"
115 | ]["expected_ev"]
116 | expected_evec_shape = (9261, 3)
117 | assert evDict["Elements"] == expected_elements
118 | assert evDict["DoF"] == expected_dof
119 | assert evals == pytest.approx(expected_ev, rel=1e-4, abs=1e-4)
120 | assert evecs.shape == expected_evec_shape
121 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [build-system]
2 | requires = [
3 | 'setuptools >= 61.0.0',
4 | 'numpy>=2',
5 | ]
6 | build-backend = 'setuptools.build_meta'
7 |
8 | [project]
9 | name = 'lapy'
10 | version = '1.2.0'
11 | description = 'A package for differential geometry on meshes (Laplace, FEM)'
12 | readme = 'README.md'
13 | license = {file = 'LICENSE'}
14 | requires-python = '>=3.9'
15 | authors = [
16 | {name = 'Martin Reuter', email = 'martin.reuter@dzne.de'},
17 | ]
18 | maintainers = [
19 | {name = 'Martin Reuter', email = 'martin.reuter@dzne.de'},
20 | ]
21 | keywords = [
22 | 'python',
23 | 'Laplace',
24 | 'FEM',
25 | 'ShapeDNA',
26 | 'BrainPrint',
27 | 'Triangle Mesh',
28 | 'Tetrahedra Mesh',
29 | 'Geodesics in Heat',
30 | 'Mean Curvature Flow',
31 | ]
32 | classifiers = [
33 | 'Operating System :: Microsoft :: Windows',
34 | 'Operating System :: Unix',
35 | 'Operating System :: MacOS',
36 | 'Programming Language :: Python :: 3 :: Only',
37 | 'Programming Language :: Python :: 3.9',
38 | 'Programming Language :: Python :: 3.10',
39 | 'Programming Language :: Python :: 3.11',
40 | 'Programming Language :: Python :: 3.12',
41 | 'Natural Language :: English',
42 | 'License :: OSI Approved :: MIT License',
43 | 'Intended Audience :: Science/Research',
44 | ]
45 | dependencies = [
46 | 'nibabel',
47 | 'numpy>=1.21',
48 | 'plotly',
49 | 'psutil',
50 | 'scipy!=1.13.0',
51 | ]
52 |
53 | [project.optional-dependencies]
54 | build = [
55 | 'build',
56 | 'twine',
57 | ]
58 | chol = [
59 | 'scikit-sparse',
60 | ]
61 | doc = [
62 | 'furo!=2023.8.17',
63 | 'matplotlib',
64 | 'memory-profiler',
65 | 'numpydoc',
66 | 'sphinx!=7.2.*',
67 | 'sphinxcontrib-bibtex',
68 | 'sphinx-copybutton',
69 | 'sphinx-design',
70 | 'sphinx-gallery',
71 | 'sphinx-issues',
72 | 'pypandoc',
73 | 'nbsphinx',
74 | 'IPython', # For syntax highlighting in notebooks
75 | 'ipykernel',
76 | ]
77 | style = [
78 | 'bibclean',
79 | 'codespell',
80 | 'pydocstyle[toml]',
81 | 'ruff',
82 | ]
83 | test = [
84 | 'pytest',
85 | 'pytest-cov',
86 | 'pytest-timeout',
87 | ]
88 | all = [
89 | 'lapy[build]',
90 | 'lapy[chol]',
91 | 'lapy[doc]',
92 | 'lapy[style]',
93 | 'lapy[test]',
94 | ]
95 | full = [
96 | 'lapy[all]',
97 | ]
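    | # Extras can be combined at install time, e.g. `pip install "lapy[test,doc]"`,
    | # or use `pip install "lapy[full]"` to pull in every group above.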
98 |
99 | [project.urls]
100 | homepage = 'https://Deep-MI.github.io/LaPy/dev/index.html'
101 | documentation = 'https://Deep-MI.github.io/LaPy/dev/index.html'
102 | source = 'https://github.com/Deep-MI/LaPy'
103 | tracker = 'https://github.com/Deep-MI/LaPy/issues'
104 |
105 | [project.scripts]
106 | lapy-sys_info = 'lapy.commands.sys_info:run'
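    | # Installing the package exposes a `lapy-sys_info` console command, wired
    | # to the `run()` callable in `lapy/commands/sys_info.py`.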
107 |
108 | [tool.setuptools]
109 | include-package-data = false
110 |
111 | [tool.setuptools.packages.find]
112 | include = ['lapy*']
113 | exclude = ['lapy*tests']
114 |
115 | [tool.pydocstyle]
116 | convention = 'numpy'
117 | ignore-decorators = '(copy_doc|property|.*setter|.*getter|pyqtSlot|Slot)'
118 | match = '^(?!setup|__init__|test_).*\.py'
119 | match-dir = '^lapy.*'
120 | add_ignore = 'D100,D104,D107'
121 |
122 | [tool.ruff]
123 | line-length = 100
124 | extend-exclude = [
125 | "doc",
126 | ".github",
127 | "data",
128 | ]
129 |
130 | [tool.ruff.lint]
131 | # https://docs.astral.sh/ruff/linter/#rule-selection
132 | select = [
133 | "E", # pycodestyle
134 | "F", # Pyflakes
135 | "UP", # pyupgrade
136 | "B", # flake8-bugbear
137 | "I", # isort
138 | # "SIM", # flake8-simplify
139 | ]
140 |
141 | [tool.ruff.lint.per-file-ignores]
142 | "__init__.py" = ["F401"]
143 | "examples/*" = ["E501"] # ignore too long lines in example ipynb
144 |
145 | [tool.pytest.ini_options]
146 | minversion = '6.0'
147 | addopts = '--durations 20 --junit-xml=junit-results.xml --verbose'
148 | filterwarnings = []
149 |
150 | [tool.coverage.run]
151 | branch = true
152 | cover_pylib = false
153 | omit = [
154 | '**/__init__.py',
155 | '**/lapy/_version.py',
156 | '**/lapy/commands/*',
157 | '**/tests/**',
158 | ]
159 |
160 | [tool.coverage.report]
161 | exclude_lines = [
162 | 'pragma: no cover',
163 | 'if __name__ == .__main__.:',
164 | ]
165 | precision = 2
166 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup
2 |
3 | setup()
4 |
--------------------------------------------------------------------------------
/tutorials/README.rst:
--------------------------------------------------------------------------------
1 | Tutorials
2 | =========
3 |
--------------------------------------------------------------------------------