├── .github
└── workflows
│ ├── documentation.yml
│ └── test_and_deploy.yml
├── .gitignore
├── LICENSE
├── MANIFEST.in
├── README.md
├── doc
└── zoomout.gif
├── docs
└── source
│ ├── api.rst
│ ├── conf.py
│ ├── examples.md
│ ├── getting_started.md
│ ├── index.rst
│ └── install.md
├── examples
├── data
│ ├── camel_gallop
│ │ ├── camel-gallop-01.off
│ │ ├── camel-gallop-02.off
│ │ ├── camel-gallop-03.off
│ │ ├── camel-gallop-04.off
│ │ ├── camel-gallop-05.off
│ │ ├── camel-gallop-06.off
│ │ ├── camel-gallop-07.off
│ │ ├── camel-gallop-08.off
│ │ ├── camel-gallop-09.off
│ │ ├── camel-gallop-10.off
│ │ └── maps
│ │ │ ├── 10_to_2
│ │ │ ├── 10_to_8
│ │ │ ├── 1_to_3
│ │ │ ├── 1_to_5
│ │ │ ├── 1_to_7
│ │ │ ├── 2_to_10
│ │ │ ├── 2_to_3
│ │ │ ├── 2_to_4
│ │ │ ├── 2_to_5
│ │ │ ├── 3_to_1
│ │ │ ├── 3_to_2
│ │ │ ├── 3_to_5
│ │ │ ├── 3_to_6
│ │ │ ├── 3_to_9
│ │ │ ├── 4_to_2
│ │ │ ├── 4_to_8
│ │ │ ├── 5_to_1
│ │ │ ├── 5_to_2
│ │ │ ├── 5_to_3
│ │ │ ├── 5_to_7
│ │ │ ├── 6_to_3
│ │ │ ├── 6_to_7
│ │ │ ├── 7_to_1
│ │ │ ├── 7_to_5
│ │ │ ├── 7_to_6
│ │ │ ├── 8_to_10
│ │ │ ├── 8_to_4
│ │ │ ├── 8_to_9
│ │ │ ├── 9_to_3
│ │ │ └── 9_to_8
│ ├── cat-00.off
│ ├── landmarks.txt
│ ├── lion-00.off
│ └── lion2cat
├── functional_map_network
│ └── FMN_base.ipynb
└── mesh_and_matching
│ ├── basic_functions.ipynb
│ └── matching_with_zoomout.ipynb
├── pyFM
├── FMN
│ ├── FMN.py
│ └── __init__.py
├── __init__.py
├── eval
│ ├── __init__.py
│ ├── __pycache__
│ │ ├── __init__.cpython-36.pyc
│ │ └── evaluate.cpython-36.pyc
│ └── evaluate.py
├── functional.py
├── mesh
│ ├── __init__.py
│ ├── data
│ │ ├── texture_1.jpg
│ │ └── texture_2.jpg
│ ├── file_utils.py
│ ├── geometry.py
│ ├── laplacian.py
│ └── trimesh.py
├── optimize
│ ├── __init__.py
│ └── base_functions.py
├── refine
│ ├── __init__.py
│ ├── icp.py
│ └── zoomout.py
├── signatures
│ ├── HKS_functions.py
│ ├── WKS_functions.py
│ └── __init__.py
├── spectral
│ ├── __init__.py
│ ├── convert.py
│ ├── nn_utils.py
│ ├── projection_utils.py
│ └── shape_difference.py
└── tests
│ └── test_data.py
├── requirements.txt
├── requirements_docs.txt
├── setup.cfg
└── setup.py
/.github/workflows/documentation.yml:
--------------------------------------------------------------------------------
1 | name: Documentation
2 |
3 | on:
4 | push:
5 | branches:
6 | - master
7 | pull_request:
8 | workflow_dispatch:
9 | release:
10 |
11 | jobs:
12 | build_documentation:
13 | name: Build Documentation
14 | runs-on: ubuntu-latest
15 |
16 | permissions:
17 | contents: write
18 |
19 | steps:
20 | - uses: actions/checkout@v4
21 |
22 | - uses: actions/setup-python@v5
23 | with:
24 | python-version: "3.10"
25 | cache: "pip"
26 |
27 | - name: Install package (with dependencies for docs)
28 | run: |
29 | pip install --upgrade pip
30 | pip install -r requirements_docs.txt
31 | pip install .
32 |
33 | - name: Build HTML
34 | run: |
35 | sphinx-build -b html docs/source/ docs/_build/html
36 |
37 | - name: upload artifact
38 | uses: actions/upload-artifact@v4
39 | with:
40 | path:
41 | docs/_build/html/
42 | - name: Deploy to GitHub Pages
43 | uses: peaceiris/actions-gh-pages@v4
44 | if: github.ref == 'refs/heads/master'
45 | with:
46 | publish_branch: gh-pages
47 | github_token: ${{ secrets.GITHUB_TOKEN }}
48 | publish_dir: docs/_build/html
49 | force_orphan: true
--------------------------------------------------------------------------------
/.github/workflows/test_and_deploy.yml:
--------------------------------------------------------------------------------
1 | name: tests
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 | - master
8 | tags:
9 | - "v*" # Push events to matching v*, i.e. v1.0, v20.15.10
10 | pull_request:
11 | branches:
12 | - master
13 | - main
14 | workflow_dispatch:
15 |
16 | jobs:
17 | test:
18 | name: ${{ matrix.platform }} py${{ matrix.python-version }}
19 | runs-on: ${{ matrix.platform }}
20 | strategy:
21 | matrix:
22 | platform: [windows-latest, ubuntu-latest, macos-latest] # ubuntu-latest is failing anyway
23 | python-version: ['3.8', '3.9', '3.10']
24 |
25 | steps:
26 | - uses: actions/checkout@v3
27 |
28 | - name: Set up Python ${{ matrix.python-version }}
29 | uses: actions/setup-python@v4
30 | with:
31 | python-version: ${{ matrix.python-version }}
32 |
33 | # note: if you need dependencies from conda, considering using
34 | # setup-miniconda: https://github.com/conda-incubator/setup-miniconda
35 | # and
36 | # tox-conda: https://github.com/tox-dev/tox-conda
37 | - name: Install dependencies
38 | run: |
39 | python -m pip install --upgrade pip
40 | python -m pip install setuptools pytest-cov
41 | pip install .
42 |
43 | # this runs the platform-specific tests
44 | - name: Run tests
45 | shell: bash -l {0}
46 | run: pytest -v --cov=./ --cov-report=xml
47 | env:
48 | PLATFORM: ${{ matrix.platform }}
49 |
50 | - name: Coverage
51 | uses: codecov/codecov-action@v3
52 |
53 | deploy:
54 | # this will run when you have tagged a commit, starting with "v*"
55 | # and requires that you have put your twine API key in your
56 | # github secrets (see readme for details)
57 | needs: [test]
58 | runs-on: ubuntu-latest
59 | if: contains(github.ref, 'tags')
60 | steps:
61 | - uses: actions/checkout@v3
62 | - name: Set up Python
63 | uses: actions/setup-python@v4
64 | with:
65 | python-version: "3.x"
66 | - name: Install dependencies
67 | run: |
68 | python -m pip install --upgrade pip
69 | pip install -U setuptools setuptools_scm wheel twine build
70 | - name: Build and publish
71 | env:
72 | TWINE_USERNAME: __token__
73 | TWINE_PASSWORD: ${{ secrets.PYPI_API_TOKEN }}
74 | run: |
75 | git tag
76 | python -m build .
77 | twine upload dist/*
78 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | docs/build
2 | docs/source/generated/*
3 | docs/make.bat
4 | docs/Makefile
5 |
6 | # Byte-compiled / optimized / DLL files
7 | __pycache__/
8 | *.py[cod]
9 | *$py.class
10 |
11 | # data
12 | /data
13 |
14 | # STRESS outputs
15 | *.pdf
16 | *.vtp
17 | *.mat
18 |
19 | # C extensions
20 | *.so
21 |
22 | # Distribution / packaging
23 | .Python
24 | env/
25 | build/
26 | develop-eggs/
27 | dist/
28 | downloads/
29 | eggs/
30 | .eggs/
31 | lib/
32 | lib64/
33 | parts/
34 | sdist/
35 | var/
36 | *.egg-info/
37 | .installed.cfg
38 | *.egg
39 |
40 | # PyInstaller
41 | # Usually these files are written by a python script from a template
42 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
43 | *.manifest
44 | *.spec
45 |
46 | # Installer logs
47 | pip-log.txt
48 | pip-delete-this-directory.txt
49 |
50 | # Unit test / coverage reports
51 | htmlcov/
52 | .tox/
53 | .coverage
54 | .coverage.*
55 | .cache
56 | nosetests.xml
57 | coverage.xml
58 | *,cover
59 | .hypothesis/
60 | .napari_cache
61 |
62 | # Translations
63 | *.mo
64 | *.pot
65 |
66 | # Django stuff:
67 | *.log
68 | local_settings.py
69 |
70 | # Flask instance folder
71 | instance/
72 |
73 | # Sphinx documentation
74 | docs/_build/
75 |
76 | # MkDocs documentation
77 | /site/
78 |
79 | # PyBuilder
80 | target/
81 |
82 | # Pycharm and VSCode
83 | .idea/
84 | venv/
85 | .vscode/
86 |
87 | # IPython Notebook
88 | .ipynb_checkpoints
89 |
90 | # pyenv
91 | .python-version
92 |
93 | # OS
94 | .DS_Store
95 |
96 | # written by setuptools_scm
97 | **/_version.py
98 |
99 | # Dask
100 | dask-worker-space/
101 |
102 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Robin Magnet
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE
2 | include README.md
3 |
4 | include data/*
5 |
6 | recursive-exclude * __pycache__
7 | recursive-exclude * *.py[co]
8 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # pyFM - Python Functional Maps Library
2 |
3 |
4 |
5 |
6 |
7 | [](https://robinmagnet.github.io/pyFM/)
8 |
9 | **NEW** API Documentation is available [here](https://robinmagnet.github.io/pyFM/)
10 |
11 | This package contains a comprehensive Python implementation for shape correspondence using functional maps, featuring code from [multiple papers](#implemented-papers)
12 |
13 | ## Core Features
14 | - Complete TriMesh class with geometric measures (geodesics, normals, LBO projections, differential operators)
15 | - Fast Laplace-Beltrami Operator implementation
16 | - Shape descriptors (HKS, WKS) with flexible parameter control
17 | - Efficient correspondence refinement (ICP, ZoomOut)
18 | - Fast functional-to-pointwise map conversion
19 | - Functional Map Network utilities
20 |
21 | ## Installation
22 | ```bash
23 | pip install pyfmaps
24 | ```
25 |
26 | Please check the [PyPI page](https://pypi.org/project/pyfmaps/) to make sure you are using the latest released version.
27 |
28 | ### Key Dependencies
29 | - Required: numpy, scipy, tqdm, scikit-learn
30 | - Optional: [`potpourri3d`](https://github.com/nmwsharp/potpourri3d) (geodesics), [`robust_laplacian`](https://github.com/nmwsharp/robust-laplacians-py) (Delaunay/tufted Laplacian)
31 |
32 | ## Design Philosophy
33 | This codebase prioritizes readability and adaptability over rigid modularity.
34 | The implementation seeks to make it easy to:
35 | - Read and understand the underlying algorithms
36 | - Copy and modify code snippets for your research
37 | - Experiment with and extend existing methods
38 | - Avoid rewriting core functionality from scratch
39 |
40 | Under the hood, core algorithms are implemented as standalone functions operating on basic numpy arrays.
41 | This dual-layer design means you can use the high-level interface for standard workflows, but still easily extract and modify the core functions for your research, as they work with simple numpy arrays.
42 |
43 | While this approach differs from highly modular packages like `scikit-learn`, it better serves researchers who need to modify and build upon existing shape correspondence methods.
44 |
45 |
46 | This design choice comes with some **trade-offs**:
47 | - Less abstraction means potentially more code to achieve certain tasks
48 | - Users need some familiarity with shape correspondence concepts
49 | - Implementation favors explicit algorithms over general-purpose interfaces
50 |
51 | I selected this approach from my personal experience with research codebases and may not suit everyone's needs. More generic and abstracted implementations of geometric processing can be expected to appear soonish in the [scikit-shapes](https://scikit-shapes.github.io/scikit-shapes/) or [geomstats](https://geomstats.github.io/) libraries.
52 |
53 | ## Notation Convention
54 | - Functional maps (FM_12): mesh1 → mesh2
55 | - Pointwise maps (p2p_21): mesh2 → mesh1
56 |
57 | ## Documentation & Examples
58 | - [API Documentation](https://robinmagnet.github.io/pyFM/)
59 | - [Example Notebooks](https://github.com/RobinMagnet/pyFM/tree/master/examples)
60 |
61 | ## Implemented Papers
62 | This library implements methods from several key papers in shape correspondence, including:
63 |
64 | * [The Heat Method for Distance Computation](https://www.cs.cmu.edu/~kmcrane/Projects/HeatMethod/)
65 | * [A Concise and Provably Informative Multi-Scale Signature Based on Heat Diffusion](http://www.lix.polytechnique.fr/~maks/papers/hks.pdf)
66 | * [The Wave Kernel Signature: A Quantum Mechanical Approach To Shape Analysis](http://imagine.enpc.fr/~aubrym/projects/wks/index.html)
67 | * [ZoomOut: Spectral Upsampling for Efficient Shape Correspondence](https://arxiv.org/abs/1904.07865), with MatLab implementation [here](https://github.com/llorz/SGA19_zoomOut)
68 | * [Deblurring and Denoising of Maps between Shapes](https://www.cs.technion.ac.il/~mirela/publications/p2p_recovery.pdf), with Matlab implementation [here](https://mirela.net.technion.ac.il/publications/)
69 | * [Functional Maps: A Flexible Representation of Maps Between Shapes](http://www.lix.polytechnique.fr/~maks/papers/obsbg_fmaps.pdf)
70 | * [Informative Descriptor Preservation via Commutativity for Shape Matching](http://www.lix.polytechnique.fr/~maks/papers/fundescEG17.pdf)
71 | * [Continuous and Orientation-preserving Correspondences via Functional Maps](https://arxiv.org/abs/1806.04455), only the orientation preserving / reversing term, matlab implementation can be found [here](https://github.com/llorz/SGA18_orientation_BCICP_code)
72 | * [Map-Based Exploration of Intrinsic Shape Differences and Variability](https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.642.4287&rep=rep1&type=pdf)
73 | * [An Optimization Approach to Improving Collections of Shape Maps](http://fodava.gatech.edu/files/reports/FODAVA-11-22.pdf)
74 | * [Limit Shapes – A Tool for Understanding Shape Differences and Variability in 3D Model Collections](http://www.lix.polytechnique.fr/~maks/papers/limit_shapes_SGP19.pdf)
75 | * [CONSISTENT ZOOMOUT: Efficient Spectral Map Synchronization](http://www.lix.polytechnique.fr/~maks/papers/ConsistentZoomOut_SGP2020.pdf), with Matlab implementation [here](https://github.com/llorz/SGA19_zoomOut)
76 |
77 | ## Torch Version
78 |
79 | Most functions in this package can easily be translated to torch. However, for clarity of the code, a separate version will be released.
80 |
81 | For now, the torch implementations can be found in different githubs (see my GitHub profile).
82 |
83 | ## Coming Soon
84 | - [Discrete Optimization for Shape Matching](https://www.lix.polytechnique.fr/~maks/papers/SGP21_DiscMapOpt.pdf) and [Smooth Non-Rigid Shape Matching via Effective Dirichlet Energy Optimization](https://www.lix.polytechnique.fr/Labo/Robin.Magnet/3DV2022_smooth_corres/smooth_corres_main.pdf), already implemented [here](https://github.com/RobinMagnet/SmoothFunctionalMaps)
85 | - [Reversible Harmonic Maps](https://dl.acm.org/doi/10.1145/3202660), already implemented [here](https://github.com/RobinMagnet/ReversibleHarmonicMaps)
86 |
87 | # Contact and Citation
88 |
89 | You can contact me for any questions or requests at robin.magnet@inria.fr
90 |
91 | This package has not (yet) been presented as a paper in itself, although all my works heavily rely on this.
92 |
93 | If you use this package or copy and paste parts of it for your experiments, please cite this GitHub repository or one of the following papers. Note that these papers never directly refer to pyfmaps as such, so I am fine with you only citing the GitHub repository for now.
94 |
95 |
96 | ```bibtex
97 | @inproceedings{magnetDWKSLocalDescriptor2021,
98 | title = {{{DWKS}} : {{A Local Descriptor}} of {{Deformations Between Meshes}} and {{Point Clouds}}},
99 | shorttitle = {{{DWKS}}},
100 | booktitle = {2021 {{IEEE}}/{{CVF International Conference}} on {{Computer Vision}} ({{ICCV}})},
101 | author = {Magnet, Robin and Ovsjanikov, Maks},
102 | publisher = {IEEE},
103 | }
104 |
105 |
106 | @inproceedings{magnetSmoothNonRigidShape2022,
107 | title = {Smooth {{Non-Rigid Shape Matching}} via {{Effective Dirichlet Energy Optimization}}},
108 | booktitle = {2022 {{International Conference}} on {{3D Vision}} ({{3DV}})},
109 | author = {Magnet, Robin and Ren, Jing and {Sorkine-Hornung}, Olga and Ovsjanikov, Maks},
110 | year = {2022},
111 | publisher = {{IEEE}},
112 |
113 | }
114 |
115 | @article{magnetScalableEfficientFunctional2023,
116 | title = {Scalable and {{Efficient Functional Map Computations}} on {{Dense Meshes}}},
117 | author = {Magnet, Robin and Ovsjanikov, Maks},
118 | year = {2023},
119 | journal = {Computer Graphics Forum},
120 | }
121 | ```
--------------------------------------------------------------------------------
/doc/zoomout.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RobinMagnet/pyFM/10675c000c568c54bd471e1e45544627860cb7ea/doc/zoomout.gif
--------------------------------------------------------------------------------
/docs/source/api.rst:
--------------------------------------------------------------------------------
1 | .. _api_documentation:
2 |
3 | API Documentation
4 | =================
5 | This section contains the full API reference, generated automatically from the source code.
6 |
7 | .. autosummary::
8 | :toctree: generated
9 | :recursive:
10 |
11 | pyFM
12 |
13 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # For the full list of built-in configuration values, see the documentation:
4 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
5 |
6 |
7 | import pathlib
8 | import sys
9 |
10 | sys.path.insert(0, pathlib.Path(__file__).parents[2].resolve().as_posix())
11 | print(pathlib.Path(__file__).parents[2].resolve().as_posix())
12 |
13 | import importlib
14 |
15 | # -- Project information -----------------------------------------------------
16 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
17 |
18 | project = "pyfmaps"
19 | copyright = "2024, Robin Magnet"
20 | author = "Robin Magnet"
21 | release = "1.0.0" # importlib.metadata.version("pyFM")
22 |
23 |
24 | # -- General configuration ---------------------------------------------------
25 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
26 |
27 | extensions = [
28 | "sphinx.ext.duration",
29 | "sphinx.ext.doctest",
30 | "sphinx.ext.autodoc",
31 | "sphinx.ext.autosummary",
32 | "sphinx.ext.napoleon",
33 | "sphinx_math_dollar",
34 | "sphinx.ext.mathjax",
35 | "myst_parser",
36 | "sphinx_design",
37 | ]
38 |
39 | # mathjax_config = {
40 | # 'tex2jax': {
41 | # 'inlineMath': [ ["\\(","\\)"] ],
42 | # 'displayMath': [["\\[","\\]"] ],
43 | # },
44 | # }
45 |
46 | # mathjax3_config = {
47 | # "tex": {
48 | # "inlineMath": [['\\(', '\\)']],
49 | # "displayMath": [["\\[", "\\]"]],
50 | # }
51 | # }
52 |
53 | autodoc_mock_imports = ["sklearn"]
54 | # autodoc_mock_imports = ["pyFM", "scipy", "numpy", "trimesh", "scipy.linalg", "scipy.sparse", 'potpourri3d', "robust_laplacian"]
55 |
56 | autodoc_default_options = {"members": True, "member-order": "bysource"}
57 |
58 | templates_path = ["_templates"]
59 | exclude_patterns = []
60 |
61 | source_suffix = [".rst", ".md"]
62 | autosummary_generate = True
63 |
64 |
65 | # -- Options for HTML output -------------------------------------------------
66 | # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
67 |
68 | html_theme = "furo"
69 | html_static_path = ["_static"]
70 |
71 |
72 | from sphinx.ext.autodoc import between
73 |
74 |
75 | def setup(app):
76 | # Register a sphinx.ext.autodoc.between listener to ignore everything
77 | # between lines that contain the word IGNORE
78 | app.connect("autodoc-process-docstring", between("^.*IGNORE.*$", exclude=True))
79 | return app
80 |
--------------------------------------------------------------------------------
/docs/source/examples.md:
--------------------------------------------------------------------------------
1 | Examples
2 | --------
3 |
4 | Examples can be found in [this directory](https://github.com/RobinMagnet/pyFM/tree/master/examples).
--------------------------------------------------------------------------------
/docs/source/getting_started.md:
--------------------------------------------------------------------------------
1 | Getting Started
2 | ===============
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. pyfmaps documentation master file, created by
2 | sphinx-quickstart on Thu Apr 25 23:18:17 2024.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Python Bindings for Functional Map Computations!
7 | =================================================
8 |
9 | pyFM (or pyfmaps) is a Python library designed for computing and using functional maps, a powerful framework in shape analysis and geometry processing.
10 | Functional maps provide a compact and robust way to represent correspondences between shapes by transforming point-to-point mappings into small matrices.
11 |
12 | This package implements shape signatures, functional map optimization and refinement algorithms, and above all an easy-to-use interface for using functional maps.
13 |
14 | The package is now in **v1.0.0** as it has been stable for quite a long time. It has been released on PyPI.
15 |
16 | Key Features
17 | ------------
18 |
19 | - **Spectral Analysis**: Automatically compute Laplace-Beltrami eigenfunctions for shapes, enabling efficient computations in the spectral domain.
20 |
21 | - **Differential Geometry Tools**: Implements a variety of differential geometry tools directly in Python for advanced shape analysis workflows.
22 |
23 | - **Functional Map Computation**: Straightforward tools to calculate or refine functional maps using descriptors, landmarks, or initial blurry correspondences.
24 |
25 | - **Pointwise Correspondences**: Functions for navigating between point-to-point maps and functional maps.
26 |
27 |
28 | Why pyFM?
29 | ---------
30 |
31 | pyFM has been originally designed as a way to incorporate existing Matlab code into Python workflows.
32 | It has now grown beyond that, with a variety of tools and utilities for shape analysis and geometry processing.
33 | In short, `pyFM` is designed to be
34 |
35 | - **User-Friendly:** With clear APIs and detailed documentation, `pyFM` is accessible to both beginners and experts in shape analysis.
36 | - **Efficient:** Built with performance in mind, avoiding slow python loops.
37 | - **Extensible:** Highly modular. Most functions can be easily extracted from the package and used in other projects, as they usually only require `numpy` arrays as input.
38 | - **Research-Oriented:** Inspired by state-of-the-art research in geometry processing, making it a great choice for prototyping and academic projects.
39 |
40 | Whether you are an academic researcher exploring functional map theory or an industry professional working on advanced shape analysis tasks, I hope `pyFM` can be a valuable tool in your experiments.
41 |
42 |
43 | What’s Next?
44 | ------------
45 |
46 | To get started with `pyFM`, check out the example notebook for a quick overview of the package’s capabilities.
47 |
48 | .. To get started with `pyFM`, check out the [Installation Guide](#installation) and the [Quickstart Tutorial](#getting-started) for a hands-on introduction. For a deeper dive, explore the [API Reference](#detailed-api-reference) and [Tutorials](#tutorials) to learn more about advanced features and workflows.
49 |
50 | .. Let’s unlock the potential of functional maps together with `pyFM`!
51 |
52 | Table of Contents
53 | -----------------
54 | .. toctree::
55 | install.md
56 | getting_started.md
57 | examples.md
58 |
59 | .. toctree::
60 | api
--------------------------------------------------------------------------------
/docs/source/install.md:
--------------------------------------------------------------------------------
1 | Installation
2 | ============
3 |
4 | This package is pretty minimal and should be able to run on all machines. It is tested on Python 3.9.
5 |
6 | With pip
7 | --------
8 |
9 | ```bash
10 | pip install pyfmaps
11 | ```
--------------------------------------------------------------------------------
/examples/data/landmarks.txt:
--------------------------------------------------------------------------------
1 | 3177 1685
2 | 7178 4910
3 | 6565 4646
4 | 5472 4275
5 | 2383 1383
6 | 1417 898
7 | 202 25
8 | 627 426
9 | 5157 3867
10 | 1245 688
11 | 2263 1106
12 | 3972 1832
13 | 438 273
14 | 6046 4408
15 | 7107 4819
16 | 2271 1241
17 | 458 255
18 | 1115 348
19 | 4727 2368
20 | 5206 4005
21 |
--------------------------------------------------------------------------------
/examples/functional_map_network/FMN_base.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "\n",
8 | "# 1 - Imports and defining functions"
9 | ]
10 | },
11 | {
12 | "cell_type": "code",
13 | "execution_count": null,
14 | "metadata": {},
15 | "outputs": [],
16 | "source": [
17 | "import os\n",
18 | "import numpy as np\n",
19 | "\n",
20 | "from pyFM.mesh import TriMesh\n",
21 | "from tqdm.auto import tqdm\n",
22 | "\n",
23 | "import meshplot as mp\n",
24 | "\n",
25 | "def plot_mesh(myMesh,cmap=None):\n",
26 | " mp.plot(myMesh.vertlist, myMesh.facelist,c=cmap)\n",
27 | " \n",
28 | "def double_plot(myMesh1,myMesh2,cmap1=None,cmap2=None):\n",
29 | " d = mp.subplot(myMesh1.vertlist, myMesh1.facelist, c=cmap1, s=[2, 2, 0])\n",
30 | " mp.subplot(myMesh2.vertlist, myMesh2.facelist, c=cmap2, s=[2, 2, 1], data=d)\n",
31 | "\n",
32 | "def visu(vertices):\n",
33 | " min_coord,max_coord = np.min(vertices,axis=0,keepdims=True),np.max(vertices,axis=0,keepdims=True)\n",
34 | " cmap = (vertices-min_coord)/(max_coord-min_coord)\n",
35 | " return cmap"
36 | ]
37 | },
38 | {
39 | "cell_type": "markdown",
40 | "metadata": {},
41 | "source": [
42 | "# 2 - Loading Data"
43 | ]
44 | },
45 | {
46 | "cell_type": "markdown",
47 | "metadata": {},
48 | "source": [
49 | "A functional map network expects two objects as input:\n",
50 | " - A list `meshlist` of all the meshes in the networks (nodes in the graph)\n",
51 | " - A dictionary `maps_dict` containing functional maps, where keys are of the shape `(i,j)` with `i` and `j` the indices of the meshes in `meshlist`\n",
52 | "\n",
53 | "\n",
54 | "We have access both to the meshes and to noisy *pointwise maps*, which we will have to convert to functional maps !"
55 | ]
56 | },
57 | {
58 | "cell_type": "markdown",
59 | "metadata": {},
60 | "source": [
61 | "Let's first load the meshes"
62 | ]
63 | },
64 | {
65 | "cell_type": "code",
66 | "execution_count": null,
67 | "metadata": {},
68 | "outputs": [],
69 | "source": [
70 | "meshlist = [TriMesh(f'../data/camel_gallop/camel-gallop-{i:02d}.off', area_normalize=True, center=True).process(k=150, intrinsic=True) for i in tqdm(range(1,11))]"
71 | ]
72 | },
73 | {
74 | "cell_type": "code",
75 | "execution_count": null,
76 | "metadata": {},
77 | "outputs": [],
78 | "source": [
79 | "double_plot(meshlist[0], meshlist[5])"
80 | ]
81 | },
82 | {
83 | "cell_type": "markdown",
84 | "metadata": {},
85 | "source": [
86 | "Let's now load the pointwise maps and convert them to functional maps using pyFM, thus building `maps_dict`"
87 | ]
88 | },
89 | {
90 | "cell_type": "code",
91 | "execution_count": null,
92 | "metadata": {},
93 | "outputs": [],
94 | "source": [
95 | "import pyFM.spectral as spectral\n",
96 | "\n",
97 | "K = 30 # Size of initial functional maps, small value since our initial maps have noise\n",
98 | "\n",
99 | "# All pointwise maps are located here, with format 'ind2_to_ind1' for the map from mesh ind2 to mesh ind1\n",
100 | "map_files = os.listdir('../data/camel_gallop/maps')\n",
101 | "\n",
102 | "maps_dict = {}\n",
103 | "\n",
104 | "for map_filename in tqdm(map_files):\n",
105 | " ind2, ind1 = map_filename.split('_to_')\n",
106 | " ind1, ind2 = int(ind1), int(ind2)\n",
107 | "\n",
108 | " # Indicing starts at 1 in the names, but at 0 on the meshlist\n",
109 | " mesh1, mesh2 = meshlist[ind1-1], meshlist[ind2-1]\n",
110 | " \n",
111 | " # Load the pointwise map\n",
112 | " p2p_21 = np.loadtxt(f'../data/camel_gallop/maps/{map_filename}', dtype=int)\n",
113 | "\n",
114 | " # Convert to functional map\n",
115 | " FM_12 = spectral.mesh_p2p_to_FM(p2p_21, mesh1, mesh2, dims=K)\n",
116 | " \n",
117 | " # Populate the dictionary\n",
118 | " maps_dict[(ind1-1, ind2-1)] = FM_12"
119 | ]
120 | },
121 | {
122 | "cell_type": "markdown",
123 | "metadata": {},
124 | "source": [
125 | "# 3 - Building the Canonical Consistent Latent Basis"
126 | ]
127 | },
128 | {
129 | "cell_type": "markdown",
130 | "metadata": {},
131 | "source": [
132 | "Let's build the FMN model and the canonical latent basis"
133 | ]
134 | },
135 | {
136 | "cell_type": "code",
137 | "execution_count": null,
138 | "metadata": {},
139 | "outputs": [],
140 | "source": [
141 | "from pyFM.FMN import FMN"
142 | ]
143 | },
144 | {
145 | "cell_type": "code",
146 | "execution_count": null,
147 | "metadata": {},
148 | "outputs": [],
149 | "source": [
150 | "# Build the network\n",
151 | "fmn_model = FMN(meshlist, maps_dict.copy())\n",
152 | "\n",
153 | "# Compute CCLB\n",
154 | "fmn_model.compute_CCLB(m=20)"
155 | ]
156 | },
157 | {
158 | "cell_type": "markdown",
159 | "metadata": {},
160 | "source": [
161 | "# 4 - Analyze the embedding"
162 | ]
163 | },
164 | {
165 | "cell_type": "markdown",
166 | "metadata": {},
167 | "source": [
168 | "We can use the Characterisic Shape Differences as embedding of each shape.\n",
169 | "We have access to both the *area* and *conformal* characteristic shape difference operator for each shape using the CCLB."
170 | ]
171 | },
172 | {
173 | "cell_type": "code",
174 | "execution_count": null,
175 | "metadata": {},
176 | "outputs": [],
177 | "source": [
178 | "all_embs_a = []\n",
179 | "all_embs_c = []\n",
180 | "\n",
181 | "for i in range(fmn_model.n_meshes):\n",
182 | " CSD_a, CSD_c = fmn_model.get_CSD(i)\n",
183 | "\n",
184 | " all_embs_a.append(CSD_a.flatten())\n",
185 | " all_embs_c.append(CSD_c.flatten())\n",
186 | "\n",
187 | "all_embs_a = np.array(all_embs_a)\n",
188 | "all_embs_c = np.array(all_embs_c)"
189 | ]
190 | },
191 | {
192 | "cell_type": "markdown",
193 | "metadata": {},
194 | "source": [
195 | "Let's apply PCA on the embedding to visualize the results"
196 | ]
197 | },
198 | {
199 | "cell_type": "code",
200 | "execution_count": null,
201 | "metadata": {},
202 | "outputs": [],
203 | "source": [
204 | "import matplotlib.pyplot as plt\n",
205 | "from sklearn.decomposition import PCA\n",
206 | "pca_model = PCA(n_components=2)\n",
207 | "emb_red_a = pca_model.fit_transform(all_embs_a)\n",
208 | "emb_red_c = pca_model.fit_transform(all_embs_c)"
209 | ]
210 | },
211 | {
212 | "cell_type": "code",
213 | "execution_count": null,
214 | "metadata": {},
215 | "outputs": [],
216 | "source": [
217 | "_, axs = plt.subplots(1, 2, figsize=(15, 5))\n",
218 | "\n",
219 | "axs[0].scatter(emb_red_a[:, 0], emb_red_a[:, 1], c=np.arange(len(emb_red_a)))\n",
220 | "axs[0].set_title('Area CSD')\n",
221 | "\n",
222 | "axs[1].scatter(emb_red_c[:, 0], emb_red_c[:, 1], c=np.arange(len(emb_red_c)))\n",
223 | "axs[1].set_title('Conformal CSD')"
224 | ]
225 | },
226 | {
227 | "cell_type": "markdown",
228 | "metadata": {},
229 | "source": [
230 | "# 5 - Apply Consistent ZoomOut"
231 | ]
232 | },
233 | {
234 | "cell_type": "code",
235 | "execution_count": null,
236 | "metadata": {},
237 | "outputs": [],
238 | "source": [
239 | "fmn_model.zoomout_refine(nit=10, step=5, subsample=3000, isometric=True, weight_type='icsm',\n",
240 | " M_init=None, cclb_ratio=.9, n_jobs=1, equals_id=False,\n",
241 | " verbose=True)"
242 | ]
243 | },
244 | {
245 | "cell_type": "code",
246 | "execution_count": null,
247 | "metadata": {},
248 | "outputs": [],
249 | "source": [
250 | "fmn_model.compute_CCLB(m=int(0.9*fmn_model.M))"
251 | ]
252 | },
253 | {
254 | "cell_type": "code",
255 | "execution_count": null,
256 | "metadata": {},
257 | "outputs": [],
258 | "source": [
259 | "fmn_model.cclb_eigenvalues.shape"
260 | ]
261 | },
262 | {
263 | "cell_type": "code",
264 | "execution_count": null,
265 | "metadata": {},
266 | "outputs": [],
267 | "source": [
268 | "all_embs_a = []\n",
269 | "all_embs_c = []\n",
270 | "\n",
271 | "for i in range(fmn_model.n_meshes):\n",
272 | " CSD_a, CSD_c = fmn_model.get_CSD(i)\n",
273 | "\n",
274 | " all_embs_a.append(CSD_a.flatten())\n",
275 | " all_embs_c.append(CSD_c.flatten())\n",
276 | "\n",
277 | "all_embs_a = np.array(all_embs_a)\n",
278 | "all_embs_c = np.array(all_embs_c)"
279 | ]
280 | },
281 | {
282 | "cell_type": "code",
283 | "execution_count": null,
284 | "metadata": {},
285 | "outputs": [],
286 | "source": [
287 | "import matplotlib.pyplot as plt\n",
288 | "from sklearn.decomposition import PCA\n",
289 | "pca_model = PCA(n_components=2)\n",
290 | "emb_red_a = pca_model.fit_transform(all_embs_a)\n",
291 | "emb_red_c = pca_model.fit_transform(all_embs_c)"
292 | ]
293 | },
294 | {
295 | "cell_type": "code",
296 | "execution_count": null,
297 | "metadata": {},
298 | "outputs": [],
299 | "source": [
300 | "_, axs = plt.subplots(1, 2, figsize=(15, 5))\n",
301 | "\n",
302 | "axs[0].scatter(emb_red_a[:, 0], emb_red_a[:, 1], c=np.arange(len(emb_red_a)))\n",
303 | "axs[0].set_title('Area CSD')\n",
304 | "\n",
305 | "axs[1].scatter(emb_red_c[:, 0], emb_red_c[:, 1], c=np.arange(len(emb_red_c)))\n",
306 | "axs[1].set_title('Conformal CSD')"
307 | ]
308 | }
309 | ],
310 | "metadata": {
311 | "kernelspec": {
312 | "display_name": "py310",
313 | "language": "python",
314 | "name": "python3"
315 | },
316 | "language_info": {
317 | "codemirror_mode": {
318 | "name": "ipython",
319 | "version": 3
320 | },
321 | "file_extension": ".py",
322 | "mimetype": "text/x-python",
323 | "name": "python",
324 | "nbconvert_exporter": "python",
325 | "pygments_lexer": "ipython3",
326 | "version": "3.10.14"
327 | },
328 | "orig_nbformat": 4
329 | },
330 | "nbformat": 4,
331 | "nbformat_minor": 2
332 | }
333 |
--------------------------------------------------------------------------------
/examples/mesh_and_matching/basic_functions.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "attachments": {},
5 | "cell_type": "markdown",
6 | "metadata": {},
7 | "source": [
8 | "\n",
9 | "# 1 - Imports and defining functions"
10 | ]
11 | },
12 | {
13 | "cell_type": "code",
14 | "execution_count": null,
15 | "metadata": {},
16 | "outputs": [],
17 | "source": [
18 | "import numpy as np\n",
19 | "\n",
20 | "from pyFM.mesh import TriMesh\n",
21 | "from pyFM.functional import FunctionalMapping\n",
22 | "\n",
23 | "import meshplot as mp\n",
24 | "\n",
25 | "def plot_mesh(myMesh,cmap=None):\n",
26 | " mp.plot(myMesh.vertlist, myMesh.facelist,c=cmap)\n",
27 | " \n",
28 | "def double_plot(myMesh1,myMesh2,cmap1=None,cmap2=None):\n",
29 | " d = mp.subplot(myMesh1.vertlist, myMesh1.facelist, c=cmap1, s=[2, 2, 0])\n",
30 | " mp.subplot(myMesh2.vertlist, myMesh2.facelist, c=cmap2, s=[2, 2, 1], data=d)\n",
31 | "\n",
32 | "def visu(vertices):\n",
33 | " min_coord,max_coord = np.min(vertices,axis=0,keepdims=True),np.max(vertices,axis=0,keepdims=True)\n",
34 | " cmap = (vertices-min_coord)/(max_coord-min_coord)\n",
35 | " return cmap"
36 | ]
37 | },
38 | {
39 | "attachments": {},
40 | "cell_type": "markdown",
41 | "metadata": {},
42 | "source": [
43 | "# 2- Loading and processing a mesh"
44 | ]
45 | },
46 | {
47 | "attachments": {},
48 | "cell_type": "markdown",
49 | "metadata": {},
50 | "source": [
51 | "### Basic Mesh methods"
52 | ]
53 | },
54 | {
55 | "attachments": {},
56 | "cell_type": "markdown",
57 | "metadata": {},
58 | "source": [
59 | "A TriMesh class can be created from a path (to a .off or a .obj file) or simply an array of vertices and an optional array of faces.\n",
60 | "\n",
61 | "The mesh can be centered, area-normalized, rotated or translated when loading.\n",
62 | "\n",
63 | "\n",
64 | "Vertices and faces are stored in the 'vertlist' and 'facelist' attributes. One can also use 'mesh.vertices' and 'mesh.faces' to access them. While these notations can feel non-intuitive they result in clearer functions as it avoids expressions of the form ```mesh.vertices - vertices```.\n",
65 | "\n",
66 | "A TriMesh class possess multiple attributes like edges, per-face area, per-vertex area, per-face normals, per-vertex normals, ..."
67 | ]
68 | },
69 | {
70 | "cell_type": "code",
71 | "execution_count": null,
72 | "metadata": {},
73 | "outputs": [],
74 | "source": [
75 | "mesh1 = TriMesh('../data/cat-00.off', area_normalize=True, center=False)\n",
76 | "mesh2 = TriMesh(mesh1.vertlist, mesh1.facelist)"
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "metadata": {},
83 | "outputs": [],
84 | "source": [
85 | "# Attributes are computed on the fly and cached\n",
86 | "edges = mesh1.edges\n",
87 | "\n",
88 | "area = mesh1.area\n",
89 | "\n",
90 | "face_areas = mesh1.face_areas\n",
91 | "vertex_areas = mesh1.vertex_areas\n",
92 | "face_normals = mesh1.normals\n",
93 | "\n",
94 | "# AREA WEIGHTED VERTEX NORMALS\n",
95 | "vertex_normals_a = mesh1.vertex_normals\n",
96 | "\n",
97 | "# UNIFORM WEIGHTED VERTEX NORMALS\n",
98 | "mesh1.set_vertex_normal_weighting('uniform')\n",
99 | "vertex_normals_u = mesh1.vertex_normals"
100 | ]
101 | },
102 | {
103 | "attachments": {},
104 | "cell_type": "markdown",
105 | "metadata": {},
106 | "source": [
107 | "### Geodesics"
108 | ]
109 | },
110 | {
111 | "attachments": {},
112 | "cell_type": "markdown",
113 | "metadata": {},
114 | "source": [
115 | "We propose three versions to compute geodesics :\n",
116 | "- Heat method - based on [potpourri3d](https://github.com/nmwsharp/potpourri3d) using robust laplacian (recommended)\n",
117 | "- Heat method - pure python implementation from pyFM (not robust but control on the whole code)\n",
118 | "- Dijkstra"
119 | ]
120 | },
121 | {
122 | "cell_type": "code",
123 | "execution_count": null,
124 | "metadata": {},
125 | "outputs": [],
126 | "source": [
127 | "# Geodesic distance from a given index\n",
    "# Uses potpourri3d by default. Set robust to False to obtain result from the Python implementation\n",
129 | "# Set robust to False to obtain result from the Python implementation\n",
130 | "dists = mesh1.geod_from(1000, robust=True)"
131 | ]
132 | },
133 | {
134 | "cell_type": "code",
135 | "execution_count": null,
136 | "metadata": {},
137 | "outputs": [],
138 | "source": [
    "# Similar arguments can be passed as above. I recommend sticking to the default values.\n",
140 | "S1_geod = mesh1.get_geodesic(verbose=True)"
141 | ]
142 | },
143 | {
144 | "attachments": {},
145 | "cell_type": "markdown",
146 | "metadata": {},
147 | "source": [
148 | "### Laplacian and functions"
149 | ]
150 | },
151 | {
152 | "attachments": {},
153 | "cell_type": "markdown",
154 | "metadata": {},
155 | "source": [
156 | "The spectrum of the LBO can be computed easily.\n",
157 | "\n",
158 | "Eigenvalues and eigenvectors are stored in the ```mesh.eigenvalues``` and ```mesh.eigenvectors``` attributes.\n",
159 | "\n",
160 | "Gradient and divergence can be computed using the associated methods. Using the ```mesh.project``` and ```mesh.unproject``` functions allows to switch between seeing a function in the LBO basis or on the complete shape.\n",
161 | "\n",
162 | "The squared $L^2$ norm and $H^1_0$ norm can be computed via the ```mesh.l2_sqnorm``` and ```mesh.h1_sqnorm``` methods."
163 | ]
164 | },
165 | {
166 | "cell_type": "code",
167 | "execution_count": null,
168 | "metadata": {},
169 | "outputs": [],
170 | "source": [
171 | "# By default does not use the intrinsic delaunay Laplacian.\n",
172 | "# For functional map methods, I'd recommend setting intrinsic to True.\n",
173 | "mesh1.process(k=100, intrinsic=False, verbose=True);"
174 | ]
175 | },
176 | {
177 | "cell_type": "code",
178 | "execution_count": null,
179 | "metadata": {},
180 | "outputs": [],
181 | "source": [
182 | "# plot the third eigenfunction\n",
183 | "plot_mesh(mesh1, mesh1.eigenvectors[:,2])"
184 | ]
185 | },
186 | {
187 | "attachments": {},
188 | "cell_type": "markdown",
189 | "metadata": {},
190 | "source": [
191 | "# 3 - Computing the functional map"
192 | ]
193 | },
194 | {
195 | "attachments": {},
196 | "cell_type": "markdown",
197 | "metadata": {},
198 | "source": [
199 | "**Loading data**"
200 | ]
201 | },
202 | {
203 | "cell_type": "code",
204 | "execution_count": null,
205 | "metadata": {},
206 | "outputs": [],
207 | "source": [
208 | "mesh1 = TriMesh('../data/cat-00.off')\n",
209 | "mesh2 = TriMesh('../data/lion-00.off')\n",
210 | "print(f'Mesh 1 : {mesh1.n_vertices:4d} vertices, {mesh1.n_faces:5d} faces\\n'\n",
211 | " f'Mesh 2 : {mesh2.n_vertices:4d} vertices, {mesh2.n_faces:5d} faces')\n",
212 | "\n",
213 | "double_plot(mesh1,mesh2)"
214 | ]
215 | },
216 | {
217 | "attachments": {},
218 | "cell_type": "markdown",
219 | "metadata": {},
220 | "source": [
221 | "**Computing descriptors**"
222 | ]
223 | },
224 | {
225 | "cell_type": "code",
226 | "execution_count": null,
227 | "metadata": {},
228 | "outputs": [],
229 | "source": [
230 | "process_params = {\n",
231 | " 'n_ev': (35,35), # Number of eigenvalues on source and Target\n",
232 | " 'landmarks': np.loadtxt('../data/landmarks.txt',dtype=int)[:5], # loading 5 landmarks\n",
233 | " 'subsample_step': 5, # In order not to use too many descriptors\n",
234 | " 'descr_type': 'WKS', # WKS or HKS\n",
235 | "}\n",
236 | "\n",
237 | "model = FunctionalMapping(mesh1,mesh2)\n",
238 | "model.preprocess(**process_params,verbose=True);"
239 | ]
240 | },
241 | {
242 | "attachments": {},
243 | "cell_type": "markdown",
244 | "metadata": {},
245 | "source": [
246 | "**Fitting the model**"
247 | ]
248 | },
249 | {
250 | "attachments": {},
251 | "cell_type": "markdown",
252 | "metadata": {},
253 | "source": [
254 | "$\\newcommand{\\RR}{\\mathbb{R}}$\n",
255 | "$\\newcommand{\\Ss}{\\mathcal{S}}$\n",
256 | "$\\newcommand{\\uargmin}[1]{\\underset{#1}{\\text{argmin}}\\;}$\n",
257 | "$\\newcommand{\\uargmax}[1]{\\underset{#1}{\\text{argmax}}\\;}$\n",
258 | "$\\def\\*#1{\\mathbf{#1}}$\n",
259 | "\n",
260 | "In pyFM, we always consider functional maps $\\*C:\\Ss_1\\to\\Ss_2$ and pointwise maps $T:\\Ss_2\\to\\Ss_1$ going in opposite directions, with $\\*C$ always going from shape 1 to shape 2 !\n",
261 | "\n",
262 | "Optimization problem is\n",
263 | "\\begin{equation}\n",
264 | "\\uargmin{\\*C\\in\\RR^{k_2\\times k_1}} w_{descr}\\|\\*C\\*A - \\*B\\|^2 + w_{lap}\\|\\*C\\Delta_1 - \\Delta_2\\*C\\|^2 + w_{\\text{d- comm}}\\sum_i \\|\\*C\\Gamma_1^i - \\Gamma_2^i\\*C\\|^2 + w_{\\text{orient}}\\sum_i \\|\\*C\\Lambda_1^i - \\Lambda_2^i\\*C\\|^2\n",
265 | "\\end{equation}\n",
266 | "\n",
267 | "with $\\Gamma_1^i$ and $\\Gamma_2^i$ [multiplicative operators](http://www.lix.polytechnique.fr/~maks/papers/fundescEG17.pdf) associated to the $i$-th descriptors, $\\Lambda_1^i$ and $\\Lambda_2^i$ [orientation preserving operators](https://arxiv.org/abs/1806.04455) associated to the $i$-th descriptors"
268 | ]
269 | },
270 | {
271 | "cell_type": "code",
272 | "execution_count": null,
273 | "metadata": {},
274 | "outputs": [],
275 | "source": [
276 | "fit_params = {\n",
277 | " 'w_descr': 1e0,\n",
278 | " 'w_lap': 1e-2,\n",
279 | " 'w_dcomm': 1e-1,\n",
280 | " 'w_orient': 0\n",
281 | "}\n",
282 | "\n",
283 | "\n",
284 | "\n",
285 | "model.fit(**fit_params, verbose=True)"
286 | ]
287 | },
288 | {
289 | "attachments": {},
290 | "cell_type": "markdown",
291 | "metadata": {},
292 | "source": [
293 | "**Visualizing the associated point to point map**"
294 | ]
295 | },
296 | {
297 | "cell_type": "code",
298 | "execution_count": null,
299 | "metadata": {
300 | "scrolled": false
301 | },
302 | "outputs": [],
303 | "source": [
304 | "p2p_21 = model.get_p2p(n_jobs=1)\n",
305 | "cmap1 = visu(mesh1.vertlist); cmap2 = cmap1[p2p_21]\n",
306 | "double_plot(mesh1,mesh2,cmap1,cmap2)"
307 | ]
308 | },
309 | {
310 | "attachments": {},
311 | "cell_type": "markdown",
312 | "metadata": {},
313 | "source": [
314 | "# 4 - Refining the Functional Map\n",
315 | "```model.FM``` returns the current state of functional map. One can change which one is returned by using ```model.change_FM_type(FM_type)```, as one can see below. \n",
316 | "\n",
317 | "**ICP**"
318 | ]
319 | },
320 | {
321 | "cell_type": "code",
322 | "execution_count": null,
323 | "metadata": {},
324 | "outputs": [],
325 | "source": [
326 | "model.icp_refine(verbose=True)\n",
327 | "p2p_21_icp = model.get_p2p()\n",
328 | "cmap1 = visu(mesh1.vertlist); cmap2 = cmap1[p2p_21_icp]\n",
329 | "double_plot(mesh1,mesh2,cmap1,cmap2)"
330 | ]
331 | },
332 | {
333 | "attachments": {},
334 | "cell_type": "markdown",
335 | "metadata": {},
336 | "source": [
337 | "**Zoomout**"
338 | ]
339 | },
340 | {
341 | "cell_type": "code",
342 | "execution_count": null,
343 | "metadata": {
344 | "scrolled": false
345 | },
346 | "outputs": [],
347 | "source": [
348 | "model.change_FM_type('classic') # We refine the first computed map, not the icp-refined one\n",
349 | "model.zoomout_refine(nit=15, step = 1, verbose=True)\n",
350 | "print(model.FM.shape)\n",
351 | "p2p_21_zo = model.get_p2p()\n",
352 | "cmap1 = visu(mesh1.vertlist); cmap2 = cmap1[p2p_21_zo]\n",
353 | "double_plot(mesh1,mesh2,cmap1,cmap2)"
354 | ]
355 | },
356 | {
357 | "attachments": {},
358 | "cell_type": "markdown",
359 | "metadata": {},
360 | "source": [
361 | "# Evaluating Results"
362 | ]
363 | },
364 | {
365 | "cell_type": "code",
366 | "execution_count": null,
367 | "metadata": {},
368 | "outputs": [],
369 | "source": [
370 | "import pyFM.eval"
371 | ]
372 | },
373 | {
374 | "cell_type": "code",
375 | "execution_count": null,
376 | "metadata": {},
377 | "outputs": [],
378 | "source": [
379 | "# Compute geodesic distance matrix on the cat mesh\n",
380 | "A_geod = mesh1.get_geodesic(verbose=True)"
381 | ]
382 | },
383 | {
384 | "cell_type": "code",
385 | "execution_count": null,
386 | "metadata": {},
387 | "outputs": [],
388 | "source": [
389 | "# Load an approximate ground truth map\n",
390 | "gt_p2p = np.loadtxt('../data/lion2cat',dtype=int)\n",
391 | "\n",
392 | "acc_base = pyFM.eval.accuracy(p2p_21, gt_p2p, A_geod, sqrt_area=mesh1.sqrtarea)\n",
393 | "\n",
394 | "acc_icp = pyFM.eval.accuracy(p2p_21_icp, gt_p2p, A_geod, sqrt_area=np.sqrt(mesh1.area))\n",
395 | "\n",
396 | "acc_zo = pyFM.eval.accuracy(p2p_21_zo, gt_p2p, A_geod, sqrt_area=np.sqrt(mesh1.area))\n",
397 | "\n",
398 | "print(f'Accuracy results\\n'\n",
399 | " f'\\tBasic FM : {1e3*acc_base:.2f}\\n'\n",
400 | " f'\\tICP refined : {1e3*acc_icp:.2f}\\n'\n",
401 | " f'\\tZoomOut refined : {1e3*acc_zo:.2f}\\n')"
402 | ]
403 | },
404 | {
405 | "cell_type": "code",
406 | "execution_count": null,
407 | "metadata": {},
408 | "outputs": [],
409 | "source": []
410 | }
411 | ],
412 | "metadata": {
413 | "kernelspec": {
414 | "display_name": "Python 3",
415 | "language": "python",
416 | "name": "python3"
417 | },
418 | "language_info": {
419 | "codemirror_mode": {
420 | "name": "ipython",
421 | "version": 3
422 | },
423 | "file_extension": ".py",
424 | "mimetype": "text/x-python",
425 | "name": "python",
426 | "nbconvert_exporter": "python",
427 | "pygments_lexer": "ipython3",
428 | "version": "3.10.14"
429 | }
430 | },
431 | "nbformat": 4,
432 | "nbformat_minor": 4
433 | }
434 |
--------------------------------------------------------------------------------
/examples/mesh_and_matching/matching_with_zoomout.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {},
6 | "source": [
7 | "\n",
8 | "# 1 - Imports and defining functions"
9 | ]
10 | },
11 | {
12 | "cell_type": "code",
13 | "execution_count": null,
14 | "metadata": {},
15 | "outputs": [],
16 | "source": [
17 | "import numpy as np\n",
18 | "\n",
19 | "from pyFM.mesh import TriMesh\n",
20 | "from pyFM.functional import FunctionalMapping\n",
21 | "\n",
22 | "import meshplot as mp\n",
23 | "\n",
24 | "def plot_mesh(myMesh,cmap=None):\n",
25 | " mp.plot(myMesh.vertlist, myMesh.facelist,c=cmap)\n",
26 | " \n",
27 | "def double_plot(myMesh1,myMesh2,cmap1=None,cmap2=None):\n",
28 | " d = mp.subplot(myMesh1.vertlist, myMesh1.facelist, c=cmap1, s=[2, 2, 0])\n",
29 | " mp.subplot(myMesh2.vertlist, myMesh2.facelist, c=cmap2, s=[2, 2, 1], data=d)\n",
30 | "\n",
31 | "def visu(vertices):\n",
32 | " min_coord,max_coord = np.min(vertices,axis=0,keepdims=True),np.max(vertices,axis=0,keepdims=True)\n",
33 | " cmap = (vertices-min_coord)/(max_coord-min_coord)\n",
34 | " return cmap"
35 | ]
36 | },
37 | {
38 | "cell_type": "markdown",
39 | "metadata": {},
40 | "source": [
41 | "# 2- Loading meshes to match"
42 | ]
43 | },
44 | {
45 | "cell_type": "code",
46 | "execution_count": null,
47 | "metadata": {},
48 | "outputs": [],
49 | "source": [
50 | "mesh1 = TriMesh('../data/cat-00.off', center=True, area_normalize=True).process(k=150, intrinsic=True)\n",
51 | "mesh2 = TriMesh('../data/lion-00.off', center=True, area_normalize=True).process(k=150, intrinsic=True)"
52 | ]
53 | },
54 | {
55 | "cell_type": "markdown",
56 | "metadata": {},
57 | "source": [
58 | "# 3 - Computing Initial Correspondences"
59 | ]
60 | },
61 | {
62 | "cell_type": "markdown",
63 | "metadata": {},
64 | "source": [
65 | "ZoomOut works using initial correspondences.\n",
66 | "We show here two versions:\n",
67 | " - One using Nearest Neighbor initialization (hoping that meshes are aligned)\n",
68 | " - One using WKS initialization\n",
69 | "\n",
70 | "\n",
71 | "**NOTE**: In this package, *functional map* usually go from Shape $1$ to Shape $2$, and *pointwise maps* from shape $2$ to shape $1$.\n",
72 | "\n",
73 | "\n",
74 | "Let's compute the initial maps from `mesh2` to `mesh1`"
75 | ]
76 | },
77 | {
78 | "cell_type": "code",
79 | "execution_count": null,
80 | "metadata": {},
81 | "outputs": [],
82 | "source": [
83 | "from pyFM.signatures.WKS_functions import mesh_WKS\n",
84 | "from pyFM.spectral.nn_utils import knn_query\n",
85 | "\n",
86 | "wks_descr1 = mesh_WKS(mesh1, num_E=100, k=100)\n",
87 | "wks_descr2 = mesh_WKS(mesh2, num_E=100, k=100)\n",
88 | "\n",
89 | "# WKS initialisation\n",
90 | "p2p_21_wks = knn_query(wks_descr1, wks_descr2, k=1) # (n2,) initialisation with 1-NN\n",
91 | "\n",
92 | "# NN Initialisation\n",
93 | "p2p_21_nn = knn_query(mesh1.vertices, mesh2.vertices, k=1) # (n2,) initialisation with 1-NN"
94 | ]
95 | },
96 | {
97 | "cell_type": "code",
98 | "execution_count": null,
99 | "metadata": {},
100 | "outputs": [],
101 | "source": [
102 | "cmap1 = visu(mesh1.vertlist)\n",
103 | "cmap2_wks = cmap1[p2p_21_wks]\n",
104 | "cmap2_nn = cmap1[p2p_21_nn]\n",
105 | "\n",
106 | "double_plot(mesh1,mesh2,cmap1,cmap2_wks)"
107 | ]
108 | },
109 | {
110 | "cell_type": "markdown",
111 | "metadata": {},
112 | "source": [
113 | "# 4 - Applying ZoomOut"
114 | ]
115 | },
116 | {
117 | "cell_type": "markdown",
118 | "metadata": {},
119 | "source": [
    "We can refine this map using the [ZoomOut algorithm](https://www.lix.polytechnique.fr/~maks/papers/papers_235s4-file1.pdf).\n",
121 | "\n",
122 | "This iterative algorithm refines functional maps but can easily be adapted to refine pointwise correspondences"
123 | ]
124 | },
125 | {
126 | "cell_type": "code",
127 | "execution_count": null,
128 | "metadata": {},
129 | "outputs": [],
130 | "source": [
131 | "from pyFM.refine.zoomout import mesh_zoomout_refine_p2p"
132 | ]
133 | },
134 | {
135 | "cell_type": "code",
136 | "execution_count": null,
137 | "metadata": {},
138 | "outputs": [],
139 | "source": [
140 | "FM_12_wks_zo, p2p_21_wks_zo = mesh_zoomout_refine_p2p(p2p_21=p2p_21_wks, mesh1=mesh1, mesh2=mesh2, k_init=20, nit=16, step=5, return_p2p=True, n_jobs=10, verbose=True)\n",
141 | "FM_12_nn_zo, p2p_21_nn_zo = mesh_zoomout_refine_p2p(p2p_21=p2p_21_nn, mesh1=mesh1, mesh2=mesh2, k_init=20, nit=16, step=5, return_p2p=True, n_jobs=10, verbose=True)"
142 | ]
143 | },
144 | {
145 | "cell_type": "code",
146 | "execution_count": null,
147 | "metadata": {},
148 | "outputs": [],
149 | "source": [
150 | "cmap1 = visu(mesh1.vertlist)\n",
151 | "cmap2_wks = cmap1[p2p_21_wks_zo]\n",
152 | "cmap2_nn = cmap1[p2p_21_nn_zo]\n",
153 | "\n",
154 | "double_plot(mesh1,mesh2,cmap1,cmap2_nn)"
155 | ]
156 | },
157 | {
158 | "cell_type": "code",
159 | "execution_count": null,
160 | "metadata": {},
161 | "outputs": [],
162 | "source": []
163 | }
164 | ],
165 | "metadata": {
166 | "kernelspec": {
167 | "display_name": "py310",
168 | "language": "python",
169 | "name": "python3"
170 | },
171 | "language_info": {
172 | "codemirror_mode": {
173 | "name": "ipython",
174 | "version": 3
175 | },
176 | "file_extension": ".py",
177 | "mimetype": "text/x-python",
178 | "name": "python",
179 | "nbconvert_exporter": "python",
180 | "pygments_lexer": "ipython3",
181 | "version": "3.10.14"
182 | },
183 | "orig_nbformat": 4
184 | },
185 | "nbformat": 4,
186 | "nbformat_minor": 2
187 | }
188 |
--------------------------------------------------------------------------------
/pyFM/FMN/FMN.py:
--------------------------------------------------------------------------------
1 | import copy
2 | from collections import defaultdict
3 | import time
4 |
5 | import numpy as np
6 | import scipy.sparse as sparse
7 | import scipy.sparse.linalg
8 | import scipy.linalg
9 | from scipy.optimize import linprog
10 | from tqdm.auto import tqdm
11 |
12 | from .. import spectral
13 |
14 | from sklearn.neighbors import NearestNeighbors
15 |
16 |
17 | class FMN:
18 | """
19 | Functional Map Network Class
20 |
21 | Parameters
22 | --------------------------
23 | meshlist : list
24 | list of TriMesh objects
25 | maps_dict : dict, optional
26 | dictionnary of functional maps between each pair of meshes.
27 | Keys are (i,j) with i,j indices of the meshes in the list.
28 | """
29 |
    def __init__(self, meshlist, maps_dict=None):
        """
        Build the network from a list of meshes and, optionally, a dictionary
        of functional maps. All other attributes start unset (None) and are
        filled by the dedicated ``set_*`` / ``compute_*`` methods.
        """
        # Mesh of each Node
        self.meshlist = copy.deepcopy(meshlist)  # List of n TriMesh

        # Edges are determined by (i,j) pair of indices
        # A map is associated to each edge (via dictionary)
        # Weights of edges are stored in a sparse (n,n) matrix
        # For computation, an arbitrary ordering of edges is stored.

        # Network attribute
        self.edges = None  # List of couples (i,j)
        self.maps = None  # Dictionary of maps
        self.weights = None  # (n,n) sparse matrix of weights
        self.edge2ind = None  # Ordering of edges

        # (n,K) array of indices of K vertices per shape in the network.
        self.subsample = None

        # icsm weights attributes
        self.cycles = None  # List of 3-cycles (i,j,k)
        self.A = None  # (n_cycle, n_edges) binary matrix (1 if edge j in cycle i)
        self.A_sub = None  # (n_edge_in_cycle,) indices of edges contained in a 3-cycle

        self.use_icsm = False  # Whether icsm or adjacency weights are used.
        self.cycle_weight = None  # Weights of each 3-cycle (map-dependent)
        self.edge_weights = None  # Weight of each edge (map-dependent)

        # CLB and CCLB attributes
        self.W = None  # (n*M, n*M) sparse matrix. Quadratic form for CLB computation.
        self.CLB = None  # (n,M,M) array of Consistent Latent Basis for each mesh.
        self.CCLB = (
            None  # (n,M,m) array of Canonical Consistent Latent Basis for each mesh
        )
        self.cclb_eigenvalues = None  # (m,) eigenvalues of the CCLB

        # Extra information
        self.p2p = None  # Dictionary of pointwise maps associated to each edge
        self._M = None  # Cached shared functional-map dimension (see the M property)

        if maps_dict is not None:
            self.set_maps(maps_dict=maps_dict, verbose=True)
71 |
72 | @property
73 | def n_meshes(self):
74 | return len(self.meshlist)
75 |
76 | @property
77 | def M(self):
78 | """
79 | Return the current shared dimension for functional maps
80 | (which are square matrices).
81 |
82 | If not specified, returns the sized of the first found map.
83 |
84 | Returns
85 | -------------------
86 | M : int
87 | size of the functional maps
88 | """
89 | if self._M is not None:
90 | return self._M
91 | else:
92 | return self.maps[self.edges[0]].shape[0]
93 |
94 | @M.setter
95 | def M(self, M):
96 | self._M = M
97 |
98 | @property
99 | def m_cclb(self):
100 | """
101 | Return the dimension of the Canonical Consistent Latent Basis
102 |
103 | Returns
104 | -------------------
105 | m : int
106 | size of the CCLB
107 | """
108 | return self.CCLB.shape[2]
109 |
110 | def _reset_map_attributes(self):
111 | """
112 | Resets all attributes depending on the Functional Maps
113 | """
114 | # Resets icsm weights variables
115 | if self.use_icsm:
116 | self.use_icsm = False # Whether icsm or adjacency weights are used.
117 | self.cycle_weight = None # Weights of each 3-cycle (map-dependant)
118 | self.edge_weights = None # Weight of each edge (map-dependant)
119 | self.weights = None # (n,n) sparse matrix of weights
120 |
121 | # Reset map-dependant attributes
122 | self.W = None # (n*M, n*M) sparse matrix. Quadratic form for CLB computation.
123 | self.CLB = (
124 | None # (n,M,M) array containing the Consistent Latent Basis for each mesh.
125 | )
126 | self.CCLB = (
127 | None # (n,M,m) array of Canonical Consistent Latent Basis for each mesh
128 | )
129 | self.cclb_eigenvalues = None # (m,) eigenvalues of the CCLB
130 | self.p2p = None # Dictionnary of pointwise
131 |
132 | def set_maps(self, maps_dict, verbose=False):
133 | """
134 | Set the edges of the graph with maps.
135 | Saves extra information about the edges.
136 |
137 | Parameters
138 | --------------------------
139 | maps_dict : dict
140 | dictionnary, key (i,j) gives functional map FM between mesh i and j.
141 | FM can be of different size depending on the edge
142 | """
143 | self.maps = copy.deepcopy(maps_dict)
144 |
145 | # Sort edges for later faster optimization
146 | self.edges = sorted(list(maps_dict.keys()))
147 |
148 | self.edge2ind = dict()
149 | for edge_ind, edge in enumerate(self.edges):
150 | self.edge2ind[edge] = edge_ind
151 |
152 | if verbose:
153 | print(f"Setting {len(self.edges)} edges on {self.n_meshes} nodes.")
154 |
155 | return self
156 |
157 | def set_subsample(self, subsample):
158 | """
159 | Set subsamples an all shapes in the network
160 |
161 | Parameters
162 | -----------------------------------
163 | subsample :
164 | (n, size) array of indices of vertices to subsample on each shape
165 | """
166 | self.subsample = subsample
167 |
168 | return self
169 |
170 | def compute_subsample(self, size=1000, geodesic=False, verbose=False):
171 | """
172 | Subsample vertices on each shape using farthest point sampling.
173 | Store in an (n,size) array of indices
174 |
175 | Parameters
176 | ---------------------------------
177 | size : int
178 | number of vertices to subsample on each shape
179 | """
180 | if verbose:
181 | print(f"Computing a {size}-sized subsample for each mesh")
182 | self.subsample = np.zeros((self.n_meshes, size), dtype=int)
183 | for i in range(self.n_meshes):
184 | self.subsample[i] = self.meshlist[i].extract_fps(
185 | size, geodesic=geodesic, random_init=False
186 | )
187 |
188 | def set_weights(self, weights=None, weight_type="icsm", verbose=False):
189 | """
190 | Set weights for each edge in the graph
191 |
192 | Parameters
193 | -------------------------
194 | weights : sparse
195 | (n,n) matrix. If not specified, sets weights according to 'weight_type' argument
196 | weight_type :
197 | 'icsm' | 'adjacency' : if 'weights' is not specified, computes weights
198 | according to the Consistent Zoomout adaptation of icsm or using the adjacency
199 | matrix of the graph.
200 | """
201 | if weights is not None:
202 | self.use_icsm = False
203 | self.weights = copy.deepcopy(weights)
204 |
205 | elif weight_type == "icsm":
206 | self.use_icsm = True
207 |
208 | # Process cycles if necessary
209 | if self.cycles is None:
210 | if verbose:
211 | print("Computing cycle information")
212 | self.extract_3_cycles()
213 | self.compute_Amat()
214 |
215 | # Compute original icsm weights d_ij for each edge (i,j)
216 | # Final weight is set to exp(-d_ij^2/(2*sigma^2))
217 | # With sigma = median(d_ij)
218 | weight_arr = self.optimize_icsm(verbose=verbose) # (n_edges,)
219 | median_val = np.median(weight_arr[self.A_sub])
220 | if np.isclose(median_val, 0, atol=1e-4):
221 | weight_arr /= np.mean(weight_arr[self.A_sub])
222 | else:
223 | weight_arr /= median_val
224 | new_w = np.exp(-np.square(weight_arr) / 2) # (n_edges,)
225 |
226 | I = [x[0] for x in self.edges]
227 | J = [x[1] for x in self.edges]
228 | self.weights = sparse.csr_matrix(
229 | (new_w, (I, J)), shape=(self.n_meshes, self.n_meshes)
230 | )
231 |
232 | elif weight_type == "adjacency":
233 | self.use_icsm = False
234 | I = [x[0] for x in self.edges]
235 | J = [x[1] for x in self.edges]
236 | V = [1 for x in range(len(self.edges))]
237 | self.weights = sparse.csr_matrix(
238 | (V, (I, J)), shape=(self.n_meshes, self.n_meshes)
239 | )
240 |
241 | else:
242 | raise ValueError(
243 | f'"weight_type" should be "icsm" or "adjacency, not {weight_type}'
244 | )
245 |
246 | return self
247 |
248 | def set_isometries(self, M=None):
249 | """
250 | For each edge (i,j), if (j,i) is also an edge then,
251 | the corresponding functional maps are set as transpose of each other
252 | chosing the closest to orthogonal of both.
253 |
254 | Since this modifies the maps, icsm weights are deleted
255 |
256 | Parameters
257 | -----------------------
258 | M : int
259 | dimension with wich to compare the functional maps.
260 | If None, uses the current self.M
261 | """
262 | # Dictionnary with False as a default value for any key
263 | visited = defaultdict(bool)
264 |
265 | if M is None:
266 | M = self.M
267 |
268 | for i, j in self.edges:
269 | if not visited[(i, j)] and (j, i) in self.edges:
270 | FM1 = self.maps[(i, j)][:M, :M]
271 | FM2 = self.maps[(j, i)][:M, :M]
272 |
273 | dist1 = np.linalg.norm(FM1.T @ FM1 - np.eye(FM1.shape[1]))
274 | dist2 = np.linalg.norm(FM2.T @ FM2 - np.eye(FM2.shape[1]))
275 |
276 | if dist1 <= dist2:
277 | self.maps[(j, i)] = np.transpose(self.maps[(i, j)])
278 | else:
279 | self.maps[(i, j)] = np.transpose(self.maps[(j, i)])
280 |
281 | visited[(j, i)] = True
282 |
283 | # Reset all map-dependant attributes
284 | self._reset_map_attributes()
285 |
    def compute_W(self, M=None, verbose=False):
        """
        Computes the quadratic form for Consistent Latent Basis (CLB) computation.

        Requires functional maps to be set. If edge weights are missing they
        are computed first (see ``set_weights``). The result is stored in
        ``self.W``.

        Parameters
        ---------------------------
        M : int, optional
            size of the functional maps to use, uses projection of FM on this dimension.
            If not specified, used the size of the first found functional map
        verbose : bool
            whether to print progress information

        Raises
        ---------------------------
        ValueError
            if no functional maps have been set
        """
        if self.maps is None:
            raise ValueError("Functional maps should be set")

        # Weights must exist before building the quadratic form.
        if self.weights is None:
            self.set_weights(verbose=verbose)

        # Note: self.M is updated before building W so the form is assembled
        # at the requested dimension.
        if M is not None:
            self.M = M

        self.W = CLB_quad_form(self.maps, self.weights, M=self.M)
306 |
    def compute_CLB(self, equals_id=False, verbose=False):
        """
        Computes the Consistent Latent Basis CLB using the quadratic form
        associated to the problem.
        The first M vectors for each basis are computed in order and stored
        in ``self.CLB`` as an (n, M, M) array.

        Parameters
        --------------------------
        equals_id : bool
            If False, the sum of Y.T@Y are expected to give n*Id.
            If True, the sum of Y.T@Y are expected to give Id.
        verbose : bool
            whether to print progress information
        """
        if self.W is None:
            self.compute_W(verbose=verbose)

        # W is a real symmetric matrix !
        # There is a bug in sparse eigenvalues computation where 'LM' returns the smallest
        # eigenvalues whereas 'SM' does not.
        # Workaround: shift-invert mode (sigma=-1e-6) with which='LM' extracts
        # the eigenvalues closest to the shift, i.e. the smallest ones of W.
        if verbose:
            print(f"Computing {self.M} CLB eigenvectors...")
        start_time = time.time()
        if equals_id:
            # Standard problem: constraint sum Y.T@Y = Id.
            # Returns (n*M,), (n*M,M) array

            eigenvalues, eigenvectors = scipy.sparse.linalg.eigsh(
                self.W, k=self.M, which="LM", sigma=-1e-6
            )
        else:
            # Generalized problem W x = lambda * (1/n) x, i.e. the
            # constraint sum Y.T@Y = n*Id.
            # Returns (n*M,), (n*M,M) array
            M_mat = 1 / self.n_meshes * scipy.sparse.eye(self.W.shape[0])
            eigenvalues, eigenvectors = scipy.sparse.linalg.eigsh(
                self.W, M=M_mat, k=self.M, which="LM", sigma=-1e-6
            )

        if verbose:
            print(f"\tDone in {time.time() - start_time:.1f}s")
        # In any case, make sure they are real and sorted.
        # eigenvalues = np.real(eigenvalues)
        # sorting = np.argsort(eigenvalues)
        # eigenvalues = eigenvalues[sorting]
        # eigenvectors = np.real(eigenvectors)[:,sorting] # NM,M
        # The first eigenvalue is expected to be ~0; clamp it to remove
        # numerical noise from the shift-invert solve.
        eigenvalues[0] = 0

        # Split the (n*M, M) stacked eigenvectors into one (M, M) basis per mesh.
        self.CLB = eigenvectors.reshape((self.n_meshes, self.M, self.M))  # (n,M,M)
351 |
    def compute_CCLB(self, m, verbose=True):
        """
        Compute the Canonical Consistent Latent Basis CCLB from the CLB.

        Stores ``self.CCLB`` ((n_meshes, M, m) array) and
        ``self.cclb_eigenvalues`` ((m,) array of sorted eigenvalues).

        Parameters
        ------------------------------
        m : int
            size of the CCLB to compute.
        verbose : bool
            verbosity forwarded to compute_CLB if the CLB is missing

        Returns
        ------------------------------
        self
            the network itself, allowing call chaining
        """
        # The CLB is a prerequisite; compute it lazily
        if self.CLB is None:
            self.compute_CLB(verbose=verbose)

        # Matrix E from Algorithm 1 in the Limit Shape paper
        E_mat = np.zeros((m, m))

        # Accumulate E = sum_i Y_i.T @ diag(evals_i) @ Y_i over the first m columns
        for i in range(self.n_meshes):
            Y = self.CLB[i, :, :m]  # (M,m)
            evals = self.meshlist[i].eigenvalues[: self.M]  # (M,)
            E_mat += Y.T @ (evals[:, None] * Y)  # (m,m)

        # Compute the eigendecomposition of E
        # Generalized problem E x = lambda * (n * Id) x
        b = self.n_meshes * np.eye(E_mat.shape[0])
        eigenvalues, eigenvectors = scipy.linalg.eig(E_mat, b=b)  # (m,), (m,m)

        # Keep real parts and sort by increasing eigenvalue
        eigenvalues = np.real(eigenvalues)  # (m,)
        sorting = np.argsort(eigenvalues)  # (m,)
        eigenvalues = eigenvalues[sorting]  # (m,)
        eigenvectors = np.real(eigenvectors)[:, sorting]  # (m,m)

        # CCLB is stored as an (n,M,m) array
        self.cclb_eigenvalues = eigenvalues  # (m,)
        self.CCLB = np.array(
            [self.CLB[i, :, :m] @ eigenvectors for i in range(self.n_meshes)]
        )

        return self
388 |
389 | def get_CSD(self, i):
390 | """
391 | Returns the Characterisic Shape Difference operators CSD for mesh i
392 |
393 | Parameters
394 | --------------------------
395 | i : int
396 | index of the mesh on which to returns the two CSD
397 |
398 | Returns
399 | --------------------------
400 | CSD_a: np.ndarray
401 | (m,m) array of area CSD expressed in the Latent Space
402 | CSD_c: np.ndarray
403 | (m,m) array of conformal CSD expressed in the Latent Space
404 | """
405 | # Functional map from the Limit Shape to shape i
406 | FM = self.CCLB[i]
407 |
408 | CSD_a = FM.T @ FM
409 | CSD_c = (
410 | np.linalg.pinv(np.diag(self.cclb_eigenvalues))
411 | @ FM.T
412 | @ (self.meshlist[i].eigenvalues[: self.M, None] * FM)
413 | )
414 |
415 | return CSD_a, CSD_c
416 |
417 | def get_LB(self, i, complete=True):
418 | """
419 | Returns the latent basis LB for mesh i
420 |
421 | Parameters
422 | --------------------------
423 | i : int
424 | index of the mesh on which to returns the LB
425 | complete : bool
426 | If False, only computes values on the self.subsample[i] vertices
427 |
428 | Returns
429 | --------------------------
430 | latent_basis: np.ndarray
431 | (n_i,m) latent basis on mesh i
432 | """
433 | cclb = self.CCLB[
434 | i
435 | ] # / np.linalg.norm(self.CCLB[i],axis=0,keepdims=True) # (M,m)
436 | if not complete and self.subsample is not None:
437 | latent_basis = (
438 | self.meshlist[i].eigenvectors[self.subsample[i], : self.M] @ cclb
439 | )
440 | return latent_basis # (n_i',m)
441 |
442 | latent_basis = self.meshlist[i].eigenvectors[:, : self.M] @ cclb # (N_i,m)
443 | return latent_basis
444 |
445 | def compute_p2p(self, complete=True, n_jobs=1):
446 | """
447 | Computes vertex to vertex maps for each (directed) edge using the factorization of
448 | functional maps CCLB. Only maps related to existing edges are computed.
449 | Vertex to vertex maps are saved in a dictionnary the same way as functional maps,
450 | although their direction are reversed.
451 |
452 | Parameters
453 | --------------------------
454 | complete : bool
455 | If False, uses self.subsample to obtain pointwise maps between
456 | subsamples of vertices for each shape
457 | """
458 |
459 | self.p2p = dict()
460 | curr_vind = -1
461 | for i, j in self.edges:
462 |
463 | if i != curr_vind:
464 | curr_v = i
465 | LB_1 = self.get_LB(curr_v, complete=False) # (n_1',m)
466 |
467 | tree = NearestNeighbors(
468 | n_neighbors=1, leaf_size=40, algorithm="kd_tree", n_jobs=n_jobs
469 | )
470 | _ = tree.fit(LB_1)
471 |
472 | # LB_1 = self.get_LB(i, complete=complete) # (n_1',m)
473 | LB_2 = self.get_LB(j, complete=complete) # (n_2',m)
474 |
475 | t_, p2p = tree.kneighbors(LB_2)
476 |
477 | p2p = p2p.flatten()
478 |
479 | self.p2p[(i, j)] = p2p # (n_2',)
480 |
481 | def compute_maps(self, M, complete=True):
482 | """
483 | Convert pointwise maps into Functional Maps of size M.
484 |
485 | Parameters
486 | ------------------------
487 | M : int
488 | size of the functional map to compute
489 | """
490 | self.M = M
491 | for i, j in self.edges:
492 | if not complete and self.subsample is not None:
493 | sub = (self.subsample[i], self.subsample[j])
494 | else:
495 | sub = None
496 |
497 | FM = spectral.mesh_p2p_to_FM(
498 | self.p2p[(i, j)],
499 | self.meshlist[i],
500 | self.meshlist[j],
501 | dims=M,
502 | subsample=sub,
503 | )
504 | self.maps[(i, j)] = FM
505 |
506 | # Reset map-dependant variables
507 | self._reset_map_attributes()
508 |
509 | def extract_3_cycles(self):
510 | """
511 | Extract all 3-cycles from the graph in a list of 3-uple (i,j,k)
512 | """
513 | self.cycles = []
514 |
515 | # Ugly triple for loop, but only has to be run once
516 | # Saves cycles (i,j,k) with either ij>k
517 | for i in range(self.n_meshes):
518 | for j in range(i):
519 | for k in range(j):
520 | if (
521 | (i, j) in self.edges
522 | and (j, k) in self.edges
523 | and (k, i) in self.edges
524 | ):
525 | self.cycles.append((i, j, k))
526 |
527 | for j in range(i + 1, self.n_meshes):
528 | for k in range(j + 1, self.n_meshes):
529 | if (
530 | (i, j) in self.edges
531 | and (j, k) in self.edges
532 | and (k, i) in self.edges
533 | ):
534 | self.cycles.append(tuple((i, j, k)))
535 |
536 | def compute_Amat(self):
537 | """
538 | Compute matrix A for icsm weights optimization. Binary matrix telling which edge
539 | belongs to which cycle.
540 | Uses the arbitraty edge ordering createede in the self.set_maps method
541 | """
542 | self.A = np.zeros((len(self.cycles), len(self.edges))) # (n_cycles, n_edges)
543 |
544 | for cycle_ind, (i, j, k) in enumerate(self.cycles):
545 | self.A[cycle_ind, self.edge2ind[(i, j)]] = 1
546 | self.A[cycle_ind, self.edge2ind[(j, k)]] = 1
547 | self.A[cycle_ind, self.edge2ind[(k, i)]] = 1
548 |
549 | self.A_sub = np.where(self.A.sum(0) > 0)[0] # (n_edges_in_cycle)
550 |
551 | def compute_3cycle_weights(self, M=None):
552 | """
553 | Compute per-cycle costs and per-edge costs for icsm optimization.
554 | Cycle weights are given by the self.get_cycle_weight method (deviation from Id map)
555 | Edge weight is the inverse of the sum of all weights of the cycles the edge belongs to.
556 |
557 | Parameters
558 | -----------------------
559 | M : int
560 | Dimension of functional maps to use. If None, uses self.M
561 | """
562 | if M is None:
563 | M = self.M
564 |
565 | self.cycle_weight = np.zeros(len(self.cycles))
566 | for cycle_ind, cycle in enumerate(self.cycles):
567 | self.cycle_weight[cycle_ind] = self.get_cycle_weight(cycle, M=M) # n_cycles
568 |
569 | self.edge_weights = np.zeros(len(self.edges))
570 | self.edge_weights[self.A_sub] = 1 / (
571 | self.A[:, self.A_sub] * self.cycle_weight[:, None]
572 | ).sum(0)
573 |
574 | def optimize_icsm(self, verbose=False):
575 | r"""
576 | Solves the linear problem for icsm weights computation
577 | $\min w^{\top} x$
578 | s.t. $A x \geq C_{\gamma}$ and $x \geq 0$
579 |
580 | Edges which are not part of a cycle are given 0-weigths
581 |
582 | Returns
583 | ------------------------
584 | opt_weights : np.ndarray
585 | (n_edges,) (positive) weights for each edge.
586 | """
587 | self.compute_3cycle_weights(M=self.M)
588 |
589 | if verbose:
590 | print("Optimizing Cycle Weights...")
591 | start_time = time.time()
592 | # Solve Linear Program
593 | res = linprog(
594 | self.edge_weights,
595 | A_ub=-self.A,
596 | b_ub=-self.cycle_weight,
597 | bounds=(0, float("inf")),
598 | method="highs-ds",
599 | )
600 |
601 | if verbose:
602 | print(f"\tDone in {time.time() - start_time:.5f}s")
603 | opt_weights = np.zeros(len(self.edges)) # (n_edges,)
604 | opt_weights[self.A_sub] = res.x[self.A_sub]
605 |
606 | return opt_weights
607 |
608 | def get_cycle_weight(self, cycle, M=None):
609 | """
610 | Given a cycle (i,j,k), compute its cost using the functional maps.
611 | Cost is given as the maximum deviation to the identity map when
612 | going through the complete cycle (3 possibilities)
613 |
614 | Parameters
615 | -----------------------
616 | cycle :
617 | 3-uple with node indices creating a cycle
618 | M : int
619 | Dimension of functional maps to use. If None use self.M
620 |
621 | Returns
622 | -----------------------
623 | cost : float
624 | cost of the cycle
625 | """
626 | if M is None:
627 | M = self.M
628 |
629 | (i, j, k) = cycle
630 |
631 | Cij = self.maps[(i, j)][:M, :M]
632 | Cjk = self.maps[(j, k)][:M, :M]
633 | Cki = self.maps[(k, i)][:M, :M]
634 |
635 | Cii = Cij @ Cjk @ Cki
636 | Cjj = Cjk @ Cki @ Cij
637 | Ckk = Cki @ Cij @ Cjk
638 |
639 | costi = np.linalg.norm(Cii - np.eye(M))
640 | costj = np.linalg.norm(Cjj - np.eye(M))
641 | costk = np.linalg.norm(Ckk - np.eye(M))
642 |
643 | return max(max(costi, costj), costk)
644 |
    def zoomout_iteration(
        self,
        cclb_size,
        M_init,
        M_final,
        isometric=True,
        weight_type="icsm",
        n_jobs=1,
        equals_id=False,
        complete=False,
    ):
        """
        Performs an iteration of Consistent Zoomout refinement: recomputes weights,
        the CLB quadratic form, the CLB, the CCLB, pointwise maps, and finally
        functional maps at the increased dimension M_final.

        Parameters
        -----------------------------
        cclb_size : int
            size of the CCLB to compute
        M_init : int
            initial dimension of maps
        M_final : int
            dimension at the end of the iteration
        isometric : bool
            whether to use the reduced space strategy of ConsistentZoomout-iso
            (calls self.set_isometries before the update)
        weight_type : str
            'icsm' or 'adjacency', type of weights to use
        n_jobs : int
            number of parallel jobs for pointwise map computation
        equals_id : bool
            Whether the CLB optimization uses Id or n*Id as a constraint
        complete : bool
            If vertex-to-vertex and functional maps should be computed with all vertices
            instead of the subsampling.
        """
        if isometric:
            self.set_isometries(M=M_init)

        # icsm weights depend on the maps, so they are refreshed every iteration;
        # adjacency weights only depend on the graph and are computed once.
        if weight_type == "icsm":
            self.set_weights(weight_type=weight_type)
        elif self.weights is None:
            # Only computed at first iteration
            self.set_weights(weight_type="adjacency")

        self.compute_W(M=M_init)
        self.compute_CLB(equals_id=equals_id)
        self.compute_CCLB(cclb_size)
        self.compute_p2p(complete=complete, n_jobs=n_jobs)
        self.compute_maps(M_final, complete=complete)
691 |
692 | def zoomout_refine(
693 | self,
694 | nit=10,
695 | step=1,
696 | subsample=1000,
697 | isometric=True,
698 | weight_type="icsm",
699 | M_init=None,
700 | cclb_ratio=0.9,
701 | n_jobs=1,
702 | equals_id=False,
703 | verbose=False,
704 | ):
705 | """
706 | Refines the functional maps using Consistent Zoomout refinement
707 |
708 | Parameters
709 | -----------------------------
710 | nit :
711 | number of zoomout iterations
712 | step :
713 | dimension increase at each iteration
714 | subsample :
715 | size of vertices subsample. If set to 0 or None, all vertices are used.
716 | isometric :
717 | whether to use the reduced space strategy of ConsistentZoomout-iso
718 | weight_type :
719 | 'icsm' or 'adjacency', type of weights to use
720 | M_init :
721 | original size of functional maps. If None, uses self.M
722 | cclb_ratio :
723 | size of CCLB as a ratio of the current dimension M
724 | equals_id :
725 | Whether the CLB optimization uses Id or n*Id as a constraint
726 | """
727 | if (
728 | np.issubdtype(type(subsample), np.integer) and subsample == 0
729 | ) or subsample is None:
730 | use_sub = False
731 | self.subsample = None
732 | else:
733 | use_sub = True
734 | if np.issubdtype(type(subsample), np.integer):
735 | self.compute_subsample(size=subsample, verbose=verbose)
736 | else:
737 | self.set_subsample(subsample)
738 |
739 | if M_init is not None:
740 | self.M = M_init
741 | else:
742 | M_init = self.M
743 |
744 | for i in tqdm(range(nit - 1)):
745 | new_M = self.M + step
746 | m_cclb = int(cclb_ratio * self.M)
747 | # If not the last iteration
748 | if i < nit - 1:
749 |
750 | self.zoomout_iteration(
751 | m_cclb,
752 | self.M,
753 | new_M,
754 | weight_type=weight_type,
755 | equals_id=equals_id,
756 | n_jobs=n_jobs,
757 | complete=not use_sub,
758 | )
759 |
760 | # Last iteration
761 | else:
762 | self.zoomout_iteration(
763 | m_cclb,
764 | self.M,
765 | new_M,
766 | weight_type=weight_type,
767 | equals_id=equals_id,
768 | n_jobs=n_jobs,
769 | complete=True,
770 | )
771 |
772 |
def CLB_quad_form(maps, weights, M=None):
    """
    Computes the quadratic form associated to a Functional Maps Network, for Consistent Latent Basis
    computation.

    Parameters
    -----------------------------
    maps : dict
        dictionnary of functional maps associated to key (i,j) representing an edge
    weights :
        (n,n) sparse matrix of weights. Entry (i,j) represent the weight of edge (i,j)
    M : int, optional
        Dimension of Functional maps to consider.
        If None, uses the size of the first functional map found.

    Returns
    -----------------------------
    W : scipy.sparse.csr_matrix
        (N*M,N*M) sparse matrix representing the quadratic form for CLB computation.
    """
    edges = list(maps.keys())
    N = 1 + np.max(edges)

    if M is None:
        M = maps[edges[0]].shape[0]

    # Accumulate dense (M,M) blocks and convert to sparse only once at the end:
    # this avoids the repeated dense->csr conversions and csr additions of the
    # naive implementation.
    grid = [[None for _ in range(N)] for _ in range(N)]
    for i in range(N):
        grid[i][i] = np.zeros((M, M))

    for i, j in edges:
        FM = maps[(i, j)][:M, :M]
        w = weights[i, j]

        # Diagonal blocks: w * FM^T FM on the source, w * Id on the target
        grid[i][i] += w * (FM.T @ FM)
        grid[j][j] += w * np.eye(M)

        # Off-diagonal blocks: -w * FM^T and -w * FM
        if grid[i][j] is None:
            grid[i][j] = np.zeros((M, M))
        grid[i][j] -= w * FM.T

        if grid[j][i] is None:
            grid[j][i] = np.zeros((M, M))
        grid[j][i] -= w * FM

    # Convert each computed block to sparse and build the block sparse matrix.
    # Blocks left as None are treated as zero by sparse.bmat.
    grid = [
        [None if block is None else sparse.csr_matrix(block) for block in row]
        for row in grid
    ]
    W = sparse.bmat(grid, format="csr")
    return W
822 |
--------------------------------------------------------------------------------
/pyFM/FMN/__init__.py:
--------------------------------------------------------------------------------
1 | from .FMN import FMN
2 |
--------------------------------------------------------------------------------
/pyFM/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RobinMagnet/pyFM/10675c000c568c54bd471e1e45544627860cb7ea/pyFM/__init__.py
--------------------------------------------------------------------------------
/pyFM/eval/__init__.py:
--------------------------------------------------------------------------------
1 | from .evaluate import *
2 |
--------------------------------------------------------------------------------
/pyFM/eval/__pycache__/__init__.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RobinMagnet/pyFM/10675c000c568c54bd471e1e45544627860cb7ea/pyFM/eval/__pycache__/__init__.cpython-36.pyc
--------------------------------------------------------------------------------
/pyFM/eval/__pycache__/evaluate.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RobinMagnet/pyFM/10675c000c568c54bd471e1e45544627860cb7ea/pyFM/eval/__pycache__/evaluate.cpython-36.pyc
--------------------------------------------------------------------------------
/pyFM/eval/evaluate.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
def accuracy(p2p, gt_p2p, D1_geod, return_all=False, sqrt_area=None):
    """
    Computes the geodesic accuracy of a vertex to vertex map. The map goes from
    the target shape to the source shape.

    Parameters
    ----------------------
    p2p :
        (n2,) - vertex to vertex map giving the index of the matched vertex on the source shape
        for each vertex on the target shape (from a functional map point of view)
    gt_p2p :
        (n2,) - ground truth mapping between the pairs
    D1_geod :
        (n1,n1) - geodesic distance between pairs of vertices on the source mesh
    return_all : bool
        whether to return all the distances or only the average geodesic distance
    sqrt_area : float, optional
        square root of the area of the source shape, used to normalize distances

    Returns
    -----------------------
    acc : float
        average accuracy of the vertex to vertex map
    dists : np.ndarray
        (n2,) - if return_all is True, returns all the pairwise distances
    """

    dists = D1_geod[(p2p, gt_p2p)]
    if sqrt_area is not None:
        # Non-in-place division so integer-dtype distance matrices are supported
        # (in-place true division on an int array raises a TypeError)
        dists = dists / sqrt_area

    if return_all:
        return dists.mean(), dists

    return dists.mean()
37 |
38 |
def continuity(p2p, D1_geod, D2_geod, edges):
    """
    Computes continuity of a vertex to vertex map. The map goes from
    the target shape to the source shape.

    Parameters
    ----------------------
    p2p :
        (n2,) - vertex to vertex map giving the index of the matched vertex on the source shape
        for each vertex on the target shape (from a functional map point of view)
    D1_geod :
        (n1,n1) - geodesic distance between pairs of vertices on the source mesh
    D2_geod :
        (n2,n2) - geodesic distance between pairs of vertices on the target mesh
    edges :
        (n_e,2) edges on the target shape

    Returns
    -----------------------
    continuity : float
        average continuity of the vertex to vertex map
    """
    # Geodesic length of each edge on the target shape
    lengths_target = D2_geod[(edges[:, 0], edges[:, 1])]

    # Geodesic distance between the images of each edge's endpoints on the source shape
    mapped_ends = (p2p[edges[:, 0]], p2p[edges[:, 1]])
    lengths_mapped = D1_geod[mapped_ends]

    # Average stretch of the edges under the map
    return np.mean(lengths_mapped / lengths_target)
69 |
70 |
def coverage(p2p, A):
    """
    Computes coverage of a vertex to vertex map. The map goes from
    the target shape to the source shape.

    Parameters
    ----------------------
    p2p :
        (n2,) - vertex to vertex map giving the index of the matched vertex on the source shape
        for each vertex on the target shape (from a functional map point of view)
    A :
        (n1,n1) or (n1,) - area matrix on the source shape or array of per-vertex areas.

    Returns
    -----------------------
    coverage : float
        coverage of the vertex to vertex map
    """
    # BUGFIX: the (n1,) per-vertex-area input documented above previously left
    # vert_area undefined (only the 2-D branch assigned it), raising NameError.
    if len(A.shape) == 2:
        # Area matrix: lump to per-vertex areas by summing rows
        vert_area = np.asarray(A.sum(1)).flatten()
    else:
        vert_area = np.asarray(A).flatten()

    # Fraction of the total source area hit by at least one target vertex
    coverage = vert_area[np.unique(p2p)].sum() / vert_area.sum()

    return coverage
94 |
--------------------------------------------------------------------------------
/pyFM/functional.py:
--------------------------------------------------------------------------------
1 | import copy
2 | import time
3 |
4 | from tqdm import tqdm
5 |
6 | import numpy as np
7 | from scipy.optimize import fmin_l_bfgs_b
8 |
9 | import pyFM.signatures as sg
10 | import pyFM.optimize as opt_func
11 | import pyFM.refine
12 | import pyFM.spectral as spectral
13 |
14 |
15 | class FunctionalMapping:
16 | """
17 | A class to compute functional maps between two meshes
18 |
19 | Attributes
20 | ----------------------
21 | mesh1 : TriMesh
22 | first mesh
23 | mesh2 : TriMesh
24 | second mesh
25 |
26 | descr1 :
27 | (n1,p) descriptors on the first mesh
28 | descr2 :
29 | (n2,p) descriptors on the second mesh
30 | D_a :
31 | (k1,k1) area-based shape differnence operator
32 | D_c :
33 | (k1,k1) conformal-based shape differnence operator
34 | FM_type :
35 | 'classic' | 'icp' | 'zoomout' which FM is currently used
36 | k1 :
37 | dimension of the first eigenspace (varies depending on the type of FM)
38 | k2 :
39 | dimension of the seconde eigenspace (varies depending on the type of FM)
40 | FM :
41 | (k2,k1) current FM
42 | p2p_21 :
43 | (n2,) point to point map associated to the current functional map
44 |
45 | Parameters
46 | ----------------------
47 | mesh1 : TriMesh
48 | first mesh
49 | mesh2 : TriMesh
50 | second mesh
51 | """
52 |
    def __init__(self, mesh1, mesh2):
        # Work on deep copies so the caller's meshes are never modified
        self.mesh1 = copy.deepcopy(mesh1)
        self.mesh2 = copy.deepcopy(mesh2)

        # DESCRIPTORS - (n_i, p) arrays, filled by self.preprocess
        self.descr1 = None
        self.descr2 = None

        # FUNCTIONAL MAP - one slot per refinement type; self.FM_type selects
        # which one the self.FM property returns
        self._FM_type = "classic"
        self._FM_base = None
        self._FM_icp = None
        self._FM_zo = None

        # AREA AND CONFORMAL SHAPE DIFFERENCE OPERATORS
        self.SD_a = None
        self.SD_c = None

        # Spectral dimensions (input / output); overridden by FM shape once fitted
        self._k1, self._k2 = None, None
73 |
74 | # DIMENSION PROPERTIES
75 | @property
76 | def k1(self):
77 | """ "
78 | Return the input dimension of the functional map
79 |
80 | Returns
81 | ----------------
82 | k1 : int
83 | dimension of the first eigenspace
84 | """
85 | if self._k1 is None and not self.preprocessed and not self.fitted:
86 | raise ValueError("No information known about dimensions")
87 | if self.fitted:
88 | return self.FM.shape[1]
89 | else:
90 | return self._k1
91 |
    @k1.setter
    def k1(self, k1):
        # Manually set the input spectral dimension (ignored once fitted,
        # since the property then reads the dimension from the FM shape)
        self._k1 = k1
95 |
96 | @property
97 | def k2(self):
98 | """
99 | Return the output dimension of the functional map
100 |
101 | Returns
102 | ----------------
103 | k2 : int
104 | dimension of the second eigenspace
105 | """
106 | if self._k2 is None and not self.preprocessed and not self.fitted:
107 | raise ValueError("No information known about dimensions")
108 | if self.fitted:
109 | return self.FM.shape[0]
110 | else:
111 | return self._k2
112 |
    @k2.setter
    def k2(self, k2):
        # Manually set the output spectral dimension (ignored once fitted,
        # since the property then reads the dimension from the FM shape)
        self._k2 = k2
116 |
117 | # FUNCTIONAL MAP SWITCHER (REFINED OR NOT)
    # FUNCTIONAL MAP SWITCHER (REFINED OR NOT)
    @property
    def FM_type(self):
        """
        Returns the type of functional map currently used, which determines
        which stored map the self.FM property returns

        Returns
        ----------------
        FM_type : str
            'classic' | 'icp' | 'zoomout'
        """
        return self._FM_type
129 |
130 | @FM_type.setter
131 | def FM_type(self, FM_type):
132 | if FM_type.lower() not in ["classic", "icp", "zoomout"]:
133 | raise ValueError(
134 | f'FM_type can only be set to "classic", "icp" or "zoomout", not {FM_type}'
135 | )
136 | self._FM_type = FM_type
137 |
    def change_FM_type(self, FM_type):
        """
        Changes the type of functional map to use

        Parameters
        ----------------
        FM_type : str
            'classic' | 'icp' | 'zoomout'

        Raises
        ----------------
        ValueError
            if FM_type is not one of the accepted values (via the property setter)
        """
        # Delegates validation to the FM_type property setter
        self.FM_type = FM_type
148 |
149 | @property
150 | def FM(self):
151 | """
152 | Returns the current functional map depending on the value of FM_type
153 |
154 | Returns
155 | ----------------
156 | FM :
157 | (k2,k1) current FM
158 | """
159 | if self.FM_type.lower() == "classic":
160 | return self._FM_base
161 | elif self.FM_type.lower() == "icp":
162 | return self._FM_icp
163 | elif self.FM_type.lower() == "zoomout":
164 | return self._FM_zo
165 |
    @FM.setter
    def FM(self, FM):
        # Setting FM always writes the 'classic' (non-refined) functional map,
        # regardless of the current FM_type
        self._FM_base = FM
169 |
170 | # BOOLEAN PROPERTIES
171 | @property
172 | def preprocessed(self):
173 | """
174 | check if enough information is provided to fit the model
175 |
176 | Returns
177 | ----------------
178 | preprocessed : bool
179 | whether the model is preprocessed
180 | """
181 | test_descr = (self.descr1 is not None) and (self.descr2 is not None)
182 | test_evals = (self.mesh1.eigenvalues is not None) and (
183 | self.mesh2.eigenvalues is not None
184 | )
185 | test_evects = (self.mesh1.eigenvectors is not None) and (
186 | self.mesh2.eigenvectors is not None
187 | )
188 | return test_descr and test_evals and test_evects
189 |
    @property
    def fitted(self):
        """
        check if the model has been fitted

        Returns
        ----------------
        fitted : bool
            whether the model is fitted
        """
        # The model counts as fitted as soon as the currently-selected FM exists
        return self.FM is not None
201 |
202 | def get_p2p(self, use_adj=False, n_jobs=1):
203 | """
204 | Computes a vertex to vertex map from mesh2 to mesh1
205 |
206 | Parameters
207 | --------------------------
208 | use_adj : bool
209 | whether to use the adjoint map.
210 | n_jobs :
211 | number of parallel jobs. Use -1 to use all processes
212 |
213 | Outputs:
214 | --------------------------
215 | p2p_21 :
216 | (n2,) match vertex i on shape 2 to vertex p2p_21[i] on shape 1
217 | """
218 | p2p_21 = spectral.mesh_FM_to_p2p(
219 | self.FM, self.mesh1, self.mesh2, use_adj=use_adj, n_jobs=n_jobs
220 | )
221 |
222 | return p2p_21
223 |
224 | def get_precise_map(
225 | self,
226 | precompute_dmin=True,
227 | use_adj=True,
228 | batch_size=None,
229 | n_jobs=1,
230 | verbose=False,
231 | ):
232 | """
233 | Returns a precise map from mesh2 to mesh1
234 |
235 | See [1] for details on notations.
236 |
237 | [1] - "Deblurring and Denoising of Maps between Shapes", by Danielle Ezuz and Mirela Ben-Chen.
238 |
239 | Parameters
240 | -------------------
241 | precompute_dmin :
242 | Whether to precompute all the values of delta_min. Faster but heavier in memory
243 | use_adj :
244 | use the adjoint method
245 | batch_size :
246 | If precompute_dmin is False, projects batches of points on the surface
247 | n_jobs :
248 | number of parallel process for nearest neighbor precomputation
249 |
250 | Returns
251 | -------------------
252 | P21 : scipy.sparse.csr_matrix
253 | (n2,n1) sparse - precise map from mesh2 to mesh1
254 | """
255 | if not self.fitted:
256 | raise ValueError("Model should be fit and fit to obtain p2p map")
257 |
258 | P21 = spectral.mesh_FM_to_p2p_precise(
259 | self.FM,
260 | self.mesh1,
261 | self.mesh2,
262 | precompute_dmin=precompute_dmin,
263 | use_adj=use_adj,
264 | batch_size=batch_size,
265 | n_jobs=n_jobs,
266 | verbose=verbose,
267 | )
268 | return P21
269 |
270 | def _get_lmks(self, landmarks, verbose=False):
271 | if np.asarray(landmarks).squeeze().ndim == 1:
272 | if verbose:
273 | print("\tUsing same landmarks indices for both meshes")
274 | lmks1 = np.asarray(landmarks).squeeze()
275 | lmks2 = lmks1.copy()
276 | else:
277 | lmks1, lmks2 = landmarks[:, 0], landmarks[:, 1]
278 |
279 | return lmks1, lmks2
280 |
281 | def preprocess(
282 | self,
283 | n_ev=(50, 50),
284 | n_descr=100,
285 | descr_type="WKS",
286 | landmarks=None,
287 | subsample_step=1,
288 | k_process=None,
289 | verbose=False,
290 | ):
291 | """
292 | Saves the information about the Laplacian mesh for opt
293 |
294 | Parameters
295 | -----------------------------
296 | n_ev : tuple
297 | (k1, k2) tuple - with the number of Laplacian eigenvalues to consider.
298 | n_descr : int
299 | number of descriptors to consider
300 | descr_type : str
301 | "HKS" | "WKS"
302 | landmarks : np.ndarray, optional
303 | (p,1|2) array of indices of landmarks to match.
304 | If (p,1) uses the same indices for both.
305 | subsample_step : int
306 | step with which to subsample the descriptors.
307 | k_process : int
308 | number of eigenvalues to compute for the Laplacian spectrum
309 | """
310 | self.k1, self.k2 = n_ev
311 |
312 | if k_process is None:
313 | k_process = 200
314 |
315 | use_lm = landmarks is not None and len(landmarks) > 0
316 |
317 | # Compute the Laplacian spectrum
318 | if verbose:
319 | print("\nComputing Laplacian spectrum")
320 | self.mesh1.process(max(self.k1, k_process), verbose=verbose)
321 | self.mesh2.process(max(self.k2, k_process), verbose=verbose)
322 |
323 | if verbose:
324 | print("\nComputing descriptors")
325 |
326 | # Extract landmarks indices
327 | if use_lm:
328 | lmks1, lmks2 = self._get_lmks(landmarks, verbose=False)
329 |
330 | # Compute descriptors
331 | if descr_type == "HKS":
332 | self.descr1 = sg.mesh_HKS(self.mesh1, n_descr, k=self.k1) # (N1, n_descr)
333 | self.descr2 = sg.mesh_HKS(self.mesh2, n_descr, k=self.k2) # (N2, n_descr)
334 |
335 | if use_lm:
336 | lm_descr1 = sg.mesh_HKS(
337 | self.mesh1, n_descr, landmarks=lmks1, k=self.k1
338 | ) # (N1, p*n_descr)
339 | lm_descr2 = sg.mesh_HKS(
340 | self.mesh2, n_descr, landmarks=lmks2, k=self.k2
341 | ) # (N2, p*n_descr)
342 |
343 | self.descr1 = np.hstack([self.descr1, lm_descr1]) # (N1, (p+1)*n_descr)
344 | self.descr2 = np.hstack([self.descr2, lm_descr2]) # (N2, (p+1)*n_descr)
345 |
346 | elif descr_type == "WKS":
347 | self.descr1 = sg.mesh_WKS(self.mesh1, n_descr, k=self.k1) # (N1, n_descr)
348 | self.descr2 = sg.mesh_WKS(self.mesh2, n_descr, k=self.k2) # (N2, n_descr)
349 |
350 | if use_lm:
351 | lm_descr1 = sg.mesh_WKS(
352 | self.mesh1, n_descr, landmarks=lmks1, k=self.k1
353 | ) # (N1, p*n_descr)
354 | lm_descr2 = sg.mesh_WKS(
355 | self.mesh2, n_descr, landmarks=lmks2, k=self.k2
356 | ) # (N2, p*n_descr)
357 |
358 | self.descr1 = np.hstack([self.descr1, lm_descr1]) # (N1, (p+1)*n_descr)
359 | self.descr2 = np.hstack([self.descr2, lm_descr2]) # (N2, (p+1)*n_descr)
360 |
361 | else:
362 | raise ValueError(f'Descriptor type "{descr_type}" not implemented')
363 |
364 | # Subsample descriptors
365 | self.descr1 = self.descr1[:, np.arange(0, self.descr1.shape[1], subsample_step)]
366 | self.descr2 = self.descr2[:, np.arange(0, self.descr2.shape[1], subsample_step)]
367 |
368 | # Normalize descriptors
369 | if verbose:
370 | print("\tNormalizing descriptors")
371 |
372 | no1 = np.sqrt(self.mesh1.l2_sqnorm(self.descr1)) # (p,)
373 | no2 = np.sqrt(self.mesh2.l2_sqnorm(self.descr2)) # (p,)
374 |
375 | self.descr1 /= no1[None, :]
376 | self.descr2 /= no2[None, :]
377 |
378 | if verbose:
379 | n_lmks = np.asarray(landmarks).shape[0] if use_lm else 0
380 | print(
381 | f"\n\t{self.descr1.shape[1]} out of {n_descr*(1+n_lmks)} possible descriptors kept"
382 | )
383 |
384 | return self
385 |
    def fit(
        self,
        w_descr=1e-1,
        w_lap=1e-3,
        w_dcomm=1,
        w_orient=0,
        orient_reversing=False,
        optinit="zeros",
        verbose=False,
    ):
        r"""
        Solves the functional map optimization problem :

        $\min_C \mu_{descr} \|C A - B\|^2 + \mu_{descr comm} \sum_i \|CD_{A_i} - D_{B_i} C \|^2 + \mu_{lap} \|C L_1 - L_2 C\|^2$
        $+ \mu_{orient} * \sum_i \|C G_{A_i} - G_{B_i} C\|^2$

        with A and B descriptors, D_Ai and D_Bi multiplicative operators extracted
        from the i-th descriptors, L1 and L2 laplacian on each shape, G_Ai and G_Bi
        orientation preserving (or reversing) operators association to the i-th descriptors.

        Parameters
        -------------------------------
        w_descr : float
            scaling for the descriptor preservation term
        w_lap : float
            scaling of the laplacian commutativity term
        w_dcomm : float
            scaling of the multiplicative operator commutativity
        w_orient : float
            scaling of the orientation preservation term (in addition to relative scaling with the other terms as in the original code)
        orient_reversing : bool
            Whether to use the orientation reversing term instead of the orientation preservation one
        optinit : str
            'random' | 'identity' | 'zeros' initialization. In any case, the first column of the functional map is computed by hand
            and not modified during optimization
        verbose : bool
            whether to print information during optimization
        """
        if optinit not in ["random", "identity", "zeros"]:
            raise ValueError(
                f"optinit arg should be 'random', 'identity' or 'zeros', not {optinit}"
            )

        if not self.preprocessed:
            self.preprocess()

        # Project the descriptors on the LB basis
        descr1_red = self.project(self.descr1, mesh_ind=1)  # (n_ev1, n_descr)
        descr2_red = self.project(self.descr2, mesh_ind=2)  # (n_ev2, n_descr)

        # Compute multiplicative operators associated to each descriptor
        # (only needed if the commutativity term is active)
        list_descr = []
        if w_dcomm > 0:
            if verbose:
                print("Computing commutativity operators")
            list_descr = self.compute_descr_op()  # (n_descr, ((k1,k1), (k2,k2)) )

        # Compute orientation operators associated to each descriptor
        # (only needed if the orientation term is active)
        orient_op = []
        if w_orient > 0:
            if verbose:
                print("Computing orientation operators")
            orient_op = self.compute_orientation_op(
                reversing=orient_reversing
            )  # (n_descr,)

        # Compute the squared differences between eigenvalues for LB commutativity
        ev_sqdiff = np.square(
            self.mesh1.eigenvalues[None, : self.k1]
            - self.mesh2.eigenvalues[: self.k2, None]
        )  # (n_ev2,n_ev1)
        # ev_sqdiff /= np.linalg.norm(ev_sqdiff)**2
        if verbose:
            print(f"\tScaling LBO commutativity weight by {1 / ev_sqdiff.sum():.1e}")
        # Normalize so the laplacian term does not depend on the spectrum scale
        ev_sqdiff /= ev_sqdiff.sum()

        # rescale orientation term
        if w_orient > 0:
            # Evaluate the energy of the other terms at an identity-like map with
            # the orientation weight set to 0, to scale the orientation term
            # relatively to them (as in the original implementation).
            args_native = (
                np.eye(self.k2, self.k1),
                w_descr,
                w_lap,
                w_dcomm,
                0,
                descr1_red,
                descr2_red,
                list_descr,
                orient_op,
                ev_sqdiff,
            )

            eval_native = opt_func.energy_func_std(*args_native)
            eval_orient = opt_func.oplist_commutation(
                np.eye(self.k2, self.k1), orient_op
            )
            w_orient *= eval_native / eval_orient
            if verbose:
                print(
                    f"\tScaling orientation preservation weight by {eval_native / eval_orient:.1e}"
                )

        # Arguments for the optimization problem
        args = (
            w_descr,
            w_lap,
            w_dcomm,
            w_orient,
            descr1_red,
            descr2_red,
            list_descr,
            orient_op,
            ev_sqdiff,
        )

        # Initialization (first column of the map is set in closed form)
        x0 = self.get_x0(optinit=optinit)

        if verbose:
            print(
                f"\nOptimization :\n"
                f"\t{self.k1} Ev on source - {self.k2} Ev on Target\n"
                f"\tUsing {self.descr1.shape[1]} Descriptors\n"
                f"\tHyperparameters :\n"
                f"\t\tDescriptors preservation :{w_descr:.1e}\n"
                f"\t\tDescriptors commutativity :{w_dcomm:.1e}\n"
                f"\t\tLaplacian commutativity :{w_lap:.1e}\n"
                f"\t\tOrientation preservation :{w_orient:.1e}\n"
            )

        # Optimization with L-BFGS-B on the flattened (k2,k1) functional map
        start_time = time.time()
        res = fmin_l_bfgs_b(
            opt_func.energy_func_std,
            x0.ravel(),
            fprime=opt_func.grad_energy_std,
            args=args,
        )
        opt_time = time.time() - start_time
        self.FM = res[0].reshape((self.k2, self.k1))

        if verbose:
            # res[2] is the information dictionary returned by fmin_l_bfgs_b
            print(
                "\tTask : {task}, funcall : {funcalls}, nit : {nit}, warnflag : {warnflag}".format(
                    **res[2]
                )
            )
            print(f"\tDone in {opt_time:.2f} seconds")
531 |
    def icp_refine(
        self, nit=10, tol=None, use_adj=False, overwrite=True, verbose=False, n_jobs=1
    ):
        """
        Refines the functional map using ICP and saves the result

        Parameters
        -------------------
        nit : int
            number of iterations of icp to apply
        tol : float
            threshold of change in functional map in order to stop refinement
            (only applies if nit is None)
        use_adj : bool
            forwarded to pyFM.refine.mesh_icp_refine -- presumably whether to use
            the adjoint instead of the transpose for map conversion; TODO confirm
            against the refine module
        overwrite : bool
            If True changes FM type to 'icp' so that next call of self.FM
            will be the icp refined FM
        verbose : bool
            whether to print refinement information
        n_jobs : int
            number of parallel jobs forwarded to pyFM.refine.mesh_icp_refine
        """
        if not self.fitted:
            raise ValueError("The Functional map must be fit before refining it")

        # The refined map is stored separately so the raw fitted map stays available
        self._FM_icp = pyFM.refine.mesh_icp_refine(
            self.FM,
            self.mesh1,
            self.mesh2,
            nit=nit,
            tol=tol,
            use_adj=use_adj,
            n_jobs=n_jobs,
            verbose=verbose,
        )

        if overwrite:
            # Switch the active map so self.FM now resolves to the ICP-refined one
            self.FM_type = "icp"
565 |
566 | def zoomout_refine(
567 | self, nit=10, step=1, subsample=None, overwrite=True, verbose=False
568 | ):
569 | """
570 | Refines the functional map using ZoomOut and saves the result
571 |
572 | Parameters
573 | -------------------
574 | nit : int
575 | number of iterations to do
576 | step : int
577 | increase in dimension at each Zoomout Iteration
578 | subsample : int
579 | number of points to subsample for ZoomOut. If None or 0, no subsampling is done.
580 | overwrite : bool
581 | If True changes FM type to 'zoomout' so that next call of self.FM
582 | will be the zoomout refined FM (larger than the other 2)
583 | """
584 | if not self.fitted:
585 | raise ValueError("The Functional map must be fit before refining it")
586 |
587 | if subsample is None or subsample == 0:
588 | sub = None
589 | else:
590 | sub1 = self.mesh1.extract_fps(subsample)
591 | sub2 = self.mesh2.extract_fps(subsample)
592 | sub = (sub1, sub2)
593 |
594 | self._FM_zo = pyFM.refine.mesh_zoomout_refine(
595 | self.FM,
596 | self.mesh1,
597 | self.mesh2,
598 | nit,
599 | step=step,
600 | subsample=sub,
601 | verbose=verbose,
602 | )
603 | if overwrite:
604 | self.FM_type = "zoomout"
605 |
606 | def compute_SD(self):
607 | """
608 | Compute the shape difference operators associated to the functional map
609 | """
610 | if not self.fitted:
611 | raise ValueError(
612 | "The Functional map must be fit before computing the shape difference"
613 | )
614 |
615 | self.D_a = spectral.area_SD(self.FM)
616 | self.D_c = spectral.conformal_SD(
617 | self.FM, self.mesh1.eigenvalues, self.mesh2.eigenvalues
618 | )
619 |
620 | def get_x0(self, optinit="zeros"):
621 | """
622 | Returns the initial functional map for optimization.
623 |
624 | Parameters
625 | ------------------------
626 | optinit : str
627 | 'random' | 'identity' | 'zeros' initialization.
628 | In any case, the first column of the functional map is computed by hand
629 | and not modified during optimization
630 |
631 | Returns
632 | ------------------------
633 | x0 : np.ndarray
634 | corresponding initial vector
635 | """
636 | if optinit == "random":
637 | x0 = np.random.random((self.k2, self.k1))
638 | elif optinit == "identity":
639 | x0 = np.eye(self.k2, self.k1)
640 | else:
641 | x0 = np.zeros((self.k2, self.k1))
642 |
643 | # Sets the equivalence between the constant functions
644 | ev_sign = np.sign(self.mesh1.eigenvectors[0, 0] * self.mesh2.eigenvectors[0, 0])
645 | area_ratio = np.sqrt(self.mesh2.area / self.mesh1.area)
646 |
647 | x0[:, 0] = np.zeros(self.k2)
648 | x0[0, 0] = ev_sign * area_ratio
649 |
650 | return x0
651 |
652 | def compute_descr_op(self):
653 | """
654 | Compute the multiplication operators associated with the descriptors
655 |
656 | Returns
657 | ---------------------------
658 | operators : list
659 | n_descr long list of ((k1,k1),(k2,k2)) operators.
660 | """
661 | if not self.preprocessed:
662 | raise ValueError(
663 | "Preprocessing must be done before computing the new descriptors"
664 | )
665 |
666 | pinv1 = self.mesh1.eigenvectors[:, : self.k1].T @ self.mesh1.A # (k1,n)
667 | pinv2 = self.mesh2.eigenvectors[:, : self.k2].T @ self.mesh2.A # (k2,n)
668 |
669 | list_descr = [
670 | (
671 | pinv1
672 | @ (self.descr1[:, i, None] * self.mesh1.eigenvectors[:, : self.k1]),
673 | pinv2
674 | @ (self.descr2[:, i, None] * self.mesh2.eigenvectors[:, : self.k2]),
675 | )
676 | for i in range(self.descr1.shape[1])
677 | ]
678 |
679 | return list_descr
680 |
681 | def compute_orientation_op(self, reversing=False, normalize=False):
682 | """
683 | Compute orientation preserving or reversing operators associated to each descriptor.
684 |
685 | Parameters
686 | ---------------------------------
687 | reversing : bool
688 | whether to return operators associated to orientation inversion instead
689 | of orientation preservation (return the opposite of the second operator)
690 | normalize : bool
691 | whether to normalize the gradient on each face. Might improve results
692 | according to the authors
693 |
694 | Returns
695 | ---------------------------------
696 | list_op : list
697 | (n_descr,) where term i contains (D1,D2) respectively of size (k1,k1) and
698 | (k2,k2) which represent operators supposed to commute.
699 | """
700 | n_descr = self.descr1.shape[1]
701 |
702 | # Precompute the inverse of the eigenvectors matrix
703 | pinv1 = self.mesh1.eigenvectors[:, : self.k1].T @ self.mesh1.A # (k1,n)
704 | pinv2 = self.mesh2.eigenvectors[:, : self.k2].T @ self.mesh2.A # (k2,n)
705 |
706 | # Compute the gradient of each descriptor
707 | grads1 = [
708 | self.mesh1.gradient(self.descr1[:, i], normalize=normalize)
709 | for i in range(n_descr)
710 | ]
711 | grads2 = [
712 | self.mesh2.gradient(self.descr2[:, i], normalize=normalize)
713 | for i in range(n_descr)
714 | ]
715 |
716 | # Compute the operators in reduced basis
717 | can_op1 = [
718 | pinv1
719 | @ self.mesh1.orientation_op(gradf)
720 | @ self.mesh1.eigenvectors[:, : self.k1]
721 | for gradf in grads1
722 | ]
723 |
724 | if reversing:
725 | can_op2 = [
726 | -pinv2
727 | @ self.mesh2.orientation_op(gradf)
728 | @ self.mesh2.eigenvectors[:, : self.k2]
729 | for gradf in grads2
730 | ]
731 | else:
732 | can_op2 = [
733 | pinv2
734 | @ self.mesh2.orientation_op(gradf)
735 | @ self.mesh2.eigenvectors[:, : self.k2]
736 | for gradf in grads2
737 | ]
738 |
739 | list_op = list(zip(can_op1, can_op2))
740 |
741 | return list_op
742 |
743 | def project(self, func, k=None, mesh_ind=1):
744 | """
745 | Projects a function on the LB basis
746 |
747 | Parameters
748 | -----------------------
749 | func : array
750 | (n1|n2,p) evaluation of the function
751 | mesh_in : int
752 | 1 | 2 index of the mesh on which to encode
753 |
754 | Returns
755 | -----------------------
756 | encoded_func : np.ndarray
757 | (n1|n2,p) array of decoded f
758 | """
759 | if k is None:
760 | k = self.k1 if mesh_ind == 1 else self.k2
761 |
762 | if mesh_ind == 1:
763 | return self.mesh1.project(func, k=k)
764 | elif mesh_ind == 2:
765 | return self.mesh2.project(func, k=k)
766 | else:
767 | raise ValueError(f"Only indices 1 or 2 are accepted, not {mesh_ind}")
768 |
769 | def decode(self, encoded_func, mesh_ind=2):
770 | """
771 | Decode a function from the LB basis
772 |
773 | Parameters
774 | -----------------------
775 | encoded_func : array
776 | (k1|k2,p) encoding of the functions
777 | mesh_ind : int
778 | 1 | 2 index of the mesh on which to decode
779 |
780 | Returns
781 | -----------------------
782 | func : np.ndarray
783 | (n1|n2,p) array of decoded f
784 | """
785 |
786 | if mesh_ind == 1:
787 | return self.mesh1.decode(encoded_func)
788 | elif mesh_ind == 2:
789 | return self.mesh2.decode(encoded_func)
790 | else:
791 | raise ValueError(f"Only indices 1 or 2 are accepted, not {mesh_ind}")
792 |
793 | def transport(self, encoded_func, reverse=False):
794 | """
795 | transport a function from LB basis 1 to LB basis 2.
796 | If reverse is True, then the functions are transposed the other way
797 | using the transpose of the functional map matrix
798 |
799 | Parameters
800 | -----------------------
801 | encoded_func : array
802 | (k1|k2,p) encoding of the functions
803 | reverse :
804 | bool If true, transpose from 2 to 1 using the transpose of the FM
805 |
806 | Returns
807 | -----------------------
808 | transp_func : np.ndarray
809 | (n2|n1,p) array of new encoding of the functions
810 | """
811 | if not self.preprocessed:
812 | raise ValueError(
813 | "The Functional map must be fit before transporting a function"
814 | )
815 |
816 | if not reverse:
817 | return self.FM @ encoded_func
818 | else:
819 | return self.FM.T @ encoded_func
820 |
821 | def transfer(self, func, reverse=False):
822 | """
823 | Transfer a function from mesh1 to mesh2.
824 | If 'reverse' is set to true, then the transfer goes
825 | the other way using the transpose of the functional
826 | map as approximate inverser transfer.
827 |
828 | Parameters
829 | ----------------------
830 | func :
831 | (n1|n2,p) evaluation of the functons
832 |
833 | Returns
834 | -----------------------
835 | transp_func : np.ndarray
836 | (n2|n1,p) transfered function
837 |
838 | """
839 | if not reverse:
840 | return self.decode(self.transport(self.project(func)))
841 |
842 | else:
843 | encoding = self.project(func, mesh_ind=2)
844 | return self.decode(self.transport(encoding, reverse=True), mesh_ind=1)
845 |
--------------------------------------------------------------------------------
/pyFM/mesh/__init__.py:
--------------------------------------------------------------------------------
1 | from .trimesh import TriMesh
2 |
--------------------------------------------------------------------------------
/pyFM/mesh/data/texture_1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RobinMagnet/pyFM/10675c000c568c54bd471e1e45544627860cb7ea/pyFM/mesh/data/texture_1.jpg
--------------------------------------------------------------------------------
/pyFM/mesh/data/texture_2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/RobinMagnet/pyFM/10675c000c568c54bd471e1e45544627860cb7ea/pyFM/mesh/data/texture_2.jpg
--------------------------------------------------------------------------------
/pyFM/mesh/file_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | from shutil import copyfile
3 | import numpy as np
4 |
5 |
def read_off(filepath, read_colors=False):
    """
    read a standard .off file

    Read a .off file containing vertex and face information, and possibly face colors.

    Parameters
    ----------
    filepath : str
        path to a '.off'-format file
    read_colors : bool, optional
        bool - whether to read colors if present

    Returns
    -------
    vertices : np.ndarray
        (n,3) array of vertices coordinates
    faces : np.ndarray
        (m,3) array of indices of face vertices. None if the file has no face.
    colors : np.ndarray, optional
        (m,3) Only if read_colors is True. Array of colors for each face. None if not found.

    Raises
    ------
    TypeError
        if the first line is not the "OFF" magic keyword
    """
    with open(filepath, "r") as f:
        # First line must be the OFF magic keyword
        if f.readline().strip() != "OFF":
            raise TypeError("Not a valid OFF header")

        # Skip comment lines until the "n_verts n_faces n_edges" header
        header_line = f.readline().strip().split(" ")
        while header_line[0].startswith("#"):
            header_line = f.readline().strip().split(" ")
        n_verts, n_faces, _ = [int(x) for x in header_line]

        vertices = [
            [float(x) for x in f.readline().strip().split()[:3]] for _ in range(n_verts)
        ]

        # Fix: initialize both so that read_colors=True with n_faces == 0 no
        # longer raises a NameError on the unbound 'colors' variable.
        faces = None
        colors = None
        if n_faces > 0:
            # Each face line is "k i_1 ... i_k [r g b]"; drop the leading vertex
            # count and any trailing comment tokens.
            face_elements = [
                [
                    int(x)
                    for x in f.readline().strip().split()[1:]
                    if not x.startswith("#")
                ]
                for _ in range(n_faces)
            ]
            face_elements = np.asarray(face_elements)
            faces = face_elements[:, :3]
            if read_colors and face_elements.shape[1] == 6:
                colors = face_elements[:, 3:6]

    if read_colors:
        return np.asarray(vertices), faces, colors

    return np.asarray(vertices), faces
61 |
62 |
def read_obj(
    filepath, load_normals=False, load_texture=False, load_texture_normals=False
):
    """
    Read a .obj file containing a mesh.

    Parameters
    -------------------------
    filepath : str
        path to the .obj file
    load_normals : bool, optional
        whether to load vertex normals if present
    load_texture : bool, optional
        whether to load texture coordinates if present. Reads both uv coordinates and face to texture vertex indices
    load_texture_normals : bool, optional
        whether to load texture normals if present. Reads face to texture normal indices

    Returns
    -------
    vertices : np.ndarray
        (n,3) array of vertices coordinates
    faces : np.ndarray
        (m,3) array of indices of face vertices, None if not present
    normals : np.ndarray, optional
        Only if load_normals is True. (n,3) array of vertex normals, None if not present
    uv : np.ndarray, optional
        Only if load_texture is True (n,2) array of uv coordinates, None if not present
    fvt : np.ndarray, optional
        Only if load_texture is True (m,3) array of indices of face to vertex texture (vt) indices, None if not present
    fnt : np.ndarray, optional
        Only if load_texture_normals is True (m,3) array of indices of face to texture normal indices, None if not present

    """

    with open(filepath, "r") as f:
        vertices = []
        faces = []
        normals = []

        uv = []
        fvt = []
        fnt = []

        for line in f:
            line = line.strip()
            if line == "" or line.startswith("#"):
                continue

            line = line.split()
            if line[0] == "v":
                vertices.append([float(x) for x in line[1:4]])

            elif load_texture and line[0] == "vt":
                # A 'vt' line may carry a single coordinate; pad v with 0.
                # Fix: the fallback previously read line[0] ("vt") instead of
                # the coordinate token, crashing on float("vt").
                uv.append(
                    [float(x) for x in line[1:3]]
                    if len(line) >= 3
                    else [float(line[1]), 0]
                )

            elif line[0] == "f":
                # Face entries look like "v", "v/vt", "v/vt/vn" or "v//vn",
                # with 1-based indices.
                faces.append([int(x.split("/")[0]) - 1 for x in line[1:]])

                if (load_texture or load_texture_normals) and line[1].count("/") > 0:
                    if load_texture and line[1].split("/")[1] != "":
                        fvt.append([int(x.split("/")[1]) - 1 for x in line[1:]])

                    # Fix: fnt was previously gated on load_normals, so asking
                    # only for texture normals always returned None.
                    if load_texture_normals and line[1].count("/") == 2:
                        if line[1].split("/")[2] != "":
                            fnt.append([int(x.split("/")[2]) - 1 for x in line[1:]])

            elif load_normals and line[0] == "vn":
                normals.append([float(x) for x in line[1:]])

    vertices = np.asarray(vertices)
    faces = np.asarray(faces) if len(faces) > 0 else None
    normals = np.asarray(normals) if len(normals) > 0 else None
    uv = np.asarray(uv) if len(uv) > 0 else None
    fvt = np.asarray(fvt) if len(fvt) > 0 else None
    fnt = np.asarray(fnt) if len(fnt) > 0 else None

    output = [vertices, faces]
    if load_normals:
        output.append(normals)

    if load_texture:
        output.append(uv)
        output.append(fvt)

    if load_texture_normals:
        output.append(fnt)

    return output
155 |
156 |
def write_off(filepath, vertices, faces, precision=None, face_colors=None):
    """
    Writes a mesh to a .off file

    The number of significant digit to use can be specified for memory saving.

    Parameters
    --------------------------
    filepath: str
        path to the .off file to write
    vertices: np.ndarray
        (n,3) array of vertices coordinates
    faces: np.ndarray
        (m,3) array of indices of face vertices
    precision: int, optional
        number of significant digits to write for each float. Defaults to 16
    face_colors: np.ndarray, optional
        (m,3) array of colors for each face, either ints in [0,255] or floats in [0,1]
    """
    n_vertices = vertices.shape[0]
    n_faces = faces.shape[0] if faces is not None else 0
    precision = precision if precision is not None else 16

    if face_colors is not None:
        assert face_colors.shape[0] == faces.shape[0], "PB"
        if face_colors.max() <= 1:
            # Convert [0,1] floats to 8-bit channels. Fix: use 255 (not 256) so
            # a value of 1.0 maps to the maximal valid channel value.
            face_colors = (255 * face_colors).astype(int)

    with open(filepath, "w") as f:
        # Header: magic keyword, then "n_vertices n_faces n_edges"
        f.write("OFF\n")
        f.write(f"{n_vertices} {n_faces} 0\n")

        for i in range(n_vertices):
            f.write(
                f'{" ".join([f"{coord:.{precision}f}" for coord in vertices[i]])}\n'
            )

        # Face lines are "3 i j k" optionally followed by "r g b". The leading
        # number is the vertex count of the face. Fix: colored faces previously
        # wrote "4" although exactly three vertex indices follow.
        for j in range(n_faces):
            if face_colors is None:
                f.write(f'3 {" ".join([str(tri) for tri in faces[j]])}\n')
            else:
                f.write(f'3 {" ".join([str(tri) for tri in faces[j]])} ')
                f.write(f'{" ".join([str(tri_c) for tri_c in face_colors[j]])}\n')
200 |
201 |
def write_obj(
    filepath,
    vertices,
    faces=None,
    uv=None,
    fvt=None,
    fnt=None,
    vertex_normals=None,
    mtl_path=None,
    mtl_name="material_0",
    precision=None,
):
    """
    Writes a .obj file with texture.

    If a material is used, it is only referenced through a `mtllib` statement;
    copying the .mtl file next to the .obj is handled by `write_obj_texture`.

    Parameters
    -------------------------
    filepath : str
        path to the .obj file to write
    vertices : np.ndarray
        (n,3) array of vertices coordinates
    faces : np.ndarray, optional
        (m,3) array of indices of face vertices
    uv : np.ndarray, optional
        (n,2) array of uv coordinates
    fvt : np.ndarray, optional
        (m,3) array of indices of face to vertex texture indices
    fnt : np.ndarray, optional
        (m,3) array of indices of face to texture normal indices
    vertex_normals : np.ndarray, optional
        (n,3) array of vertex normals
    mtl_path : str, optional
        path to the .mtl file defining the material
    mtl_name : str, optional
        name of the material in the .mtl file
    precision : int, optional
        number of significant digits to write for each float. Defaults to 16
    """

    n_vertices = len(vertices)
    n_faces = len(faces) if faces is not None else 0
    n_vt = len(uv) if uv is not None else 0
    precision = precision if precision is not None else 16

    # Warn on argument combinations that produce incomplete texture information
    if (mtl_path is not None) and (uv is not None) and (fvt is None):
        print("WARNING: Material and uv provided, but no face texture index")

    if mtl_path is not None and n_faces == 0:
        print("WARNING: Material provided, but no face. Ignoring material.")

    with open(filepath, "w") as f:
        # Reference the material file (only meaningful when faces exist)
        if n_faces > 0 and mtl_path is not None:
            mtl_filename = os.path.splitext(os.path.basename(mtl_path))[0]
            f.write(f"mtllib {mtl_path}\ng\n")

        f.write(f"# {n_vertices} vertices - {n_faces} faces - {n_vt} vertex textures\n")

        # Vertex positions: "v x y z"
        for i in range(n_vertices):
            f.write(
                f'v {" ".join([f"{coord:.{precision}f}" for coord in vertices[i]])}\n'
            )

        # Vertex normals: "vn x y z"
        if vertex_normals is not None:
            for i in range(len(vertex_normals)):
                f.write(
                    f'vn {" ".join([f"{coord:.{precision}f}" for coord in vertex_normals[i]])}\n'
                )

        # Texture coordinates: "vt u v"
        if uv is not None:
            for i in range(len(uv)):
                f.write(
                    f'vt {" ".join([f"{coord:.{precision}f}" for coord in uv[i]])}\n'
                )

        if n_faces > 0:
            if mtl_path is not None:
                f.write(f"g {mtl_filename}_export\n")
                f.write(f"usemtl {mtl_name}\n")

            # Faces use 1-based indices. The format depends on which of fvt/fnt
            # are provided: "v/vt/vn", "v/vt", "v//vn" or plain "v".
            for j in range(n_faces):
                if fvt is not None and fnt is not None:
                    f.write(
                        f'f {" ".join([f"{1+faces[j][k]:d}/{1+fvt[j][k]:d}/{1+fnt[j][k]:d}" for k in range(3)])}\n'
                    )

                elif fvt is not None:
                    f.write(
                        f'f {" ".join([f"{1+faces[j][k]:d}/{1+fvt[j][k]:d}" for k in range(3)])}\n'
                    )

                elif fnt is not None:
                    f.write(
                        f'f {" ".join([f"{1+faces[j][k]:d}//{1+fnt[j][k]:d}" for k in range(3)])}\n'
                    )

                else:
                    f.write(f'f {" ".join([str(1+tri) for tri in faces[j]])}\n')
301 |
302 |
def read_vert(filepath):
    """
    Read a .vert file from TOSCA dataset

    Each line contains the whitespace-separated coordinates of one vertex.

    Parameters
    ----------------------
    filepath : str
        path to file

    Returns
    ----------------------
    vertices : np.ndarray
        (n,3) array of vertices coordinates
    """
    # Use a context manager so the file handle is closed deterministically
    # (the previous version relied on the garbage collector to close it).
    with open(filepath, "r") as f:
        vertices = [[float(x) for x in line.strip().split()] for line in f]
    return np.asarray(vertices)
321 |
322 |
def read_tri(filepath, from_matlab=True):
    """
    Read a .tri file from TOSCA dataset

    Parameters
    ----------------------
    filepath : str
        path to file
    from_matlab : bool, optional
        If True, file indexing starts at 1 and indices are shifted back to 0-based

    Returns
    ----------------------
    faces : np.ndarray
        (m,3) array of vertices indices to define faces

    Raises
    ------
    ValueError
        if from_matlab is True but the file contains a 0 index (i.e. is 0-indexed)
    """
    # Context manager so the file handle is closed deterministically
    with open(filepath, "r") as f:
        faces = [[int(x) for x in line.strip().split()] for line in f]
    faces = np.asarray(faces)
    # A 1-indexed (matlab) file can never contain index 0. Fix: the previous
    # condition was inverted (min > 0), which raised on every valid 1-indexed
    # file and accepted the invalid ones.
    if from_matlab and np.min(faces) == 0:
        raise ValueError(
            "Indexing starts at 0, can't set the from_matlab argument to True "
        )
    return faces - int(from_matlab)
346 |
347 |
def write_mtl(filepath, texture_im="texture_1.jpg"):
    """
    Writes a .mtl file for a .obj mesh.

    All material parameters are fixed; only the texture image name varies.

    Parameters
    ----------------------
    filepath : str
        path to file
    texture_im : str, optional
        name of the image of texture. Default to 'texture_1.jpg', included in the package
    """
    # Fixed Phong-style material parameters, followed by the diffuse texture map.
    lines = [
        "newmtl material_0",
        f"Ka {0.2:.6f} {0.2:.6f} {0.2:.6f}",
        f"Kd {1.:.6f} {1.:.6f} {1.:.6f}",
        f"Ks {1.:.6f} {1.:.6f} {1.:.6f}",
        f"Tr {1:d}",
        f"Ns {0:d}",
        f"illum {2:d}",
        f"map_Kd {texture_im}",
    ]
    with open(filepath, "w") as f:
        # No trailing newline after the last line (matches the historical output)
        f.write("\n".join(lines))
370 |
371 |
372 | def _get_data_dir():
373 | """
374 | Return the directory where texture data is saved.
375 |
376 | Looks in the package directory.
377 |
378 | Returns
379 | ---------------------
380 | data_dir : str
381 | directory of texture data
382 | """
383 | curr_dir = os.path.dirname(__file__)
384 | return os.path.join(curr_dir, "data")
385 |
386 |
def get_uv(vertices, ind1, ind2, mult_const=1):
    """Extracts UV coordinates for a mesh for a .obj file

    Two coordinate columns of the vertex array are shifted to start at 0 and
    rescaled so the largest value equals mult_const.

    Parameters
    ----------
    vertices :
        (n,3) coordinates of vertices
    ind1 : int
        column index to use as first coordinate
    ind2 : int
        column index to use as second coordinate
    mult_const : float
        number of time to repeat the pattern

    Returns
    -------
    uv : float
        (n,2) UV coordinates of each vertex

    """
    # Fancy indexing returns a copy, so the input array is never modified.
    uv = vertices[:, [ind1, ind2]]
    uv = uv - uv.min()
    return mult_const * uv / uv.max()
412 |
413 |
def write_obj_texture(
    filepath,
    vertices,
    faces,
    uv=None,
    mtl_file="material.mtl",
    texture_im="texture_1.jpg",
    mtl_name=None,
    precision=6,
    vertex_normals=None,
    verbose=False,
):
    """
    Writes a .obj file with texture, with a simpler interface than `write_obj`.

    This function writes mtl files and copy textures if necessary

    Parameters
    -------------------------
    filepath : str
        path to the .obj file to write
    vertices : np.ndarray
        (n,3) coordinates of vertices
    faces : np.ndarray
        (m,3) faces defined by vertex indices
    uv: np.ndarray, optional
        (n,2) UV coordinates of each vertex. If None, no texture is used.
    mtl_file : str, optional
        name or path of the .mtl file. If an existing file, it is copied next to the
        .obj; otherwise a default material file with this name is written.
    texture_im : str, optional
        name or path of the .jpg file defining texture. If not an existing file, it is
        looked up among the textures bundled in the package's 'data' directory.
    mtl_name : str, optional
        name of the material in the .mtl file
    precision : int, optional
        number of significant digits to write for each float
    vertex_normals : np.ndarray, optional
        (n,3) array of vertex normals
    verbose : bool, optional
        whether to print information

    """
    assert filepath.endswith(
        ".obj"
    ), f"Filepath must end with .obj. Current filepath: {filepath}"

    # Texture handling is only triggered when uv coordinates are provided
    use_texture = uv is not None
    n_vertices = vertices.shape[0]
    n_faces = faces.shape[0] if faces is not None else 0
    precision = 16 if precision is None else precision

    # Directory the .obj lands in; material and texture files are placed there
    # so the relative paths written in the .obj resolve.
    out_dir_name = os.path.abspath(os.path.dirname(filepath))

    if use_texture:
        # if texture_im = /path/to/texture.jpg
        if os.path.isfile(texture_im):
            texture_name = os.path.basename(texture_im)  # texture.jpg
            texture_abspath = os.path.abspath(
                os.path.join(out_dir_name, texture_name)
            )  # /outdir/texture.jpg
            texture_relpath = os.path.join("./", texture_name)  # ./texture.jpg
            # Copy the texture next to the .obj so the relative path resolves
            if not os.path.isfile(texture_abspath):
                copyfile(texture_im, texture_abspath)

        else:
            # texture_im is texture.jpg or just texture
            if os.path.splitext(texture_im)[1] != ".jpg":
                texture_im = texture_im + ".jpg"

            # Fall back to the textures bundled in the package 'data' directory
            texture_im = os.path.join(os.path.dirname(__file__), "data", texture_im)
            texture_name = os.path.basename(texture_im)

            texture_abspath = os.path.abspath(os.path.join(out_dir_name, texture_name))
            texture_relpath = os.path.join("./", texture_name)
            if not os.path.isfile(texture_abspath):
                copyfile(texture_im, texture_abspath)
                if verbose:
                    print(f"Copy texure at {texture_abspath}")

        if os.path.isfile(mtl_file):
            # mtl_file = /path/to/material.mtl
            # NOTE(review): mtl_name is set to the file name (with extension), not
            # the material name declared inside the .mtl file -- confirm intended.
            mtl_name = os.path.basename(mtl_file)  # material.mtl
            mtl_abspath = os.path.abspath(
                os.path.join(out_dir_name, mtl_name)
            )  # /outdir/material.mtl
            mtl_relpath = os.path.join("./", mtl_name)  # ./material.mtl
            if not os.path.isfile(mtl_abspath):
                copyfile(mtl_file, mtl_abspath)
                if verbose:
                    print(f"Copy material at {mtl_abspath}")

        else:
            # mtl_file is material.mtl or just material
            if os.path.splitext(mtl_file)[1] != ".mtl":
                mtl_file = mtl_file + ".mtl"

            mtl_abspath = os.path.abspath(
                os.path.join(out_dir_name, mtl_file)
            )  # /outdir/material.mtl
            mtl_relpath = os.path.join(
                "./", os.path.basename(mtl_file)
            )  # ./material.mtl
            # Regenerate the default material file from scratch
            if os.path.isfile(mtl_abspath):
                os.remove(mtl_abspath)
            write_mtl(mtl_abspath, texture_im=texture_relpath)
            mtl_name = "material_0"
            if verbose:
                print(f"Write material at {mtl_abspath}")

        if mtl_name is None:
            mtl_name = "material_0"

    else:
        # No texture: write a plain .obj without material references
        mtl_relpath = None
        texture_relpath = None

    # Per-vertex uv parameterization: reuse the face connectivity as the
    # face-to-texture-vertex index array.
    write_obj(
        filepath,
        vertices=vertices,
        faces=faces,
        uv=uv,
        fvt=faces,
        mtl_path=mtl_relpath,
        mtl_name=mtl_name,
        precision=precision,
        vertex_normals=vertex_normals,
    )

    if verbose:
        print(f"Write .obj file at {filepath}")
543 |
--------------------------------------------------------------------------------
/pyFM/mesh/laplacian.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.sparse as sparse
3 |
4 |
def dia_area_mat(vertices, faces, faces_areas=None):
    """
    Compute the diagonal matrix of lumped vertex area for mesh laplacian.
    Entry i on the diagonal is the area of vertex i, approximated as one third
    of adjacent triangles

    Parameters
    -----------------------------
    vertices :
        (n,3) array of vertices coordinates
    faces :
        (m,3) array of vertex indices defining faces
    faces_areas :
        (m,) - Optional, array of per-face area

    Returns
    -----------------------------
    A : scipy.sparse.dia_matrix
        (n,n) sparse diagonal matrix of vertex areas in dia format
    """
    n_vertices = vertices.shape[0]

    if faces_areas is None:
        # Triangle area is half the norm of the cross product of two edges
        e1 = vertices[faces[:, 1]] - vertices[faces[:, 0]]  # (m,3)
        e2 = vertices[faces[:, 2]] - vertices[faces[:, 0]]  # (m,3)
        faces_areas = 0.5 * np.linalg.norm(np.cross(e1, e2), axis=1)  # (m,)

    # Each face contributes a third of its area to each of its three vertices.
    # Summation of duplicates is delegated to the sparse constructor.
    rows = faces.ravel(order="F")  # (3m,) column-wise vertex indices
    vals = np.tile(faces_areas / 3, 3)  # (3m,)
    vertex_areas = np.array(
        sparse.coo_matrix(
            (vals, (rows, np.zeros_like(rows))), shape=(n_vertices, 1)
        ).todense()
    ).flatten()

    return sparse.dia_matrix((vertex_areas, 0), shape=(n_vertices, n_vertices))
45 |
46 |
def fem_area_mat(vertices, faces, faces_areas=None):
    """
    Compute the area matrix for mesh laplacian using finite elements method.

    Entry (i,i) is 1/6 of the sum of the area of surrounding triangles
    Entry (i,j) is 1/12 of the sum of the area of triangles using edge (i,j)

    Parameters
    -----------------------------
    vertices :
        (n,3) array of vertices coordinates
    faces :
        (m,3) array of vertex indices defining faces
    faces_areas :
        (m,) - Optional, array of per-face area

    Returns
    -----------------------------
    A : scipy.sparse.csc_matrix
        (n,n) sparse area matrix in csc format
    """
    n_vertices = vertices.shape[0]

    if faces_areas is None:
        # Triangle area is half the norm of the cross product of two edges
        e1 = vertices[faces[:, 1]] - vertices[faces[:, 0]]  # (m,3)
        e2 = vertices[faces[:, 2]] - vertices[faces[:, 0]]  # (m,3)
        faces_areas = 0.5 * np.linalg.norm(np.cross(e1, e2), axis=1)  # (m,)

    # Directed edges of every face, mirroring the cotangent-weights construction
    i0, i1, i2 = faces[:, 0], faces[:, 1], faces[:, 2]
    I = np.concatenate([i0, i1, i2])  # (3m,)
    J = np.concatenate([i1, i2, i0])  # (3m,)
    S = np.tile(faces_areas, 3)  # (3m,)

    # Off-diagonal entries (both directions) weigh S/12, diagonal entries 2S/12;
    # the sparse constructor sums the duplicated (row, col) contributions.
    rows = np.concatenate([I, J, I])  # (9m,)
    cols = np.concatenate([J, I, I])  # (9m,)
    vals = np.concatenate([S, S, 2 * S]) / 12  # (9m,)

    return sparse.coo_matrix((vals, (rows, cols)), shape=(n_vertices, n_vertices)).tocsc()
88 |
89 |
def cotangent_weights(vertices, faces):
    """
    Compute the cotangent weights (stiffness) matrix for the mesh laplacian.

    For an edge (i,j), entry (i,j) is -0.5 * (cot(alpha_ij) + cot(beta_ij)),
    where alpha_ij and beta_ij are the angles facing edge (i,j) in its adjacent
    triangles. Entry (i,i) is minus the sum of the off-diagonal entries of
    row i, so each row sums to zero.

    (Note: the previous docstring was copy-pasted from the FEM area matrix and
    described the wrong quantity.)

    Parameters
    -----------------------------
    vertices :
        (n,3) array of vertices coordinates
    faces :
        (m,3) array of vertex indices defining faces

    Returns
    -----------------------------
    W : scipy.sparse.csc_matrix
        (n,n) sparse matrix of cotangent weights in csc format
    """
    N = vertices.shape[0]

    v1 = vertices[faces[:, 0]]  # (m,3)
    v2 = vertices[faces[:, 1]]  # (m,3)
    v3 = vertices[faces[:, 2]]  # (m,3)

    # Edge lengths indexed by opposite vertex
    u1 = v3 - v2
    u2 = v1 - v3
    u3 = v2 - v1

    L1 = np.linalg.norm(u1, axis=1)  # (m,)
    L2 = np.linalg.norm(u2, axis=1)  # (m,)
    L3 = np.linalg.norm(u3, axis=1)  # (m,)

    # Compute cosine of angles (A_k is the cosine of the angle at vertex k)
    A1 = np.einsum("ij,ij->i", -u2, u3) / (L2 * L3)  # (m,)
    A2 = np.einsum("ij,ij->i", u1, -u3) / (L1 * L3)  # (m,)
    A3 = np.einsum("ij,ij->i", -u1, u2) / (L1 * L2)  # (m,)

    # Use cot(arccos(x)) = x/sqrt(1-x^2); S holds half-cotangents per directed edge
    I = np.concatenate([faces[:, 0], faces[:, 1], faces[:, 2]])
    J = np.concatenate([faces[:, 1], faces[:, 2], faces[:, 0]])
    S = np.concatenate([A3, A1, A2])
    S = 0.5 * S / np.sqrt(1 - S**2)

    # Negative weights off-diagonal, positive accumulation on the diagonal
    In = np.concatenate([I, J, I, J])
    Jn = np.concatenate([J, I, I, J])
    Sn = np.concatenate([-S, -S, S, S])

    W = sparse.coo_matrix((Sn, (In, Jn)), shape=(N, N)).tocsc()
    return W
143 |
144 |
def laplacian_spectrum(W, A, spectrum_size=200):
    """
    Solves the generalized eigenvalue problem W x = lambda A x.
    Change solver if necessary

    Parameters
    -----------------------------
    W :
        (n,n) - sparse matrix of cotangent weights
    A :
        (n,n) - sparse matrix of area weights
    spectrum_size :
        int - number of eigenvalues to compute

    Returns
    -----------------------------
    eigenvalues : np.ndarray
        (spectrum_size,) - array of eigenvalues, sorted in increasing order
    eigenvectors : np.ndarray
        (n, spectrum_size) - eigenvectors as columns, sorted accordingly
    """
    try:
        # Shift-invert around a small negative sigma to get the smallest eigenvalues
        eigenvalues, eigenvectors = sparse.linalg.eigsh(
            W, k=spectrum_size, M=A, sigma=-0.01
        )

    except RuntimeError:
        # Fallback solver with a random initial guess
        print("Problem during LBO decomposition ! Please check")
        init_eigenvecs = np.random.random((A.shape[0], spectrum_size))
        eigenvalues, eigenvectors = sparse.linalg.lobpcg(
            W, init_eigenvecs, B=A, largest=False, maxiter=40
        )

    eigenvalues = np.real(eigenvalues)
    sorting_arr = np.argsort(eigenvalues)
    eigenvalues = eigenvalues[sorting_arr]
    # BUGFIX: eigenvectors are stored as COLUMNS, so reorder columns, not rows.
    # The previous `eigenvectors[sorting_arr]` shuffled vertex rows instead.
    eigenvectors = eigenvectors[:, sorting_arr]

    return eigenvalues, eigenvectors
186 |
--------------------------------------------------------------------------------
/pyFM/optimize/__init__.py:
--------------------------------------------------------------------------------
1 | from .base_functions import *
2 |
--------------------------------------------------------------------------------
/pyFM/optimize/base_functions.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
def descr_preservation(C, descr1_red, descr2_red):
    """
    Compute the descriptor preservation energy of a functional map.

    Parameters
    ---------------------
    C :
        (K2,K1) Functional map
    descr1_red :
        (K1,p) descriptors expressed in the first basis
    descr2_red :
        (K2,p) descriptors expressed in the second basis

    Returns
    ---------------------
    energy : float
        half the squared Frobenius norm of (C @ descr1_red - descr2_red)
    """
    residual = C @ descr1_red - descr2_red
    return 0.5 * np.sum(residual**2)
23 |
24 |
def descr_preservation_grad(C, descr1_red, descr2_red):
    """
    Compute the gradient of the descriptor preservation energy w.r.t. C.

    Parameters
    ---------------------
    C :
        (K2,K1) Functional map
    descr1_red :
        (K1,p) descriptors expressed in the first basis
    descr2_red :
        (K2,p) descriptors expressed in the second basis

    Returns
    ---------------------
    gradient : np.ndarray
        (K2,K1) gradient of the descriptor preservation squared norm
    """
    residual = C @ descr1_red - descr2_red
    return residual @ descr1_red.T
44 |
45 |
def LB_commutation(C, ev_sqdiff):
    """
    Compute the Laplace-Beltrami commutativity energy of a functional map.

    Parameters
    ---------------------
    C :
        (K2,K1) Functional map
    ev_sqdiff :
        (K2,K1) [normalized] matrix of squared eigenvalue differences

    Returns
    ---------------------
    energy : float
        LB commutativity squared norm
    """
    return 0.5 * np.sum(ev_sqdiff * np.square(C))
63 |
64 |
def LB_commutation_grad(C, ev_sqdiff):
    """
    Compute the gradient of the Laplace-Beltrami commutativity energy w.r.t. C.

    Parameters
    ---------------------
    C :
        (K2,K1) Functional map
    ev_sqdiff :
        (K2,K1) [normalized] matrix of squared eigenvalue differences

    Returns
    ---------------------
    gradient : np.ndarray
        (K2,K1) gradient of the LB commutativity squared norm
    """
    return np.multiply(C, ev_sqdiff)
82 |
83 |
def op_commutation(C, op1, op2):
    """
    Compute the operator commutativity energy of a functional map.
    Can be used with descriptor multiplication operators.

    Parameters
    ---------------------
    C :
        (K2,K1) Functional map
    op1 :
        (K1,K1) operator on first basis
    op2 :
        (K2,K2) operator on second basis

    Returns
    ---------------------
    energy : float
        operator commutativity squared norm
    """
    commutator = C @ op1 - op2 @ C
    return 0.5 * np.sum(commutator**2)
104 |
105 |
def op_commutation_grad(C, op1, op2):
    """
    Compute the gradient of the operator commutativity energy w.r.t. C.
    Can be used with descriptor multiplication operators.

    Parameters
    ---------------------
    C :
        (K2,K1) Functional map
    op1 :
        (K1,K1) operator on first basis
    op2 :
        (K2,K2) operator on second basis

    Returns
    ---------------------
    gradient : np.ndarray
        (K2,K1) gradient of the operator commutativity squared norm
    """
    commutator = op2 @ C - C @ op1
    return op2.T @ commutator - commutator @ op1.T
126 |
127 |
def oplist_commutation(C, op_list):
    """
    Compute the operator commutativity energy for a list of pairs of operators.
    Can be used with a list of descriptor multiplication operators.

    Parameters
    ---------------------
    C :
        (K2,K1) Functional map
    op_list :
        list of tuple( (K1,K1), (K2,K2) ) operators on first and second basis

    Returns
    ---------------------
    energy : float
        sum of the operators commutativity squared norms
    """
    return sum(
        0.5 * np.sum(np.square(C @ op1 - op2 @ C)) for op1, op2 in op_list
    )
150 |
151 |
def oplist_commutation_grad(C, op_list):
    """
    Compute the gradient of the operator commutativity energy for a list of
    pairs of operators. Can be used with a list of descriptor multiplication
    operators.

    Parameters
    ---------------------
    C :
        (K2,K1) Functional map
    op_list :
        list of tuple( (K1,K1), (K2,K2) ) operators on first and second basis

    Returns
    ---------------------
    gradient : np.ndarray
        (K2,K1) gradient of the sum of operators commutativity squared norms
    """
    gradient = 0
    for op1, op2 in op_list:
        commutator = op2 @ C - C @ op1
        gradient = gradient + (op2.T @ commutator - commutator @ op1.T)
    return gradient
173 |
174 |
def energy_func_std(
    C,
    descr_mu,
    lap_mu,
    descr_comm_mu,
    orient_mu,
    descr1_red,
    descr2_red,
    list_descr,
    orient_op,
    ev_sqdiff,
):
    """
    Evaluation of the energy for standard functional map computation.

    The total energy is a weighted sum of four terms, each only evaluated
    when its weight is strictly positive.

    Parameters:
    ----------------------
    C :
        (K2*K1) or (K2,K1) Functional map
    descr_mu :
        scaling of the descriptor preservation term
    lap_mu :
        scaling of the laplacian commutativity term
    descr_comm_mu :
        scaling of the descriptor commutativity term
    orient_mu :
        scaling of the orientation preservation term
    descr1_red :
        (K1,p) descriptors on first basis
    descr2_red :
        (K2,p) descriptors on second basis
    list_descr :
        p-uple( (K1,K1), (K2,K2) ) operators on first and second basis
        related to descriptors.
    orient_op :
        p-uple( (K1,K1), (K2,K2) ) operators on first and second basis
        related to orientation preservation operators.
    ev_sqdiff :
        (K2,K1) [normalized] matrix of squared eigenvalue differences

    Returns
    ------------------------
    energy : float
        value of the energy
    """
    k1 = descr1_red.shape[0]
    k2 = descr2_red.shape[0]
    C = C.reshape((k2, k1))

    # (weight, lazily-evaluated term) pairs; lambdas avoid computing unused terms
    weighted_terms = [
        (descr_mu, lambda: descr_preservation(C, descr1_red, descr2_red)),
        (lap_mu, lambda: LB_commutation(C, ev_sqdiff)),
        (descr_comm_mu, lambda: oplist_commutation(C, list_descr)),
        (orient_mu, lambda: oplist_commutation(C, orient_op)),
    ]

    energy = 0
    for weight, term in weighted_terms:
        if weight > 0:
            energy += weight * term()

    return energy
239 |
240 |
def grad_energy_std(
    C,
    descr_mu,
    lap_mu,
    descr_comm_mu,
    orient_mu,
    descr1_red,
    descr2_red,
    list_descr,
    orient_op,
    ev_sqdiff,
):
    """
    Evaluation of the gradient of the energy for standard functional map
    computation.

    Parameters:
    ----------------------
    C :
        (K2*K1) or (K2,K1) Functional map
    descr_mu :
        scaling of the descriptor preservation term
    lap_mu :
        scaling of the laplacian commutativity term
    descr_comm_mu :
        scaling of the descriptor commutativity term
    orient_mu :
        scaling of the orientation preservation term
    descr1_red :
        (K1,p) descriptors on first basis
    descr2_red :
        (K2,p) descriptors on second basis
    list_descr :
        p-uple( (K1,K1), (K2,K2) ) operators on first and second basis
        related to descriptors.
    orient_op :
        p-uple( (K1,K1), (K2,K2) ) operators on first and second basis
        related to orientation preservation operators.
    ev_sqdiff :
        (K2,K1) [normalized] matrix of squared eigenvalue differences

    Returns
    ------------------------
    gradient : np.ndarray
        (K2*K1,) flattened gradient of the energy
    """
    k1 = descr1_red.shape[0]
    k2 = descr2_red.shape[0]
    C = C.reshape((k2, k1))

    # (weight, lazily-evaluated gradient term) pairs
    weighted_grads = [
        (descr_mu, lambda: descr_preservation_grad(C, descr1_red, descr2_red)),
        (lap_mu, lambda: LB_commutation_grad(C, ev_sqdiff)),
        (descr_comm_mu, lambda: oplist_commutation_grad(C, list_descr)),
        (orient_mu, lambda: oplist_commutation_grad(C, orient_op)),
    ]

    gradient = np.zeros_like(C)
    for weight, grad_term in weighted_grads:
        if weight > 0:
            gradient += weight * grad_term()

    # The first column of C is not optimized: its gradient is forced to zero
    gradient[:, 0] = 0
    return gradient.reshape(-1)
306 |
--------------------------------------------------------------------------------
/pyFM/refine/__init__.py:
--------------------------------------------------------------------------------
1 | from .icp import icp_refine, mesh_icp_refine, mesh_icp_refine_p2p # noqa: F401
2 | from .zoomout import (
3 | zoomout_refine, # noqa: F401
4 | mesh_zoomout_refine, # noqa: F401
5 | mesh_zoomout_refine_p2p, # noqa: F401
6 | )
7 |
--------------------------------------------------------------------------------
/pyFM/refine/icp.py:
--------------------------------------------------------------------------------
1 | import time
2 | from tqdm.auto import tqdm
3 |
4 | import numpy as np
5 | import scipy.linalg
6 |
7 | from .. import spectral
8 |
9 |
def icp_iteration(FM_12, evects1, evects2, use_adj=False, n_jobs=1):
    """
    Performs a single iteration of ICP refinement.

    The functional map (or its adjoint) is converted to a pointwise map by
    comparing spectral embeddings of dirac functions on both shapes, the
    pointwise map is converted back to a functional map, and the result is
    projected onto (possibly rectangular) orthogonal matrices via its SVD.

    Parameters
    -------------------------
    FM_12 :
        (k2,k1) functional map in reduced basis
    evects1 :
        (n1,k1') first k' eigenvectors of the first basis (k1'>k1).
    evects2 :
        (n2,k2') first k' eigenvectors of the second basis (k2'>k2)
    use_adj :
        use the adjoint method
    n_jobs :
        number of parallel jobs. Use -1 to use all processes

    Returns
    --------------------------
    FM_refined : np.ndarray
        (k2,k1) An orthogonal functional map after one step of refinement
    """
    k2, k1 = FM_12.shape
    p2p_21 = spectral.FM_to_p2p(FM_12, evects1, evects2, use_adj=use_adj, n_jobs=n_jobs)
    FM_induced = spectral.p2p_to_FM(p2p_21, evects1[:, :k1], evects2[:, :k2])
    # Projection on the set of orthogonal (rectangular) matrices
    U, _, VT = scipy.linalg.svd(FM_induced)
    return U @ np.eye(k2, k1) @ VT
41 |
42 |
def icp_refine(
    FM_12,
    evects1,
    evects2,
    nit=10,
    tol=1e-10,
    use_adj=False,
    return_p2p=False,
    n_jobs=1,
    verbose=False,
):
    """
    Refine a functional map using the standard ICP algorithm.
    One can use the adjoint instead of the functional map for pointwise map computation.

    Parameters
    --------------------------
    FM_12 :
        (k2,k1) functional map from first to second basis
    evects1 :
        (n1,k1') first k' eigenvectors of the first basis (k1'>k1).
    evects2 :
        (n2,k2') first k' eigenvectors of the second basis (k2'>k2)
    nit : int
        Number of iterations to perform. If None or 0, iterates until the
        tol criterion is met (with a hard cap of 10000 iterations).
    tol : float
        Maximum change in a functional map to stop refinement
        (only used if nit is None or 0)
    use_adj :
        use the adjoint method
    return_p2p : bool
        if True returns the vertex to vertex map from 2 to 1
    n_jobs :
        number of parallel jobs. Use -1 to use all processes
    verbose : bool
        whether to print convergence information

    Returns
    ---------------------------
    FM_12_icp : np.ndarray
        ICP-refined functional map
    p2p_21_icp : np.ndarray
        only if return_p2p is set to True - the refined pointwise map
        from basis 2 to basis 1
    """
    FM_12_curr = FM_12.copy()
    iteration = 0
    if verbose:
        start_time = time.time()

    if nit is not None and nit > 0:
        myrange = tqdm(range(nit)) if verbose else range(nit)
    else:
        # Run until convergence with a safety cap on the number of iterations
        myrange = range(10000)

    for i in myrange:
        FM_12_icp = icp_iteration(
            FM_12_curr, evects1, evects2, use_adj=use_adj, n_jobs=n_jobs
        )
        # BUGFIX: the iteration counter was never incremented before
        iteration = 1 + i

        if nit is None or nit == 0:
            if verbose:
                print(
                    f"iteration : {1+i} - mean : {np.square(FM_12_curr - FM_12_icp).mean():.2e}"
                    f" - max : {np.max(np.abs(FM_12_curr - FM_12_icp)):.2e}"
                )
            if np.max(np.abs(FM_12_curr - FM_12_icp)) <= tol:
                break

        FM_12_curr = FM_12_icp.copy()

    # BUGFIX: the condition previously parsed as `nit is None or (nit == 0 and
    # verbose)`, so with nit=None and verbose=False this branch ran and raised
    # a NameError on the undefined start_time.
    if (nit is None or nit == 0) and verbose:
        run_time = time.time() - start_time
        print(f"ICP done with {iteration:d} iterations - {run_time:.2f} s")

    if return_p2p:
        p2p_21_icp = spectral.FM_to_p2p(
            FM_12_icp, evects1, evects2, use_adj=use_adj, n_jobs=n_jobs
        )  # (n2,)
        return FM_12_icp, p2p_21_icp

    return FM_12_icp
123 |
124 |
def mesh_icp_refine(
    FM_12,
    mesh1,
    mesh2,
    nit=10,
    tol=1e-10,
    use_adj=False,
    return_p2p=False,
    n_jobs=1,
    verbose=False,
):
    """
    Refine a functional map between two meshes using the ICP algorithm.

    Parameters
    --------------------------
    FM_12 :
        (k2,k1) functional map from mesh1 to mesh2
    mesh1 : TriMesh
        Source mesh
    mesh2 : TriMesh
        Target mesh
    nit : int
        Number of iterations to perform. If not specified, uses the tol parameter
    tol : float
        Maximum change in a functional map to stop refinement
        (only used if nit is not specified)
    use_adj :
        use the adjoint method
    return_p2p : bool
        if True returns the vertex to vertex map from 2 to 1
    n_jobs :
        number of parallel jobs. Use -1 to use all processes
    verbose : bool
        whether to print convergence information

    Returns
    ---------------------------
    FM_12_icp : np.ndarray
        ICP-refined functional map
    p2p_21_icp : np.ndarray
        only if return_p2p is set to True - the refined pointwise map
        from basis 2 to basis 1
    """
    k2, k1 = FM_12.shape

    # Truncate each spectral basis to the size of the functional map
    return icp_refine(
        FM_12,
        mesh1.eigenvectors[:, :k1],
        mesh2.eigenvectors[:, :k2],
        nit=nit,
        tol=tol,
        use_adj=use_adj,
        return_p2p=return_p2p,
        n_jobs=n_jobs,
        verbose=verbose,
    )
182 |
183 |
def mesh_icp_refine_p2p(
    p2p_21,
    mesh1,
    mesh2,
    k_init,
    nit=10,
    tol=1e-10,
    use_adj=False,
    return_p2p=False,
    n_jobs=1,
    verbose=False,
):
    """
    Refine a map between two meshes using the ICP algorithm, starting from an
    initial pointwise map instead of a functional map.

    Parameters
    --------------------------
    p2p_21 : np.ndarray
        (n2,) initial pointwise map from mesh2 to mesh1
    mesh1 : TriMesh
        Source mesh
    mesh2 : TriMesh
        Target mesh
    k_init : int
        Initial number of eigenvectors to use
    nit : int
        Number of iterations to perform. If not specified, uses the tol parameter
    tol : float
        Maximum change in a functional map to stop refinement
        (only used if nit is not specified)
    use_adj :
        use the adjoint method
    return_p2p : bool
        if True returns the vertex to vertex map from 2 to 1
    n_jobs :
        number of parallel jobs. Use -1 to use all processes
    verbose : bool
        whether to print convergence information

    Returns
    ---------------------------
    FM_12_icp : np.ndarray
        ICP-refined functional map
    p2p_21_icp : np.ndarray
        only if return_p2p is set to True - the refined pointwise map
        from basis 2 to basis 1
    """
    # Convert the initial pointwise map into a (k_init, k_init) functional map
    FM_12_init = spectral.mesh_p2p_to_FM(
        p2p_21, mesh1, mesh2, dims=k_init, subsample=None
    )

    return mesh_icp_refine(
        FM_12_init,
        mesh1,
        mesh2,
        nit=nit,
        tol=tol,
        use_adj=use_adj,
        return_p2p=return_p2p,
        n_jobs=n_jobs,
        verbose=verbose,
    )
249 |
--------------------------------------------------------------------------------
/pyFM/refine/zoomout.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from tqdm.auto import tqdm
3 |
4 | from .. import spectral
5 |
6 |
def zoomout_iteration(FM_12, evects1, evects2, step=1, A2=None, n_jobs=1):
    """
    Performs a single iteration of ZoomOut.

    Parameters
    --------------------
    FM_12 :
        (k2,k1) Functional map from evects1[:,:k1] to evects2[:,:k2]
    evects1 :
        (n1,k1') eigenvectors on source shape with k1' >= k1 + step.
        Can be a subsample of the original ones on the first dimension.
    evects2 :
        (n2,k2') eigenvectors on target shape with k2' >= k2 + step.
        Can be a subsample of the original ones on the first dimension.
    step : int
        step of increase of dimension. Can be a single int or a pair
        (step1, step2) with one value per shape.
    A2 :
        (n2,n2) sparse area matrix on target mesh, for vertex to vertex computation.
        If specified, the eigenvectors can't be subsampled !
    n_jobs :
        number of parallel jobs. Use -1 to use all processes

    Returns
    --------------------
    FM_zo : np.ndarray
        (k2+step2, k1+step1) zoomout-refined functional map
    """
    k2, k1 = FM_12.shape
    try:
        step1, step2 = step
    except TypeError:
        step1 = step2 = step

    # Current functional map -> pointwise map -> larger functional map
    p2p_21 = spectral.FM_to_p2p(FM_12, evects1, evects2, n_jobs=n_jobs)  # (n2,)
    FM_zo = spectral.p2p_to_FM(
        p2p_21, evects1[:, : k1 + step1], evects2[:, : k2 + step2], A2=A2
    )

    return FM_zo
45 |
46 |
def zoomout_refine(
    FM_12,
    evects1,
    evects2,
    nit=10,
    step=1,
    A2=None,
    subsample=None,
    return_p2p=False,
    n_jobs=1,
    verbose=False,
):
    """
    Refine a functional map with ZoomOut.
    Supports subsampling for each mesh, different step size, and approximate nearest neighbor.

    Parameters
    --------------------
    FM_12 :
        (K,K) Functional map from shape 1 to shape 2
    evects1 :
        (n1,k1) eigenvectors on source shape with k1 >= K + nit*step
    evects2 :
        (n2,k2) eigenvectors on target shape with k2 >= K + nit*step
    nit : int
        number of iterations of zoomout
    step :
        increase in dimension at each ZoomOut iteration. Can be a single int
        or a pair (step1, step2) with one value per shape.
    A2 :
        (n2,n2) sparse area matrix on target mesh.
    subsample : tuple or iterable of size 2
        Each gives indices of vertices to sample
        for faster optimization. If not specified, no subsampling is done.
    return_p2p : bool
        if True returns the vertex to vertex map.
    n_jobs :
        number of parallel jobs. Use -1 to use all processes
    verbose : bool
        whether to display a progress bar

    Returns
    --------------------
    FM_12_zo : np.ndarray
        zoomout-refined functional map from basis 1 to 2
    p2p_21_zo : np.ndarray
        only if return_p2p is set to True - the refined pointwise map from basis 2 to basis 1
    """
    k2_0, k1_0 = FM_12.shape
    try:
        step1, step2 = step
    except TypeError:
        step1 = step2 = step

    # Each iteration consumes `step` extra eigenvectors on each shape
    assert (
        k1_0 + nit * step1 <= evects1.shape[1]
    ), f"Not enough eigenvectors on source : \
        {k1_0 + nit*step1} are needed when {evects1.shape[1]} are provided"
    assert (
        k2_0 + nit * step2 <= evects2.shape[1]
    ), f"Not enough eigenvectors on target : \
        {k2_0 + nit*step2} are needed when {evects2.shape[1]} are provided"

    if subsample is not None:
        sub1, sub2 = subsample

    FM_12_zo = FM_12.copy()

    iterable = tqdm(range(nit)) if verbose else range(nit)
    for _ in iterable:
        if subsample is None:
            FM_12_zo = zoomout_iteration(
                FM_12_zo, evects1, evects2, A2=A2, step=step, n_jobs=n_jobs
            )
        else:
            # The area matrix cannot be combined with subsampled eigenvectors
            FM_12_zo = zoomout_iteration(
                FM_12_zo,
                evects1[sub1],
                evects2[sub2],
                A2=None,
                step=step,
                n_jobs=n_jobs,
            )

    if return_p2p:
        p2p_21_zo = spectral.FM_to_p2p(
            FM_12_zo, evects1, evects2, n_jobs=n_jobs
        )  # (n2,)
        return FM_12_zo, p2p_21_zo

    return FM_12_zo
137 |
138 |
def mesh_zoomout_refine(
    FM_12,
    mesh1,
    mesh2,
    nit=10,
    step=1,
    subsample=None,
    return_p2p=False,
    n_jobs=1,
    verbose=False,
):
    """
    Refine a functional map between meshes with ZoomOut.
    Supports subsampling for each mesh, different step size, and approximate nearest neighbor.

    Parameters
    --------------------
    FM_12 :
        (K,K) Functional map from mesh1 to mesh2
    mesh1 : TriMesh
        Source mesh
    mesh2 : TriMesh
        Target mesh
    nit : int
        number of iterations of zoomout
    step :
        increase in dimension at each ZoomOut iteration
    subsample : int or tuple or iterable of size 2
        If an int, a farthest point sampling of that size is computed on each
        mesh. Otherwise each entry gives indices of vertices to sample
        for faster optimization. If not specified, no subsampling is done.
    return_p2p : bool
        if True returns the vertex to vertex map.
    n_jobs :
        number of parallel jobs. Use -1 to use all processes
    verbose : bool
        whether to display progress information

    Returns
    --------------------
    FM_zo : zoomout-refined functional map
    p2p : only if return_p2p is set to True - the refined pointwise map
    """
    # A single integer means: farthest-point-sample that many vertices per mesh
    if np.issubdtype(type(subsample), np.integer):
        if verbose:
            print(f"Computing farthest point sampling of size {subsample}")
        subsample = (mesh1.extract_fps(subsample), mesh2.extract_fps(subsample))

    return zoomout_refine(
        FM_12,
        mesh1.eigenvectors,
        mesh2.eigenvectors,
        nit,
        step=step,
        A2=mesh2.A,
        subsample=subsample,
        return_p2p=return_p2p,
        n_jobs=n_jobs,
        verbose=verbose,
    )
201 |
202 |
def mesh_zoomout_refine_p2p(
    p2p_21,
    mesh1,
    mesh2,
    k_init,
    nit=10,
    step=1,
    subsample=None,
    return_p2p=False,
    n_jobs=1,
    p2p_on_sub=False,
    verbose=False,
):
    """
    Refine a map between meshes with ZoomOut, starting from an initial
    pointwise map instead of a functional map.

    Parameters
    --------------------
    p2p_21 : np.ndarray
        (n2,) initial pointwise map from mesh2 to mesh1
    mesh1 : TriMesh
        Source mesh
    mesh2 : TriMesh
        Target mesh
    k_init : int
        Initial number of eigenvectors to use
    nit : int
        number of iterations of zoomout
    step :
        increase in dimension at each ZoomOut iteration
    subsample : int or tuple or iterable of size 2
        If an int, a farthest point sampling of that size is computed on each
        mesh. Otherwise each entry gives indices of vertices to sample
        for faster optimization. If not specified, no subsampling is done.
    return_p2p : bool
        if True returns the vertex to vertex map.
    n_jobs :
        number of parallel jobs. Use -1 to use all processes
    p2p_on_sub : bool
        whether p2p_21 is defined on the subsampled vertices
    verbose : bool
        whether to display progress information

    Returns
    --------------------
    FM_zo : np.ndarray
        zoomout-refined functional map
    p2p : np.ndarray
        only if return_p2p is set to True - the refined pointwise map
    """
    # A single integer means: farthest-point-sample that many vertices per mesh
    if np.issubdtype(type(subsample), np.integer):
        if p2p_on_sub:
            raise ValueError("P2P can't be defined on undefined subsample")
        if verbose:
            print(f"Computing farthest point sampling of size {subsample}")
        subsample = (mesh1.extract_fps(subsample), mesh2.extract_fps(subsample))

    # Initial functional map from the pointwise map, possibly on the subsample
    init_sub = subsample if p2p_on_sub else None
    FM_12_init = spectral.mesh_p2p_to_FM(
        p2p_21, mesh1, mesh2, dims=k_init, subsample=init_sub
    )

    return zoomout_refine(
        FM_12_init,
        mesh1.eigenvectors,
        mesh2.eigenvectors,
        nit,
        step=step,
        A2=mesh2.A,
        subsample=subsample,
        return_p2p=return_p2p,
        n_jobs=n_jobs,
        verbose=verbose,
    )
280 |
--------------------------------------------------------------------------------
/pyFM/signatures/HKS_functions.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
def HKS(evals, evects, time_list, scaled=False):
    """
    Returns the Heat Kernel Signature for the given time values.

    HKS(n, t) = sum_k exp(-t * lambda_k) * phi_k(n)^2

    Parameters
    ------------------------
    evals :
        (K,) array of the K eigenvalues
    evects :
        (N,K) array with the K eigenvectors
    time_list :
        (num_T,) Time values to use
    scaled :
        (bool) whether to scale each time value by the heat trace

    Returns
    ------------------------
    HKS : np.ndarray
        (N,num_T) array where each column is the HKS for a given t
    """
    eigenvalues = np.asarray(evals).flatten()
    times = np.asarray(time_list).flatten()

    # Heat kernel coefficients exp(-t * lambda_k) for every (t, k) pair
    coefs = np.exp(-np.outer(times, eigenvalues))  # (num_T,K)
    signature = np.square(evects) @ coefs.T  # (N,num_T)

    if scaled:
        # Normalize each time slice by the heat trace sum_k exp(-t * lambda_k)
        signature = signature / coefs.sum(axis=1)[None, :]

    return signature
38 |
39 |
def lm_HKS(evals, evects, landmarks, time_list, scaled=False):
    """
    Returns the Heat Kernel Signature between some landmarks and all vertices,
    for the given time values.

    Parameters
    ------------------------
    evals :
        (K,) array of the K eigenvalues
    evects :
        (N,K) array with the K eigenvectors of the Laplace Beltrami operator
    landmarks :
        (p,) indices of landmarks to compute
    time_list :
        (num_T,) values of t to use
    scaled :
        (bool) whether to scale each time value by the heat trace

    Returns
    ------------------------
    landmarks_HKS : np.ndarray
        (N,p*num_T) array where each column is the HKS for a given t for some landmark
    """
    eigenvalues = np.asarray(evals).flatten()
    times = np.asarray(time_list).flatten()

    # Heat kernel coefficients exp(-t * lambda_k) for every (t, k) pair
    coefs = np.exp(-np.outer(times, eigenvalues))  # (num_T,K)
    # Eigenvector values at the landmarks, weighted for each time scale
    weighted_evects = coefs[:, None, :] * evects[None, landmarks, :]  # (num_T,p,K)

    landmarks_HKS = np.einsum("tpk,nk->ptn", weighted_evects, evects)  # (p,num_T,N)

    if scaled:
        heat_trace = coefs.sum(axis=1)  # (num_T,)
        landmarks_HKS = landmarks_HKS / heat_trace[None, :, None]

    # Flatten the landmark and time axes and put vertices first
    return landmarks_HKS.reshape(-1, evects.shape[0]).T  # (N,p*num_T)
75 |
76 |
def auto_HKS(evals, evects, num_T, landmarks=None, scaled=True):
    """
    Compute HKS with an automatic choice of time values.

    Time values are log-spaced on the interval given in the HKS paper, which
    only depends on the (absolute) eigenvalues.

    Parameters
    ------------------------
    evals :
        (K,) array of K eigenvalues
    evects :
        (N,K) array with K eigenvectors
    num_T :
        (int) number of time values to use
    landmarks :
        (p,) if not None, indices of landmarks to compute.
    scaled :
        (bool) whether to scale each time value

    Returns
    ------------------------
    HKS or lm_HKS : np.ndarray
        (N,num_T) or (N,p*num_T) array where each column is the HKS for a
        given t (possibly for some landmark)
    """
    abs_ev = sorted(np.abs(evals))
    # Interval [4 ln(10) / lambda_max, 4 ln(10) / lambda_2] from the HKS paper
    t_min = 4 * np.log(10) / abs_ev[-1]
    t_max = 4 * np.log(10) / abs_ev[1]
    t_list = np.geomspace(t_min, t_max, num_T)

    if landmarks is None:
        return HKS(abs_ev, evects, t_list, scaled=scaled)
    return lm_HKS(abs_ev, evects, landmarks, t_list, scaled=scaled)
107 |
108 |
def mesh_HKS(mesh, num_T, landmarks=None, k=None):
    """
    Compute the Heat Kernel Signature for a mesh

    Parameters
    ------------------------
    mesh : TriMesh
        mesh on which to compute the HKS
    num_T : int
        number of time values to use
    landmarks : np.ndarray, optional
        (p,) indices of landmarks to use
    k : int, optional
        number of eigenvalues to use

    Returns
    ------------------------
    HKS: np.ndarray
        (N,num_T) array where each line is the HKS for a given t
    """

    assert mesh.eigenvalues is not None, "Eigenvalues should be processed"

    if k is None:
        k = len(mesh.eigenvalues)
    else:
        # BUGFIX: the parenthesis was misplaced (`len(mesh.eigenvalues >= k)`),
        # which measured the length of a boolean array and never failed.
        assert (
            len(mesh.eigenvalues) >= k
        ), f"At least {k} eigenvalues should be computed, not {len(mesh.eigenvalues)}"

    return auto_HKS(
        mesh.eigenvalues[:k],
        mesh.eigenvectors[:, :k],
        num_T,
        landmarks=landmarks,
        scaled=True,
    )
146 |
--------------------------------------------------------------------------------
/pyFM/signatures/WKS_functions.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
def WKS(evals, evects, energy_list, sigma, scaled=False):
    """
    Returns the Wave Kernel Signature for some energy values.

    Parameters
    ------------------------
    evals :
        (K,) array of the K eigenvalues of the Laplace Beltrami operator
    evects :
        (N,K) array with the K corresponding eigenvectors
    energy_list :
        (num_E,) values of e to use
    sigma :
        (float) [positive] standard deviation to use
    scaled :
        (bool) Whether to scale each energy level

    Returns
    ------------------------
    WKS : np.ndarray
        (N,num_E) array where each column is the WKS for a given e
    """
    assert sigma > 0, f"Sigma should be positive ! Given value : {sigma}"

    evals = np.asarray(evals).flatten()
    # Discard (near-)zero eigenvalues, whose log is not usable.
    keep = np.where(evals > 1e-5)[0].flatten()
    evals = evals[keep]
    evects = evects[:, keep]

    log_evals = np.log(np.abs(evals))
    e_list = np.asarray(energy_list)

    # Gaussian weights in log-spectrum space, one row per energy value. (num_E,K)
    coefs = np.exp(-np.square(e_list[:, None] - log_evals[None, :]) / (2 * sigma**2))

    # (N,num_E) : natural_WKS[n,e] = sum_k coefs[e,k] * evects[n,k]**2
    natural_WKS = np.square(evects) @ coefs.T

    if not scaled:
        return natural_WKS

    inv_scaling = coefs.sum(1)  # (num_E,)
    return (1 / inv_scaling)[None, :] * natural_WKS
46 |
47 |
def lm_WKS(evals, evects, landmarks, energy_list, sigma, scaled=False):
    """
    Returns the Wave Kernel Signature for some landmarks and energy values.

    Parameters
    ------------------------
    evals :
        (K,) array of the K eigenvalues of the Laplace Beltrami operator
    evects :
        (N,K) array with the K corresponding eigenvectors
    landmarks :
        (p,) indices of landmarks to compute
    energy_list :
        (num_E,) values of e to use
    sigma : float
        standard deviation

    Returns
    ------------------------
    landmarks_WKS : np.ndarray
        (N,num_E*p) array where each column is the WKS for a given e for some landmark
    """
    assert sigma > 0, f"Sigma should be positive ! Given value : {sigma}"

    evals = np.asarray(evals).flatten()
    # NOTE(review): this cutoff (1e-2) differs from the 1e-5 used in WKS —
    # confirm whether the discrepancy is intentional.
    keep = np.where(evals > 1e-2)[0].flatten()
    evals = evals[keep]
    evects = evects[:, keep]

    e_list = np.asarray(energy_list)

    # Gaussian weights in log-spectrum space, one row per energy value. (num_E,K)
    coefs = np.exp(
        -np.square(e_list[:, None] - np.log(np.abs(evals))[None, :]) / (2 * sigma**2)
    )

    # Landmark rows of the eigenvectors, weighted per energy level. (num_E,p,K)
    weighted_evects = coefs[:, None, :] * evects[None, landmarks, :]

    # (p,num_E,N)
    landmarks_WKS = np.einsum("tpk,nk->ptn", weighted_evects, evects)

    if scaled:
        inv_scaling = coefs.sum(1)  # (num_E,)
        landmarks_WKS = (1 / inv_scaling)[None, :, None] * landmarks_WKS

    n_vertices = evects.shape[0]
    return landmarks_WKS.reshape(-1, n_vertices).T  # (N,p*num_E)
91 |
92 |
def auto_WKS(evals, evects, num_E, landmarks=None, scaled=True):
    """
    Compute WKS with an automatic choice of scale and energy

    Parameters
    ------------------------
    evals :
        (K,) array of K eigenvalues
    evects :
        (N,K) array with K eigenvectors
    num_E :
        (int) number of energy values to use
    landmarks :
        (p,) If not None, indices of landmarks to compute.

    Returns
    ------------------------
    WKS or lm_WKS : np.ndarray
        (N,num_E) or (N,p*num_E) array where each column is the WKS for a given e
        and possibly for some landmarks
    """
    abs_ev = sorted(np.abs(evals))

    # Bounds of the log-spectrum, skipping the first (near-zero) eigenvalue.
    log_min = np.log(abs_ev[1])
    log_max = np.log(abs_ev[-1])

    sigma = 7 * (log_max - log_min) / num_E

    # Shrink the interval so the Gaussians stay well inside the spectrum.
    energy_list = np.linspace(log_min + 2 * sigma, log_max - 2 * sigma, num_E)

    if landmarks is not None:
        return lm_WKS(abs_ev, evects, landmarks, energy_list, sigma, scaled=scaled)
    return WKS(abs_ev, evects, energy_list, sigma, scaled=scaled)
127 |
128 |
def mesh_WKS(mesh, num_E, landmarks=None, k=None):
    """
    Compute the Wave Kernel Signature for a mesh

    Parameters
    ------------------------
    mesh : TriMesh
        mesh on which to compute the WKS. Eigenvalues and eigenvectors
        must have been processed beforehand.
    num_E : int
        number of energy values to use
    landmarks : np.ndarray, optional
        (p,) indices of landmarks to use
    k : int, optional
        number of eigenvalues to use. If None, uses all processed eigenvalues.

    Returns
    ------------------------
    WKS: np.ndarray
        (N,num_E) array where each column is the WKS for a given energy
        (or (N,p*num_E) if landmarks are given)
    """
    assert mesh.eigenvalues is not None, "Eigenvalues should be processed"

    if k is None:
        k = len(mesh.eigenvalues)
    else:
        # BUG FIX: compare the *number* of eigenvalues to k. The previous
        # form len(mesh.eigenvalues >= k) took the length of a boolean
        # array, which is always N (truthy), so the check never fired.
        assert (
            len(mesh.eigenvalues) >= k
        ), f"At least ${k}$ eigenvalues should be computed, not {len(mesh.eigenvalues)}"

    return auto_WKS(
        mesh.eigenvalues[:k],
        mesh.eigenvectors[:, :k],
        num_E,
        landmarks=landmarks,
        scaled=True,
    )
165 |
--------------------------------------------------------------------------------
/pyFM/signatures/__init__.py:
--------------------------------------------------------------------------------
1 | from .HKS_functions import *
2 | from .WKS_functions import *
3 |
--------------------------------------------------------------------------------
/pyFM/spectral/__init__.py:
--------------------------------------------------------------------------------
1 | from .convert import *
2 | from .shape_difference import *
3 | from .nn_utils import knn_query
4 |
--------------------------------------------------------------------------------
/pyFM/spectral/convert.py:
--------------------------------------------------------------------------------
1 | """
2 | Python implementation of:
3 |
4 | [1] - "Deblurring and Denoising of Maps between Shapes", by Danielle Ezuz and Mirela Ben-Chen.
5 | """
6 |
7 | import scipy.linalg
8 | import numpy as np
9 |
10 | from .nn_utils import knn_query
11 | from . import projection_utils as pju
12 |
13 |
def p2p_to_FM(p2p_21, evects1, evects2, A2=None):
    """
    Compute a Functional Map from a vertex to vertex map (with possible subsampling).
    Can compute with the pseudo inverse of eigenvectors (if no subsampling) or least square.

    Parameters
    ------------------------------
    p2p_21 :
        (n2,) vertex to vertex map from target to source.
        For each vertex on the target shape, gives the index of the corresponding vertex on mesh 1.
        Can also be presented as a (n2,n1) sparse matrix.
    evects1 :
        (n1,k1) eigenvectors on source mesh. Possibly subsampled on the first dimension.
    evects2 :
        (n2,k2) eigenvectors on target mesh. Possibly subsampled on the first dimension.
    A2 :
        (n2,n2) area matrix of the target mesh. If specified, the eigenvectors can't be subsampled

    Returns
    -------------------------------
    FM_12 : np.ndarray
        (k2,k1) functional map corresponding to the p2p map given.
        Solved with pseudo inverse if A2 is given, else using least square.
    """
    # Pull back the source eigenvectors through the pointwise map.
    if np.asarray(p2p_21).ndim == 1:
        evects1_pb = evects1[p2p_21, :]
    else:
        evects1_pb = p2p_21 @ evects1

    if A2 is None:
        # No area weights available: solve in the least square sense.
        return scipy.linalg.lstsq(evects2, evects1_pb)[0]  # (k2,k1)

    if A2.shape[0] != evects2.shape[0]:
        raise ValueError(
            "Can't compute exact pseudo inverse with subsampled eigenvectors"
        )

    # Exact pseudo inverse: Phi_2^T A_2 (pulled-back Phi_1).
    if A2.ndim == 1:
        weighted_pb = A2[:, None] * evects1_pb
    else:
        weighted_pb = A2 @ evects1_pb

    return evects2.T @ weighted_pb  # (k2,k1)
56 |
57 |
def mesh_p2p_to_FM(p2p_21, mesh1, mesh2, dims=None, subsample=None):
    """
    Compute a Functional Map from a vertex to vertex map (with possible subsampling).

    Parameters
    ------------------------------
    p2p_21 :
        (n2,) vertex to vertex map from target to source.
        For each vertex on the target shape, gives the index of the corresponding vertex on mesh 1.
        Can also be presented as a (n2,n1) sparse matrix.
    mesh1 : TriMesh
        source mesh for the functional map. Requires enough processed eigenvectors.
    mesh2 : TriMesh
        target mesh for the functional map. Requires enough processed eigenvectors.
    dims : int, or 2-uple of int
        Dimension of the functional map to return.
        If None uses all the processed eigenvectors.
        If single int k , returns a (k,k) functional map
        If 2-uple of int (k1,k2), returns a (k2,k1) functional map
    subsample :
        None or size 2 iterable ((n1',), (n2',)). Subsample of vertices for both mesh. If specified the p2p map is between the two subsamples.

    Returns
    -------------------------------
    FM_12 : np.ndarray
        (k2,k1) functional map corresponding to the p2p map given.
    """
    # Resolve the requested output dimensions (k2, k1).
    if dims is None:
        k1, k2 = len(mesh1.eigenvalues), len(mesh2.eigenvalues)
    elif np.issubdtype(type(dims), np.integer):
        k1 = k2 = dims
    else:
        k1, k2 = dims

    if subsample is not None:
        # Subsampled version: least-square solve, no exact area weights.
        sub1, sub2 = subsample
        return p2p_to_FM(
            p2p_21,
            mesh1.eigenvectors[sub1, :k1],
            mesh2.eigenvectors[sub2, :k2],
            A2=None,
        )

    return p2p_to_FM(
        p2p_21, mesh1.eigenvectors[:, :k1], mesh2.eigenvectors[:, :k2], A2=mesh2.A
    )
102 |
103 |
def FM_to_p2p(FM_12, evects1, evects2, use_adj=False, n_jobs=1):
    """
    Obtain a point to point map from a functional map C.
    Compares embeddings of dirac functions on the second mesh Phi_2.T with embeddings
    of dirac functions of the first mesh Phi_1.T

    Either one can transport the first diracs with the functional map or the second ones with
    the adjoint, which leads to different results (adjoint is the mathematically correct way)

    Parameters
    --------------------------
    FM_12 :
        (k2,k1) functional map from mesh1 to mesh2 in reduced basis
    evects1 :
        (n1,k1') first k1' eigenvectors of the first basis (k1'>=k1).
        First dimension can be subsampled.
    evects2 :
        (n2,k2') first k2' eigenvectors of the second basis (k2'>=k2)
        First dimension can be subsampled.
    use_adj :
        use the adjoint method
    n_jobs :
        number of parallel jobs. Use -1 to use all processes

    Returns
    --------------------------
    p2p_21 : np.ndarray
        (n2,) match vertex i on shape 2 to vertex p2p_21[i] on shape 1,
        or equivalent result if the eigenvectors are subsampled.
    """
    k2, k1 = FM_12.shape

    assert (
        k1 <= evects1.shape[1]
    ), f"At least {k1} should be provided, here only {evects1.shape[1]} are given"
    assert (
        k2 <= evects2.shape[1]
    ), f"At least {k2} should be provided, here only {evects2.shape[1]} are given"

    if use_adj:
        # Transport the target diracs back through the adjoint C^T.
        emb1 = evects1[:, :k1]
        emb2 = evects2[:, :k2] @ FM_12
    else:
        # Transport the source diracs forward through C.
        emb1 = evects1[:, :k1] @ FM_12.T
        emb2 = evects2[:, :k2]

    # Nearest neighbor in the aligned spectral embedding gives the map.
    return knn_query(emb1, emb2, k=1, n_jobs=n_jobs)  # (n2,)
154 |
155 |
def mesh_FM_to_p2p(FM_12, mesh1, mesh2, use_adj=False, subsample=None, n_jobs=1):
    """
    Wrapper for `FM_to_p2p` using TriMesh class

    Parameters
    --------------------------
    FM_12 :
        (k2,k1) functional map in reduced basis
    mesh1 : TriMesh
        source mesh for the functional map
    mesh2 : TriMesh
        target mesh for the functional map
    use_adj : bool
        whether to use the adjoint map.
    subsample : None or size 2 iterable ((n1',), (n2',)).
        Subsample of vertices for both mesh.
        If specified the p2p map is between the two subsamples.
    n_jobs : int
        number of parallel jobs. Use -1 to use all processes

    Returns
    --------------------------
    p2p_21 : np.ndarray
        (n2,) match vertex i on shape 2 to vertex p2p_21[i] on shape 1
    """
    k2, k1 = FM_12.shape

    # Select the (possibly subsampled) eigenvector rows to use.
    if subsample is not None:
        sub1, sub2 = subsample
        evects1 = mesh1.eigenvectors[sub1, :k1]
        evects2 = mesh2.eigenvectors[sub2, :k2]
    else:
        evects1 = mesh1.eigenvectors[:, :k1]
        evects2 = mesh2.eigenvectors[:, :k2]

    return FM_to_p2p(FM_12, evects1, evects2, use_adj=use_adj, n_jobs=n_jobs)
202 |
203 |
def mesh_FM_to_p2p_precise(
    FM_12,
    mesh1,
    mesh2,
    precompute_dmin=True,
    use_adj=True,
    batch_size=None,
    n_jobs=1,
    verbose=False,
):
    """
    Computes a precise pointwise map between two meshes, that is for each vertex in mesh2, gives
    barycentric coordinates of its image on mesh1.
    See [1] for details on notations.

    [1] - "Deblurring and Denoising of Maps between Shapes", by Danielle Ezuz and Mirela Ben-Chen.

    Parameters
    ----------------------------
    FM_12 :
        (k2,k1) Functional map from mesh1 to mesh2
    mesh1 :
        Source mesh (for the functional map) with n1 vertices
    mesh2 :
        Target mesh (for the functional map) with n2 vertices
    precompute_dmin :
        Whether to precompute all the values of delta_min.
        Faster but heavier in memory
    use_adj :
        use the adjoint method
    batch_size :
        If precompute_dmin is False, projects batches of points on the surface
    n_jobs :
        number of parallel process for nearest neighbor precomputation

    Returns
    ----------------------------
    P_21 : scipy.sparse.csr_matrix
        (n2,n1) - precise point to point map from mesh2 to mesh1
    """
    k2, k1 = FM_12.shape

    # Build the aligned spectral embeddings (same two variants as FM_to_p2p).
    if use_adj:
        emb1 = mesh1.eigenvectors[:, :k1]
        emb2 = mesh2.eigenvectors[:, :k2] @ FM_12
    else:
        emb1 = mesh1.eigenvectors[:, :k1] @ FM_12.T
        emb2 = mesh2.eigenvectors[:, :k2]

    # Project each embedded vertex of mesh2 onto the embedded triangles of mesh1.
    return pju.project_pc_to_triangles(
        emb1,
        mesh1.facelist,
        emb2,
        precompute_dmin=precompute_dmin,
        batch_size=batch_size,
        n_jobs=n_jobs,
        verbose=verbose,
    )
264 |
--------------------------------------------------------------------------------
/pyFM/spectral/nn_utils.py:
--------------------------------------------------------------------------------
1 | from sklearn.neighbors import NearestNeighbors
2 |
3 |
def knn_query(X, Y, k=1, return_distance=False, n_jobs=1):
    """
    Query nearest neighbors: for each point of Y, find its k nearest points in X.

    Parameters
    -------------------------------
    X : np.ndarray
        (n1,p) first collection, in which neighbors are searched
    Y : np.ndarray
        (n2,p) second collection, the query points
    k : int
        number of neighbors to look for
    return_distance : bool
        whether to also return the nearest neighbor distances
    n_jobs : int
        number of parallel jobs. Set to -1 to use all processes

    Returns
    -------------------------------
    dists : np.ndarray
        (n2,k) or (n2,) if k=1 - ONLY returned if return_distance is True.
        Nearest neighbor distances.
    matches : np.ndarray
        (n2,k) or (n2,) if k=1 - indices in X of the nearest neighbors
    """
    tree = NearestNeighbors(
        n_neighbors=k, leaf_size=40, algorithm="kd_tree", n_jobs=n_jobs
    )
    tree.fit(X)
    dists, matches = tree.kneighbors(Y)

    if k == 1:
        # Squeeze only the neighbor axis: a bare .squeeze() collapsed the
        # result to a 0-d scalar when a single query point was given.
        dists = dists.squeeze(axis=1)
        matches = matches.squeeze(axis=1)

    if return_distance:
        return dists, matches
    return matches
41 |
--------------------------------------------------------------------------------
/pyFM/spectral/shape_difference.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from . import convert
4 |
5 |
def area_SD(FM):
    """
    Return the area-based shape difference operator computed from a functional map.

    Parameters
    ---------------------------
    FM :
        (k2,k1) functional map between two meshes

    Returns
    ----------------------------
    SD : np.ndarray
        (k1,k1) - Area based shape difference operator
    """
    return FM.T @ FM
22 |
23 |
def conformal_SD(FM, evals1, evals2):
    """
    Return the conformal shape difference operator computed from a functional map.

    Parameters
    ---------------------------
    FM :
        (k2,k1) functional map between two meshes
    evals1 :
        eigenvalues of the LBO on the source mesh (at least k1)
    evals2 :
        eigenvalues of the LBO on the target mesh (at least k2)

    Returns
    ----------------------------
    SD : np.ndarray
        (k1,k1) - Conformal shape difference operator
    """
    k2, k1 = FM.shape

    # pinv of the diagonal gracefully handles a (near-)zero first eigenvalue.
    inv_evals1 = np.linalg.pinv(np.diag(evals1[:k1]))
    weighted_FM = evals2[:k2, None] * FM

    return inv_evals1 @ FM.T @ weighted_FM
46 |
47 |
def compute_SD(mesh1, mesh2, k1=None, k2=None, p2p=None, SD_type="spectral"):
    """
    Computes shape difference operators from a vertex to vertex map.

    Parameters
    -----------------------------
    mesh1 : pyFM.mesh.TriMesh
        Source mesh with computed eigenvectors.
    mesh2 : pyFM.mesh.TriMesh
        Target mesh object with computed eigenvectors.
    k1 :
        Dimension to use on the source basis. If None, use all the computed eigenvectors
    k2 :
        Dimension to use on the target basis if SD_type is 'spectral'.
        If None and SD_type is spectral, uses 3*k1
    p2p :
        (n2,) vertex to vertex map between the two meshes.
        If None, set to the identity mapping
    SD_type :
        'spectral' | 'semican' : first option uses the LB basis on the target shape.
        Second option uses the canonical basis on the target shape

    Returns
    ----------------------------
    SD_a : np.ndarray
        (k1,k1) Area based shape difference operator
    SD_c : np.ndarray
        (k1,k1) Conformal shape difference operator
    """
    assert SD_type in [
        "spectral",
        "semican",
    ], f"Problem with type of SD type : {SD_type}"

    # Fill in default arguments.
    if k1 is None:
        k1 = len(mesh1.eigenvalues)
    if k2 is None:
        k2 = 3 * k1
    if p2p is None:
        p2p = np.arange(mesh2.n_vertices)

    if SD_type == "spectral":
        # Express the map in the reduced spectral bases, then use the
        # closed-form operators.
        FM = convert.mesh_p2p_to_FM(p2p, mesh1, mesh2, dims=(k1, k2))  # (K2,K1)
        SD_a = area_SD(FM)  # (K1,K1)
        SD_c = conformal_SD(FM, mesh1.eigenvalues, mesh2.eigenvalues)  # (K1,K1)
    else:  # 'semican': canonical (hat) basis on the target shape
        FM = mesh1.eigenvectors[p2p, :k1]  # (n2,K1)
        SD_a = FM.T @ mesh2.A @ FM  # (K1,K1)
        SD_c = (
            np.linalg.pinv(np.diag(mesh1.eigenvalues[:k1])) @ FM.T @ mesh2.W @ FM
        )  # (K1,K1)

    return SD_a, SD_c
104 |
--------------------------------------------------------------------------------
/pyFM/tests/test_data.py:
--------------------------------------------------------------------------------
def test_loading_data():
    """Smoke test: the example meshes load with both centering options."""
    from pyFM.mesh import TriMesh

    cat = TriMesh('examples/data/cat-00.off', area_normalize=True, center=False)
    lion = TriMesh('examples/data/lion-00.off', area_normalize=True, center=True)

    assert cat is not None
    assert lion is not None
8 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | scipy
3 | potpourri3d
4 | robust_laplacian
5 | tqdm
6 | scikit-learn
--------------------------------------------------------------------------------
/requirements_docs.txt:
--------------------------------------------------------------------------------
1 | furo
2 |
3 | myst_parser >=0.13
4 |
5 |
6 | # Sphinx + theme/plugins
7 | sphinx >=4.0
8 | sphinx-autodoc-typehints
9 | sphinx-copybutton
10 | sphinx-design
11 | sphinx-gallery
12 | sphinx-math-dollar
13 |
14 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = pyfmaps
3 | version = 1.0.1
4 | description = Python bindings for functional maps
5 | long_description = file: README.md
6 | long_description_content_type = text/markdown
7 | author = Robin Magnet
8 | license = MIT License
9 | license_file = LICENSE
10 | classifiers =
11 | Development Status :: 2 - Pre-Alpha
12 | Intended Audience :: Developers
13 | License :: OSI Approved :: MIT License
14 | Operating System :: OS Independent
15 | Programming Language :: Python
16 | Programming Language :: Python :: 3
17 | Programming Language :: Python :: 3 :: Only
18 | Programming Language :: Python :: 3.7
19 | Programming Language :: Python :: 3.8
20 | Programming Language :: Python :: 3.9
21 | Programming Language :: Python :: 3.10
22 | Topic :: Software Development :: Testing
23 | project_urls =
24 | Bug Tracker = https://github.com/RobinMagnet/pyFM/issues
25 | Documentation = https://github.com/RobinMagnet/pyFM/blob/master/example_notebook.ipynb
26 | Source Code = https://github.com/RobinMagnet/pyFM
27 | User Support = https://github.com/RobinMagnet/pyFM/issues
28 |
29 | [options]
30 | packages = find:
31 | install_requires =
32 | numpy
33 | scipy
34 | potpourri3d
35 | robust_laplacian
36 | tqdm
37 | scikit-learn
38 | python_requires = >=3.7
39 | include_package_data = True
40 |
41 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages

# All package metadata (name, version, dependencies, ...) lives in setup.cfg;
# this stub only triggers automatic package discovery.
setup(
    packages=find_packages()
)
--------------------------------------------------------------------------------