├── .gitignore ├── .gitmodules ├── .travis.yml ├── LICENSE ├── README.md ├── benchmark ├── cross.py ├── dot.py └── inv.py ├── cgtools ├── __init__.py ├── array_utils.py ├── circular.py ├── debug.py ├── fastmath │ ├── __init__.py │ ├── cross.py │ ├── dot.py │ ├── inv.py │ ├── kron.py │ └── polar_dec.py ├── histograms.py ├── indexing.py ├── io │ ├── __init__.py │ ├── hdf5.py │ ├── obj.py │ ├── off.py │ └── ply.py ├── mesh │ ├── __init__.py │ ├── barycentric.py │ ├── bunny_2503.obj │ ├── catmull_clark_subdiv.py │ ├── crouds.py │ ├── def_transfer.py │ ├── distance.py │ ├── div.py │ ├── geodesic.py │ ├── gradient.py │ ├── intersections.py │ ├── laplacian.py │ └── topology.py ├── neighbors.py ├── procrustes.py ├── skinning.py ├── vector.py └── vis │ ├── __init__.py │ ├── animator.py │ ├── correspondences.py │ ├── lines.py │ ├── mesh.py │ ├── points.py │ ├── vtk_util.py │ └── weights.py ├── environment.yml ├── examples ├── correspondences.py └── histogram.py ├── pytest.ini ├── scripts └── meshmorph ├── setup.cfg ├── setup.py ├── src ├── fast_obj.cpp ├── fastmath.cpp ├── igl_ext.cpp └── intersections.cpp └── tests ├── test_array_utils.py ├── test_circular.py ├── test_fastmath.py ├── test_indexing.py ├── test_intersections.py ├── test_procrustes.py ├── test_vector.py └── test_vis.py /.gitignore: -------------------------------------------------------------------------------- 1 | # mostly copied from https://github.com/github/gitignore/blob/master/Python.gitignore 2 | 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.py[cod] 6 | *$py.class 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | env/ 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | 29 | # Installer logs 30 | pip-log.txt 31 | pip-delete-this-directory.txt 32 | 33 | # Unit test / coverage reports 34 | htmlcov/ 35 | .tox/ 36 | 
.coverage 37 | .coverage.* 38 | .cache 39 | nosetests.xml 40 | coverage.xml 41 | *,cover 42 | .hypothesis/ 43 | 44 | # virtualenv 45 | .venv/ 46 | venv/ 47 | ENV/ 48 | 49 | tmp 50 | 51 | .vscode/ 52 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "include/eigen"] 2 | path = include/eigen 3 | url = https://github.com/eigenteam/eigen-git-mirror 4 | [submodule "include/libigl"] 5 | path = include/libigl 6 | url = https://github.com/libigl/libigl 7 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: cpp 2 | os: 3 | - linux 4 | - osx 5 | env: 6 | - PYTHON=2.7 7 | - PYTHON=3.5 8 | - CONDA=2.7 9 | - CONDA=3.5 10 | addons: 11 | apt: 12 | sources: 13 | - ubuntu-toolchain-r-test 14 | - deadsnakes 15 | packages: 16 | - g++-4.8 17 | - python3.5 18 | - python3.5-dev 19 | before_install: 20 | - | 21 | if [ "$TRAVIS_OS_NAME" = "linux" ]; then export CXX=g++-4.8 CC=gcc-4.8; fi 22 | if [ -n "$PYTHON" ]; then 23 | if [ "$TRAVIS_OS_NAME" = "osx" ] && [ "$PYTHON" = "3.5" ]; then 24 | brew update; brew install python3; 25 | fi 26 | pip install --user --upgrade pip virtualenv 27 | virtualenv -p python$PYTHON venv 28 | source venv/bin/activate 29 | elif [ -n "$CONDA" ]; then 30 | if [ "$TRAVIS_OS_NAME" = "linux" ]; then OS=Linux-x86_64; else OS=MacOSX-x86_64; fi 31 | wget -O miniconda.sh https://repo.continuum.io/miniconda/Miniconda${CONDA:0:1}-latest-$OS.sh 32 | bash miniconda.sh -b -p $HOME/miniconda 33 | export PATH="$HOME/miniconda/bin:$PATH" 34 | conda config --set always_yes yes --set changeps1 no 35 | conda config --add channels conda-forge 36 | conda update -q conda 37 | conda install -q conda-build 38 | conda create -q -n test-environment python=$CONDA 39 | source activate test-environment 40 | fi 
41 | install: 42 | - | 43 | if [ -n "$PYTHON" ]; then 44 | python setup.py sdist 45 | pip install --verbose dist/*.tar.gz 46 | elif [ -n "$CONDA" ]; then 47 | conda build conda.recipe 48 | conda install --use-local python_example 49 | fi 50 | script: 51 | - python tests/test.py 52 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Thomas Neumann 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | To setup and run under Anaconda in a separate environment: 2 | ``` 3 | $ conda env create -f environment.yml 4 | $ pytest 5 | $ python setup.py install 6 | ``` -------------------------------------------------------------------------------- /benchmark/cross.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import numpy as np 4 | from cgtools.fastmath import cross3 5 | 6 | 7 | if __name__ == '__main__': 8 | import timeit 9 | a = np.random.random((17000, 3)) 10 | b = np.random.random((17000, 3)) 11 | np_cross = np.cross 12 | def t_cross3(): 13 | cross3(a, b) 14 | def t_cross_np(): 15 | np_cross(a, b) 16 | print("blitzcross:", end=' ') 17 | speed1 = np.mean(timeit.repeat(t_cross3, repeat=5, number=100)) 18 | print(speed1) 19 | print("numpy.cross:", end=' ') 20 | speed2 = np.mean(timeit.repeat(t_cross_np, repeat=5, number=100)) 21 | print(speed2) 22 | print("speedup %.2f" % np.mean(speed2 / speed1)) 23 | -------------------------------------------------------------------------------- /benchmark/dot.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import numpy as np 4 | from cgtools.fastmath import matmat, matvec 5 | 6 | 7 | if __name__ == '__main__': 8 | import timeit 9 | a = np.random.random((10000, 4, 4)) 10 | b = np.random.random((10000, 4, 4)) 11 | 12 | def t_matmat(): 13 | matmat(a, b) 14 | def t_naive_np(): 15 | np.array(list(map(np.dot, a, b))) 16 | def t_matmul(): 17 | np.matmul(a, b) 18 | 19 | print("measuring performance of multiplying %d %dx%d matrices" % a.shape) 20 | 21 | timeit_args = dict(repeat=5, number=100) 22 | 23 | speed_matmat = np.mean(timeit.repeat(t_matmat, **timeit_args)) 24 | print("cgtools 
matmat:", speed_matmat) 25 | 26 | speed_matmul = np.mean(timeit.repeat(t_matmul, **timeit_args)) 27 | print("numpy matmul: %f (speedup %.2f)" % (speed_matmul, speed_matmul / speed_matmat)) 28 | 29 | speed_np = np.mean(timeit.repeat(t_naive_np, **timeit_args)) 30 | print("naive numpy: %f (speedup %.2f)" % (speed_np, speed_np / speed_matmat)) 31 | 32 | 33 | a = np.random.random((10000, 4, 4)) 34 | b = np.random.random((10000, 4)) 35 | 36 | def t_matvec(): 37 | return matvec(a, b) 38 | def t_matvec_naive_np(): 39 | np.array(list(map(np.dot, a, b))) 40 | def t_matvec_matmul(): 41 | return np.matmul(a, b[:, :, np.newaxis])[:, :, 0] 42 | 43 | print((np.allclose(t_matvec(), t_matvec_matmul()))) 44 | 45 | print() 46 | print("measuring performance of multiplying %d %dx%d matrices with %d %d-dimensional vectors" % tuple(list(a.shape) + list(b.shape))) 47 | 48 | speed_matmat = np.mean(timeit.repeat(t_matvec, **timeit_args)) 49 | print("cgtools matmat:", speed_matmat) 50 | 51 | speed_matmul = np.mean(timeit.repeat(t_matvec_matmul, **timeit_args)) 52 | print("numpy matmul: %f (speedup %.2f)" % (speed_matmul, speed_matmul / speed_matmat)) 53 | 54 | speed_np = np.mean(timeit.repeat(t_matvec_naive_np, **timeit_args)) 55 | print("naive numpy: %f (speedup %.2f)" % (speed_np, speed_np / speed_matmat)) 56 | -------------------------------------------------------------------------------- /benchmark/inv.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | 3 | import numpy as np 4 | from cgtools.fastmath import inv3, inv2 5 | 6 | if __name__ == '__main__': 7 | import timeit 8 | for dim in [2, 3]: 9 | print(("testing inv%d" % dim)) 10 | a = np.random.random((1000, dim, dim)) 11 | inv_func = inv2 if dim == 2 else inv3 12 | def t_cgtools(): 13 | inv_func(a) 14 | def t_np(): 15 | np.array(list(map(np.linalg.inv, a))) 16 | speed1 = np.mean(timeit.repeat(t_cgtools, repeat=5, number=100)) 17 | print((" cgtools: %f" % 
import numpy as np


def concatenate_fill(arrays, axis=0, fill_value=None):
    """
    Concatenate `arrays` along `axis`, padding mismatching extents.

    Arrays that are smaller than the result along the non-concatenated axes
    are padded with `fill_value`.  When `fill_value` is None it is chosen
    from the common result dtype: NaN for floating point types, 0 for
    unsigned integers and -1 for signed integers.

    >>> a = np.arange(2*3).reshape(2, 3)
    >>> b = np.arange(2*2).reshape(2, 2)
    >>> concatenate_fill((a, b), axis=0, fill_value=9)
    array([[0, 1, 2],
           [3, 4, 5],
           [0, 1, 9],
           [2, 3, 9]])
    """
    if len(arrays) == 0:
        raise ValueError("Need at least one array")
    if len(arrays) == 1:
        return arrays[0]
    ndim = arrays[0].ndim
    if any(a.ndim != ndim for a in arrays):
        raise ValueError("Requires arrays with the same number of dimensions")
    if len({a.shape for a in arrays}) == 1:
        # identical shapes - no padding required, plain concatenate does it
        return np.concatenate(arrays, axis=axis)
    if all(a.shape[axis] == 0 for a in arrays):
        # nothing to concatenate along `axis`; the first (empty) array will do
        return arrays[0]

    # output extent: sum along the concatenation axis, maximum elsewhere
    out_shape = [(sum if ax == axis else max)(a.shape[ax] for a in arrays)
                 for ax in range(ndim)]
    out_dtype = np.result_type(*arrays)
    if fill_value is None:
        if issubclass(out_dtype.type, np.floating):
            fill_value = np.nan
        elif issubclass(out_dtype.type, np.integer):
            # -1 for signed types; unsigned types get 0 (their minimum)
            fill_value = max(-1, np.iinfo(out_dtype).min)
        else:
            raise ValueError("cannot automatically decide for a fill_value for dtype=%s, please specify fill_value explicitely" % str(out_dtype))

    result = np.full(out_shape, fill_value, dtype=out_dtype)
    offset = 0
    for a in arrays:
        # copy each array into its slot, leaving the padding untouched
        dest = [slice(0, a.shape[ax], 1) for ax in range(a.ndim)]
        dest[axis] = slice(offset, offset + a.shape[axis], 1)
        result[tuple(dest)] = a
        offset += a.shape[axis]

    return result
spaces and distributions 5 | """ 6 | 7 | twopi = np.pi * 2 8 | 9 | def wrapped_mean(values, max_value): 10 | """ return the mean of values assuming a circular space that wraps at max_value """ 11 | values = np.asanyarray(values) 12 | angles = (values*twopi) / max_value 13 | mean_angle = circular_mean(angles) 14 | return (mean_angle*max_value) / twopi 15 | 16 | def circular_mean(angles): 17 | """ return the mean of values assuming a circular space 18 | e.g. circular_mean([0.1, 2*pi-0.1]) == 0 19 | """ 20 | angles = np.asanyarray(angles) 21 | mean_angle = np.arctan2(np.sin(angles).mean(), np.cos(angles).mean()) 22 | if mean_angle < 0: 23 | mean_angle = twopi+mean_angle 24 | return mean_angle 25 | 26 | def wrapped_distance(v1, v2, max_value = twopi): 27 | """ return the distance assuming distribution of v1 and v2 28 | where wrapping occurs at max_value """ 29 | v1 = np.asanyarray(v1) 30 | v2 = np.asanyarray(v2) 31 | diff = np.abs(v1 - v2) 32 | return np.minimum(max_value - diff, diff) 33 | 34 | -------------------------------------------------------------------------------- /cgtools/debug.py: -------------------------------------------------------------------------------- 1 | try: 2 | import ipdb 3 | debug = ipdb.set_trace 4 | except ImportError: 5 | from IPython.core import debugger 6 | debug = debugger.Pdb().set_trace 7 | 8 | __all__ = ['debug'] 9 | -------------------------------------------------------------------------------- /cgtools/fastmath/__init__.py: -------------------------------------------------------------------------------- 1 | from .dot import matmat, matvec 2 | from .inv import inv3, inv2 3 | from .cross import cross3 4 | from .kron import multikron 5 | from .polar_dec import polar_dec 6 | -------------------------------------------------------------------------------- /cgtools/fastmath/cross.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from . 
import numpy as np
from . import _fastmath_ext

__all__ = ['cross3']


def cross3(a, b):
    """
    Row-wise cross product of two arrays of 3D vectors (any leading shape,
    last axis must be 3).  Single 1D vectors fall back to np.cross.

    >>> a = np.random.random((10, 3))
    >>> b = np.random.random((10, 3))
    >>> c1 = cross3(a, b)
    >>> c2 = np.cross(a, b)
    >>> np.allclose(c1, c2)
    True
    """
    a = np.asarray(a)
    b = np.asarray(b)
    orig_shape = a.shape
    if a.ndim == 1 and b.ndim == 1:
        # just a single cross product
        return np.cross(a, b)
    if a.shape[-1] != 3 or b.shape[-1] != 3:
        raise ValueError("both arrays must be arrays of 3D vectors")
    if a.shape != b.shape:
        # BUG FIX: shapes are tuples, so the message must use %s - the old
        # %d made raising this ValueError itself fail with a TypeError
        raise ValueError("a and b must have the same shape, but shape(a)=%s and shape(b)=%s" % (a.shape, b.shape))
    # flatten the leading axes for the C extension, restore them afterwards
    a = a.reshape(-1, 3)
    b = b.reshape(-1, 3)
    return _fastmath_ext.cross3(a, b).reshape(orig_shape)
import numpy as np
from . import _fastmath_ext


__all__ = ['matmat', 'matvec']


def matmat(a, b):
    """
    Batched matrix-matrix product over arrays of stacked matrices
    (multiplies along the last two axes; leading axes are broadcast).

    >>> a = np.random.random((10, 3, 5))
    >>> b = np.random.random((10, 5, 4))
    >>> r1 = matmat(a, b)
    >>> r2 = np.array(list(map(np.dot, a, b)))
    >>> np.allclose(r1, r2)
    True
    """
    a = np.asarray(a)
    b = np.asarray(b)
    if a.ndim == 2 and b.ndim == 2:
        # just a single matrix-matrix product, plain numpy is fine here
        return np.dot(a, b)
    if a.ndim == 2 and b.ndim == 3:
        # a is a single matrix, multiply it onto every matrix in b
        return np.dot(b.swapaxes(-1, -2), a.T).swapaxes(-1, -2)
    if a.ndim == 3 and b.ndim == 2:
        # b is just a single matrix
        return np.dot(a, b)
    if a.shape[-1] != b.shape[-2]:
        # BUG FIX: added the missing space after the closing parenthesis
        raise ValueError("arrays (shapes %s and %s) must have suitable shape for matrix multiplication" % (a.shape, b.shape))

    if a.shape[:-2] != b.shape[:-2]:
        # broadcast the leading (batch) axes against each other
        shp = np.broadcast(a[..., 0, 0], b[..., 0, 0]).shape
        a_bc = np.broadcast_to(a, shp + a.shape[-2:])
        b_bc = np.broadcast_to(b, shp + b.shape[-2:])
    else:
        a_bc, b_bc = a, b
    # the extension expects contiguous (n, rows, cols) stacks
    a_contig = a_bc.reshape(-1, a.shape[-2], a.shape[-1])
    b_contig = b_bc.reshape(-1, b.shape[-2], b.shape[-1])
    return _fastmath_ext.matmat(a_contig, b_contig).reshape(a_bc.shape[:-2] + (a.shape[-2], b.shape[-1]))


def matvec(matrices, vectors):
    """
    Batched matrix-vector product over stacked matrices and vectors
    (leading axes are broadcast against each other).

    >>> a = np.random.random((10, 3, 4))
    >>> b = np.random.random((10, 4))
    >>> r1 = matvec(a, b)
    >>> r2 = np.array(list(map(np.dot, a, b)))
    >>> np.allclose(r1, r2)
    True
    """
    matrices = np.asarray(matrices)
    vectors = np.asarray(vectors)
    if matrices.shape[-1] != vectors.shape[-1]:
        # BUG FIX: the message said "vertices" where it meant "vectors";
        # an identical, unreachable duplicate of this check further down
        # has been removed
        raise ValueError("vectors and matrices should have same dimension")
    if matrices.ndim == 2 and vectors.ndim == 1:
        # just a single matrix-vector multiplication
        return np.dot(matrices, vectors)
    if matrices.ndim == 2:
        # a single matrix multiplied by multiple vectors - use numpy.dot
        return np.dot(matrices, vectors.T).T
    if vectors.ndim == 1:
        # multiple matrices multiplied by a single vector - use numpy.dot
        return np.dot(matrices, vectors)
    if matrices.shape[:-2] != vectors.shape[:-1]:
        # broadcast the leading (batch) axes against each other
        shp = np.broadcast(matrices[..., 0, 0], vectors[..., 0]).shape
        matrices_bc = np.broadcast_to(matrices, shp + matrices.shape[-2:])
        vectors_bc = np.broadcast_to(vectors, shp + vectors.shape[-1:])
    else:
        matrices_bc, vectors_bc = matrices, vectors
    matrices_contig = matrices_bc.reshape(-1, matrices.shape[-2], matrices.shape[-1])
    vectors_contig = vectors_bc.reshape(-1, vectors.shape[-1])
    return _fastmath_ext.matvec(matrices_contig, vectors_contig).reshape(matrices_bc.shape[:-2] + (matrices.shape[-2], ))
import numpy as np
from . import _fastmath_ext

__all__ = ['inv2', 'inv3']


def inv3(matrices):
    """Batched inverse of an array of stacked 3x3 matrices (any leading shape)."""
    if matrices.shape[-2:] != (3, 3):
        raise ValueError("Can only invert 3x3 matrices")
    Ts = matrices.reshape((-1, 3, 3))
    Qs = _fastmath_ext.inv3(Ts)
    # NOTE(review): inv3 casts back to the input dtype while inv2 does not -
    # looks inconsistent, but changing either would alter results for
    # non-float inputs; confirm intent before unifying.
    return Qs.reshape(matrices.shape).astype(matrices.dtype)


def inv2(matrices):
    """Batched inverse of an array of stacked 2x2 matrices (any leading shape)."""
    if matrices.shape[-2:] != (2, 2):
        raise ValueError("Can only invert 2x2 matrices")
    Ts = matrices.reshape((-1, 2, 2))
    Qs = _fastmath_ext.inv2(Ts)
    return Qs.reshape(matrices.shape)


# --- cgtools/fastmath/kron.py ---
# BUG FIX: removed an unused import of the private helper
# numpy.lib.stride_tricks._broadcast_shape, which no longer exists in recent
# numpy versions and made this module fail to import.

__all__ = ['multikron']


def multikron(a, b):
    """
    Parallel kronecker product over arrays of stacked matrices
    (e.g. performs np.kron over the last 2 dimensions of a and b).

    >>> a = np.random.random((10, 3, 5))
    >>> b = np.random.random((10, 5, 4))
    >>> r1 = multikron(a, b)
    >>> r2 = np.array(list(map(np.kron, a, b)))
    >>> np.allclose(r1, r2)
    True
    """
    a = np.asarray(a)
    b = np.asarray(b)
    if a.ndim == 2 and b.ndim == 2:
        # just a single kronecker product
        return np.kron(a, b)
    if a.shape[:-2] != b.shape[:-2]:
        # broadcast the leading (batch) axes against each other
        shp = np.broadcast(a[..., 0, 0], b[..., 0, 0]).shape
        a_bc = np.broadcast_to(a, shp + a.shape[-2:])
        b_bc = np.broadcast_to(b, shp + b.shape[-2:])
    else:
        a_bc, b_bc = a, b
    a_contig = a_bc.reshape(-1, a.shape[-2], a.shape[-1])
    b_contig = b_bc.reshape(-1, b.shape[-2], b.shape[-1])
    if a_contig.shape[0] != b_contig.shape[0]:
        raise ValueError("array shapes are not compatible: %s vs %s. Expect shapes to be compatible up to the last 2 axes" % (a.shape, b.shape))
    return _fastmath_ext.multikron(a_contig, b_contig).reshape(a_bc.shape[:-2] + (a.shape[-2]*b.shape[-2], a.shape[-1]*b.shape[-1]))


# --- cgtools/fastmath/polar_dec.py ---

__all__ = ['polar_dec']


def polar_dec(matrices):
    """
    Batched polar decomposition of an array of stacked matrices,
    e.g. given matrices [M1, M2, ..., Mn], decomposes each matrix M
    into a rotation R and a stretch S with M = R * S.

    (DOC FIX: the stretch factor of a polar decomposition is symmetric
    positive semi-definite, not "skew-symmetric" as previously stated.)

    >>> matrices = np.random.random((10, 3, 3))
    >>> rotations, stretches = polar_dec(matrices)
    >>> np.allclose([np.linalg.det(R) for R in rotations], 1.0)
    True
    """
    matrices = np.asarray(matrices)
    if matrices.ndim == 2:
        # promote a single matrix to a batch of one, unwrap on return
        matrices = matrices[np.newaxis]
        single_matrix = True
    else:
        single_matrix = False
    Rs, Ss = _fastmath_ext.polar_dec(matrices)
    if single_matrix:
        return Rs[0], Ss[0]
    else:
        return Rs, Ss
import numpy as np

# keep a reference to the builtin, since soft_histogram() shadows it with
# its `range` keyword argument
_range = range


def soft_histogram(data, nbins, range=None, normalize=True):
    """
    Return a soft-binned histogram of data consisting of nbins bins, with an
    optional range.  Each sample contributes linearly to its two nearest bins.

    If the range is None (default), the min/max of the given data is used.
    When normalize==True (default), the final histogram is normalized to 1.

    >>> list(soft_histogram([0, 0.5, 1], 2))
    [0.5, 0.5]
    >>> list(soft_histogram([0, 0.5, 1], 2, normalize=False))
    [1.5, 1.5]
    >>> list(soft_histogram([1], 3, range=(0,3), normalize=False))
    [0.5, 0.5, 0.0]
    >>> list(soft_histogram([3.0], 3, range=(0,3), normalize=False))
    [0.0, 0.0, 1.0]
    """
    # BUG FIX: np.asfarray was removed in numpy 2.0
    data = np.asarray(data, dtype=float)
    if range is None:
        dmin, dmax = data.min(), data.max()
    else:
        dmin, dmax = range
    if dmin >= dmax:
        raise ValueError("invalid range given(min >= max)")
    in_range = (data >= dmin) & (data <= dmax)
    if not all(in_range):
        # silently drop values outside of the requested range
        data = data[in_range]
    assert data.size > 0
    # move data into the range (0.5 ... nbins + 0.5)
    a = ((data - dmin) / (dmax - dmin)) * (nbins) + 0.5
    # determine the 2 bins each value falls into and calculate their weight
    upperweight = a - np.floor(a)
    lowerweight = 1 - upperweight
    lowerbin = a.astype(int) - 1
    upperbin = lowerbin + 1
    # build final soft histogram
    h = np.zeros(nbins)
    h1 = np.bincount(np.maximum(lowerbin, 0), weights=lowerweight)
    h2 = np.bincount(np.minimum(upperbin, nbins-1), weights=upperweight)
    h[:len(h1)] += h1
    h[:len(h2)] += h2[:nbins]  # an item exactly at dmax would have
                               # upperbin == nbins, outside the histogram
                               # (its weight is zero - ignore it)
    if normalize:
        h /= np.sum(h)
    return h


def soft_histogram_dd(samples, nbins, range, normed=False, wrapping=False):
    """
    D-dimensional soft-binned histogram of `samples` (shape (n, D)).

    nbins/range give the bin count and (min, max) per dimension; `wrapping`
    (scalar or per-dimension) makes that dimension circular.
    """
    D = samples.shape[1]
    if not hasattr(wrapping, '__iter__'):
        wrapping = [wrapping] * D
    nbins = np.array(nbins)
    # renamed from min/max to avoid shadowing the builtins
    lo, hi = list(map(np.array, list(zip(*range))))
    # bring the sample range into the range between [0.5 .. nbins + 0.5]
    a_0_n = ((samples - lo) / (hi - lo)) * nbins + 0.5
    # per dimension: which lower bin each sample falls into, and its weight
    lowerbin = []
    lowerweight = []
    for dim in _range(D):
        a_dim = a_0_n[:, dim]
        lowerweight.append(1 - a_dim + np.floor(a_dim))
        lowerbin.append(a_dim.astype(int) - 1)
    lowerbin = np.column_stack(lowerbin)
    lowerweight = np.column_stack(lowerweight)
    # use mgrid to generate corners of the D-dimensional unit cube,
    # which are needed as offsets to lowerbin
    cube = np.mgrid[[slice(0, 2, None)] * D].T.reshape((2**D, -1))
    corner_bins = lowerbin[:, np.newaxis, :] + cube[np.newaxis, :, :]
    for dim in _range(D):
        if wrapping[dim]:
            corner_bins[:, :, dim] = corner_bins[:, :, dim] % nbins[dim]
        else:
            corner_bins[:, :, dim] = np.clip(corner_bins[:, :, dim], 0, nbins[dim])
    w = np.abs(cube[np.newaxis, :, :] - lowerweight[:, np.newaxis, :])
    # BUG FIX: np.product was removed in numpy 2.0 (np.prod is identical);
    # for D==2 this is bilinear filtering
    weight = np.prod(w, axis=-1)
    # numpy's histogramdd supports weighted sample points, so hand it the
    # corner bins with their weights.
    # NOTE(review): the removed `normed` kwarg is mapped to `density` here;
    # for normed=False (the default) both behave identically - confirm the
    # normed=True semantics match what callers expect.
    return np.histogramdd(corner_bins.reshape((-1, D)), weights=weight.ravel(),
                          range=[(0, n) for n in nbins], bins=nbins, density=normed)[0]
import numpy as np
from scipy import sparse
from collections import defaultdict, OrderedDict
from itertools import count


def inverse_index_dict(_list):
    """ build a dict that maps items to their index in given _list

    >>> inverse_index_dict(['zero', 'one', 'two']) == {'zero': 0, 'two': 2, 'one': 1}
    True

    be careful with collisions: later occurrences win (TODO?)
    >>> inverse_index_dict([55, 66, 77, 55]) == {66: 1, 77: 2, 55: 3}
    True

    """
    return dict(zip(_list, range(len(_list))))

def linear_search(array, targets):
    """ search the (first) occurrence of each item from the list of targets and return its index, or -1 if it is not found
    >>> linear_search([1, 2, 1, 4], [1, 4])
    array([0, 3])
    >>> linear_search([1, 2, 1, 4], [5, 2, 1])
    array([-1, 1, 0])
    """
    array = np.asarray(array)
    # BUG FIX: np.int was removed in numpy 1.24 - use the builtin int dtype
    results = np.full(len(targets), -1, int)
    for i, target_i in enumerate(targets):
        s = np.where(array == target_i)[0]
        if len(s) > 0:
            results[i] = s[0]
    return results

def occurance_mask(c1, c2):
    """ boolean mask over c2 marking the items that also occur in c1 """
    c1_set = set(c1)
    # BUG FIX: np.bool was removed in numpy 1.24 - use the builtin bool dtype
    return np.array([c2_item in c1_set for c2_item in c2], bool)
import numpy as np
from collections import defaultdict, OrderedDict
from itertools import count


def group_all(query, data=None):
    """
    Reverse lookup: build a dict mapping each unique element of `query` to
    the list of data[i] at the positions where query[i] equals that element.
    With data=None (default) an index mapping is built, i.e. the values are
    lists of indices into query.  Unlike itertools.groupby, ALL equal
    elements are grouped, not only adjacent ones.

    >>> group_all([1, 2, 1, 2, 3], ['a', 'b', 'c', 'd', 'f'])
    {1: ['a', 'c'], 2: ['b', 'd'], 3: ['f']}
    >>> group_all([1, 2, 1, 2, 3])
    {1: [0, 2], 2: [1, 3], 3: [4]}
    >>> group_all("hello")
    {'h': [0], 'e': [1], 'l': [2, 3], 'o': [4]}
    """
    if data is None:
        data = count()
    groups = defaultdict(list)
    for key, value in zip(query, data):
        groups[key].append(value)
    return dict(groups)

def group_all_keep_order(query, data=None):
    """
    Same as group_all, but returns an OrderedDict whose keys appear in the
    same order as they first occur in query.

    >>> group_all_keep_order("haha")
    OrderedDict([('h', [0, 2]), ('a', [1, 3])])
    >>> group_all_keep_order("ahh")
    OrderedDict([('a', [0]), ('h', [1, 2])])
    """
    if data is None:
        data = count()
    groups = OrderedDict()
    for key, value in zip(query, data):
        groups.setdefault(key, []).append(value)
    return groups

def group_all_array(query):
    """
    Numpy-optimized version of group_all, returning a dict of
    (value -> array of indices).

    >>> group_all_array([1, 2, 1, 2, 3])
    {1: array([0, 2]), 2: array([1, 3]), 3: array([4])}
    """
    uniques, inverse = np.unique(query, return_inverse=True)
    return {value: np.flatnonzero(inverse == i)
            for i, value in enumerate(uniques)}
import numpy as np


def filter_reindex(condition, indices):
    """
    Filtering of index arrays.  Say you have target[indices] and you filter
    target by a boolean mask, target[condition]; then
    target[condition][indices] no longer works.  Fix it with:
    target[condition][filter_reindex(condition, indices)]

    >>> data = np.array(['a', 'b', 'c', 'd'])
    >>> indices = np.array([0, 3, 2, 0])
    >>> condition = (data == 'a') | (data == 'd')
    >>> print(data[condition][filter_reindex(condition, indices)])
    ['a' 'd' 'a']

    Index tuples (rows of an n-d index array) that reference any
    filtered-out element are dropped entirely:
    >>> verts = np.array([(0, 0), (1, 0), (1, 1), (0, 1), (0, 2)])
    >>> triangles = np.array([(0, 1, 2), (0, 2, 3), (3, 2, 4)])
    >>> vert_mask = [True, True, True, False, True]
    >>> print(filter_reindex(vert_mask, triangles))
    [[0 1 2]]

    Filtering everything that is referenced returns an empty index array.
    """
    condition = np.asarray(condition)
    # accept lists of indices, too (only ndarray attributes are used below)
    indices = np.asarray(indices)
    # BUG FIX: np.bool was removed in numpy 1.24 - compare to builtin bool
    if condition.dtype != bool:
        raise ValueError("condition must be a binary array")
    if condition.ndim > 1:
        raise ValueError("condition must be a 1D array")

    # reindex[i] = new position of element i after filtering by condition
    reindex = np.cumsum(condition) - 1

    # keep only indices whose referenced elements all survive the filter
    if indices.ndim == 1:
        ind_mask = condition[indices]
    else:
        ind_2d = indices.reshape(indices.shape[0], -1)
        # accept only index rows that have no items filtered
        ind_mask = condition[ind_2d].all(axis=-1)

    return reindex[indices][ind_mask]
def take_reindex(target_ixs, indices):
    """
    Similar to filter_reindex, but filters by index selection instead of a
    condition/mask. Given target[indices] and a selection target[target_ixs],
    returns new indices such that

        target[target_ixs][take_reindex(target_ixs, indices)]

    selects the same elements. In contrast to filter_reindex, the result is
    reordered according to the order given in target_ixs.

    >>> indices = np.array([0, 3, 2, 0])
    >>> print(take_reindex([3, 0], indices))
    [1 0 1]
    """
    indices = np.asarray(indices)
    # mark the selected targets with a mask, then drop/renumber old indices
    masked_ind = filter_reindex(mask_from_indices(target_ixs, indices.max() + 1), indices)
    # argsort maps rank-in-sorted-order back to position within the selection
    reindex = np.argsort(target_ixs)
    return reindex[masked_ind]


def valid_indices(indices, array_shape, return_mask=False):
    """
    Returns only the valid indices that fall into array_shape.
    If indices is a float array, the indices are rounded and converted
    to integers.

    >>> idx = np.array([(1, 0), (-1, 2), (-3, 5), (0, 0), (6, 7)])
    >>> valid_indices(idx, (2, 2))
    array([[1, 0],
           [0, 0]])

    Can also return the mask of which indices would be selected:

    >>> print(valid_indices(idx, (2, 2), return_mask=True)[1])
    [ True False False  True False]
    """
    if len(indices) == 0:
        # np.int alias was removed from numpy; use the builtin int
        return np.zeros_like(indices, int)

    indices = np.asarray(indices)
    if isinstance(array_shape, int):
        array_shape = (array_shape,)
    if (indices.ndim != 2 and len(array_shape) > 1):
        raise ValueError("indices array must be of shape (?, 2)")
    if (indices.ndim > 2 and len(array_shape) == 1):
        raise ValueError("indices array must be of shape (?, 1) or must be one-dimensional")

    if issubclass(indices.dtype.type, np.floating):
        indices = np.round(indices).astype(int)

    if not issubclass(indices.dtype.type, np.integer):
        raise ValueError("indices must be integer arrays")

    # promote 1-d indices to a (n, 1) column so one code path handles both
    was_1d = False
    if indices.ndim == 1:
        indices = indices[:, np.newaxis]
        was_1d = True

    mask = np.ones(indices.shape[0], bool)
    for axis in range(indices.shape[1]):
        mask &= (indices[:, axis] >= 0) & (indices[:, axis] < array_shape[axis])
    indices_masked = indices[mask]

    if was_1d:
        indices_masked = indices_masked.ravel()

    if return_mask:
        return indices_masked, mask
    else:
        return indices_masked

def mask_from_indices(ix, count=0):
    """
    Given an array of 1d indices, return a boolean mask that has all those
    indices set to True, and the remaining indices set to False.
    The returned mask has length ``count``; if count == 0, the length
    corresponds to the maximum element in ix.

    >>> print(mask_from_indices([1, 3]))
    [False  True False  True]
    >>> print(mask_from_indices([1, 3], 5))
    [False  True False  True False]
    """
    # bincount is at least max(ix)+1 long, extended to count via minlength
    return (np.bincount(ix, minlength=count) != 0)
def sparse_indicator_matrix(ci, num_cols, omega=1.):
    """
    Build a sparse constraint (indicator) matrix C: for each i in ci a row is
    made whose i'th entry is set to omega (default 1), so that C * x == x[ci].
    Such a matrix can be easily used in least squares problems.

    ci may also be a boolean mask, in which case its nonzero positions are used.

    >>> x = np.array([4, 5, 6, 7, 8], float)
    >>> C = sparse_indicator_matrix([1, 2, 4], 5)
    >>> print(C * x)
    [5. 6. 8.]
    """
    ci = np.asanyarray(ci)
    # np.bool alias was removed from numpy; builtin bool matches the dtype
    if ci.dtype == bool:
        ci = ci.nonzero()[0]
    data = np.ones(len(ci)) * omega
    ij = (np.arange(len(ci)), ci)
    return sparse.csr_matrix((data, ij), shape=(len(ci), num_cols))


def load_mesh(filename):
    """Load a mesh as (verts, faces), dispatching on the file extension."""
    loaders = {
        'obj': load_obj,
        'off': partial(load_off, no_colors=True),
    }
    ext = path.splitext(filename)[1].lower()[1:]
    if ext not in loaders:
        raise IOError("No loader for %s extension known, available file formats are: %s" % (ext, list(loaders.keys())))
    return loaders[ext](filename)


def save_mesh(filename, verts, tris, *args, **kw):
    """Save a mesh (verts, tris), dispatching on the file extension."""
    writers = {
        'obj': save_obj,
        'off': save_off,
    }
    ext = path.splitext(filename)[1].lower()[1:]
    if ext not in writers:
        # BUG FIX: this formerly referenced the undefined name "loaders",
        # raising NameError instead of the intended IOError
        raise IOError("No known writer for %s extension known, available file formats are: %s" % (ext, list(writers.keys())))
    return writers[ext](filename, verts, tris, *args, **kw)


def save_mesh_animation(filename, verts, tris, **kwargs):
    """
    Save an animated mesh (verts: n_frames x n_verts x 3, tris: n_tris x 3)
    into an HDF5 file. Extra keyword arguments are stored as additional
    datasets; the special keyword "attributes" (a dict) is stored as
    HDF5 file attributes.
    """
    verts = np.asarray(verts)
    tris = np.asarray(tris)
    assert verts.ndim == 3
    assert tris.ndim == 2
    with h5py.File(filename, 'w') as f:
        f.create_dataset('verts', data=verts, compression='gzip')
        f['tris'] = tris
        for k, v in kwargs.items():
            if k == 'attributes':
                for an, av in v.items():
                    f.attrs[an] = av
            else:
                f[k] = v
    print("saved mesh animation %s" % filename)

def load_mesh_animation(filename, *additional_datasets):
    """
    Load verts, tris (and any additionally requested datasets, in order)
    from an HDF5 animation file. Missing datasets yield None.
    """
    r = []
    with h5py.File(filename, 'r') as f:
        for name in ['verts', 'tris'] + list(additional_datasets):
            if name in f:
                # Dataset.value was removed in h5py 3.0; [()] reads the
                # full dataset as a numpy array
                r.append(f[name][()])
            else:
                print("[warn] non-existent dataset %s requested, returning None" % name)
                r.append(None)
    return r

def load_first_frame(filename, *additional_datasets):
    """Load only the first frame of 'verts', plus 'tris' and any extra datasets."""
    r = []
    with h5py.File(filename, 'r') as f:
        # BUG FIX: f['verts'][0] already yields an ndarray; the old
        # ".value" access on it raised AttributeError
        r.append(f['verts'][0])
        for name in ['tris'] + list(additional_datasets):
            r.append(f[name][()])
    return r

def save_blendshapes(filename, shapes, tris, blendshape_names=None):
    """
    Save a list of blendshapes into an HDF5 file. shapes[0] is stored as
    'default'; the rest are numbered '%03d' (optionally suffixed with a name).
    """
    with h5py.File(filename, 'w') as f:
        f['tris'] = tris
        for i, s in enumerate(shapes):
            if i == 0:
                name = 'default'
            else:
                name = "%03d_%s" % (i, blendshape_names[i-1]) \
                    if blendshape_names is not None and len(blendshape_names) >= i and blendshape_names[i-1] is not None \
                    else '%03d' % i
            f[name] = s
    print("saved blendshapes %s" % filename)

def save_components_as_blendshapes(filename, verts0, tris, components, names=None):
    """Store PCA-style components as blendshapes, offset by the mean shape verts0."""
    blendshapes = components + verts0[np.newaxis]
    save_blendshapes(filename, [verts0] + list(blendshapes), tris, blendshape_names=names)
blendshapes = components + verts0[np.newaxis] 54 | save_blendshapes(filename, [verts0] + list(blendshapes), tris, blendshape_names=names) 55 | 56 | def load_components_from_blendshapes(filename): 57 | with h5py.File(filename, 'r') as f: 58 | tris = f['tris'].value 59 | Xmean = f['default'].value 60 | names = sorted(list(set(f.keys()) - set(['tris', 'default']))) 61 | names_fixed = [] 62 | for n in names: 63 | if len(n) > 3: 64 | n = n[4:] 65 | names_fixed.append(n) 66 | components = np.array([ 67 | f[name].value - Xmean 68 | for name in names]) 69 | return components, tris, Xmean, names_fixed 70 | 71 | -------------------------------------------------------------------------------- /cgtools/io/obj.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from os import path 3 | import re 4 | 5 | from . import _fastobj_ext 6 | 7 | 8 | # TODO: use \d in triangle regex instead of [^\/\s] 9 | _triangle_regex = re.compile(r"^f\s+(\d+)\S*\s+(\d+)\S*\s+(\d+)", re.MULTILINE) 10 | _quad_regex = re.compile(r"^f\s+(\d+)\S*\s+(\d+)\S*\s+(\d+)\S*\s+(\d+)", re.MULTILINE) 11 | _triangle_regex_all = re.compile(r"^f\s+([\d]+)/?([\d]*)/?([\d]*)\s+([\d]+)/?([\d]*)/?([\d]*)\s+([\d]+)/?([\d]*)/?([\d]*)", re.MULTILINE) 12 | _quad_regex_all = re.compile(r"^f\s+(\d+)/?(\d+)?/?(\d+)?\s+(\d+)/?(\d+)?/?(\d+)?\s+(\d+)/?(\d+)?/?(\d+)?\s+(\d+)/?(\d+)?/?(\d+)?", re.MULTILINE) 13 | _normal_regex = re.compile("^vn\s+(\S+)\s+(\S+)\s+(\S+)", re.MULTILINE) 14 | _texcoord_regex = re.compile("^vt\s+(\S+)\s+(\S+)", re.MULTILINE) 15 | _vertex_regex = re.compile("^v\s+(\S+)\s+(\S+)\s+(\S+)", re.MULTILINE) 16 | _vertex_regex_with_color = re.compile("^v\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)", re.MULTILINE) 17 | 18 | 19 | def _array_fromregex(buffer, regex, dtype): 20 | # could use np.fromregex but that has a bug in some versions of numpy 21 | # that is caused when called with a compiled regex object 22 | # see 
https://github.com/numpy/numpy/pull/10501 23 | seq = regex.findall(buffer) 24 | return np.array(seq, dtype=dtype) 25 | 26 | 27 | def load_texture_filename_in_obj(filename): 28 | tex_file = None 29 | for line in open(filename): 30 | if line.startswith('mtllib'): 31 | mtl_file = path.join(path.dirname(filename), line.strip().split()[1]) 32 | for mtl_line in open(mtl_file): 33 | if mtl_line.startswith('map_Kd'): 34 | tex_file = mtl_line.strip().split()[1] 35 | return tex_file 36 | 37 | 38 | def load_obj(filename, load_normals=False, load_texcoords=False, load_texture=False, 39 | load_full_face_definitions=False, is_quadmesh='auto', load_colors=False): 40 | """ load a wavefront obj file 41 | loads vertices into a (x,y,z) struct array and vertex indices 42 | into a n x 3 index array 43 | only loads obj files vertex positions and also 44 | only works with triangle or (when is_quadmesh=True) with quad meshes """ 45 | contents = open(filename).read() 46 | if load_colors: 47 | data = _array_fromregex(contents, _vertex_regex_with_color, np.float) 48 | vertices = data[:,:3] 49 | colors = data[:,3:] 50 | else: 51 | vertices = _array_fromregex(contents, _vertex_regex, np.float) 52 | 53 | if load_normals: 54 | normals = _array_fromregex(contents, _normal_regex, np.float) 55 | if load_texcoords: 56 | texcoords = _array_fromregex(contents, _texcoord_regex, np.float) 57 | 58 | reg_quads = _quad_regex_all if load_full_face_definitions else _quad_regex 59 | reg_tris = _triangle_regex_all if load_full_face_definitions else _triangle_regex 60 | if is_quadmesh == 'auto': 61 | quads = _array_fromregex(contents, reg_quads, np.int) - 1 # 1-based indexing in obj file format! 62 | if len(quads) > 0: 63 | faces = quads 64 | else: 65 | faces = _array_fromregex(contents, reg_tris, np.int) - 1 # 1-based indexing in obj file format! 66 | elif is_quadmesh: 67 | quads = _array_fromregex(contents, reg_quads, np.int) - 1 # 1-based indexing in obj file format! 
68 | faces = quads 69 | else: 70 | tris = _array_fromregex(contents, reg_tris, np.int) - 1 # 1-based indexing in obj file format! 71 | faces = tris 72 | 73 | r = [vertices] 74 | if load_normals: 75 | r.append(normals) 76 | if load_texcoords: 77 | r.append(texcoords) 78 | if load_colors: 79 | r.append(colors) 80 | 81 | r.append(faces) 82 | 83 | if load_texture: 84 | from scipy.misc import imread 85 | 86 | tex_file = load_texture_filename_in_obj(filename) 87 | if tex_file is None: 88 | raise IOError("Cannot read texture from %s" % filename) 89 | texture = imread(path.join(path.dirname(filename), tex_file)) 90 | r.append(texture) 91 | 92 | return r 93 | 94 | 95 | def load_obj_fast(filename, *args, **kw): 96 | return _fastobj_ext.load_obj_fast(filename) 97 | 98 | 99 | def save_obj(filename, vertices, faces, normals=None, texcoords=None, texture_file=None): 100 | with open(filename, 'w') as f: 101 | if texture_file is not None: 102 | mtl_file = filename + ".mtl" 103 | f.write("mtllib ./%s\n\n" % path.basename(mtl_file)) 104 | if path.dirname(texture_file) == path.dirname(filename): 105 | texture_file = path.basename(texture_file) 106 | with open(mtl_file, 'w') as mf: 107 | mf.write( 108 | "newmtl material_0\n" 109 | "Ka 0.200000 0.200000 0.200000\n" 110 | "Kd 1.000000 1.000000 1.000000\n" 111 | "Ks 1.000000 1.000000 1.000000\n" 112 | "Tr 1.000000\n" 113 | "illum 2\n" 114 | "Ns 0.000000\n" 115 | "map_Kd %s\n" % texture_file 116 | ) 117 | np.savetxt(f, vertices, fmt="v %f %f %f") 118 | if texcoords is not None: 119 | np.savetxt(f, texcoords, 120 | fmt="vt %f %f") 121 | 122 | if normals is not None: 123 | np.savetxt(f, normals, 124 | fmt="vn %f %f %f") 125 | 126 | if texture_file is not None: 127 | f.write("usemtl material_0\n\n") 128 | 129 | if faces is not None and len(faces) > 0: 130 | if faces.shape[1] in [3, 4]: 131 | face_fmt = 'f ' + ' '.join(['%d'] * faces.shape[1]) 132 | elif faces.shape[1] in [3*3, 3*4]: 133 | face_fmt = 'f ' + ' '.join(['%d/%d/%d' * 
def save_off(filename, vertices=None, faces=None, scalars=None, vmin=None, vmax=None, colors=None):
    """
    Save a mesh in (C)OFF format. Vertex colors can be given directly
    (colors, n x 3/4 rgb(a)) or computed from per-vertex scalars via
    matplotlib's jet colormap (normalized between vmin and vmax).
    """
    if vertices is None:
        vertices = []
    if faces is None:
        faces = []
    has_color = scalars is not None or colors is not None
    with open(filename, 'w') as f:
        f.write("%s\n%d %d 0\n" % (['OFF', 'COFF'][has_color], len(vertices), len(faces)))
        # BUG FIX: these conditions were "> 1", which silently dropped the
        # data for single-vertex / single-face meshes while the header above
        # still announced them, producing corrupt OFF files
        if len(vertices) > 0:
            if has_color:
                if scalars is not None:
                    import matplotlib as mpl
                    import matplotlib.cm as cm

                    norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
                    rgba = cm.ScalarMappable(norm=norm, cmap=cm.jet).to_rgba(scalars)

                elif colors is not None:
                    rgba = colors

                if rgba.dtype != np.uint8:
                    rgba = (rgba * 255).astype(np.uint8)

                np.savetxt(f, np.hstack((vertices, rgba[:, :3])), fmt="%.12f %.12f %.12f %d %d %d")
            else:
                np.savetxt(f, vertices, fmt="%.12f %.12f %.12f")
        if len(faces) > 0:
            for face in faces:
                # OFF face lines are: <n_corners> <i0> <i1> ...
                fmt = " ".join(["%d"] * (len(face) + 1)) + "\n"
                f.write(fmt % ((len(face),) + tuple(map(int, face))))

def load_off(filename, no_colors=False):
    """
    Load an OFF/COFF file. Returns (verts, faces) when no_colors is True,
    otherwise (verts, colors, faces) where colors is None for plain OFF files.
    """
    lines = open(filename, errors='ignore').readlines()
    lines = [line for line in lines if line.strip() != '' and line[0] != '#']
    assert lines[0].strip() in ['OFF', 'COFF'], 'OFF header missing'
    has_colors = lines[0].strip() == 'COFF'
    n_verts, n_faces, _ = list(map(int, lines[1].split()))
    # np.float / np.int aliases were removed from numpy; use the builtins
    vertex_data = np.fromstring(
        ''.join(lines[2:2 + n_verts]),
        sep=' ', dtype=float).reshape(n_verts, -1)
    if n_faces > 0:
        # drop the leading per-face corner count column
        faces = np.fromstring(''.join(lines[2 + n_verts: 2 + n_verts + n_faces]),
                              sep=' ', dtype=int).reshape(n_faces, -1)[:, 1:]
    else:
        faces = None
    if has_colors:
        colors = vertex_data[:, 3:].astype(np.uint8)
        vertex_data = vertex_data[:, :3]
    else:
        colors = None
    if no_colors:
        return vertex_data, faces
    else:
        return vertex_data, colors, faces


def save_pointcloud(filename, points, point_colors=None):
    """
    save a 3d point cloud in PLY format
    points should be given as an array (or list) of 3d positions
    point_colors can be given if desired, as an array (or list) of RGB tuples
    the PLY files can be viewed e.g. using meshlab
    """
    points = np.asanyarray(points)
    # BUG FIX: was "if not points.ndim == 2 and points.shape[-1] == 3", which
    # due to operator precedence only rejected non-2d arrays whose last axis
    # happened to be 3; the intent is to require an (n, 3) array
    if not (points.ndim == 2 and points.shape[-1] == 3):
        raise ValueError("points must be an n x 3 array of 3d point coordinates")
    if point_colors is not None:
        point_colors = np.asanyarray(point_colors)
        if not point_colors.shape == points.shape:
            raise ValueError("point_colors must be an n x 3 array of rgb values with the same size as points")
    header = [
        "ply",
        "format ascii 1.0",
        "element face 0",
        "property list uchar int vertex_indices",
        "element vertex %d" % len(points),
        "property float x",
        "property float y",
        "property float z",
    ]
    if point_colors is not None:
        header += [
            "property uchar diffuse_red",
            "property uchar diffuse_green",
            "property uchar diffuse_blue",
        ]
    with open(filename, 'w') as f:
        f.write('\n'.join(header) + '\nend_header\n')
        if points is not None and len(points) > 0:
            data_channels = np.hsplit(points, 3)
            fmt = "%g %g %g"
            if point_colors is not None:
                data_channels += np.hsplit(point_colors, 3)
                fmt += " %d %d %d"
            data = np.hstack(data_channels)
            np.savetxt(f, data, fmt=fmt)

def load_pointcloud(filename):
    """
    Load a PLY point cloud written by save_pointcloud. Returns the points,
    or (points, colors) if the file declares diffuse color properties.
    """
    with open(filename) as f:
        line = None
        has_color = False
        while line != "end_header":
            line = f.readline().strip()
            if line.startswith("property uchar diff"):
                has_color = True
        data = np.loadtxt(f)
    if has_color:
        return data[:, :3], data[:, 3:]
    else:
        return data
def barycentric_matrix(uv, tris, num_verts):
    """
    Return the sparse barycentric coordinate matrix B such that

        B * verts = verts_new

    where each row of verts_new is the barycentric interpolation, according
    to the (u, v) coordinates in uv, of one triangle from the n x 3 index
    array tris:

        pb = p1 + u * (p2 - p1) + v * (p3 - p1)
           = (1 - u - v) * p1 + u * p2 + v * p3
    """
    u = uv[:, 0]
    v = uv[:, 1]
    # per-triangle weights (w, u, v) for the three corner vertices
    weights = np.column_stack((1 - u - v, u, v))
    row_ix = np.repeat(np.arange(len(weights)), 3)
    col_ix = tris.ravel()
    return sparse.csr_matrix((weights.ravel(), (row_ix, col_ix)),
                             shape=(len(weights), num_verts))


def barycentric_interpolate(verts, tris, uv):
    """
    Compute 3d points from a given set of barycentric coordinates.

    For each triangle (p1, p2, p3) referenced by tris, the interpolated
    point is pb = p1 + u * (p2 - p1) + v * (p3 - p1).
    """
    edge1, edge2 = get_mesh_edges(verts, tris)
    base = verts[tris[:, 0]]
    return base + uv[:, :1] * edge1 + uv[:, 1:2] * edge2
class CatmullClarkSubdiv(object):
    """
    Catmull-Clark subdivision of a pure quad mesh with precomputed sparse
    interpolation operators: construct once from the quad topology, then
    call with per-vertex attributes (e.g. positions) to get the subdivided
    attributes. The subdivided vertex order is: smoothed original vertices,
    then one face point per quad, then one edge point per unique edge —
    matching the indices used in self.quads_hi.
    """

    def __init__(self, quads):
        quads = np.array(quads)
        assert quads.shape[1] == 4
        self._quads_lo = quads
        # assumes quads index vertices 0..max contiguously — TODO confirm
        n_verts = quads.max() + 1

        # build edges
        def mk_edge_key(i, j):
            # canonical (sorted) vertex pair so both windings map to one edge
            return (min(i, j), max(i, j))

        quads_by_edge_ij = defaultdict(list)
        edges_ij = []
        edges_by_quads = defaultdict(lambda: [None] * 4)
        quads_by_vert = defaultdict(list)
        edge_ijs_by_vert = defaultdict(list)  # {vertIndex: [(i1, j1), (i2, j2), ...]}
        for quad_ix, quad in enumerate(quads):
            for k in range(4):
                # construct edge from vertex i to j
                ij = mk_edge_key(quad[k], quad[(k + 1) % 4])
                edges_ij.append(ij)
                quads_by_edge_ij[ij].append(quad_ix)
                # build adjacency
                quads_by_vert[quad[k]].append(quad_ix)
                edge_ijs_by_vert[quad[k]].append(ij)

        # ensure all edges have 2 neighboring faces
        # (i.e. the mesh must be closed / watertight)
        assert all(len(v) == 2 for v in list(quads_by_edge_ij.values()))
        # ensure all quads reference 4 edges
        # NOTE(review): edges_by_quads is never populated above, so this
        # assert is vacuous — confirm whether it was meant to be filled
        assert all(all(i is not None for i in quad) for quad in list(edges_by_quads.values()))

        # make unique edge indices
        edges_ij_uniq = np.unique(edges_ij, axis=0)
        edge_ix_by_ij = inverse_index_dict(list(map(tuple, edges_ij_uniq)))

        # split every quad into 4 child quads:
        # corner vertex -> edge point -> face point -> edge point
        quads_hi = []
        for quad_ix, (i0, i1, i2, i3) in enumerate(quads):
            # face point index
            iF = n_verts + quad_ix
            # edge indices
            ix_offset_edges = n_verts + len(quads)
            e0 = edge_ix_by_ij[mk_edge_key(i0, i1)] + ix_offset_edges
            e1 = edge_ix_by_ij[mk_edge_key(i1, i2)] + ix_offset_edges
            e2 = edge_ix_by_ij[mk_edge_key(i2, i3)] + ix_offset_edges
            e3 = edge_ix_by_ij[mk_edge_key(i3, i0)] + ix_offset_edges
            quads_hi.append((i0, e0, iF, e3))
            quads_hi.append((e0, i1, e1, iF))
            quads_hi.append((iF, e1, i2, e2))
            quads_hi.append((e3, iF, e2, i3))

        self.quads_hi = np.array(quads_hi)

        # We're going to construct sparse matrices that perform the linear interpolation
        # of the new vertices from the old ones, see __call__ on how we can use this.
        # This will induce a certain cost to construct these matrices, but once constructed
        # the interpolation can be done very quickly.

        # construct edge interpolation matrix
        # (edge point = average of the 2 edge endpoints and 2 adjacent face points;
        #  columns >= n_verts refer to face points stacked after the vertices)
        E_triplets = []
        for (i, j), edge_ix in edge_ix_by_ij.items():
            q1, q2 = quads_by_edge_ij[(i, j)]
            E_triplets.append((edge_ix, i, 0.25))
            E_triplets.append((edge_ix, j, 0.25))
            E_triplets.append((edge_ix, q1 + n_verts, 0.25))
            E_triplets.append((edge_ix, q2 + n_verts, 0.25))
        E_i, E_j, E_data = list(zip(*E_triplets))
        self._E_intp = sparse.csr_matrix((E_data, (E_i, E_j)))

        # construct vertex interpolation matrix
        # (classic Catmull-Clark vertex rule: (F + 2R + (n - 3) * orig) / n)
        V_triplets = []
        for vert_ix in range(n_verts):
            n = len(quads_by_vert[vert_ix])
            # put coefficients that compute F
            adj_quads = quads_by_vert[vert_ix]
            for quad_ix in adj_quads:
                V_triplets.append((vert_ix, n_verts + quad_ix, 1 / float(len(adj_quads) * n)))
            # put coefficients that compute 2 * R
            adj_edges = edge_ijs_by_vert[vert_ix]
            for i, j in adj_edges:
                # duplicate vertices will be summed when converting to csr later
                V_triplets.append((vert_ix, i, 1 / float(len(adj_edges) * n)))
                V_triplets.append((vert_ix, j, 1 / float(len(adj_edges) * n)))
            # put coefficients to compute (n - 3) * orig
            V_triplets.append((vert_ix, vert_ix, (n - 3.) / float(n)))

        V_i, V_j, V_data = list(zip(*V_triplets))
        self._V_intp = sparse.csr_matrix((V_data, (V_i, V_j)))

    def __call__(self, attrs):
        """
        Subdivide per-vertex attributes (n_verts x d array) once, returning
        the attributes of the refined mesh indexed by self.quads_hi.
        """
        # interpolate new face points
        face_attrs = attrs[self._quads_lo].mean(axis=1)
        # interpolate new edge points
        edge_pts = self._E_intp * np.vstack((attrs, face_attrs))
        # interpolate old vertices
        attrs_subdiv = self._V_intp * np.vstack((attrs, face_attrs))

        return np.vstack((
            attrs_subdiv, face_attrs, edge_pts
        ))
def merge_meshes(list_of_verts, list_of_faces):
    """
    Concatenate several meshes into one, offsetting each face array so it
    keeps indexing its own vertices. If a single 2d face array is given,
    it is reused for every vertex set.
    """
    if isinstance(list_of_faces, np.ndarray) and list_of_faces.ndim == 2:
        list_of_faces = [list_of_faces] * len(list_of_verts)
    shifted_faces = []
    offset = 0
    for verts, faces in zip(list_of_verts, list_of_faces):
        shifted_faces.append(faces + offset)
        offset += len(verts)
    return np.vstack(list_of_verts), np.vstack(shifted_faces)


def distribute_points(list_of_points, axes=(0, 2), n1=None, pad_factor=1.2, spacing=None):
    """
    Lay out multiple 3d point sets on a regular 1d or 2d grid so they do
    not overlap. Returns (moved point sets, spacing, offsets). The grid
    spacing defaults to the maximum extent of any set times pad_factor.
    """
    if isinstance(axes, int):
        axes = [axes]
    if len(axes) > 2:
        raise ValueError("axes should be a tuple with at most 2 elements")
    if spacing is None:
        extents = [np.ptp(p, axis=0) for p in list_of_points]
        spacing = np.max(extents, axis=0) * pad_factor

    n_sets = len(list_of_points)
    # one coordinate range per world axis; unused axes stay at a single 0
    rng = [[0], [0], [0]]
    if len(axes) == 1:
        n1 = n_sets
    else:
        if n1 is None:
            # roughly square grid
            n1 = int(np.ceil(np.sqrt(n_sets)))
            n2 = int(np.ceil(np.sqrt(n_sets)))
        else:
            n2 = int(np.ceil(n_sets / float(n1)))
        assert n1 * n2 >= n_sets
        rng[axes[1]] = np.arange(0, n2 * spacing[axes[1]], spacing[axes[1]])
    rng[axes[0]] = np.arange(0, n1 * spacing[axes[0]], spacing[axes[0]])

    grids = np.meshgrid(*rng)
    offsets = np.column_stack([g.ravel() for g in grids])[:n_sets]

    moved = [p + o for p, o in zip(list_of_points, offsets)]
    return moved, spacing, offsets


def distribute_meshes(list_of_points, list_of_faces, **kwargs):
    """Distribute meshes on a grid and merge them; returns [verts, faces, spacing, offsets]."""
    distributed, spacing, offsets = distribute_points(list_of_points, **kwargs)
    return list(merge_meshes(distributed, list_of_faces)) + [spacing, offsets]


def duplicate_and_distribute_mesh(verts, faces, n, **kwargs):
    """Place n copies of one mesh on a grid; returns [verts, faces, spacing, offsets]."""
    return distribute_meshes([verts] * n, faces, **kwargs)


class Croud():
    """
    Precomputed grid layout: remembers the offsets from an initial set of
    point clouds so differently-deformed versions of the same meshes can be
    re-distributed consistently.
    """

    def __init__(self, list_of_points, **kwargs_for_distribute):
        _, _, self.offsets = distribute_points(list_of_points, **kwargs_for_distribute)

    def distribute(self, list_of_points, list_of_faces=None):
        """Apply the stored offsets; with faces given, also merge into one mesh."""
        if len(list_of_points) != len(self.offsets):
            raise ValueError("cannot distribute a %d meshes in a croud that was set up with %d meshes" %
                             (len(list_of_points), len(self.offsets)))
        moved = [p + o for p, o in zip(list_of_points, self.offsets)]
        if list_of_faces is None:
            return np.vstack(moved)
        return merge_meshes(moved, list_of_faces)
def defgrads(verts_src, verts_deformed, tris):
    """
    Compute deformation gradients between source and deformed mesh.

    Returns one 3x3 matrix per triangle, mapping the source triangle frame
    onto the deformed triangle frame: D = S1 * inv(S0).
    """
    S0 = get_triangle_frames(verts_src, tris)
    S1 = get_triangle_frames(verts_deformed, tris)
    return matmat(S1, inv3(S0))


def deformation_transfer(verts_src, verts_src_deformed, verts_tgt, tris):
    """
    Deforms vertices given in "verts_tgt" so that their deformation
    matches the deformation seen between "verts_src" and "verts_deformed".

    This method assumes common topology (same triangles) in all 3 meshes.

    Returns the new vertex coordinates.

    Implemented as described in
        "Deformation Transfer for Detail-Preserving Surface Editing"
        Mario Botsch, Robert W. Sumner, Mark Pauly, Markus Gross
        in VMV 2006
    """
    # one-shot convenience wrapper; use DefGradSolver directly to amortize
    # the factorization over many transfers to the same target
    s = DefGradSolver(verts_tgt, tris)
    return s.transfer(verts_src, verts_src_deformed)


class DefGradSolver(object):
    """
    Pre-factorized Poisson solver that reconstructs vertex positions of a
    template mesh from per-triangle deformation gradients.
    """

    def __init__(self, verts_template, tris):
        # verts_template: n x 3 rest-pose vertices; tris: m x 3 indices
        self.v0 = verts_template
        self.tris = tris
        # setup poisson system - see Chapter 5 in Botsch et al. 2006
        L = compute_mesh_laplacian(self.v0, self.tris, weight_type='cotangent',
                                   area_type='lumped_mass', return_vertex_area=False)
        # factor once; reconstruct() then only does back-substitution
        self.solve = factorized(L.tocsc())
        self.div = div_op(self.v0, self.tris)

    def reconstruct(self, defgrads, align_to_template=True):
        """ reconstruct from array of (num_tris, 3, 3), return vertex coordinates """
        # stack transposed gradients into the (3*m) x 3 right-hand side
        rhs = defgrads.transpose(0, 2, 1).reshape(-1, 3)
        # solve poisson system - see Chapter 5 in Botsch et al. 2006
        verts = self.solve(self.div * rhs)
        if align_to_template:
            # the Poisson solution is translation-invariant; re-center on v0
            verts -= (verts - self.v0).mean(axis=0)
        return verts

    def transfer(self, verts_src, verts_src_deformed, **kwargs):
        """
        computes the deformation between verts_src and verts_src_deformed
        and applies this deformation to the verts_template passed to the constructor
        of this DefgradSolver
        """
        # D contains deformation gradients for each triangle: (n_triangles, 3, 3)
        D = defgrads(verts_src, verts_src_deformed, self.tris)
        return self.reconstruct(D, **kwargs)
def signed_distance(verts0, normals0, verts1):
    """
    Euclidean distance between corresponding points in verts0 and verts1,
    with the sign taken from the direction relative to normals0:
    negative values for dimples, positive values for bulges.
    """
    delta = verts0 - verts1
    direction = np.sign(np.sum(normals0 * delta, axis=-1))
    return veclen(delta) * direction


def signed_closest_point_distance(verts0, normals0, verts1, tris1):
    """
    Signed distance from each point in verts0 to its closest point
    on the mesh (verts1, tris1).
    """
    hit_pts = closest_points_on_mesh(verts0, verts1, tris1)[2]
    return signed_distance(verts0, normals0, hit_pts)


def div_op(verts, tris):
    """
    Build the sparse divergence operator of a triangle mesh: div_op * g
    gives the per-vertex divergence of a per-triangle vector field g
    (flattened into a 1d array).
    """
    area_weights = sparse.diags(np.repeat(double_triangle_area(verts, tris), 3))
    G = gradient_op(verts, tris)
    # TODO check if 0.5 factor is correct
    # it is 0.25 in https://github.com/alecjacobson/gptoolbox/blob/master/mesh/div.m
    # but then we don't have the identity L = div * grad
    return -0.5 * G.T * area_weights


def div(verts, tris, g):
    """Apply the divergence operator of mesh (verts, tris) to the field g."""
    return div_op(verts, tris) * g
class GeodesicDistanceComputation(object):
    """
    Computation of geodesic distances on triangle meshes using the heat method from the impressive paper

        Geodesics in Heat: A New Approach to Computing Distance Based on Heat Flow
        Keenan Crane, Clarisse Weischedel, Max Wardetzky
        ACM Transactions on Graphics (SIGGRAPH 2013)

    Example usage:
        $ compute_distance = GeodesicDistanceComputation(vertices, triangles)
        $ distance_of_each_vertex_to_vertex_0 = compute_distance(0)

    """

    def __init__(self, verts, tris, m=1.0):
        self._verts = verts
        self._tris = tris
        # operators that are reused by every distance query
        self._grad = gradient_op(verts, tris)
        self._div = div_op(verts, tris)
        # the three edge vectors of every triangle
        edge01 = verts[tris[:, 1]] - verts[tris[:, 0]]
        edge12 = verts[tris[:, 2]] - verts[tris[:, 1]]
        edge20 = verts[tris[:, 0]] - verts[tris[:, 2]]
        # heat method time step: mean edge length squared, scaled by m
        mean_edge_len = np.mean(list(map(veclen, [edge01, edge12, edge20])))
        t = m * mean_edge_len ** 2
        # pre-factorize both poisson systems
        Lc, vertex_area = compute_mesh_laplacian(verts, tris, area_type='lumped_mass')
        # TODO: could actually compute: Lc = self._div * self._grad
        A = sparse.spdiags(vertex_area, 0, len(verts), len(verts))
        self._factored_AtLc = factorized((A - t * Lc).tocsc())
        self._factored_L = factorized(Lc.tocsc())

    def __call__(self, idx):
        """
        computes geodesic distances to all vertices in the mesh
        idx can be either an integer (single vertex index) or a list of vertex indices
        or an array of bools of length n (with n the number of vertices in the mesh)
        """
        source = np.zeros(len(self._verts))
        source[idx] = 1.0
        # -- heat method, step 1: short heat flow from the source region
        u = self._factored_AtLc(source).ravel()
        # with multiple sources, heat also flows *into* the source region,
        # so clamp the sources back to the constrained value.
        u[idx] = 1
        # NOTE(review): solving an equality-constrained QP that enforces
        # u[idx] == 1 during the solve did not yield a lower error, and it
        # would prevent the pre-factorization done in __init__.
        # -- heat method step 2: compute gradients & normalize
        # additional normalization across triangles helps overall numerical stability
        tri_scale = 1. / (u[self._tris].sum(axis=1))
        grad_u = (self._grad * u).reshape(-1, 3) * tri_scale[:, np.newaxis]
        # normalize the gradient field; degenerate gradients become zero
        with np.errstate(all='ignore'):
            X = grad_u / veclen(grad_u)[:, np.newaxis]
        X = np.nan_to_num(X, copy=False)
        # -- heat method step 3: solve the poisson system div X = laplace phi
        phi = self._factored_L(self._div * X.ravel()).ravel()
        # transform to distances: shift/flip so the source sits at distance 0
        phi = phi - phi.min()
        phi = phi.max() - phi
        return phi


# ---------------------------------------------------------------------------
# cgtools/mesh/gradient.py
# ---------------------------------------------------------------------------
import numpy as np
from scipy import sparse

from .. import vector as V


def gradient_op(verts, tris):
    """Build the sparse operator G that computes the per-triangle gradient
    of a scalar function f defined at the vertices of a triangular mesh:

        (G * f).reshape(-1, 3)

    is the gradient of f on the mesh.

    The gradient is computed according to:
        grad f = 1/2A * normal x (f_i * e_jk + f_j * e_ki + f_k * e_ij)
    For derivation see http://dgd.service.tu-berlin.de/wordpress/vismathws10/2012/10/17/gradient-of-scalar-functions/
    or in Crane et al. 2013 "Geodesics in Heat" (Sec. 3.2).

    Arguments:
        verts -- vertices, shape (n, 3)
        tris -- index array of triangles, shape (m, 3)

    Returns:
        sparse.csr_matrix -- sparse matrix G with shape (3*m, n)
    """
    n_verts = verts.shape[0]
    n_tris = tris.shape[0]
    # aliases for the corner indices of each triangle
    i, j, k = tris[:, 0], tris[:, 1], tris[:, 2]
    e_ij = verts[j] - verts[i]
    e_jk = verts[k] - verts[j]
    e_ki = verts[i] - verts[k]
    normal = np.cross(e_ij, e_jk)
    # squared length of the (unnormalized) normal == (2 * area)^2;
    # dividing the raw cross products below by it realizes the 1/2A factor
    double_area_sq = V.sq_veclen(normal)
    # each triangle owns 3 consecutive rows (x/y/z of its gradient),
    # every row has 3 entries - one per corner:
    rows = np.repeat(np.arange(3 * n_tris), 3)
    cols = np.tile(tris, 3).ravel()
    # entries: cross product of the normal with the edge opposite each corner
    vals = (np.dstack((
        np.cross(normal, e_jk),   # vertex i
        np.cross(normal, e_ki),   # vertex j
        np.cross(normal, e_ij),   # vertex k
    )) / double_area_sq[:, np.newaxis, np.newaxis]).ravel()
    return sparse.csr_matrix((vals, (rows, cols)),
                             shape=(3 * n_tris, n_verts))


def gradient(verts, tris, f):
    """Per-triangle gradient of the per-vertex scalar function f, shape (m, 3)."""
    return (gradient_op(verts, tris) * f).reshape(-1, 3)


# NOTE: alternative formulations exist, e.g.
#   https://github.com/alecjacobson/gptoolbox/blob/master/mesh/grad.m
#   http://www.hao-li.com/cs599-ss2015/slides/Lecture04.1.pdf
# Both were tested and agree with the implementation above up to numerical
# precision.


if __name__ == "__main__":
    from os import path

    from mayavi import mlab
    from scipy.sparse.linalg import eigsh

    from ..io import load_mesh
    from ..vis.mesh import vismesh
    from .laplacian import compute_mesh_laplacian

    verts, tris = load_mesh(path.join(path.dirname(__file__), 'bunny_2503.obj'))

    # we need an example function defined on the surface -
    # take an eigenvector of the laplacian of the mesh
    L, va = compute_mesh_laplacian(verts, tris)
    _, eigvecs = eigsh(-L, M=sparse.diags(va), k=64, sigma=0)
    f = eigvecs[:, -1]

    # compute its gradient and visualize
    grad_f = gradient(verts, tris, f)

    vismesh(verts, tris, scalars=f)

    midpts = verts[tris].mean(axis=1)
    mlab.quiver3d(
        midpts[:, 0], midpts[:, 1], midpts[:, 2],
        grad_f[:, 0], grad_f[:, 1], grad_f[:, 2],
        mode='2darrow', color=(0, 0, 0), line_width=1,
    )

    mlab.show()
# ---------------------------------------------------------------------------
# cgtools/mesh/intersections.py
# ---------------------------------------------------------------------------
from ._intersections_ext import ray_mesh_intersect, closest_points_on_mesh, ray_mesh_intersect_fast


# ---------------------------------------------------------------------------
# cgtools/mesh/laplacian.py
# ---------------------------------------------------------------------------
import numpy as np
import scipy.sparse as sparse

from .. import vector as V


def compute_mesh_laplacian(verts, tris, weight_type='cotangent',
                           return_vertex_area=True, area_type='mixed',
                           add_diagonal=True):
    """
    computes a sparse matrix representing the discretized laplace-beltrami
    operator of the mesh given by n vertex positions ("verts") and m
    triangles ("tris")

    verts: (n, 3) array (float)
    tris: (m, 3) array (int) - indices into the verts array
    weight_type: either 'mean_value', 'uniform' or 'cotangent' (default)
    return_vertex_area: whether to additionally return the area per vertex
    area_type: can be 'mixed' or 'lumped_mass'

    if weight_type == 'cotangent':
        computes the conformal weights ("cotangent weights"), ie:
        w_ij = - .5 * (cot \alpha + cot \beta)

    if weight_type == 'mean_value':
        computes mean value coordinates:
        w_ij = - (tan(theta1_ij / 2) + tan(theta2_ij / 2)) / || v_i - v_j ||

    if weight_type == 'uniform':
        w_ij = - 1

    for all weight types:
        w_ii = sum(w_ij for j in [1..n])

    if area_type == 'mixed':
        vertex area is the voronoi area for non-obtuse triangles and the
        barycentric area for obtuse triangles (Mark Meyer's 2002 paper)

    if area_type == 'lumped_mass':
        vertex area is the area of the adjacent triangles, equally divided
        among their 3 corners

    See:
        Olga Sorkine, "Laplacian Mesh Processing"
        Mark Meyer et al., "Discrete Differential-Geometry Operators for Triangulated 2-Manifolds"
    and, for a theoretical comparison of the discretizations,
        Max Wardetzky et al., "Discrete Laplace operators: No free lunch"

    returns matrix L that computes the laplacian coordinates, e.g. L * x = delta
    """
    if area_type not in ['mixed', 'lumped_mass']:
        raise ValueError('unknown area type: %s' % area_type)
    if weight_type not in ['cotangent', 'mean_value', 'uniform']:
        raise ValueError('unknown weight type: %s' % weight_type)

    n = len(verts)
    # we consider the triangle P, Q, R ...
    iP, iQ, iR = tris[:, 0], tris[:, 1], tris[:, 2]
    # ... and its three edges
    PQ = verts[iP] - verts[iQ]  # P--Q
    QR = verts[iQ] - verts[iR]  # Q--R
    RP = verts[iR] - verts[iP]  # R--P
    if weight_type == 'cotangent' or (return_vertex_area and area_type == 'mixed'):
        # cotangent of the interior angle at each of the 3 corners
        double_area = V.veclen(np.cross(PQ, RP))
        cotP = -1 * (PQ * RP).sum(axis=1) / double_area  # angle at vertex P
        cotQ = -1 * (QR * PQ).sum(axis=1) / double_area  # angle at vertex Q
        cotR = -1 * (RP * QR).sum(axis=1) / double_area  # angle at vertex R

    # assemble the off-diagonal weights
    if weight_type == 'cotangent':
        rows = np.concatenate((  iP,   iR,   iP,   iQ,   iQ,   iR))
        cols = np.concatenate((  iR,   iP,   iQ,   iP,   iR,   iQ))
        vals = 0.5 * np.concatenate((cotQ, cotQ, cotR, cotR, cotP, cotP))

    elif weight_type == 'mean_value':
        # TODO: this branch has not been verified yet
        PQlen = 1 / V.veclen(PQ)
        QRlen = 1 / V.veclen(QR)
        RPlen = 1 / V.veclen(RP)
        PQn = PQ * PQlen[:, np.newaxis]  # normalized edges
        QRn = QR * QRlen[:, np.newaxis]
        RPn = RP * RPlen[:, np.newaxis]
        # TODO pretty sure there is a simpler solution to those 3 formulas
        tP = np.tan(0.5 * np.arccos((PQn * -RPn).sum(axis=1)))
        tQ = np.tan(0.5 * np.arccos((-PQn * QRn).sum(axis=1)))
        tR = np.tan(0.5 * np.arccos((RPn * -QRn).sum(axis=1)))
        rows = np.concatenate((  iP,   iP,   iQ,   iQ,   iR,   iR))
        cols = np.concatenate((  iQ,   iR,   iP,   iR,   iP,   iQ))
        vals = np.concatenate((tP*PQlen, tP*RPlen, tQ*PQlen, tQ*QRlen, tR*RPlen, tR*QRlen))

    elif weight_type == 'uniform':
        # this might add an edge twice to the matrix, but prevents the
        # problem of boundary edges going only in one direction - the
        # duplicates are collapsed after the matrix is constructed
        rows = np.concatenate((iP, iQ, iQ, iR, iR, iP))
        cols = np.concatenate((iQ, iP, iR, iQ, iP, iR))
        vals = np.ones(len(tris) * 6)

    # construct the sparse matrix; duplicate (i, j) entries are summed,
    # which is explicitly assumed by the code above
    L = sparse.csr_matrix((vals, (rows, cols)), shape=(n, n))
    if weight_type == 'uniform':
        # edges may have been added in both directions and summed to 2 by
        # the csr_matrix constructor - reset all weights to 1
        L.data[:] = 1
    # add diagonal entries as the (negated) sum across each row
    if add_diagonal:
        L = L - sparse.spdiags(L * np.ones(n), 0, n, n)

    if return_vertex_area:
        if area_type == 'mixed':
            # voronoi cell areas at the 3 corners
            aP = 1/8. * (cotR * (PQ**2).sum(axis=1) + cotQ * (RP**2).sum(axis=1))  # area at point P
            aQ = 1/8. * (cotR * (PQ**2).sum(axis=1) + cotP * (QR**2).sum(axis=1))  # area at point Q
            aR = 1/8. * (cotQ * (RP**2).sum(axis=1) + cotP * (QR**2).sum(axis=1))  # area at point R
            # replace by barycentric areas for obtuse triangles
            # TODO area computed previously in cotangent formula, reuse it here?
            triangle_area = .5 * V.veclen(np.cross(PQ, RP))
            for corner, cot_at_corner in enumerate([cotP, cotQ, cotR]):
                is_x_obtuse = cot_at_corner < 0  # obtuse at this corner?
                # TODO: the paper by Desbrun says that we should divide by 1/2 or 1/4,
                # but according to other code I found we should divide by 1 or 1/2 -
                # check which scheme is correct!
                aP[is_x_obtuse] = triangle_area[is_x_obtuse] * (1 if corner == 0 else 1/2.)
                aQ[is_x_obtuse] = triangle_area[is_x_obtuse] * (1 if corner == 1 else 1/2.)
                aR[is_x_obtuse] = triangle_area[is_x_obtuse] * (1 if corner == 2 else 1/2.)
            area = np.bincount(iP, aP, minlength=n) + \
                np.bincount(iQ, aQ, minlength=n) + np.bincount(iR, aR, minlength=n)

        elif area_type == 'lumped_mass':
            lump_area = V.veclen(np.cross(PQ, RP)) / 6.
            area = sum(np.bincount(tris[:, i], lump_area, minlength=n) for i in range(3))

        return L, area
    else:
        return L


# ---------------------------------------------------------------------------
# cgtools/mesh/topology.py (part 1)
# ---------------------------------------------------------------------------
from collections import defaultdict
import numpy as np
from scipy import sparse
from scipy.sparse.csgraph import connected_components

from .. import vector as V
from ..indexing import filter_reindex, take_reindex
from ._igl_ext import is_border_vertex


def get_mesh_edges(verts, tris):
    """Return the two spanning edge vectors (v1 - v0, v2 - v0) of every triangle."""
    u = verts[tris[:, 1]] - verts[tris[:, 0]]
    v = verts[tris[:, 2]] - verts[tris[:, 0]]
    return u, v

edges = get_mesh_edges


def get_vertex_rings(tris):
    """Map each vertex index to the list of its one-ring neighbor vertices."""
    # TODO: can we achieve this quicker with scipy.sparse?
    ring_by_vertex_id = defaultdict(list)
    for a, b in get_edges_from_triangles(tris):
        ring_by_vertex_id[a].append(b)
        ring_by_vertex_id[b].append(a)
    return ring_by_vertex_id
# ---------------------------------------------------------------------------
# cgtools/mesh/topology.py (part 2)
# ---------------------------------------------------------------------------
def triangle_triangle_adjacency_list(tris):
    """
    determines triangle-triangle adjacency just from a list of triangles which are given as i,j,k index tuples
    returns an array of shape (len(edges), 2), each tuple is a pair of indices of neighboring triangles
    (in no particular order)
    """
    tri_by_edge = defaultdict(list)
    for tri_index, (i, j, k) in enumerate(tris):
        # key every edge by its sorted vertex pair so both windings match
        tri_by_edge[(min(i, j), max(i, j))].append(tri_index)
        tri_by_edge[(min(j, k), max(j, k))].append(tri_index)
        tri_by_edge[(min(k, i), max(k, i))].append(tri_index)
    # an interior edge is shared by exactly 2 triangles
    return np.array([ts for ts in tri_by_edge.values() if len(ts) == 2])


def get_edges_from_triangles(tris, directed=False):
    """
    Returns the unique edges as an array of shape (n_edges, 2)
    containing, for each edge, 2 indices of the vertices of each edge.

    If directed = True (default=False), every edge is returned in both
    directions, e.g. if there is an edge between vertex i and j, the
    returned array will include (i, j) as well as (j, i).

    >>> tris = [[0, 1, 2], [2, 1, 3], [3, 1, 4]]
    >>> sorted(get_edges_from_triangles(tris).tolist())
    [[0, 1], [0, 2], [1, 2], [1, 3], [1, 4], [2, 3], [3, 4]]
    >>> sorted(get_edges_from_triangles(tris, directed=True).tolist())
    [[0, 1], [0, 2], [1, 0], [1, 2], [1, 3], [1, 4], [2, 0], [2, 1], [2, 3], [3, 1], [3, 2], [3, 4], [4, 1], [4, 3]]
    """
    tris = np.array(tris)
    all_edges = tris[:, [[0, 1], [1, 0], [1, 2], [2, 1], [2, 0], [0, 2]]].reshape((-1, 2))
    A = sparse.coo_matrix((np.ones(len(all_edges)), all_edges.T))
    # convert to CSR to merge duplicate COO entries; without this, edges
    # shared by multiple triangles were reported multiple times when
    # directed=True (coo_matrix.nonzero does not merge duplicates)
    A = A.tocsr()
    if not directed:
        A = sparse.triu(A, format='csr')
    return np.column_stack(A.nonzero())


def edge_difference_matrix(tris, directed=False):
    """Sparse (n_edges, n_verts) operator computing f[j] - f[i] for every edge (i, j)."""
    edges_ij = get_edges_from_triangles(tris, directed=directed)
    return sparse.csr_matrix(
        (np.tile([-1, 1], len(edges_ij)),
         (np.repeat(np.arange(len(edges_ij)), 2), edges_ij.ravel())))


def edge_adjacency_matrix(tris, n_verts=None):
    """
    Returns a scipy.sparse.csr_matrix of size n_verts x n_verts
    where element (i, j) is one if vertex i is connected to vertex j.

    If n_verts is None, it is determined automatically from the triangle array.
    """
    if n_verts is None:
        n_verts = tris.max() + 1
    ij = np.r_[np.c_[tris[:, 0], tris[:, 1]],
               np.c_[tris[:, 0], tris[:, 2]],
               np.c_[tris[:, 1], tris[:, 2]]]
    A = sparse.csr_matrix(
        (np.ones(len(ij)), ij.T),
        shape=(n_verts, n_verts))
    # symmetrize and collapse summed duplicates back to 1
    A = A.T + A
    A.data[:] = 1
    return A


def vertex_triangle_adjacency_matrix(tris, n_verts=None):
    """
    Returns a scipy.sparse.csr_matrix of size n_verts x n_tris
    where element (i, j) is one if vertex i belongs to triangle j.

    If n_verts is None, it is determined automatically from the triangle array.
    """
    if n_verts is None:
        n_verts = tris.max() + 1
    vert_ix = tris.ravel()
    tri_ix = np.repeat(np.arange(len(tris)), 3)
    # pass the shape explicitly: previously n_verts was computed but never
    # used, so trailing unreferenced vertices were silently dropped
    A = sparse.csr_matrix(
        (np.ones(3 * len(tris)), (vert_ix, tri_ix)),
        shape=(n_verts, len(tris)))
    A.data[:] = 1
    return A


def largest_connected_component(tris):
    """
    Returns vertex indices of the largest connected component of the mesh
    as well as a reindexed triangle array, e.g. use it like so:

        ix, tris_new = largest_connected_component(tris)
        show_mesh(verts[ix], tris_new)
    """
    _, labels = connected_components(edge_adjacency_matrix(tris), directed=False)
    largest = np.bincount(labels).argmax()
    mask = labels == largest
    vertex_indices = mask.nonzero()[0]
    tris_new = filter_reindex(mask, tris)
    return vertex_indices, tris_new


def get_per_triangle_normals(verts, tris, edges_uv=None):
    """Unit normal of every triangle; pass precomputed edge vectors via edges_uv to save work."""
    if edges_uv is None:
        u, v = get_mesh_edges(verts, tris)
    else:
        u, v = edges_uv
    return V.normalized(np.cross(u, v))


def get_triangle_frames(verts, tris, normals=None):
    """Per-triangle (3, 3) frames built from the two edge vectors and the normal."""
    u, v = get_mesh_edges(verts, tris)
    # need the normal of each triangle
    normals = normals if normals is not None else get_per_triangle_normals(verts, tris, edges_uv=(u, v))
    return np.dstack((u, v, normals))  # swapaxes???


def get_vertex_areas(verts, tris):
    """Lumped-mass vertex areas: each triangle's area equally divided among its 3 corners."""
    PQ = verts[tris[:, 0]] - verts[tris[:, 1]]
    RP = verts[tris[:, 2]] - verts[tris[:, 0]]
    lump_area = V.veclen(np.cross(PQ, RP)) / 6.
    area = sum(np.bincount(tris[:, i], lump_area, minlength=len(verts)) for i in range(3))
    return area


def triangle_normal(verts, tris):
    """Unnormalized normal of every triangle (length == double area)."""
    u, v = edges(verts, tris)
    return np.cross(u, v)


def double_triangle_area(verts, tris):
    """Twice the area of every triangle."""
    return V.veclen(triangle_normal(verts, tris))


def triangle_area(verts, tris):
    """Area of every triangle."""
    return double_triangle_area(verts, tris) / 2.


def quads_to_tris(quads):
    """Split each quad (i0, i1, i2, i3) into triangles (i0, i1, i2), (i0, i2, i3)."""
    if quads.shape[1] == 3:  # already triangles?
        return quads
    return quads[:, [[0, 1, 2], [0, 2, 3]]].reshape(-1, 3)


def filter_triangles(vert_ix_or_mask, tris):
    """
    Keep only triangles whose vertices survive the given vertex selection
    (boolean mask or index array) and reindex them accordingly.
    """
    vert_ix_or_mask = np.asarray(vert_ix_or_mask)
    # np.bool was removed in numpy >= 1.24; the builtin bool is the
    # equivalent dtype to compare against
    fn = filter_reindex if vert_ix_or_mask.dtype == bool else take_reindex
    return fn(vert_ix_or_mask, tris)


class ReorderByFaces(object):
    """
    Computes a vertex reordering from two index buffers referencing the same
    faces (faces0 in the target indexing, faces1 in the source indexing);
    the resulting callable applies that reordering to per-vertex arrays.
    """
    def __init__(self, faces0, faces1):
        self.ji = defaultdict(set)
        max_i = 0
        for face0, face1 in zip(faces0, faces1):
            for i, j in zip(face0, face1):
                self.ji[j].add(i)
                max_i = max(max_i, i)
        self._sz = max_i + 1

    def __call__(self, array):
        array_reordered = np.zeros((self._sz,) + array.shape[1:])
        # dict.iteritems() is python 2 only - use items() on python 3
        for j, ixs in self.ji.items():
            for i in ixs:
                # TODO: duplicating values here - is that ok or should we warn when those values are not consistent?
                array_reordered[i] = array[j]
        return array_reordered


def reorder_by_faces(faces0, faces1, array):
    """Convenience wrapper around ReorderByFaces for a single array."""
    return ReorderByFaces(faces0, faces1)(array)


# ---------------------------------------------------------------------------
# cgtools/neighbors.py
# (public API: closest_points, optimal_permutation)
# ---------------------------------------------------------------------------
import numpy as np
from scipy.spatial import cKDTree
from scipy.spatial.distance import cdist


def closest_points(from_pts, to_pts):
    """Index into to_pts of the nearest neighbor of each point in from_pts."""
    return cKDTree(to_pts).query(from_pts)[1]


def optimal_permutation(from_pts, to_pts, distance_metric='euclidean'):
    """
    Compute optimal matching between point sets,
    so that any point in from_pts will be matched to at most one point in to_pts
    and vice-versa.

    Returns the matching as an array of shape (n, 2) with pairs of indices
    for all correspondences,
    e.g. [(0, 2), (3, 4)] means point 0 in from_pts matches point 2 in to_pts
    and point 3 in from_pts matches point 4 in to_pts
    """
    # networkx is only needed here - import lazily so the rest of the
    # module can be used without it installed
    import networkx as nx
    # build distance matrix
    D = cdist(from_pts, to_pts, distance_metric)
    Dmax = D.max()
    # construct bipartite graph and compute max weighted matching
    g = nx.Graph()
    for i in range(from_pts.shape[0]):
        for j in range(to_pts.shape[0]):
            w = Dmax - D[i, j]
            g.add_edge(('from', i), ('to', j), weight=w)
    mates = nx.max_weight_matching(g, True)
    # read out the permutation, normalizing the pair orientation
    ij = []
    for (which1, ix1), (which2, ix2) in mates:
        if which1 == 'from':
            ij.append((ix1, ix2))
        else:
            ij.append((ix2, ix1))
    return np.array(ij)
# ---------------------------------------------------------------------------
# cgtools/procrustes.py
# ---------------------------------------------------------------------------
import numpy as np


def procrustes3d(frompts, topts, allow_reflection=False):
    """
    Finds a rigid body transformation M that moves points in frompts to the points in topts,
    that is, it finds a rigid body motion [ R | t ] with R \in SO(3),
    returned as a 4x4 matrix.

    This algorithm first approximates the rotation by solving
    the orthogonal procrustes problem on the centered point sets.
    """
    # center both point sets
    t0 = frompts.mean(0)
    t1 = topts.mean(0)
    R = procrustes(frompts - t0, topts - t1, allow_reflection=allow_reflection)
    T0 = np.eye(4)
    T0[:3, :3] = R
    # translation that maps the centroid of frompts onto the centroid of topts
    T0[:3, 3] = t1 - np.dot(R, t0)
    return T0


def procrustes(frompts, topts, allow_reflection=False):
    """
    Finds an orthogonal rotation R \in SO(N)
    that aligns the array of N-dimensional frompts with topts.

    Solves the orthogonal procrustes problem. Works for any dimension N
    (the reflection correction used to be hardcoded for N == 3).
    """
    M = np.dot(topts.T, frompts)
    U, s, Vt = np.linalg.svd(M)
    if allow_reflection:
        R = U.dot(Vt)
    else:
        # if the best orthogonal matrix would be a reflection (det == -1),
        # flip the least-significant singular direction to stay in SO(N)
        d = np.sign(np.linalg.det(np.dot(Vt.T, U.T)))
        E = np.eye(len(M))
        E[-1, -1] = d
        R = np.dot(U, np.dot(E, Vt))
    return R


def rigid_align(frompts, topts, allow_reflection=False):
    """Applies the optimal rigid body motion that maps frompts onto topts."""
    # lazy import: avoids pulling in the vector module until actually needed
    from . import vector as V
    M = procrustes3d(frompts, topts, allow_reflection=allow_reflection)
    return V.transform(frompts, M)


# ---------------------------------------------------------------------------
# cgtools/skinning.py (part 1)
# ---------------------------------------------------------------------------
def rbm_to_dualquat(rbm):
    """Convert a 4x4 (or 3x4) rigid body matrix into a dual quaternion (8-vector:
    4 rotation components followed by 4 dual/translation components)."""
    import cgkit.cgtypes as cg
    q0 = cg.quat().fromMat(cg.mat3(rbm[:3, :3].T.tolist()))
    q0 = q0.normalize()
    q0 = np.array([q0.w, q0.x, q0.y, q0.z])
    t = rbm[:3, 3]
    # dual part: 0.5 * t * q0 (quaternion product with a pure translation quaternion)
    q1 = np.array([
        -0.5*( t[0]*q0[1] + t[1]*q0[2] + t[2]*q0[3]),
         0.5*( t[0]*q0[0] + t[1]*q0[3] - t[2]*q0[2]),
         0.5*(-t[0]*q0[3] + t[1]*q0[0] + t[2]*q0[1]),
         0.5*( t[0]*q0[2] - t[1]*q0[1] + t[2]*q0[0])])
    return np.array(q0.tolist() + q1.tolist())


def dualquats_to_rbms(blendq):
    """Convert an array of (possibly unnormalized) dual quaternions, shape (n, 8),
    into an array of 4x4 rigid body matrices, shape (n, 4, 4)."""
    qn = blendq[:, :4]  # non-dual (rotation) part
    qd = blendq[:, 4:]  # dual (translation) part
    # squared norm of the rotation part - dividing by it at the end
    # normalizes unnormalized (e.g. blended) dual quaternions
    len2 = np.sum(qn**2, axis=1)
    w, x, y, z = qn[:, 0], qn[:, 1], qn[:, 2], qn[:, 3]
    t0, t1, t2, t3 = qd[:, 0], qd[:, 1], qd[:, 2], qd[:, 3]
    M = np.empty((len(blendq), 4, 4))
    M[:, 0, 0] = w*w + x*x - y*y - z*z
    M[:, 0, 1] = 2*x*y - 2*w*z
    M[:, 0, 2] = 2*x*z + 2*w*y
    M[:, 1, 0] = 2*x*y + 2*w*z
    M[:, 1, 1] = w*w + y*y - x*x - z*z
    M[:, 1, 2] = 2*y*z - 2*w*x
    M[:, 2, 0] = 2*x*z - 2*w*y
    M[:, 2, 1] = 2*y*z + 2*w*x
    M[:, 2, 2] = w*w + z*z - x*x - y*y
    M[:, 0, 3] = -2*t0*x + 2*w*t1 - 2*t2*z + 2*y*t3
    M[:, 1, 3] = -2*t0*y + 2*t1*z - 2*x*t3 + 2*w*t2
    M[:, 2, 3] = -2*t0*z + 2*x*t2 + 2*w*t3 - 2*t1*y
    M[:, 3] = 0
    M[:, 3, 3] = len2
    M /= len2[:, np.newaxis, np.newaxis]
    return M
# ---------------------------------------------------------------------------
# cgtools/vector.py (visible part)
# ---------------------------------------------------------------------------
def dot(u, v):
    """Row-wise dot product along the last axis.
    NOTE(review): the original def line is cut off in this view; the body is
    symmetric in its two arguments, so the reconstructed signature is safe.

    >>> dot([1, 0], [0, 1])
    0
    >>> dot([1, 1], [2, 3])
    5
    >>> dot([[1, 0], [1, 1]], [[0, 1], [2, 3]]).tolist()
    [0, 5]
    """
    return np.sum(ARR(u)*ARR(v), axis=-1)


def project(v, u):
    """ project v onto u """
    u_norm = normalized(u)
    return (dot(v, u_norm)[..., np.newaxis] * u_norm)


def homogenize(v, value=1):
    """ returns v as homogeneous vectors by inserting one more element into the last axis
    the parameter value defines which value to insert (meaningful values would be 0 and 1)
    >>> homogenize([1, 2, 3]).tolist()
    [1, 2, 3, 1]
    >>> homogenize([1, 2, 3], 9).tolist()
    [1, 2, 3, 9]
    >>> homogenize([[1, 2], [3, 4]]).tolist()
    [[1, 2, 1], [3, 4, 1]]
    >>> homogenize([[1, 2], [3, 4]], 99).tolist()
    [[1, 2, 99], [3, 4, 99]]
    >>> homogenize([[1, 2], [3, 4]], [33, 99]).tolist()
    [[1, 2, 33], [3, 4, 99]]
    """
    v = ARR(v)
    if hasattr(value, '__len__'):
        return np.append(v, ARR(value).reshape(v.shape[:-1] + (1,)), axis=-1)
    else:
        return np.insert(v, v.shape[-1], np.array(value, v.dtype), axis=-1)

# just some handy aliases
hom = homogenize


def dehomogenize(a):
    """ makes homogeneous vectors inhomogenious by dividing by the last element in the last axis
    >>> dehomogenize([1, 2, 4, 2]).tolist()
    [0.5, 1.0, 2.0]
    >>> dehomogenize([[1, 2], [4, 4]]).tolist()
    [[0.5], [1.0]]
    """
    # np.asfarray was removed in numpy 2.0; asarray with float64 is the
    # exact equivalent (asfarray always promoted to float64 by default)
    a = np.asarray(a, dtype=np.float64)
    return a[..., :-1] / a[..., -1:]

# just some handy aliases
dehom = dehomogenize


def ensure_dim(a, dim, value=1):
    """
    checks if an array of vectors has dimension dim, and if not,
    adds one dimension with values set to value (default 1)
    """
    cdim = a.shape[-1]
    if cdim == dim - 1:
        return homogenize(a, value=value)
    elif cdim == dim:
        return a
    else:
        raise ValueError('vectors had %d dimensions, but expected %d or %d' % (cdim, dim-1, dim))


def hom4(a, value=1):
    """Ensure 4-dimensional (homogeneous 3d) vectors."""
    return ensure_dim(a, 4, value)


def hom3(a, value=1):
    """Ensure 3-dimensional (homogeneous 2d) vectors."""
    return ensure_dim(a, 3, value)


def transform(v, M, w=1):
    """
    transforms vectors in v with the matrix M
    if matrix M has one more dimension then the vectors
    this will be done by homogenizing the vectors
    (with the last dimension filled with w) and
    then applying the transformation
    """
    if M.shape[-1] == v.shape[-1] + 1:
        # fix: w was documented but previously never passed to hom()
        v1 = matvec(M, hom(v, w))
        if v1.shape[-1] == v.shape[-1] + 1:
            v1 = dehom(v1)
        return v1
    else:
        return matvec(M, v)


def toskewsym(v):
    """Skew-symmetric cross-product matrix of a 3d vector v."""
    assert v.shape == (3,)
    return np.array([[0, -v[2], v[1]],
                     [v[2], 0, -v[0]],
                     [-v[1], v[0], 0]])


def convert_3x4_to_4x4(matrices, new_row=[0, 0, 0, 1]):
    """
    Turn a 3x4 matrix or an array of 3x4 matrices into 4x4 matrices
    by appending the given row (default [0, 0, 0, 1]).
    >>> A = np.zeros((3, 4))
    >>> convert_3x4_to_4x4(A).shape
    (4, 4)
    >>> convert_3x4_to_4x4(A)[3].tolist()
    [0.0, 0.0, 0.0, 1.0]
    >>> many_A = np.random.random((10, 20, 3, 4))
    >>> many_A_4x4 = convert_3x4_to_4x4(many_A)
    >>> many_A_4x4.shape
    (10, 20, 4, 4)
    >>> many_A_4x4[2, 1, 3].tolist()
    [0.0, 0.0, 0.0, 1.0]
    >>> np.all(many_A_4x4[:, :, :3, :] == many_A)
    True
    """
    assert matrices.shape[-1] == 4 and matrices.shape[-2] == 3
    return np.insert(matrices, 3, new_row, axis=-2)

to_4x4 = convert_3x4_to_4x4


def assemble_3x4(rotations, translations):
    """
    Given one (or more) 3x3 matrices and one (or more) 3d vectors,
    create an array of 3x4 matrices

    >>> rots = np.arange(9).reshape(3, 3)
    >>> ts = np.array([99, 88, 77])
    >>> assemble_3x4(rots, ts)
    array([[ 0,  1,  2, 99],
           [ 3,  4,  5, 88],
           [ 6,  7,  8, 77]])

    >>> rots = np.random.random((100, 3, 3))
    >>> ts = np.random.random((100, 3))
    >>> Ms = assemble_3x4(rots, ts)
    >>> Ms.shape
    (100, 3, 4)
    >>> np.all(Ms[:, :, :3] == rots)
    True
    >>> np.all(Ms[:, :, 3] == ts)
    True
    """
    translations = np.asarray(translations)
    rotations = np.asarray(rotations)
    if rotations.ndim not in [2, 3] or rotations.shape[-2:] != (3, 3):
        raise ValueError("requires rotations argument to be one or more 3x3 matrices, so the shape should be either (3, 3) or (n, 3, 3)")
    if translations.ndim not in [1, 2] or translations.shape[-1] != 3:
        raise ValueError("requires translations argument to be one or more 3d vectors, so the shape should be either (3,) or (n, 3)")
    if rotations.ndim == 2 and translations.ndim == 1:
        # single translation, single rotation -> output single matrix
        return np.column_stack((rotations, translations))
    else:
        # broadcast a single rotation/translation against the batched other
        if rotations.ndim == 2:
            rotations = rotations[np.newaxis]
        if translations.ndim == 1:
            translations = translations[np.newaxis]
        translations = translations[:, :, np.newaxis]
        return np.concatenate((rotations, translations), axis=-1)


def assemble_4x4(rotations, translations, new_row=[0, 0, 0, 1]):
    """
    Given one (or more) 3x3 matrices and one (or more) 3d vectors,
    create an array of 4x4 matrices

    >>> rots = np.arange(9).reshape(3, 3)
    >>> ts = np.array([99, 88, 77])
    >>> assemble_4x4(rots, ts)
    array([[ 0,  1,  2, 99],
           [ 3,  4,  5, 88],
           [ 6,  7,  8, 77],
           [ 0,  0,  0,  1]])

    >>> rots = np.random.random((100, 3, 3))
    >>> ts = np.random.random((100, 3))
    >>> Ms = assemble_4x4(rots, ts)
    >>> Ms.shape
    (100, 4, 4)
    >>> np.all(Ms[:, :3, :3] == rots)
    True
    >>> np.all(Ms[:, :3, 3] == ts)
    True
    >>> np.all(Ms[:, 3, :] == np.array([0, 0, 0, 1]))
    True
    """
    return to_4x4(assemble_3x4(rotations, translations), new_row=new_row)


def inv_3x4(matrices):
    """Given one (or more) 3x4 matrices, converts matrices into common
    transformation matrices by appending a row (0, 0, 0, 1), then
    inverts those matrices. Since the inverse will also have the
    same last row, the returned matrices are also 3x4

    >>> X = np.random.random((3, 4))
    >>> X_4x4 = to_4x4(X)
    >>> np.allclose(inv_3x4(X), np.linalg.inv(X_4x4)[:3, :])
    True
    """
    if matrices.ndim not in [2, 3] or matrices.shape[-2:] != (3, 4):
        raise ValueError("requires matrices argument to be one or more 3x4 matrices, so the shape should be either (3, 4) or (n, 3, 4)")
    R_inv = inv3(matrices[..., :3, :3])          # "rotation" part (upper left 3x3 block)
    t_inv = matvec(R_inv, -matrices[..., :3, 3])  # "translation" part
    return assemble_3x4(R_inv, t_inv)
class Animator(HasTraits):
    """
    Traits-based playback controller for mayavi animations.

    Drives a user-supplied per-frame callable via a pyface Timer and offers
    a small UI (``traits_view``) with play/stop controls, a frame slider,
    optional camera keyframing ("cameraman"), and batch rendering of frame
    ranges to numbered PNG files.
    """
    # playback controls
    start = Button('Start Animation')
    stop = Button('Stop Animation')
    next_frame = Button('+')
    prev_frame = Button('-')
    delay = Range(10, 100000, 500)  # timer interval in milliseconds
    loop = Bool(True)  # wrap around to frame 0 after the last frame?

    current_frame = Int(-1)
    _last_frame = Int()  # index of the final frame (num_frames - 1)

    # TODO use Range(high="trait name")
    # frame range and state for "render animation"
    render_from_frame = Int()
    render_to_frame = Int()
    render_animation = Button()
    is_rendering = Bool(False) # indicator bool is True when rendering
    is_rendering_animation = Bool(False)

    # where and under which file name pattern rendered frames are written
    render_directory = Directory("/tmp", exists=False)
    render_name_pattern = String("frame_%05d.png")

    # either render at a magnification of the window size, or at a fixed size
    magnification = Range(1, 128)
    fix_image_size = Bool(False)
    image_size = Tuple(Int(1280), Int(720))

    # fire this event to force a re-render of the current frame
    render = Event()

    # camera keyframing: interpolates the camera between stored keyframes
    enable_cameraman = Bool(False)
    set_keyframe = Button()
    remove_keyframe = Button()

    timer = Instance(Timer)

    traits_view = View( Tabbed(
        Group(
            HGroup(
                Item('start', show_label=False),
                Item('stop', show_label=False),
                Item('next_frame', show_label=False, enabled_when='current_frame < _last_frame'),
                Item('prev_frame', show_label=False, enabled_when='current_frame > 0'),
            ),
            HGroup(
                Item(name = 'loop'),
                Item(name = 'delay'),
            ),
            Item(name = 'current_frame',
                 editor=RangeEditor(is_float=False, high_name='_last_frame', mode='slider')),
            Group(
                HGroup(
                    Item(name = 'enable_cameraman', label='enabled'),
                    Item(name = 'set_keyframe', show_label=False),
                    Item(name = 'remove_keyframe', show_label=False),
                    Item(name = 'interpolation_type', object='object._camera_interpolator'),
                ),
                label = 'Cameraman',
            ),
            label = 'Timeline',
        ),
        Group(
            HGroup(
                Item('fix_image_size', label="Set Image Size"),
                Item('magnification', visible_when='not fix_image_size', label='Magnification'),
                Item('image_size', visible_when='fix_image_size', show_label=False, editor=TupleEditor(cols=2, labels=['W', 'H'])),
            ),
            Item("_"),
            Item("render_directory", label="Target Dir"),
            Item("render_name_pattern", label="Filename Pattern"),
            Item("_"),
            HGroup(
                Item("render_from_frame", label="from",
                     editor=RangeEditor(is_float=False, low=0, high_name='render_to_frame')),
                Item("render_to_frame", label="to",
                     editor=RangeEditor(is_float=False, low_name='render_from_frame', high_name='_last_frame')),
            ),
            Item("render_animation", show_label=False),
            label = "Render",
        ),
    ),
    title = 'Animation Controller',
    buttons = [])


    def __init__(self, num_frames, callable, millisec=40, figure=None, play=True, *args, **kwargs):
        """
        Set up the animation over ``num_frames`` frames.

        ``callable`` (NOTE: shadows the builtin of the same name) is invoked
        as ``callable(frame_index)`` on every frame change; it may also be a
        generator function that yields once when the frame is ready to render
        (see ``_render``). ``millisec`` is the timer delay, ``figure`` defaults
        to the current mlab figure, and ``play=False`` starts paused.
        Extra ``*args``/``**kwargs`` are forwarded to the pyface Timer.
        """
        HasTraits.__init__(self)
        self.delay = millisec
        self._last_frame = num_frames - 1
        self._callable = callable
        if figure is None:
            figure = mlab.gcf()
        self._figure = figure
        # spline-interpolates the camera between keyframes set via the UI
        self._camera_interpolator = tvtk.CameraInterpolator(interpolation_type='spline')
        self._t_keyframes = {}
        self.render_to_frame = self._last_frame
        self.timer = Timer(millisec, self._on_timer, *args, **kwargs)
        if not play:
            self.stop = True
        self._internal_generator = None
        self.current_frame = 0
        # re-render whenever the frame changes or a render is requested;
        # dispatch on the UI thread since VTK rendering is not thread-safe
        self.on_trait_change(self._render, "render, current_frame", dispatch="ui")

    def _render(self):
        """Call the user callable for the current frame and render the scene."""
        self.is_rendering = True
        # if the previous call returned a generator, resume it first so it can
        # finish its post-render work before we draw the next frame
        if self._internal_generator is not None:
            try:
                next(self._internal_generator)
            except StopIteration: # is ok since generator should yield just once to render
                pass
            except: # catch and re-raise other errors
                raise
            else:
                # NOTE(review): raising a str is a TypeError on Python 3 —
                # this should raise e.g. RuntimeError(...) instead
                raise "The render function should be either a simple function or a generator that yields just once to render"
        # before we call the user function, we want to disallow rendering
        # this speeds up animations that use mlab functions
        scene = self._figure.scene
        scene.disable_render = True
        r = self._callable(self.current_frame)
        if isinstance(r, types.GeneratorType):
            next(r)
            # save away generator to yield when another frame has to be displayed
            self._internal_generator = r
        # render scene without dumb hourglass cursor,
        # can be prevented by setting _interacting before calling render
        old_interacting = scene._interacting
        if self._camera_interpolator.number_of_cameras >= 2 and self.enable_cameraman:
            # normalized animation time in [0, 1] drives the camera spline
            t = self.current_frame / float(self._last_frame)
            self._camera_interpolator.interpolate_camera(t, mlab.get_engine().current_scene.scene.camera)
            mlab.gcf().scene.renderer.reset_camera_clipping_range()
        scene._interacting = True
        scene.disable_render = False
        scene.render()
        scene._interacting = old_interacting
        self.is_rendering = False

    @on_trait_change('set_keyframe')
    def _set_keyframe(self):
        """Store the current camera as a keyframe at the current frame's time."""
        t = self.current_frame / float(self._last_frame)
        self._camera_interpolator.add_camera(t, mlab.get_engine().current_scene.scene.camera)
        self._t_keyframes[self.current_frame] = t

    def _next_frame_fired(self):
        # advancing current_frame triggers _render via on_trait_change
        self.current_frame += 1

    def _prev_frame_fired(self):
        self.current_frame -= 1

    @on_trait_change('remove_keyframe')
    def _remove_keyframe(self):
        """Remove the camera keyframe stored for the current frame, if any."""
        if self.current_frame in self._t_keyframes:
            self._camera_interpolator.remove_last_keyframe(self._t_keyframes[self.current_frame])

    def _on_timer(self, *args, **kwargs):
        """Timer tick: advance one frame, wrapping or stopping at the end."""
        if self.loop or self.current_frame != self._last_frame:
            self.current_frame = (self.current_frame + 1) % (self._last_frame + 1)
        else:
            self.stop = True

    def _delay_changed(self, value):
        # restart the timer with the new interval if it is currently running
        t = self.timer
        if t is None:
            return
        if t.IsRunning():
            t.Stop()
            t.Start(value)

    def _start_fired(self):
        # when not looping and already at the end, restart from the beginning
        if not self.loop and self.current_frame == self._last_frame:
            self.current_frame = 0
        self.timer.Start(self.delay)

    def _stop_fired(self):
        self.timer.Stop()

    def _render_animation_fired(self):
        """Render frames render_from_frame..render_to_frame to PNG files."""
        self.stop = True
        n_frames_render = self.render_to_frame - self.render_from_frame
        # prepare the render window
        renwin = self._figure.scene.render_window
        aa_frames = renwin.aa_frames  # remember to restore after rendering
        renwin.aa_frames = 8
        renwin.alpha_bit_planes = 1
        # turn on off screen rendering
        #renwin.off_screen_rendering = True
        # set size of window
        if self.fix_image_size:
            orig_size = renwin.size
            renwin.size = self.image_size
        # render the frames
        progress = ProgressDialog(title="Rendering", max=n_frames_render,
                                  show_time=True, can_cancel=True)
        progress.open()
        self.is_rendering_animation = True
        for frame in range(self.render_from_frame, self.render_to_frame + 1):
            # move animation to desired frame, this will also render the scene
            self.current_frame = frame
            # prepare window to image writer
            render = tvtk.WindowToImageFilter(input=renwin, magnification=1)#, input_buffer_type='rgba')
            if not self.fix_image_size:
                render.magnification = self.magnification
            exporter = tvtk.PNGWriter(file_name=path.join(self.render_directory, self.render_name_pattern % frame))

            configure_input(exporter,render)
            exporter.write()
            do_continue, skip = progress.update(frame - self.render_from_frame)
            if not do_continue:
                break
        # reset the render window to old values
        renwin.aa_frames = aa_frames
        if self.fix_image_size:
            renwin.size = orig_size
        #renwin.off_screen_rendering = False
        self.is_rendering_animation = False
        progress.close()
def visualize_point_correspondences(source_pts, target_pts, ij_corr=None, scalars=None, point_size=10):
    """
    Interactively visualize correspondences between two point sets.

    Displays the source points in the current mlab scene and opens a small
    traits dialog with an ``alpha`` slider that linearly morphs the displayed
    points from their source positions (alpha=0) to their corresponding
    target positions (alpha=1). Blocks until the dialog is closed.

    Parameters:
        source_pts, target_pts : point arrays; indexed rows must be 3d points
        ij_corr : optional (k, 2) int array of (source index, target index)
            pairs; defaults to the identity correspondence, which requires
            both arrays to have the same shape
        scalars : optional per-correspondence scalars used to color the points
        point_size : rendered size of the displayed points
    """
    if ij_corr is None:
        if source_pts.shape != target_pts.shape:
            raise ValueError("must have same amount of source and target points, or specify ij_corr parameter")
        ij_corr = np.column_stack((np.arange(len(source_pts)), np.arange(len(target_pts))))

    # corresponding point pairs: p[i] morphs towards p2[i]
    p = source_pts[ij_corr[:,0]]
    p2 = target_pts[ij_corr[:,1]]

    # build a vertex-only polydata so the points render as a point cloud
    pd = tvtk.PolyData(points=p, verts=np.r_[:len(p)].reshape((-1,1)))
    actor = tvtk.Actor(mapper=tvtk.PolyDataMapper())
    configure_input_data(actor.mapper, pd)
    actor.property.point_size = point_size
    if scalars is not None:
        pd.point_data.scalars = scalars
        actor.mapper.scalar_range = scalars.min(), scalars.max()
    mlab.gcf().scene.add_actor(actor)

    # minimal UI: a single slider that interpolates the displayed points
    class Ctrl(ta.HasTraits):
        alpha = ta.Range(0., 1.)

        def _alpha_changed(self):
            # closes over p, p2 and pd from the enclosing function
            pd.points = p + self.alpha * (p2 - p)
            mlab.gcf().scene.render()

    Ctrl().configure_traits()
0) 89 | # when scalars of second mesh given we need to store both scalars in order 90 | # to interpolate between them during rendering 91 | if scalars2 is not None: 92 | self._scalars12 = (scalars, scalars2) 93 | else: 94 | self._actor.property.set(**actor_property) 95 | mlab.gcf().scene.add_actor(self._actor) 96 | 97 | def _alpha_changed(self): 98 | self._polydata.points = self._verts1 * (1 - self.alpha) \ 99 | + self._verts2 * self.alpha 100 | if self._scalars12 is not None: 101 | blended = self._scalars12[0] * (1 - self.alpha) \ 102 | + self._scalars12[1] * self.alpha 103 | # when scalars is a (n_verts, 3) color array (type uint8) 104 | # then above blending will cast to float, undo this here: 105 | if self._scalars12[0].dtype == np.uint8: 106 | blended = blended.astype(np.uint8) 107 | self._polydata.point_data.scalars = blended 108 | mlab.gcf().scene.render() 109 | 110 | traits_view = tu.View(tu.Item('alpha', show_label=False), title='cgtools Morpher') 111 | 112 | 113 | def visualize_mesh_morph(verts1, verts2, tris=None, **kwargs): 114 | Morpher(verts1, verts2, tris, **kwargs).configure_traits() 115 | 116 | 117 | morph_mesh = visualize_mesh_morph 118 | 119 | 120 | class MultiMeshMorpher(ta.HasTraits): 121 | visible = ta.Enum(values='_names') 122 | morph_target = ta.Enum(values='_names') 123 | morph_alpha = ta.Range(0.0, 1.0, 0.0) 124 | show_edges = ta.Bool(False) 125 | _names = ta.List() 126 | 127 | def __init__(self, list_verts, tris, names=None, fig=None, **kw): 128 | super(MultiMeshMorpher, self).__init__(**kw) 129 | self._list_verts = list_verts 130 | self._tris = tris 131 | 132 | if fig is None: 133 | self._fig = mlab.figure(bgcolor=(1, 1, 1)) 134 | else: 135 | self._fig = fig 136 | 137 | if names is None: 138 | names = map(str, range(len(list_verts))) 139 | self._names = list(names) 140 | 141 | self._verts_by_name = dict(zip(self._names, list_verts)) 142 | self._actor, self._pd = mesh_as_vtk_actor(list_verts[0], tris, return_polydata=True) 143 | 
def vislines(from_points, to_points, **kwargs):
    """
    Draw line segments from ``from_points`` to ``to_points`` as a mayavi quiver.

    Additional keyword arguments are forwarded to ``mlab.quiver3d`` and
    override the dashed-line defaults; a ``scalars`` keyword switches the
    glyphs to scalar coloring. Returns the quiver object.
    """
    starts = np.asarray(from_points)
    deltas = np.asarray(to_points) - starts
    x, y, z = starts.T
    u, v, w = deltas.T
    # default: render each segment as an unscaled 2d dash from start to end
    options = dict(scale_factor=1, scale_mode='vector', mode='2ddash', line_width=1)
    options.update(kwargs)
    quiver = mlab.quiver3d(x, y, z, u, v, w, **options)
    if 'scalars' in options:
        quiver.glyph.color_mode = 'color_by_scalar'
    return quiver
def mesh_as_vtk_polydata(verts, tris=None, uv=None, normals=None, tris_uv=None, tris_normals=None):
    """
    Build a tvtk.PolyData from a triangle mesh, optionally with uv
    coordinates and normals.

    When ``tris_uv`` or ``tris_normals`` is given, the uv/normal topology
    differs from the vertex topology, which VTK cannot represent directly;
    in that case vertices (and uv/normals) are flattened so that every
    triangle gets its own three points.

    Parameters:
        verts : (n, 3) vertex positions
        tris : optional (m, 3) triangle indices into verts
        uv : optional per-vertex texture coordinates
        normals : optional per-vertex normals
        tris_uv : optional (m, 3) triangle indices into uv
        tris_normals : optional (m, 3) triangle indices into normals

    Returns:
        tvtk.PolyData
    """
    # do vertex and uv or normal topology differ? in that case, flatten those arrays
    if tris is not None and (tris_uv is not None or tris_normals is not None):
        # default any missing per-attribute topology to the vertex topology
        if tris_normals is None:
            tris_normals = tris
        if tris_uv is None:
            tris_uv = tris
        # one distinct point per triangle corner (3*m points total)
        verts_flat = verts[tris].reshape(-1, 3)
        tris_flat = np.arange(len(verts_flat)).reshape(-1, 3)

        pd = tvtk.PolyData(points=verts_flat, polys=tris_flat)
        if uv is not None:
            pd.point_data.t_coords = uv[tris_uv].reshape(-1, 2)
        if normals is not None:
            pd.point_data.normals = normals[tris_normals].reshape(-1, 3)
    else:
        # use data as-is
        pd = tvtk.PolyData(points=verts)
        if tris is not None:
            pd.polys = tris
        if uv is not None:
            assert len(uv) == len(verts)
            pd.point_data.t_coords = uv
        if normals is not None:
            assert len(normals) == len(verts)
            pd.point_data.normals = normals

    return pd
def vismesh(pts, tris, color=None, edge_visibility=False, shader=None, triangle_scalars=None, colors=None, nan_color=None, **kwargs):
    """
    Show a triangle mesh via mlab.triangular_mesh, with extras on top:

    Parameters:
        pts : (n, 3) vertex positions
        tris : (m, 3) triangle indices
        color : uniform color forwarded to triangular_mesh
        edge_visibility : draw the wireframe on top of the surface
        shader : optional material file loaded via load_material
        triangle_scalars : per-triangle (cell) scalars instead of per-vertex
        colors : (n, 3) per-vertex uint8-compatible RGB colors
        nan_color : color used for NaN scalar values (3- or 4-tuple)
        kwargs : forwarded to mlab.triangular_mesh; a 2d "scalars" array is
            reinterpreted as per-vertex colors

    Returns the triangular_mesh object.
    """
    if 'scalars' in kwargs and np.asarray(kwargs['scalars']).ndim == 2:
        # a 2d scalars array is really per-vertex colors - handled below
        colors = kwargs.pop('scalars')
    # VTK does not allow bool arrays as scalars normally, so convert to float.
    # BUGFIX: np.bool / np.float were removed in numpy >= 1.24; the builtin
    # bool / float are the documented equivalents.
    if 'scalars' in kwargs and np.asarray(kwargs['scalars']).dtype == bool:
        kwargs['scalars'] = kwargs['scalars'].astype(float)

    tm = mlab.triangular_mesh(pts[:,0], pts[:,1], pts[:,2], tris, color=color, **kwargs)
    if shader is not None:
        tm.actor.property.load_material(shader)
        tm.actor.actor.property.shading = True
    # per-vertex colors look washed out unless fully diffuse
    diffuse = 1.0 if colors is not None else 0.8
    tm.actor.actor.property.set(
        edge_visibility=edge_visibility, line_width=1,
        specular=0.0, specular_power=128.,
        diffuse=diffuse)
    if triangle_scalars is not None:
        # color by cell data instead of point data
        tm.actor.mapper.input.cell_data.scalars = triangle_scalars
        tm.actor.mapper.set(scalar_mode='use_cell_data', use_lookup_table_scalar_range=False,
                            scalar_visibility=True)
        if "vmin" in kwargs and "vmax" in kwargs:
            tm.actor.mapper.scalar_range = kwargs["vmin"], kwargs["vmax"]
    if colors is not None:
        # this basically is a hack which doesn't quite work,
        # we have to completely replace the polydata behind the hands of mayavi
        tm.mlab_source.dataset.point_data.scalars = colors.astype(np.uint8)
        normals = tvtk.PolyDataNormals(splitting=False)
        configure_input_data(normals, tm.mlab_source.dataset)
        configure_input(tm.actor.mapper, normals)
    if nan_color is not None:
        if len(nan_color) == 3:
            # lookup table expects RGBA; default to fully opaque
            nan_color = list(nan_color) + [1]
        tm.module_manager.scalar_lut_manager.lut.nan_color = nan_color
        tm.update_pipeline()
    return tm
class MultiMeshViewer(ta.HasTraits):
    """
    Traits UI that shows one mesh out of a list at a time.

    A dropdown selects which mesh is visible; a checkbox toggles
    wireframe display for all meshes.
    """
    visible = ta.Enum(values='_names')  # name of the currently shown mesh
    show_edges = ta.Bool(False)
    _names = ta.List()

    def __init__(self, list_verts, list_tris, names=None, fig=None, **kw):
        """
        Build one actor per (verts, tris) pair and add them to the figure.

        Parameters:
            list_verts : list of (n_i, 3) vertex arrays
            list_tris : list of (m_i, 3) triangle index arrays
            names : optional display names (must be unique); defaults to
                stringified indices
            fig : mayavi figure to draw into; a new white-background figure
                is created when omitted
        """
        super(MultiMeshViewer, self).__init__(**kw)

        if fig is None:
            self._fig = mlab.figure(bgcolor=(1, 1, 1))
        else:
            self._fig = fig

        if names is None:
            names = map(str, range(len(list_verts)))
        self._names = list(names)

        # one actor per mesh, keyed by display name
        self._actors = {}
        for name, verts, tris in zip(self._names, list_verts, list_tris):
            actor, _ = mesh_as_vtk_actor(verts, tris)
            actor.property.set(
                ambient=0.0,
                specular=0.15,
                specular_power=128.,
                diffuse=0.8,
            )
            self._fig.scene.add_actor(actor)
            self._actors[name] = actor

        self.visible = self._names[0]

    # NOTE(review): morph_target, morph_alpha and show_distance are not
    # traits of this class (they look copied from MultiMeshMorpher) —
    # confirm whether they can be dropped from the listener
    @ta.on_trait_change('visible, show_edges, morph_target, morph_alpha, show_distance')
    def _update(self):
        """Show only the selected actor and apply the wireframe toggle."""
        for a in self._actors.values():
            a.visibility = False
            a.property.edge_visibility = self.show_edges
        self._actors[self.visible].visibility = True
        self._fig.scene.render()

    view = tu.View(
        tu.Item('visible'),
        tu.Item('show_edges', name='Wireframe'),
        title="MultiMeshViewer"
    )
def pointcloud_as_vtk_actor(points, pt_colors=None, point_size=5.0, alpha=1.0):
    """
    Create a tvtk actor that displays the given points as a point cloud.

    Parameters:
        points : (n, 3) array of point positions
        pt_colors : optional per-point colors, forwarded to
            pointcloud_as_vtk_polydata (which casts them to uint8)
        point_size : rendered size of each point
        alpha : opacity of the actor

    Returns:
        (actor, polydata) tuple.
    """
    # BUGFIX: pt_colors was previously passed positionally and therefore
    # landed in the `scalars` parameter of pointcloud_as_vtk_polydata,
    # skipping the uint8 color conversion - pass it by keyword instead.
    pd = pointcloud_as_vtk_polydata(points, pt_colors=pt_colors)
    mapper = tvtk.PolyDataMapper()
    configure_input(mapper, pd)
    actor = tvtk.Actor(mapper=mapper)
    actor.property.set(point_size=point_size, opacity=alpha)
    return actor, pd
/cgtools/vis/weights.py: -------------------------------------------------------------------------------- 1 | from os import path 2 | import logging 3 | import numpy as np 4 | from itertools import cycle, product 5 | from mayavi import mlab 6 | from traits.api import Any, HasTraits, Range, Int, String, Instance, on_trait_change, Bool, List, Button 7 | from tvtk.pyface.scene_editor import SceneEditor 8 | from mayavi.tools.mlab_scene_model import MlabSceneModel 9 | from mayavi.core.ui.mayavi_scene import MayaviScene 10 | #from tvtk.pyface.api import Scene 11 | from traitsui.api import View, Item, HGroup, Group, EnumEditor 12 | from pyface.api import DirectoryDialog, OK 13 | 14 | 15 | def _centered(x): 16 | if x is None: 17 | return None 18 | vmax = np.maximum(np.abs(x.max(0)), np.abs(x.min(0))) 19 | vmin = -vmax 20 | x = ((x - vmin) / (vmax - vmin)) 21 | return x 22 | 23 | class WeightsVisualization(HasTraits): 24 | scene = Instance(MlabSceneModel, (), kw=dict(background=(1,1,1))) 25 | weight_index = Range(value=0, low='_min_weight', high='_max_weight') 26 | display_all = Bool(True) 27 | display_labels = Bool(True) 28 | _min_weight = Int(0) 29 | _max_weight = Int() 30 | selected_mesh = String() 31 | _names = List(String) 32 | 33 | save_all = Button() 34 | 35 | def __init__(self, meshes, center=True, vmin=None, vmax=None, offset_axis=0, offset_spacing=0.2, contours=None, colormap='RdBu', show_labels=False, actor_options=dict(), offset_axis2=1, offset_spacing2=0.5, label_offset_axis=None, label_offset=1.1, num_columns=None, **kwargs): 36 | HasTraits.__init__(self) 37 | 38 | if type(meshes) is dict: 39 | names, verts, tris, weights = list(zip(*[(n, v, t, w) for n, (v, t, w) in meshes.items()])) 40 | elif type(meshes) in [list, tuple]: 41 | names, verts, tris, weights = [], [], [], [] 42 | for i, mesh in enumerate(meshes): 43 | # (verts, tris, weights, name) 44 | verts.append(mesh[0]) 45 | tris.append(mesh[1]) 46 | weights.append(mesh[2]) 47 | if len(mesh) < 4: 48 | 
names.append(str(i)) 49 | else: 50 | names.append(mesh[3]) 51 | else: 52 | raise ValueError('illegal value for parameter "meshes"') 53 | 54 | ## single arrays given? 55 | #share_verts, share_tris = False, False 56 | #if type(weights) is np.ndarray and weights.ndim == 2: 57 | # weights = [weights] 58 | #if type(verts) is np.ndarray and verts.ndim == 2: 59 | # verts = cycle([verts]) 60 | # share_verts = True 61 | #if type(tris) is np.ndarray and tris.ndim == 2: 62 | # tris = cycle([tris]) 63 | # share_tris = True 64 | #else: 65 | # if not share_tris and len(tris) != len(weights): 66 | # raise ValueError, "need the same number of weight and triangle arrays" 67 | # if not share_verts and len(verts) != len(weights): 68 | # raise ValueError, "need the same number of weight and vertex arrays" 69 | # if names is not None and len(names) != len(weights): 70 | # raise ValueError, "need the same number of weight arrays and names" 71 | 72 | if names is not None: 73 | assert len(set(names)) == len(names) 74 | 75 | self._weights = list(map(_centered, weights)) if center else weights 76 | self._verts = verts 77 | self._tris = tris 78 | valid_weights = [w.shape[-1] - 1 for w in self._weights if w is not None] 79 | self._max_weight = max(valid_weights) if len(valid_weights) > 0 else 0 80 | self._names = names 81 | #self._names = map(str, range(len(self._weights))) if names is None else names 82 | #if len(weights) == 2: 83 | # self.display_all = True 84 | 85 | # visualize each mesh 86 | self._trimeshes = [] 87 | self._texts = [] 88 | self._offsets = [] 89 | offset = np.zeros(3) 90 | for i, (verts_i, tris_i, weights_i, name_i) in enumerate(zip(verts, tris, weights, names)): 91 | if i > 0: 92 | offset[offset_axis] += verts_i[:,offset_axis].ptp() * (1 + offset_spacing) 93 | if num_columns is not None and i % num_columns == 0: 94 | offset[offset_axis2] += verts_i[:, offset_axis2].ptp() * (1 + offset_spacing2) 95 | offset[offset_axis] = 0 96 | if center: 97 | vmin, vmax = 0, 1 98 | else: 
99 | vmin, vmax = vmin, vmax 100 | # draw mesh 101 | tm = mlab.triangular_mesh( 102 | verts_i[:,0], verts_i[:,1], verts_i[:,2], tris_i, 103 | scalars=weights_i[...,0] if weights_i is not None and not weights_i.dtype == np.uint8 else None, 104 | colormap=colormap, 105 | vmin=vmin, vmax=vmax, 106 | figure=self.scene.mayavi_scene) 107 | if weights_i is None: 108 | tm.actor.mapper.scalar_visibility = False 109 | # disable normal splitting 110 | tm.parent.parent.filter.splitting = False 111 | 112 | if contours is not None and weights_i is not None: 113 | from mayavi.modules.surface import Surface 114 | engine = tm.scene.engine 115 | tm_contour = Surface() 116 | engine.add_filter(tm_contour, tm.module_manager) 117 | tm_contour.enable_contours = True 118 | tm_contour.contour.number_of_contours = contours 119 | tm_contour.actor.mapper.scalar_visibility = False 120 | tm_contour.actor.property.set(color = (0.0, 0.0, 0.0), line_width=4) 121 | 122 | self._trimeshes.append([tm, tm_contour]) 123 | else: 124 | self._trimeshes.append([tm]) 125 | 126 | if show_labels: 127 | txt = mlab.text(0, 0, str(name_i), z=0, color=(0, 0, 0), width=0.12) 128 | txt.actor.text_scale_mode = 'none' 129 | txt.property.set(font_size=11, justification='centered') 130 | self._texts.append(txt) 131 | 132 | 133 | #tm.actor.actor.property.edit_traits() 134 | # for the next mesh, add the extent of the current mesh (plus spacing) to the offset 135 | self._offsets.append(offset.copy()) 136 | tm.actor.property.set(**actor_options) 137 | #if handle_points is not None: 138 | # h = verts2.ptp() 139 | # mlab.points3d(handle_points[:,0], handle_points[:,1], handle_points[:,2], scale_factor=h / 20) 140 | 141 | #actor_prop.edit_traits() 142 | self.selected_mesh = self._names[0] 143 | self._label_offset_axis = label_offset_axis 144 | self._label_offset = label_offset 145 | if self._label_offset_axis is None: 146 | self._label_offset_axis = (np.abs(self._offsets[-1]).argmax() + 1) % 3 147 | self._update_view() 148 | 
self._reposition_meshes() 149 | 150 | @on_trait_change('weight_index') 151 | def _update_view(self): 152 | for tms, w in zip(self._trimeshes, self._weights): 153 | tm = tms[0] 154 | if w is None: 155 | continue 156 | try: 157 | wi = w[...,self.weight_index] 158 | if wi.dtype == np.uint8: 159 | tm.actor.mapper.input.point_data.scalars = wi 160 | self.scene.render() 161 | else: 162 | tm.mlab_source.set(scalars=wi) 163 | except IndexError: 164 | logging.warn("coult not reference index %d" % self.weight_index) 165 | 166 | @on_trait_change('display_all') 167 | def _reposition_meshes(self): 168 | self.scene.disable_render = True 169 | if self.display_all: 170 | for tms, offset in zip(self._trimeshes, self._offsets): 171 | for tm in tms: 172 | tm.actor.actor.position = offset 173 | for txt, v, offset in zip(self._texts, self._verts, self._offsets): 174 | ax = np.zeros(3) 175 | ax[self._label_offset_axis] = self._label_offset 176 | txt.x_position, txt.y_position, txt.z_position = (v.mean(axis=0) + v.ptp(axis=0) * ax) + offset 177 | else: 178 | for tms in self._trimeshes: 179 | for tm in tms: 180 | tm.actor.actor.position = (0, 0, 0) 181 | self.scene.disable_render = False 182 | 183 | @on_trait_change('display_all, selected_mesh, display_labels') 184 | def _update_mesh_visibilities(self): 185 | self.scene.disable_render = True 186 | for name, tms in zip(self._names, self._trimeshes): 187 | for tm in tms: 188 | tm.visible = self.display_all or name == self.selected_mesh 189 | for txt in self._texts: 190 | txt.visible = self.display_all and self.display_labels 191 | self.scene.disable_render = False 192 | 193 | def _save_all_fired(self): 194 | file_dialog = DirectoryDialog(action = 'open', title = 'Select Directory') 195 | if file_dialog.open() == OK: 196 | out_path = file_dialog.path 197 | if self.display_all: 198 | items = [(None, k) for k in range(self._max_weight + 1)] 199 | else: 200 | items = product(self._names, range(self._max_weight + 1)) 201 | for name, k in items: 
202 | if name is not None: 203 | self.selected_mesh = name 204 | self.weight_index = k 205 | mlab.savefig(path.join(out_path, "%s_%03d.png" % (name, k))) 206 | 207 | view = View( 208 | Group( 209 | HGroup( 210 | Item('scene', editor=SceneEditor(scene_class=MayaviScene), 211 | height=600, width=800, show_label=False), 212 | ), 213 | HGroup( 214 | Item('weight_index', label='idx'), 215 | Item('display_all', label='all'), 216 | Item('display_labels', label='labels', 217 | visible_when='len(object._texts) > 0 and object.display_all'), 218 | Item('selected_mesh', label='which', 219 | visible_when="len(object._names) > 1 and not object.display_all", 220 | editor=EnumEditor( 221 | name='object._names'), 222 | ), 223 | Item('save_all'), 224 | ), 225 | ), 226 | resizable=True, title="Mesh Weights", 227 | ) 228 | 229 | 230 | def show_many_weights(meshes, **kwargs): 231 | WeightsVisualization(meshes, **kwargs).configure_traits() 232 | 233 | 234 | def show_weights(verts, tris, multi_weights, names=None, **kwargs): 235 | if type(multi_weights) is np.ndarray and multi_weights.ndim == 2: 236 | multi_weights = [multi_weights] # single weight vector 237 | if names is None: 238 | meshes = [[verts, tris, w] for w in multi_weights] 239 | else: 240 | meshes = [[verts, tris, w, n] for w, n in zip(multi_weights, names)] 241 | show_many_weights(meshes, **kwargs) 242 | 243 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: cgtools 2 | channels: 3 | - defaults 4 | - conda-forge 5 | dependencies: 6 | - h5py>=2.7.1 7 | - mayavi>=4.5.0 8 | - numpy>=1.13 9 | - pybind11>=2.2.2 10 | - pytest>=3.3.2 11 | - scipy>=1.1.0 12 | - eigen>=3.3 13 | -------------------------------------------------------------------------------- /examples/correspondences.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 
from cgtools.vis.correspondences import visualize_mesh_morph 3 | 4 | 5 | if __name__ == "__main__": 6 | x, y = list(map(np.ravel, np.mgrid[:10:20j, :10:20j])) 7 | z1 = np.sin(0.5 * x) + np.cos(1.2 * y) 8 | z2 = -0.5 * np.sin(0.4 * x) + 0.5 * np.cos(1.0 * y) 9 | ix = np.arange(len(x)).reshape(20, 20) 10 | quads = np.column_stack(list(map(np.ravel, [ix[:-1, :-1], 11 | ix[ 1:, :-1], 12 | ix[ 1:, 1:], 13 | ix[:-1, 1:]]))) 14 | tris = quads[:, [0, 1, 2, 2, 3, 0]].reshape(-1, 3) 15 | 16 | visualize_mesh_morph( 17 | np.column_stack((x, y, z1)), 18 | np.column_stack((x, y, z2)), 19 | quads, 20 | actor_property={'edge_visibility': True}, 21 | ) 22 | 23 | -------------------------------------------------------------------------------- /examples/histogram.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from cgtools.histograms import soft_histogram, soft_histogram_dd 3 | 4 | 5 | if __name__ == '__main__': 6 | import sys 7 | import pylab as pl 8 | 9 | np.random.seed(2) 10 | rmin, rmax = 0.1, 1.0 11 | n_angular_bins = 16 12 | n_radial_bins = 5 13 | pts = np.random.normal(loc=0, scale=0.3, size=(4, 2)) 14 | #pts = np.array([(0.2, 0.), (0., 0.2), (0, -0.2), (-0.2, 0)]) 15 | #pts = np.random.uniform(low=-0.7, high=0.7, size=(300,2)) 16 | pts_polar = np.column_stack((np.log10(np.sqrt(pts[:,0]**2 + pts[:,1]**2)), 17 | np.arctan2(pts[:,1], pts[:,0]))) 18 | h = soft_histogram_dd(pts_polar, nbins=(n_radial_bins, n_angular_bins), 19 | range=((np.log10(rmin), np.log10(rmax)), (-np.pi, np.pi)), 20 | wrapping=[False, True]) 21 | #pl.scatter(pts[:,1], pts[:,0], marker='x') 22 | #pl.imshow(h, extent=(-1, 1, 1, -1)) 23 | pl.subplot(121, polar=True) 24 | r, theta = np.meshgrid(10 ** np.linspace(np.log10(rmin), np.log10(rmax), n_radial_bins+1), 25 | np.linspace(-np.pi, np.pi, n_angular_bins+1)) 26 | print(r) 27 | print(h.shape) 28 | print(h) 29 | pl.pcolormesh(theta, r, h.T, edgecolors=(1,1,1), lw=0.001, vmin=0, vmax=1) 30 | 
pl.scatter(pts_polar[:,1], 10 ** pts_polar[:,0], c='w') 31 | 32 | pl.subplot(122) 33 | pl.scatter(pts_polar[:,1], pts_polar[:,0], c='w') 34 | pl.imshow(h, extent=(-np.pi, np.pi, np.log10(rmax), np.log10(rmin)), vmin=0, vmax=1) 35 | pl.show() 36 | 37 | 38 | pts = np.random.normal(loc=0, scale=0.3, size=(8, 2)) 39 | pl.subplot(121) 40 | pl.title("cgtools.histograms.soft_histogram_dd") 41 | nbins = 20 42 | h = soft_histogram_dd(pts, (nbins, nbins), ((-1, 1), (-1, 1))) 43 | pl.scatter(pts[:,1], pts[:,0], marker='x') 44 | pl.imshow(h, extent=(-1, 1, 1, -1), vmin=0, vmax=1) 45 | pl.subplot(122) 46 | pl.title('numpy.histogramdd') 47 | h = np.histogramdd(pts, bins=(nbins, nbins), range=((-1, 1), (-1, 1)))[0] 48 | pl.scatter(pts[:,1], pts[:,0], marker='x') 49 | pl.imshow(h, extent=(-1, 1, 1, -1)) 50 | pl.show() 51 | 52 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = --doctest-modules --ignore=include 3 | doctest_optionflags = NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL 4 | -------------------------------------------------------------------------------- /scripts/meshmorph: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import plac 4 | from cgtools.io import load_mesh 5 | from cgtools.vis.correspondences import morph_multimesh 6 | 7 | 8 | def main(*mesh_filenames): 9 | verts, faces = zip(*[load_mesh(f) for f in mesh_filenames]) 10 | morph_multimesh(verts, faces[0], names=mesh_filenames) 11 | 12 | 13 | if __name__ == '__main__': 14 | plac.call(main) 15 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [aliases] 2 | test=pytest 3 | -------------------------------------------------------------------------------- /setup.py: 
-------------------------------------------------------------------------------- 1 | from setuptools import setup, Extension 2 | from setuptools.command.build_ext import build_ext 3 | import numpy 4 | import sys 5 | import setuptools 6 | 7 | 8 | 9 | class get_pybind_include(object): 10 | """Helper class to determine the pybind11 include path 11 | The purpose of this class is to postpone importing pybind11 12 | until it is actually installed, so that the ``get_include()`` 13 | method can be invoked. """ 14 | 15 | def __init__(self, user=False): 16 | self.user = user 17 | 18 | def __str__(self): 19 | import pybind11 20 | return pybind11.get_include(self.user) 21 | 22 | 23 | 24 | ext_modules = [ 25 | Extension( 26 | 'cgtools.fastmath._fastmath_ext', 27 | ['src/fastmath.cpp'], 28 | include_dirs=[ 29 | get_pybind_include(), 30 | get_pybind_include(user=True), 31 | numpy.get_include(), 32 | 'include/eigen', 33 | 'include/libigl/include', 34 | ], 35 | language='c++' 36 | ), 37 | Extension( 38 | 'cgtools.io._fastobj_ext', 39 | ['src/fast_obj.cpp'], 40 | include_dirs=[ 41 | get_pybind_include(), 42 | get_pybind_include(user=True), 43 | numpy.get_include(), 44 | 'include/eigen', 45 | ], 46 | language='c++' 47 | ), 48 | Extension( 49 | 'cgtools.mesh._intersections_ext', 50 | ['src/intersections.cpp'], 51 | include_dirs=[ 52 | 'include/eigen', 53 | 'include/libigl/include', 54 | numpy.get_include(), 55 | ], 56 | language='c++' 57 | ), 58 | Extension( 59 | 'cgtools.mesh._igl_ext', 60 | ['src/igl_ext.cpp'], 61 | include_dirs=[ 62 | 'include/eigen', 63 | 'include/libigl/include', 64 | numpy.get_include(), 65 | ], 66 | language='c++' 67 | ), 68 | ] 69 | 70 | 71 | # As of Python 3.6, CCompiler has a `has_flag` method. 72 | # cf http://bugs.python.org/issue26689 73 | def has_flag(compiler, flagname): 74 | """Return a boolean indicating whether a flag name is supported on 75 | the specified compiler. 
    """
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f:
        f.write('int main (int argc, char **argv) { return 0; }')
        try:
            # NOTE(review): this re-opens the NamedTemporaryFile by name and
            # writes the object file into the current directory -- both are
            # unix-only assumptions; confirm Windows builds are not supported.
            compiler.compile([f.name], extra_postargs=[flagname])
        except setuptools.distutils.errors.CompileError:
            return False
    return True


def cpp_flag(compiler):
    """Return the -std=c++[11/14] compiler flag.

    The c++14 flag is preferred over c++11 (when it is available).

    Raises:
        RuntimeError: if the compiler supports neither C++11 nor C++14.
    """
    if has_flag(compiler, '-std=c++14'):
        return '-std=c++14'
    elif has_flag(compiler, '-std=c++11'):
        return '-std=c++11'
    else:
        raise RuntimeError('Unsupported compiler -- at least C++11 support '
                           'is needed!')


class BuildExt(build_ext):
    """A custom build extension for adding compiler-specific options."""
    # base compile flags per compiler type; extended in build_extensions()
    c_opts = {
        'msvc': ['/EHsc'],
        'unix': [],
    }

    if sys.platform == 'darwin':
        # clang on macOS needs libc++ (and a deployment target) for C++11/14
        c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.7']

    def build_extensions(self):
        # the compiler type is only known at build time, so flags are probed here
        ct = self.compiler.compiler_type
        opts = self.c_opts.get(ct, [])
        if ct == 'unix':
            opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version())
            opts.append(cpp_flag(self.compiler))
            if has_flag(self.compiler, '-fvisibility=hidden'):
                # hide symbols by default (smaller binaries, pybind11 recommendation)
                opts.append('-fvisibility=hidden')
        elif ct == 'msvc':
            opts.append('/DVERSION_INFO=\\"%s\\"' % self.distribution.get_version())
        for ext in self.extensions:
            ext.extra_compile_args = opts
        build_ext.build_extensions(self)


setup(
    name = 'cgtools',
    version = "0.0.1",
    author = "Thomas Neumann",
    author_email = "neumann.thomas@gmail.com",
    description = "Tools computer graphics and vision, mostly for numpy / scipy",
    license = "MIT",
    cmdclass={'build_ext': BuildExt},
    ext_modules = ext_modules,
    packages = ['cgtools', 'cgtools.fastmath', 'cgtools.io', 'cgtools.vis',
'cgtools.mesh'], 135 | setup_requires=[ 136 | 'pytest-runner', 137 | ], 138 | tests_require=[ 139 | 'pytest', 140 | ], 141 | scripts=[ 142 | 'scripts/meshmorph' 143 | ], 144 | ) 145 | 146 | -------------------------------------------------------------------------------- /src/fast_obj.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | 6 | #include 7 | #include 8 | #include 9 | 10 | 11 | inline bool is_white_space(char c) 12 | { 13 | return ((c) == ' ' || (c) == '\t'); 14 | }; 15 | 16 | inline bool is_valid_digit(char c) 17 | { 18 | return ((c) >= '0' && (c) <= '9'); 19 | }; 20 | 21 | inline void skip_whitespace(char * &c) 22 | { 23 | while (is_white_space(*c) ) c++; 24 | } 25 | 26 | inline void skip_until_whitespace(char * &c) 27 | { 28 | while (*c && !is_white_space(*c)) c++; 29 | } 30 | 31 | inline int fast_atoi(char * &c) 32 | { 33 | int val = 0; 34 | while(is_valid_digit(*c)) { 35 | val = val*10 + (*c - '0'); 36 | c++; 37 | } 38 | return val; 39 | } 40 | 41 | // this function was adapted from Tom Van Baaks code here: http://leapsecond.com/tools/fast_atof.c 42 | inline double fast_atod(char * &p) 43 | { 44 | int frac; 45 | double sign, value, scale; 46 | 47 | // Get sign, if any. 48 | 49 | sign = 1.0; 50 | if (*p == '-') { 51 | sign = -1.0; 52 | p += 1; 53 | 54 | } else if (*p == '+') { 55 | p += 1; 56 | } 57 | 58 | // Get digits before decimal point or exponent, if any. 59 | 60 | for (value = 0.0; is_valid_digit(*p); p += 1) { 61 | value = value * 10.0 + (*p - '0'); 62 | } 63 | 64 | // Get digits after decimal point, if any. 65 | 66 | if (*p == '.') { 67 | double pow10 = 10.0; 68 | p += 1; 69 | while (is_valid_digit(*p)) { 70 | value += (*p - '0') / pow10; 71 | pow10 *= 10.0; 72 | p += 1; 73 | } 74 | } 75 | 76 | // Handle exponent, if any. 
77 | 78 | frac = 0; 79 | scale = 1.0; 80 | if ((*p == 'e') || (*p == 'E')) { 81 | unsigned int expon; 82 | 83 | // Get sign of exponent, if any. 84 | 85 | p += 1; 86 | if (*p == '-') { 87 | frac = 1; 88 | p += 1; 89 | 90 | } else if (*p == '+') { 91 | p += 1; 92 | } 93 | 94 | // Get digits of exponent, if any. 95 | 96 | for (expon = 0; is_valid_digit(*p); p += 1) { 97 | expon = expon * 10 + (*p - '0'); 98 | } 99 | if (expon > 308) expon = 308; 100 | 101 | // Calculate scaling factor. 102 | 103 | while (expon >= 50) { scale *= 1E50; expon -= 50; } 104 | while (expon >= 8) { scale *= 1E8; expon -= 8; } 105 | while (expon > 0) { scale *= 10.0; expon -= 1; } 106 | } 107 | 108 | // Return signed and scaled floating point result. 109 | 110 | return sign * (frac ? (value / scale) : (value * scale)); 111 | } 112 | 113 | 114 | using RowMatX3d = Eigen::Matrix; 115 | using RowMatXi = Eigen::Matrix; 116 | 117 | std::pair 118 | loadOBJFast(std::string filename) 119 | { 120 | std::ifstream inF(filename, std::fstream::in | std::fstream::binary); 121 | std::string line; 122 | 123 | std::vector> verts_vec; 124 | std::vector> quads; 125 | std::vector> tris; 126 | int faceDim = 0; 127 | 128 | while (std::getline(inF, line)) { 129 | char* c = &line[0]; 130 | if (c[0] == 'v' && is_white_space(c[1])) { 131 | c += 2; 132 | skip_whitespace(c); 133 | // parse vertex coordinates 134 | double x = fast_atod(c); 135 | skip_whitespace(c); 136 | double y = fast_atod(c); 137 | skip_whitespace(c); 138 | double z = fast_atod(c); 139 | verts_vec.push_back({x, y, z}); 140 | // TODO: check if there is a vertex color, read that 141 | } 142 | else if (c[0] == 'f' && is_white_space(c[1])) { 143 | c += 2; 144 | skip_whitespace(c); 145 | if (faceDim == 0) { 146 | // determine face dimension - quads or triangles? 
147 | auto c_lookahead = c; 148 | for (; is_valid_digit(*c_lookahead); faceDim++) { 149 | skip_until_whitespace(c_lookahead); 150 | skip_whitespace(c_lookahead); 151 | } 152 | } 153 | if (faceDim == 3) { 154 | // parse triangle 155 | std::array tri; 156 | tri[0] = fast_atoi(c) - 1; 157 | skip_until_whitespace(c); skip_whitespace(c); 158 | tri[1] = fast_atoi(c) - 1; 159 | skip_until_whitespace(c); skip_whitespace(c); 160 | tri[2] = fast_atoi(c) - 1; 161 | if (tri[0] < 0 || tri[1] < 0 || tri[2] < 0) { 162 | std::cerr << "negative face index found, ignoring" << std::endl; 163 | } 164 | else { 165 | tris.push_back(tri); 166 | } 167 | } else { 168 | // parse quad 169 | std::array quad; 170 | quad[0] = fast_atoi(c) - 1; 171 | skip_until_whitespace(c); skip_whitespace(c); 172 | quad[1] = fast_atoi(c) - 1; 173 | skip_until_whitespace(c); skip_whitespace(c); 174 | quad[2] = fast_atoi(c) - 1; 175 | skip_until_whitespace(c); skip_whitespace(c); 176 | quad[3] = fast_atoi(c) - 1; 177 | if (quad[0] < 0 || quad[1] < 0 || quad[2] < 0 || quad[3] < 0) { 178 | std::cerr << "negative face index found, ignoring"; 179 | } 180 | else { 181 | quads.push_back(quad); 182 | } 183 | } 184 | } 185 | } 186 | 187 | RowMatX3d verts = Eigen::Map((double*)verts_vec.data(), verts_vec.size(), 3); 188 | RowMatXi faces; 189 | if (faceDim == 4) { 190 | faces = Eigen::Map((int*)quads.data(), quads.size(), 4); 191 | } 192 | else if (faceDim == 3) { 193 | faces = Eigen::Map((int*)tris.data(), tris.size(), 3); 194 | } 195 | return std::make_pair(verts, faces); 196 | } 197 | 198 | 199 | PYBIND11_MODULE(_fastobj_ext, m) { 200 | m.def("load_obj_fast", &loadOBJFast); 201 | } 202 | -------------------------------------------------------------------------------- /src/fastmath.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include 6 | #include 7 | #include 8 | 9 | namespace py = pybind11; 10 | 11 | 12 | template 13 | py::array_t 
inv3(py::array_t & Ts) 14 | { 15 | auto Ts_buf = Ts.request(); 16 | float_t *pT = (float_t*)Ts_buf.ptr; 17 | 18 | auto result = py::array_t(Ts_buf.size); 19 | auto result_buf = result.request(); 20 | float_t *pR = (float_t*)result_buf.ptr; 21 | 22 | for (size_t idx = 0; idx < Ts_buf.shape[0]; idx++) { 23 | const float_t T00 = pT[0], T01 = pT[1], T02 = pT[2]; 24 | const float_t T10 = pT[3], T11 = pT[4], T12 = pT[5]; 25 | const float_t T20 = pT[6], T21 = pT[7], T22 = pT[8]; 26 | const float_t det = T00 * (T22 * T11 - T21 * T12) \ 27 | - T10 * (T22 * T01 - T21 * T02) \ 28 | + T20 * (T12 * T01 - T11 * T02); 29 | const float_t invDet = 1. / det; 30 | pR[0] = (T11 * T22 - T21 * T12) * invDet; 31 | pR[1] = -(T01 * T22 - T02 * T21) * invDet; 32 | pR[2] = (T01 * T12 - T02 * T11) * invDet; 33 | pR[3] = -(T10 * T22 - T12 * T20) * invDet; 34 | pR[4] = (T00 * T22 - T02 * T20) * invDet; 35 | pR[5] = -(T00 * T12 - T10 * T02) * invDet; 36 | pR[6] = (T10 * T21 - T20 * T11) * invDet; 37 | pR[7] = -(T00 * T21 - T20 * T01) * invDet; 38 | pR[8] = (T00 * T11 - T10 * T01) * invDet; 39 | 40 | pT += 3*3; 41 | pR += 3*3; 42 | } 43 | 44 | return result; 45 | } 46 | 47 | template 48 | py::array_t inv2(py::array_t & Ts) 49 | { 50 | auto Ts_buf = Ts.request(); 51 | float_t *pT = (float_t*)Ts_buf.ptr; 52 | 53 | auto result = py::array_t(Ts_buf.size); 54 | auto result_buf = result.request(); 55 | float_t *pR = (float_t*)result_buf.ptr; 56 | 57 | for (size_t idx = 0; idx < Ts_buf.shape[0]; idx++) { 58 | const float_t T00 = pT[0], T01 = pT[1]; 59 | const float_t T10 = pT[2], T11 = pT[3]; 60 | const float_t det = T00 * T11 - T01 * T10; 61 | const float_t invDet = 1. 
/ det; 62 | pR[0] = T11 * invDet; 63 | pR[1] = -1 * T01 * invDet; 64 | pR[2] = -1 * T10 * invDet; 65 | pR[3] = T00 * invDet; 66 | 67 | pT += 2*2; 68 | pR += 2*2; 69 | } 70 | 71 | return result; 72 | } 73 | 74 | template 75 | py::array_t matmat( 76 | py::array_t & a, 77 | py::array_t & b 78 | ) 79 | { 80 | auto a_buf = a.request(); 81 | float_t *p_a = (float_t*)a_buf.ptr; 82 | auto b_buf = b.request(); 83 | float_t *p_b = (float_t*)b_buf.ptr; 84 | 85 | auto result = py::array_t( 86 | {a_buf.shape[0], a_buf.shape[1], b_buf.shape[2]}); 87 | auto result_buf = result.request(); 88 | float_t *p_res = (float_t*)result_buf.ptr; 89 | 90 | const size_t n_rows_a = a_buf.shape[1]; 91 | const size_t n_cols_a = a_buf.shape[2]; 92 | const size_t n_rows_b = b_buf.shape[1]; 93 | const size_t n_cols_b = b_buf.shape[2]; 94 | assert(n_cols_a == n_rows_b); 95 | for (size_t idx = 0; idx < a_buf.shape[0]; idx++) { 96 | for (size_t row_a = 0; row_a < n_rows_a; row_a++) { 97 | for (size_t col_b = 0; col_b < n_cols_b; col_b++) { 98 | float_t sum = 0.0; 99 | for (size_t k = 0; k < n_cols_a; k++) { 100 | const float_t ai = p_a[row_a * n_cols_a + k]; 101 | const float_t bi = p_b[k * n_cols_b + col_b]; 102 | sum += ai * bi; 103 | } 104 | *p_res = sum; 105 | p_res++; 106 | } 107 | } 108 | p_a += n_cols_a * n_rows_a; 109 | p_b += n_cols_b * n_rows_b; 110 | } 111 | 112 | return result; 113 | } 114 | 115 | template 116 | py::array_t matvec( 117 | py::array_t & mats, 118 | py::array_t & vecs 119 | ) 120 | { 121 | auto mats_buf = mats.request(); 122 | float_t *p_mats = (float_t*)mats_buf.ptr; 123 | auto vecs_buf = vecs.request(); 124 | float_t *p_vecs = (float_t*)vecs_buf.ptr; 125 | 126 | auto result = py::array_t({mats_buf.shape[0], mats_buf.shape[1]}); 127 | auto result_buf = result.request(); 128 | float_t *p_res = (float_t*)result_buf.ptr; 129 | 130 | const size_t mat_stride1 = mats_buf.strides[1] / sizeof(float_t); 131 | for (size_t idx = 0; idx < mats_buf.shape[0]; idx++) { 132 | for (size_t 
row = 0; row < mats_buf.shape[1]; row++) { 133 | float_t sum = 0.0; 134 | for (size_t k = 0; k < mats_buf.shape[2]; k++) { 135 | sum += *(p_mats++) * p_vecs[k]; 136 | } 137 | *p_res = sum; 138 | p_res++; 139 | } 140 | p_vecs += vecs_buf.shape[1]; 141 | } 142 | 143 | return result; 144 | } 145 | 146 | template 147 | py::array_t cross3( 148 | py::array_t & a, 149 | py::array_t & b 150 | ) 151 | { 152 | auto a_buf = a.request(); 153 | float_t *p_a = (float_t*)a_buf.ptr; 154 | auto b_buf = b.request(); 155 | float_t *p_b = (float_t*)b_buf.ptr; 156 | 157 | auto result = py::array_t( 158 | {a_buf.shape[0], a_buf.shape[1]}); 159 | auto result_buf = result.request(); 160 | float_t *p_res = (float_t*)result_buf.ptr; 161 | 162 | for (size_t idx = 0; idx < a_buf.shape[0]; idx++) { 163 | const double ax = p_a[0]; 164 | const double ay = p_a[1]; 165 | const double az = p_a[2]; 166 | const double bx = p_b[0]; 167 | const double by = p_b[1]; 168 | const double bz = p_b[2]; 169 | p_res[0] = ay * bz - az * by; 170 | p_res[1] = az * bx - ax * bz; 171 | p_res[2] = ax * by - ay * bx; 172 | p_res += 3; 173 | p_a += 3; 174 | p_b += 3; 175 | } 176 | 177 | return result; 178 | } 179 | 180 | 181 | template 182 | py::array_t multikron( 183 | py::array_t & a, 184 | py::array_t & b 185 | ) 186 | { 187 | auto a_buf = a.request(); 188 | float_t *p_a = (float_t*)a_buf.ptr; 189 | auto b_buf = b.request(); 190 | float_t *p_b = (float_t*)b_buf.ptr; 191 | 192 | const auto n_rows_a = a_buf.shape[1]; 193 | const auto n_cols_a = a_buf.shape[2]; 194 | const auto n_rows_b = b_buf.shape[1]; 195 | const auto n_cols_b = b_buf.shape[2]; 196 | 197 | auto result = py::array_t( 198 | {a_buf.shape[0], n_rows_a * n_rows_b, n_cols_a * n_cols_b}); 199 | auto result_buf = result.request(); 200 | float_t *p_res = (float_t*)result_buf.ptr; 201 | 202 | for (size_t idx = 0; idx < a_buf.shape[0]; idx++) { 203 | // iterate over rows of a 204 | for (size_t row_a = 0; row_a < n_rows_a; row_a++) { 205 | // iterate over rows 
of b 206 | for (size_t row_b = 0; row_b < n_rows_b; row_b++) { 207 | // iterate over columns of a 208 | for (size_t col_a = 0; col_a < n_cols_a; col_a++) { 209 | // iterate over columns of b 210 | for (size_t col_b = 0; col_b < n_cols_b; col_b++) { 211 | const float_t ai = p_a[row_a * n_cols_a + col_a]; 212 | const float_t bi = p_b[row_b * n_cols_b + col_b]; 213 | *p_res = ai * bi; 214 | p_res++; 215 | } 216 | } 217 | } 218 | } 219 | // next matrix 220 | p_a += n_cols_a * n_rows_a; 221 | p_b += n_cols_b * n_rows_b; 222 | } 223 | 224 | return result; 225 | } 226 | 227 | 228 | std::tuple, py::array_t> 229 | polarDecompose(py::array_t Ms) 230 | { 231 | using RowMat3d = Eigen::Matrix; 232 | 233 | auto Ms_raw = Ms.unchecked<3>(); 234 | // allocate output matrices 235 | auto Rs = py::array_t({Ms_raw.shape(0), 3l, 3l}); 236 | auto Rs_raw = Rs.mutable_unchecked<3>(); 237 | auto Ss = py::array_t({Ms_raw.shape(0), 3l, 3l}); 238 | auto Ss_raw = Ss.mutable_unchecked<3>(); 239 | 240 | RowMat3d U; 241 | RowMat3d V; 242 | Eigen::Matrix S; 243 | 244 | for (int i = 0; i < Ms_raw.shape(0); ++i) { 245 | const RowMat3d Mi = Eigen::Map(Ms_raw.data(i, 0, 0)); 246 | Eigen::Map Ri_map(Rs_raw.mutable_data(i, 0, 0)); 247 | Eigen::Map Si_map(Ss_raw.mutable_data(i, 0, 0)); 248 | RowMat3d Ri = Ri_map; // performs copy, since igl::polar_dec does not take Eigen::Map 249 | RowMat3d Si = Si_map; 250 | //igl::polar_dec(Mi, Ri, Si); 251 | igl::polar_svd(Mi, Ri, Si, U, S, V); 252 | // TODO: use igl::polar_svd3x3? 
253 | // write back results 254 | Ri_map = Ri; 255 | Si_map = Si; 256 | } 257 | 258 | return std::make_tuple(Rs, Ss); 259 | } 260 | 261 | 262 | PYBIND11_MODULE(_fastmath_ext, m) { 263 | m.def("inv3", &inv3); 264 | m.def("inv3", &inv3); 265 | m.def("inv2", &inv2); 266 | m.def("inv2", &inv2); 267 | m.def("matmat", &matmat); 268 | m.def("matvec", &matvec); 269 | m.def("cross3", &cross3); 270 | m.def("multikron", &multikron); 271 | m.def("polar_dec", &polarDecompose); 272 | } 273 | -------------------------------------------------------------------------------- /src/igl_ext.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include 8 | #include 9 | 10 | 11 | namespace py = pybind11; 12 | using namespace Eigen; 13 | 14 | PYBIND11_MODULE(_igl_ext, m) { 15 | using namespace pybind11::literals; 16 | 17 | m.def("exact_geodesic", [] 18 | (const MatrixXd& V, 19 | const MatrixXi& F, 20 | const VectorXi& src_ix) 21 | -> VectorXd 22 | { 23 | VectorXi src_face_ix; 24 | VectorXi target_ix = VectorXi::LinSpaced(V.rows(), 0, V.rows()-1); 25 | VectorXi target_face_ix; 26 | MatrixXd dists; 27 | 28 | igl::exact_geodesic(V, F, 29 | src_ix, src_face_ix, 30 | target_ix, target_face_ix, 31 | dists); 32 | 33 | Map dists_flat(dists.data(), dists.size()); 34 | return dists_flat; 35 | }, 36 | py::call_guard(), 37 | py::arg("verts"), py::arg("tris"), py::arg("src_vert_indices") 38 | ); 39 | 40 | m.def("is_border_vertex", [] 41 | (const MatrixXi& F) 42 | -> Matrix 43 | { 44 | std::vector b = igl::is_border_vertex(F); 45 | Matrix bm(b.size()); 46 | for (size_t i=0; i < b.size(); i++) { 47 | bm(i) = b[i]; 48 | } 49 | return bm; 50 | }, 51 | py::call_guard(), 52 | py::arg("tris") 53 | ); 54 | } 55 | 56 | -------------------------------------------------------------------------------- /src/intersections.cpp: -------------------------------------------------------------------------------- 1 | 
#include 2 | #include 3 | #include 4 | #include 5 | #include 6 | 7 | #include 8 | #include 9 | 10 | 11 | namespace py = pybind11; 12 | using namespace Eigen; 13 | 14 | typedef Eigen::Vector3d Vec3d; 15 | typedef Eigen::Vector2d Vec2d; 16 | 17 | 18 | std::tuple, py::array_t, py::array_t, py::array_t> 19 | rayMeshIntersect( 20 | py::array_t verts_in, 21 | py::array_t tris_in, 22 | py::array_t ray_pts_in, 23 | py::array_t ray_dirs_in, 24 | double max_distance, 25 | double max_angle, 26 | bool allow_backface_hit) 27 | { 28 | auto verts = verts_in.unchecked<2>(); 29 | auto tris = tris_in.unchecked<2>(); 30 | auto rayPts = ray_pts_in.unchecked<2>(); 31 | auto rayDirs = ray_dirs_in.unchecked<2>(); 32 | 33 | std::vector hitTris; 34 | std::vector hitUVs; 35 | std::vector hitPoints; 36 | std::vector hitRayIndices; 37 | 38 | double maxAngleCos = std::cos(max_angle); 39 | for(int iray = 0; iray < rayPts.shape(0); iray++) { 40 | Vec3d rayPos(rayPts(iray, 0), rayPts(iray, 1), rayPts(iray, 2)); 41 | Vec3d rayDir(rayDirs(iray, 0), rayDirs(iray, 1), rayDirs(iray, 2)); 42 | Vec3d rayDirNorm = rayDir.normalized(); 43 | Vec3d bestHit; 44 | float bestU, bestV, bestAngleCos; 45 | int bestTri = -1; 46 | float best_t = std::numeric_limits::infinity(); 47 | for(int itri = 0; itri < tris.shape(0); itri++) { 48 | // put data into Vec3d for easy dot/cross operations 49 | auto i1 = tris(itri, 0); auto i2 = tris(itri, 1); auto i3 = tris(itri, 2); 50 | Vec3d v1(verts(i1, 0), verts(i1, 1), verts(i1, 2)); 51 | Vec3d v2(verts(i2, 0), verts(i2, 1), verts(i2, 2)); 52 | Vec3d v3(verts(i3, 0), verts(i3, 1), verts(i3, 2)); 53 | // perform ray-triangle hit test 54 | Vec3d edge1 = v2 - v1; 55 | Vec3d edge2 = v3 - v1; 56 | Vec3d pvec = rayDir.cross(edge2); 57 | float det = edge1.dot(pvec); 58 | if(std::abs(det) < std::numeric_limits::epsilon()) { 59 | continue; 60 | } 61 | float invDet = 1.0 / det; 62 | Vec3d tvec = rayPos - v1; 63 | float u = tvec.dot(pvec) * invDet; 64 | if(u < 0.0 || u > 1.0) { 65 | 
continue; 66 | } 67 | Vec3d qvec = tvec.cross(edge1); 68 | float v = rayDir.dot(qvec) * invDet; 69 | if(v < 0.0 || u + v > 1.0) { 70 | continue; 71 | } 72 | float t = edge2.dot(qvec) * invDet; 73 | Vec3d hitPoint = v1 + edge1*u + edge2*v; 74 | // check if nearest hit and if it is valid 75 | if(fabs(t) < fabs(best_t) && (hitPoint - rayPos).norm() < max_distance) { 76 | bestHit = hitPoint; 77 | bestU = u; bestV = v; 78 | bestTri = itri; 79 | best_t = t; 80 | Vec3d normal = edge1.normalized().cross(edge2.normalized()).normalized(); 81 | if (allow_backface_hit) { 82 | bestAngleCos = fmax(rayDirNorm.dot(normal), rayDirNorm.dot(normal * -1.f)); 83 | } 84 | else { 85 | bestAngleCos = rayDirNorm.dot(normal); 86 | } 87 | } 88 | } 89 | if(bestTri > -1) { 90 | // check ray-normal angle 91 | if(bestAngleCos < maxAngleCos) { 92 | continue; 93 | } 94 | hitUVs.emplace_back(bestU, bestV); 95 | hitPoints.push_back(bestHit); 96 | hitRayIndices.push_back(iray); 97 | hitTris.push_back(bestTri); 98 | } 99 | } 100 | 101 | return std::make_tuple( 102 | py::array_t(hitTris.size(), hitTris.data()), 103 | py::array_t({(unsigned long)hitUVs.size(), 2ul}, (double*)hitUVs.data()), 104 | py::array_t(hitRayIndices.size(), hitRayIndices.data()), 105 | py::array_t({(unsigned long)hitPoints.size(), 3ul}, (double*)hitPoints.data()) 106 | ); 107 | } 108 | 109 | std::tuple 110 | rayMeshIntersectFast( 111 | const MatrixXd& verts, 112 | const MatrixXi& tris, 113 | const MatrixXd& ray_pts, 114 | const MatrixXd& ray_dirs) 115 | { 116 | igl::AABB tree; 117 | tree.init(verts, tris); 118 | 119 | std::vector> hits; 120 | for (int i = 0; i < ray_pts.rows(); i++) { 121 | igl::Hit hit; 122 | if (tree.intersect_ray(verts, tris, ray_pts.row(i), ray_dirs.row(i), hit)) { 123 | hits.emplace_back(hit, i); 124 | } 125 | } 126 | 127 | VectorXi tri_ixs(hits.size()); 128 | MatrixX2d barys(hits.size(), 2); 129 | VectorXi ray_ixs(hits.size()); 130 | 131 | for(int i = 0; i < hits.size(); i++) { 132 | ray_ixs(i) = 
hits[i].second; 133 | const igl::Hit & hit = hits[i].first; 134 | tri_ixs(i) = hit.id; 135 | barys.row(i) = Vec2d((double)hit.u, (double)hit.v); 136 | } 137 | 138 | return std::make_tuple(tri_ixs, barys, ray_ixs); 139 | } 140 | 141 | std::tuple 142 | closestPointOnMesh(const MatrixXd& P, const MatrixXd& V, const MatrixXi& tris) 143 | { 144 | VectorXd sq_dists; 145 | MatrixX3d hit_pts; 146 | VectorXi tri_ixs; 147 | igl::point_mesh_squared_distance(P, V, tris, sq_dists, tri_ixs, hit_pts); 148 | 149 | // determine uv coordinates of those closest hits 150 | MatrixX2d hit_uv(hit_pts.rows(), 2); 151 | for (int i = 0; i < P.rows(); ++i) { 152 | Vector3d p = hit_pts.row(i); 153 | Vector3i tri = tris.row(tri_ixs(i)); 154 | // setup local coordinate frame 155 | Vector3d e10 = V.row(tri(1)) - V.row(tri(0)); 156 | Vector3d e20 = V.row(tri(2)) - V.row(tri(0)); 157 | Vector3d n = e10.cross(e20); // normal 158 | Matrix3d F; 159 | F << e10, e20, n; 160 | // invert local coordinate frame to get 161 | // frame-local coordinates 162 | Vector3d v0 = hit_pts.row(i) - V.row(tri(0)); 163 | Vector3d uvw = F.inverse() * v0; 164 | hit_uv(i, 0) = uvw(0); 165 | hit_uv(i, 1) = uvw(1); 166 | } 167 | 168 | return std::make_tuple(sq_dists, tri_ixs, hit_pts, hit_uv); 169 | }; 170 | 171 | PYBIND11_PLUGIN(_intersections_ext) { 172 | using namespace pybind11::literals; 173 | 174 | py::module m("_intersections_ext"); 175 | m.def("ray_mesh_intersect", &rayMeshIntersect, 176 | "verts"_a, "tris"_a, "ray_pts"_a, "ray_dirs"_a, 177 | "max_distance"_a = std::numeric_limits::infinity(), 178 | "max_angle"_a = std::numeric_limits::infinity(), 179 | "allow_backface_hit"_a = true); 180 | 181 | m.def("ray_mesh_intersect_fast", &rayMeshIntersectFast, 182 | "verts"_a, "tris"_a, "ray_pts"_a, "ray_dirs"_a); 183 | 184 | m.def("closest_points_on_mesh", &closestPointOnMesh, 185 | "points"_a, "vertices"_a, "triangles"_a); 186 | 187 | return m.ptr(); 188 | } 189 | 190 | 
-------------------------------------------------------------------------------- /tests/test_array_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import numpy.testing as npt 3 | from cgtools.array_utils import concatenate_fill 4 | 5 | 6 | def test_concatenate_fill_axis0(): 7 | a = np.random.random((10, 4, 5)) 8 | b = np.random.random((5, 2, 5)) 9 | c = np.random.random((5, 1, 4)) 10 | r = concatenate_fill((a, b, c), axis=0) 11 | npt.assert_equal(r.shape, (20, 4, 5)) 12 | npt.assert_array_equal(r[ 0:10, :4, :5], a) 13 | npt.assert_array_equal(r[ 0:10, 4:, 5:], np.nan) 14 | npt.assert_array_equal(r[10:15, :2, :5], b) 15 | npt.assert_array_equal(r[10:15, 2:, 5:], np.nan) 16 | npt.assert_array_equal(r[15:20, :1, :4], c) 17 | npt.assert_array_equal(r[15:20, 1:, 4:], np.nan) 18 | 19 | def test_concatenate_fill_axis1(): 20 | a = np.random.random((10, 4, 5)) 21 | b = np.random.random((8, 2, 3)) 22 | r = concatenate_fill((a, b), axis=1) 23 | npt.assert_equal(r.shape, (10, 6, 5)) 24 | sa = np.s_[ : , :4, :5] 25 | sb = np.s_[ 0:8, 4:, :3] 26 | npt.assert_array_equal(r[sa], a) 27 | npt.assert_array_equal(r[sb], b) 28 | r[sa] = np.nan 29 | r[sb] = np.nan 30 | npt.assert_array_equal(r, np.nan) 31 | 32 | def test_concatenate_fill_one_array(): 33 | a = np.random.random((10, 4, 5)) 34 | r = concatenate_fill((a, ), axis=1) 35 | npt.assert_array_equal(r, a) 36 | 37 | -------------------------------------------------------------------------------- /tests/test_circular.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import numpy.testing as npt 3 | from cgtools.circular import wrapped_distance 4 | 5 | def test_wrapped_distance(): 6 | npt.assert_allclose( 7 | wrapped_distance([0.1, 0.9, 0.2, 0.0, 0.5, 0.2, 0.25, 0.3], 8 | [0.9, 0.1, 0.3, 0.8, 0.5, 0.8, 0.75, 0.7], max_value=1), 9 | [0.2, 0.2, 0.1, 0.2, 0.0, 0.4, 0.5 , 0.4]) 10 | 
-------------------------------------------------------------------------------- /tests/test_fastmath.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from cgtools.fastmath import matmat, matvec, inv2, inv3, cross3, multikron 3 | 4 | 5 | def test_matmat_stride0(): 6 | a = np.random.random((10, 3, 5)) 7 | b = np.random.random((5, 3)) 8 | r1 = matmat(a, b) 9 | r2 = np.array([np.dot(ai, b) for ai in a]) 10 | np.testing.assert_allclose(r1, r2) 11 | a = np.random.random((3, 5)) 12 | b = np.random.random((12, 5, 3)) 13 | r1 = matmat(a, b) 14 | r2 = np.array([np.dot(a, bi) for bi in b]) 15 | np.testing.assert_allclose(r1, r2) 16 | 17 | def test_matmat_eqshape(): 18 | a = np.random.random((31, 4, 4)) 19 | b = np.random.random((31, 4, 4)) 20 | r1 = matmat(a, b) 21 | r2 = np.array(list(map(np.dot, a, b))) 22 | np.testing.assert_allclose(r1, r2) 23 | 24 | def test_matmat_ndim(): 25 | a = np.random.random((10, 11, 2, 4)) 26 | b = np.random.random((10, 11, 4, 3)) 27 | r1 = matmat(a, b) 28 | r2 = np.array(list(map(np.dot, a.reshape(-1, 2, 4), b.reshape(-1, 4, 3)))).reshape(10, 11, 2, 3) 29 | np.testing.assert_allclose(r1, r2) 30 | 31 | def test_matmat_broadcast(): 32 | a = np.random.random((10, 11, 2, 4)) 33 | b = np.random.random((11, 4, 3)) 34 | r1 = matmat(a, b[np.newaxis]) 35 | r2 = np.array([ 36 | [np.dot(ai, bi) for ai, bi in zip(aj, b)] 37 | for aj in a 38 | ]) 39 | np.testing.assert_allclose(r1, r2) 40 | 41 | a = np.random.random((7, 6, 3)) 42 | b = np.random.random((5, 3, 2)) 43 | r1 = matmat(a[:, np.newaxis], b[np.newaxis, :]) 44 | r2 = np.array([ 45 | [np.dot(ai, bi) for bi in b] 46 | for ai in a 47 | ]) 48 | assert r1.shape == r2.shape 49 | np.testing.assert_allclose(r1, r2) 50 | 51 | def test_matmat_noncontiguous(): 52 | # a non-contiguous 53 | a = np.random.random((10, 3, 3)).swapaxes(1, 2) 54 | b = np.random.random((10, 3, 3)) 55 | assert not a.flags.contiguous 56 | r1 = matmat(a, b) 57 | r2 = 
np.array(list(map(np.dot, a, b))) 58 | np.testing.assert_allclose(r1, r2) 59 | # b non-contiguous 60 | a = np.random.random((10, 3, 4)) 61 | b = np.random.random((10, 3, 4)).swapaxes(1, 2) 62 | assert not b.flags.contiguous 63 | r1 = matmat(a, b) 64 | r2 = np.array(list(map(np.dot, a, b))) 65 | np.testing.assert_allclose(r1, r2) 66 | # both non-contiguous 67 | a = np.random.random((10, 3, 5)).swapaxes(1, 2) 68 | b = np.random.random((10, 6, 3)).swapaxes(1, 2) 69 | assert not a.flags.contiguous 70 | assert not b.flags.contiguous 71 | r1 = matmat(a, b) 72 | r2 = np.array(list(map(np.dot, a, b))) 73 | np.testing.assert_allclose(r1, r2) 74 | 75 | def test_matvec_stride0(): 76 | a = np.random.random((10, 3, 5)) 77 | b = np.random.random(5) 78 | r1 = matvec(a, b) 79 | r2 = np.array([np.dot(ai, b) for ai in a]) 80 | np.testing.assert_allclose(r1, r2) 81 | a = np.random.random((3, 5)) 82 | b = np.random.random((12, 5)) 83 | r1 = matvec(a, b) 84 | r2 = np.array([np.dot(a, bi) for bi in b]) 85 | np.testing.assert_allclose(r1, r2) 86 | 87 | def test_matvec_eqshape(): 88 | a = np.random.random((31, 5, 5)) 89 | b = np.random.random((31, 5)) 90 | r1 = matvec(a, b) 91 | r2 = np.array(list(map(np.dot, a, b))) 92 | np.testing.assert_allclose(r1, r2) 93 | 94 | def test_matvec_ndim(): 95 | a = np.random.random((10, 31, 4, 5)) 96 | b = np.random.random((10, 31, 5)) 97 | r1 = matvec(a, b) 98 | r2 = np.array(list(map(np.dot, a.reshape(-1, 4, 5), b.reshape(-1, 5)))).reshape(10, 31, 4) 99 | np.testing.assert_allclose(r1, r2) 100 | 101 | def test_matvec_broadcast(): 102 | a = np.random.random((10, 31, 4, 5)) 103 | b = np.random.random((31, 5)) 104 | r1 = matvec(a, b[np.newaxis]) 105 | r2 = np.array([ 106 | [np.dot(ai, bi) for ai, bi in zip(aj, b)] 107 | for aj in a 108 | ]) 109 | np.testing.assert_allclose(r1, r2) 110 | 111 | a = np.random.random((7, 6, 3)) 112 | b = np.random.random((6, 3)) 113 | r1 = matvec(a[:, np.newaxis], b[np.newaxis, :]) 114 | r2 = np.array([ 115 | [np.dot(ai, bi) 
for bi in b] 116 | for ai in a 117 | ]) 118 | assert r1.shape == r2.shape 119 | np.testing.assert_allclose(r1, r2) 120 | 121 | def test_matvec_single(): 122 | a = np.random.random((3, 3)) 123 | b = np.random.random(3) 124 | r1 = matvec(a, b) 125 | r2 = np.dot(a, b) 126 | assert r1.shape == r2.shape 127 | np.testing.assert_allclose(r1, r2) 128 | 129 | def test_matmat_single(): 130 | a = np.random.random((3, 5)) 131 | b = np.random.random((5, 3)) 132 | r1 = matmat(a, b) 133 | r2 = np.dot(a, b) 134 | assert r1.shape == r2.shape 135 | assert r1.dtype == r2.dtype 136 | np.testing.assert_allclose(r1, r2) 137 | 138 | def test_inv3(): 139 | T = np.random.random((3, 3)) 140 | np.testing.assert_allclose(np.linalg.inv(T), inv3(T)) 141 | 142 | def test_inv3_multiple(): 143 | Ts = np.random.random((154, 7, 3, 3)) 144 | Tinv_np = np.array(list(map(np.linalg.inv, Ts.reshape((-1, 3, 3))))).reshape(Ts.shape) 145 | Tinv_blitz = inv3(Ts) 146 | np.set_printoptions(suppress=True) 147 | np.testing.assert_allclose(Tinv_np, Tinv_blitz) 148 | 149 | def test_inv3_float32(): 150 | np.random.seed(42) 151 | Ts = np.random.random((1000, 3, 3)).astype(np.float32) 152 | Tinv_np = np.array(list(map(np.linalg.inv, Ts.reshape((-1, 3, 3))))).reshape(Ts.shape) 153 | Tinv_blitz = inv3(Ts) 154 | assert Tinv_blitz.dtype == np.float32 155 | np.set_printoptions(suppress=True) 156 | np.testing.assert_allclose(Tinv_np, Tinv_blitz, rtol=1.e-3) 157 | 158 | def test_inv2(): 159 | T = np.random.random((2, 2)) 160 | np.testing.assert_allclose(np.linalg.inv(T), inv2(T)) 161 | 162 | def test_inv2_multiple(): 163 | Ts = np.random.random((154, 7, 2, 2)) 164 | Tinv_np = np.array(list(map(np.linalg.inv, Ts.reshape((-1, 2, 2))))).reshape(Ts.shape) 165 | Tinv_blitz = inv2(Ts) 166 | np.set_printoptions(suppress=True) 167 | np.testing.assert_allclose(Tinv_np, Tinv_blitz) 168 | 169 | def test_inv2_float32(): 170 | np.random.seed(42) 171 | Ts = np.random.random((1000, 2, 2)).astype(np.float32) 172 | Tinv_np = 
np.array(list(map(np.linalg.inv, Ts))).reshape(Ts.shape) 173 | Tinv_blitz = inv2(Ts) 174 | np.testing.assert_allclose(Tinv_np, Tinv_blitz, rtol=1.e-3) 175 | 176 | def test_cross3(): 177 | a = np.random.random((1000, 3)) 178 | b = np.random.random((1000, 3)) 179 | c_numpy = np.cross(a, b) 180 | c_fast = cross3(a, b) 181 | np.testing.assert_allclose(c_numpy, c_fast) 182 | 183 | def test_multikron_eqshape(): 184 | a = np.random.random((31, 4, 4)) 185 | b = np.random.random((31, 4, 4)) 186 | r1 = multikron(a, b) 187 | r2 = np.array(list(map(np.kron, a, b))) 188 | np.testing.assert_allclose(r1, r2) 189 | 190 | def test_multikron_eqshape(): 191 | a = np.random.random((31, 4, 4)) 192 | b = np.random.random((31, 4, 4)) 193 | r1 = multikron(a, b) 194 | r2 = np.array(list(map(np.kron, a, b))) 195 | np.testing.assert_allclose(r1, r2) 196 | 197 | def test_multikron_ndim(): 198 | a = np.random.random((10, 11, 2, 4)) 199 | b = np.random.random((10, 11, 4, 3)) 200 | r1 = multikron(a, b) 201 | r2 = np.array(list(map(np.kron, a.reshape(-1, 2, 4), b.reshape(-1, 4, 3)))).reshape(10, 11, 2*4, 4*3) 202 | np.testing.assert_allclose(r1, r2) 203 | 204 | def test_multikron_single(): 205 | a = np.random.random((2, 3, 5)) 206 | b = np.random.random((4, 8)) 207 | r1 = multikron(a, b) 208 | r2 = np.array([np.kron(ai, b) for ai in a]) 209 | assert r1.shape == r2.shape 210 | np.testing.assert_allclose(r1, r2) 211 | 212 | a = np.random.random((6, 3)) 213 | b = np.random.random((5, 2, 9)) 214 | r1 = multikron(a, b) 215 | r2 = np.array([np.kron(a, bi) for bi in b]) 216 | assert r1.shape == r2.shape 217 | np.testing.assert_allclose(r1, r2) 218 | 219 | def test_multikron_noncontiguous(): 220 | # a non-contiguous 221 | a = np.random.random((10, 3, 3)).swapaxes(1, 2) 222 | b = np.random.random((10, 3, 3)) 223 | assert not a.flags.contiguous 224 | r1 = multikron(a, b) 225 | r2 = np.array(list(map(np.kron, a, b))) 226 | np.testing.assert_allclose(r1, r2) 227 | # b non-contiguous 228 | a = 
np.random.random((10, 3, 4)) 229 | b = np.random.random((10, 3, 4)).swapaxes(1, 2) 230 | assert not b.flags.contiguous 231 | r1 = multikron(a, b) 232 | r2 = np.array(list(map(np.kron, a, b))) 233 | np.testing.assert_allclose(r1, r2) 234 | # both non-contiguous 235 | a = np.random.random((10, 3, 5)).swapaxes(1, 2) 236 | b = np.random.random((10, 6, 3)).swapaxes(1, 2) 237 | assert not a.flags.contiguous 238 | assert not b.flags.contiguous 239 | r1 = multikron(a, b) 240 | r2 = np.array(list(map(np.kron, a, b))) 241 | np.testing.assert_allclose(r1, r2) 242 | 243 | def test_multikron_broadcast(): 244 | a = np.random.random((10, 11, 2, 4)) 245 | b = np.random.random((11, 4, 3)) 246 | r1 = multikron(a, b[np.newaxis]) 247 | r2 = np.array([ 248 | [np.kron(ai, bi) for ai, bi in zip(aj, b)] 249 | for aj in a 250 | ]) 251 | np.testing.assert_allclose(r1, r2) 252 | 253 | a = np.random.random((7, 6, 3)) 254 | b = np.random.random((5, 2, 9)) 255 | r1 = multikron(a[:, np.newaxis], b[np.newaxis, :]) 256 | r2 = np.array([ 257 | [np.kron(ai, bi) for bi in b] 258 | for ai in a 259 | ]) 260 | assert r1.shape == r2.shape 261 | np.testing.assert_allclose(r1, r2) -------------------------------------------------------------------------------- /tests/test_indexing.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import numpy.testing as npt 3 | from cgtools.indexing import valid_indices 4 | 5 | 6 | def test_valid_indices_randomized_tests(): 7 | for axes in range(1, 10): 8 | shape = np.random.randint(1, 10, axes) 9 | a = np.empty(shape) 10 | indices = np.random.randint(-5, 15, (500, axes)) 11 | indices_valid = valid_indices(indices, shape) 12 | raised = False 13 | print("--") 14 | print(indices.shape) 15 | print(indices_valid.shape) 16 | print(indices_valid) 17 | try: 18 | a[tuple(zip(*indices_valid))] 19 | except IndexError: 20 | raised = True 21 | assert not raised 22 | 23 | def test_valid_indices_special_input(): 24 | # should 
also work on 1d input 25 | npt.assert_array_equal(valid_indices([-1, 1, 2, 3], (2,)), [1]) 26 | npt.assert_array_equal(valid_indices([-1, 1, 2, 3], 2), [1]) 27 | # should work on floats 28 | npt.assert_array_equal(valid_indices([-1., 1.3, 0.6, 2.2, 3.5], (2,)), [1, 1]) 29 | # should work on empty arrays 30 | npt.assert_array_equal(valid_indices([], (2, 3)), []) 31 | 32 | -------------------------------------------------------------------------------- /tests/test_intersections.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import numpy.testing as npt 3 | from cgtools.mesh.intersections import ray_mesh_intersect, closest_points_on_mesh 4 | 5 | 6 | def test_ray_mesh_intersect(): 7 | verts = np.array([[5,5,5],[10,15,4],[15,5,3]], np.float) 8 | hit_tri, hit_uv, ray_ix, hit_pt = ray_mesh_intersect( 9 | verts, 10 | np.array([[0, 1, 2]], np.int32), 11 | np.array([[9,5,-5]], np.float), 12 | np.array([[0.1,0.1,0.8]], np.float), 13 | ) 14 | p_true = np.array([10.121951219512194, 6.121951219512195, 3.97560975609756]) 15 | npt.assert_equal(hit_tri, [0]) 16 | npt.assert_equal(ray_ix, [0]) 17 | npt.assert_allclose(hit_pt, p_true[np.newaxis, :]) 18 | u = hit_uv[0, 0] 19 | v = hit_uv[0, 1] 20 | e1 = verts[1] - verts[0] 21 | e2 = verts[2] - verts[0] 22 | npt.assert_allclose(verts[0] + u*e1 + v*e2, p_true) 23 | 24 | 25 | def test_closest_point_on_mesh(): 26 | verts = np.array([[5,5,5],[10,15,4],[15,5,3]], np.float) 27 | pts = np.array([[9,5,-5]], np.float) 28 | sq_dist, hit_tri, hit_xyz, hit_uv = closest_points_on_mesh( 29 | pts, verts, np.array([[0, 1, 2]], np.int32), 30 | ) 31 | npt.assert_equal(hit_tri, [0]) 32 | 33 | # brute force compute closest hit and compare 34 | u, v = list(map(np.ravel, np.mgrid[:1:1000j, :1:1000j])) 35 | e1 = verts[1] - verts[0] 36 | e2 = verts[2] - verts[0] 37 | p = verts[np.newaxis, 0] + u[:, np.newaxis] * e1[np.newaxis] + v[:, np.newaxis] * e2[np.newaxis] 38 | sq_dist_brute = ((p - 
pts[0][np.newaxis])**2).sum(axis=1) 39 | imin = sq_dist_brute.argmin() 40 | npt.assert_almost_equal(sq_dist_brute[imin], sq_dist[0], decimal=3) 41 | npt.assert_almost_equal(u[imin], hit_uv[0, 0], decimal=3) 42 | npt.assert_almost_equal(v[imin], hit_uv[0, 1], decimal=3) 43 | npt.assert_equal(hit_tri, [0]) 44 | 45 | 46 | -------------------------------------------------------------------------------- /tests/test_procrustes.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from cgtools import vector as V 3 | from cgtools.procrustes import procrustes3d 4 | 5 | 6 | def test_procrustes3d(): 7 | # test with 100 random rotations/translations 8 | trials = 0 9 | while trials < 100: 10 | R = np.linalg.qr(np.random.uniform(-1, 1, size=(3,3)))[0] 11 | print((np.linalg.det(R))) 12 | if np.linalg.det(R) < 0: 13 | continue 14 | t = np.random.uniform(-2, 2, size=3) 15 | M = np.eye(4) 16 | M[:3,:3] = R 17 | M[:3, 3] = t 18 | N = np.random.randint(3, 1000) 19 | frompts = np.random.random((N, 3)) 20 | topts = V.transform(frompts, M) 21 | M_est = procrustes3d(frompts, topts) 22 | np.testing.assert_allclose(M, M_est) 23 | np.testing.assert_allclose(V.transform(frompts, M_est), topts) 24 | R = M_est[:3, :3] 25 | trials += 1 26 | 27 | def test_procrustes3d_reflection(): 28 | for axis in [0, 1, 2]: 29 | N = 100 30 | pts1 = np.random.random((N, 3)) 31 | S = np.eye(3) 32 | S[axis, axis] = -1 33 | pts2 = V.transform(pts1, S) 34 | M = procrustes3d(pts1, pts2) 35 | R = M[:3, :3] 36 | np.testing.assert_allclose(np.linalg.det(R), 1) 37 | assert not np.allclose(R, S) 38 | assert V.veclen(V.transform(pts1, M) - pts2).sum() > 0 39 | 40 | M2 = procrustes3d(pts1, pts2, allow_reflection=True) 41 | np.testing.assert_allclose(M2[:3, :3], S, atol=1.e-9) 42 | np.testing.assert_allclose(V.transform(pts1, M2), pts2) 43 | 44 | -------------------------------------------------------------------------------- /tests/test_vector.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | import numpy.testing as npt 3 | from cgtools import vector as V 4 | 5 | 6 | def test_inv_3x4(): 7 | Xs = np.random.random((1000, 3, 4)) 8 | Xs_inv_np = np.array([np.linalg.inv(V.to_4x4(Xi)) for Xi in Xs]) 9 | Xs_inv_ours = V.inv_3x4(Xs) 10 | assert np.allclose(Xs_inv_np[:, 3, :], np.array([0, 0, 0, 1])[np.newaxis]) 11 | npt.assert_allclose(Xs_inv_ours, Xs_inv_np[:, :3, :]) 12 | 13 | 14 | def test_transform_many_matrices_many_vectors(): 15 | for dim in [2, 3, 4]: 16 | # no translation: 17 | vectors = np.random.random((1000, dim)) 18 | xforms = np.random.random(((1000, dim, dim))) 19 | reference = np.array([ 20 | M.dot(v) 21 | for v, M in zip(vectors, xforms) 22 | ]) 23 | result = V.transform(vectors, xforms) 24 | assert result.shape == vectors.shape 25 | npt.assert_allclose(reference, result) 26 | 27 | # with translation, no perspective (e.g. the common 3x4 matrices) 28 | vectors = np.random.random((1000, dim)) 29 | xforms = np.random.random(((1000, dim, dim + 1))) 30 | reference = np.array([ 31 | M.dot(V.hom(v)) 32 | for v, M in zip(vectors, xforms) 33 | ]) 34 | result = V.transform(vectors, xforms) 35 | assert result.shape == vectors.shape 36 | npt.assert_allclose(reference, result) 37 | 38 | # with translation, no perspective 39 | vectors = np.random.random((1000, dim)) 40 | xforms = np.random.random(((1000, dim + 1, dim + 1))) 41 | reference = np.array([ 42 | V.dehom(M.dot(V.hom(v))) 43 | for v, M in zip(vectors, xforms) 44 | ]) 45 | result = V.transform(vectors, xforms) 46 | assert result.shape == vectors.shape 47 | npt.assert_allclose(reference, result) 48 | 49 | def test_transform_one_matrix_many_vectors(): 50 | for dim in [2, 3, 4]: 51 | # no translation: 52 | vectors = np.random.random((1000, dim)) 53 | M = np.random.random(((dim, dim))) 54 | reference = np.array([M.dot(v) for v in vectors]) 55 | result = V.transform(vectors, M) 56 | assert 
result.shape == vectors.shape 57 | npt.assert_allclose(reference, result) 58 | 59 | # with translation, no perspective (e.g. the common 3x4 matrices) 60 | vectors = np.random.random((1000, dim)) 61 | M = np.random.random(((dim, dim + 1))) 62 | reference = np.array([M.dot(V.hom(v)) for v in vectors]) 63 | result = V.transform(vectors, M) 64 | assert result.shape == vectors.shape 65 | npt.assert_allclose(reference, result) 66 | 67 | # with translation, no perspective 68 | vectors = np.random.random((1000, dim)) 69 | M = np.random.random(((dim + 1, dim + 1))) 70 | reference = np.array([V.dehom(M.dot(V.hom(v))) for v in vectors]) 71 | result = V.transform(vectors, M) 72 | assert result.shape == vectors.shape 73 | npt.assert_allclose(reference, result) 74 | 75 | def test_transform_many_matrices_one_vector(): 76 | for dim in [2, 3, 4]: 77 | # no translation: 78 | v = np.random.random((dim)) 79 | xforms = np.random.random(((1000, dim, dim))) 80 | reference = np.array([ M.dot(v) for M in xforms ]) 81 | result = V.transform(v, xforms) 82 | assert result.shape == (1000, dim) 83 | npt.assert_allclose(reference, result) 84 | 85 | # with translation, no perspective (e.g. 
the common 3x4 matrices) 86 | v = np.random.random((dim)) 87 | xforms = np.random.random(((1000, dim, dim + 1))) 88 | reference = np.array([M.dot(V.hom(v)) for M in xforms ]) 89 | result = V.transform(v, xforms) 90 | assert result.shape == (1000, dim) 91 | npt.assert_allclose(reference, result) 92 | 93 | # with translation, no perspective 94 | v = np.random.random((dim)) 95 | xforms = np.random.random(((1000, dim + 1, dim + 1))) 96 | reference = np.array([ V.dehom(M.dot(V.hom(v))) for M in xforms ]) 97 | result = V.transform(v, xforms) 98 | assert result.shape == (1000, dim) 99 | npt.assert_allclose(reference, result) 100 | 101 | def test_transform_one_matrices_one_vector(): 102 | for dim in [2, 3, 4]: 103 | # no translation: 104 | v = np.random.random((dim)) 105 | M = np.random.random(((dim, dim))) 106 | reference = M.dot(v) 107 | result = V.transform(v, M) 108 | assert result.shape == v.shape 109 | npt.assert_allclose(reference, result) 110 | 111 | # with translation, no perspective (e.g. 
the common 3x4 matrices) 112 | v = np.random.random((dim)) 113 | M = np.random.random(((dim, dim + 1))) 114 | reference = M.dot(V.hom(v)) 115 | result = V.transform(v, M) 116 | assert result.shape == v.shape 117 | npt.assert_allclose(reference, result) 118 | 119 | # with translation, no perspective 120 | v = np.random.random((dim)) 121 | M = np.random.random(((dim + 1, dim + 1))) 122 | reference = V.dehom(M.dot(V.hom(v))) 123 | result = V.transform(v, M) 124 | assert result.shape == v.shape 125 | npt.assert_allclose(reference, result) -------------------------------------------------------------------------------- /tests/test_vis.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import numpy.testing as npt 3 | from cgtools.vis.mesh import compute_normals 4 | 5 | def test_compute_normals(): 6 | pts = np.array([(1, 0, 0), (0, 1, 0), (0, 0, 0)], np.float) 7 | tris = np.array([(0, 1, 2)], np.int) 8 | normals = compute_normals(pts, tris) 9 | npt.assert_allclose(normals, [(0, 0, 1), (0, 0, 1), (0, 0, 1)]) 10 | 11 | --------------------------------------------------------------------------------