├── .gitignore
├── LICENSE
├── README.md
├── assets
│   └── img
│       └── gradient.png
├── examples
│   ├── demo.py
│   ├── hello_world.py
│   └── spot.obj
├── pcdiff
│   ├── __init__.py
│   ├── grad_div_mls.py
│   ├── operators.py
│   └── utils.py
├── pyproject.toml
├── setup.py
└── test
    ├── test_grad_div_mls.py
    ├── test_operators.py
    └── test_utils.py
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Package specific
*/data/*
*/logs/*
*/output/*
*/runs/*
.vscode
docker_build.sh
docker_install.sh
wheelhouse

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so
*.ini

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2022 rubenwiersma

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# `pcdiff`: Differential operators on point clouds
Simple and small library to compute differential operators (gradient, divergence, Laplacian) on point clouds.

![Example gradient of x-coordinate](assets/img/gradient.png)

_Visualization in [Polyscope](https://polyscope.run) of the output of the gradient operator on the x-coordinate of Spot (by Keenan Crane)._

## Installation
The recommended installation method is with pip:
```bash
pip install pcdiff
```

## Example usage
See [examples/demo.py](examples/demo.py) for a full visual demo.
For a quick start:
```python
import numpy as np
from pcdiff import knn_graph, estimate_basis, build_grad_div

# Random point cloud
pos = np.random.rand(1000, 3)

# Generate kNN graph
edge_index = knn_graph(pos, 20)
# Estimate normals and local frames
basis = estimate_basis(pos, edge_index)
# Build gradient and divergence operators (SciPy sparse matrices)
grad, div = build_grad_div(pos, *basis, edge_index)

# Set up the Laplacian as the divergence of the gradient:
laplacian = -(div @ grad)

# Define some function on the point cloud
x = np.random.rand(1000, 1)

# Compute gradient of function
# The output is of size 2N, with the two components of the vector field interleaved:
# [x_1, y_1, x_2, y_2, ..., x_N, y_N]
grad_x = grad @ x
```

For the sake of simplicity, every operation is written in NumPy and could be accelerated with Numba or Jax. If you would like to use these operators in PyTorch, please refer to the GitHub repository for [DeltaConv](https://github.com/rubenwiersma/deltaconv). The operators are accessible from `deltaconv.geometry`.

## How does it work?
We use a moving-least-squares approach. TL;DR: we fit a small patch of surface to each point's neighborhood and compute gradient and divergence on this patch of surface. A more detailed procedure is described in [the paper where we used this technique](https://rubenwiersma.nl/assets/pdf/DeltaConv.pdf) and [its supplement](https://rubenwiersma.nl/assets/pdf/DeltaConv_supplement.pdf).

The output of this procedure is a $2N \times N$ sparse matrix for gradient and an $N \times 2N$ sparse matrix for divergence. We also add functionality to use these matrices to compute the co-gradient, curl, and Laplacians of a scalar/vector field on point clouds. This functionality can be found in [pcdiff/operators.py](pcdiff/operators.py).
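As a minimal sketch of how these helpers combine, continuing the quick-start snippet above (the random vector field `v` is only for illustration):

```python
from pcdiff import curl, hodge_laplacian, J

# Laplacian of a scalar function, using the `laplacian` matrix built above
L_x = laplacian @ x

# A tangent vector field with interleaved components, shape [2N, C]
v = np.random.rand(2 * 1000, 1)

# Curl of a vector field: -div(J v), where J is a 90-degree rotation
curl_v = curl(v, div)

# Hodge-Laplacian of a vector field: -(grad div + J grad curl) v
L_v = hodge_laplacian(v, grad, div)
```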
## Alternatives
There are many alternatives for computing discrete differential operators in Python (e.g., `potpourri3d`, `libigl`, `gptoolbox`). Most of them, however, did not have an implementation of gradient and divergence on point clouds available or exposed at the time of writing. Do check out these awesome libraries; `pcdiff` is intended to complement them.

## Citation
If you find this library useful in your own work, please cite our paper on DeltaConv, a convolution for point clouds that uses these operators:

```bib
@Article{Wiersma2022DeltaConv,
  author    = {Ruben Wiersma and Ahmad Nasikun and Elmar Eisemann and Klaus Hildebrandt},
  journal   = {ACM Transactions on Graphics},
  title     = {DeltaConv: Anisotropic Operators for Geometric Deep Learning on Point Clouds},
  year      = {2022},
  month     = jul,
  number    = {4},
  volume    = {41},
  doi       = {10.1145/3528223.3530166},
  publisher = {ACM},
}
```
--------------------------------------------------------------------------------
/assets/img/gradient.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/rubenwiersma/pointcloud-differential/5916c5cb1ae72b70e6735feeea8e66c4e79b3f26/assets/img/gradient.png
--------------------------------------------------------------------------------
/examples/demo.py:
--------------------------------------------------------------------------------
# Install polyscope and potpourri3d to run this example:
# pip install polyscope potpourri3d
import polyscope as ps
import potpourri3d as pp3d

import numpy.linalg as LA
from pcdiff import knn_graph, estimate_basis, build_grad_div

# Initialize polyscope
ps.init()
ps.set_ground_plane_mode('none')

# Load point cloud (vertices of mesh)
pos, _ = pp3d.read_mesh('examples/spot.obj')

# Add point cloud to Polyscope
ps.register_point_cloud("Spot", pos)
# Show x-coordinate as scalar value
ps.get_point_cloud("Spot").add_scalar_quantity("x-coordinate", pos[:, 0], enabled=True)

# Compute gradient, divergence
edge_index = knn_graph(pos, 10)
normal, x_basis, y_basis = estimate_basis(pos, edge_index)
grad, div = build_grad_div(pos, normal, x_basis, y_basis, edge_index)

# Compute gradient in tangent basis coordinates
gradient_x = grad @ pos[:, :1]

# Project into 3D
gradient_x = gradient_x.reshape(-1, 2)
gradient_x_3d = gradient_x[:, 0:1] * x_basis + gradient_x[:, 1:] * y_basis

# Add gradient vectors to Polyscope
ps.get_point_cloud("Spot").add_vector_quantity("Gradient of x-coordinate", gradient_x_3d, enabled=True)

# Compute Laplacian on points as divergence of gradient
L_pos = div @ grad @ pos

# Show result as 3D vectors on point clouds (point in the normal direction, norm is mean curvature)
ps.get_point_cloud("Spot").add_vector_quantity("Mean curvature vector", L_pos)
ps.get_point_cloud("Spot").add_scalar_quantity("Mean curvature", LA.norm(L_pos, axis=1))

# Show result
ps.show()
--------------------------------------------------------------------------------
/examples/hello_world.py:
--------------------------------------------------------------------------------
import numpy as np
from pcdiff import knn_graph, estimate_basis, build_grad_div

# Random point cloud
pos = np.random.rand(1000, 3)

# Generate kNN graph
edge_index = knn_graph(pos, 20)
# Estimate normals and local frames
basis = estimate_basis(pos, edge_index)
# Build gradient and divergence operators (SciPy sparse matrices)
grad, div = build_grad_div(pos, *basis, edge_index)

# ... use gradient and divergence in any task you like
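# For instance (an editor's sketch, mirroring examples/demo.py): reshape the
# interleaved gradient to per-point tangent coordinates and lift it to 3D
# vectors with the estimated local frames:
#
#     normal, x_basis, y_basis = basis
#     grad_x = (grad @ pos[:, :1]).reshape(-1, 2)
#     grad_x_3d = grad_x[:, 0:1] * x_basis + grad_x[:, 1:] * y_basis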
--------------------------------------------------------------------------------
/pcdiff/__init__.py:
--------------------------------------------------------------------------------
from .operators import *
from .grad_div_mls import *
from .utils import *

__version__ = (1, 0, 1)
--------------------------------------------------------------------------------
/pcdiff/grad_div_mls.py:
--------------------------------------------------------------------------------
import numpy as np
import numpy.linalg as LA
from scipy.sparse import coo_matrix
from .utils import batch_dot

EPS = 1e-5


def estimate_basis(pos, edge_index, k=None, orientation=None):
    """Estimates a tangent basis for each point, given a k-nn graph and positions.

    Args:
        pos (ndarray): an [N, 3] array with the point positions.
        edge_index (ndarray): indices of the adjacency matrix of the k-nn graph [2, N * k].
        k (int, optional): the number of neighbors per point,
            is derived from edge_index when no k is provided (default: None).
        orientation (ndarray, optional): an [N, 3] array with a rough direction of the normal to
            orient the estimated normals.
    """
    row, col = edge_index
    k = (row == 0).sum() if k is None else k
    row, col = row.reshape(-1, k), col.reshape(-1, k)
    local_pos = (pos[col] - pos[row]).transpose(0, 2, 1)

    # SVD to estimate bases
    U, _, _ = LA.svd(local_pos)

    # The normal corresponds to the smallest singular vector; normalize it
    normal = U[:, :, 2]
    normal = normal / LA.norm(normal, axis=-1, keepdims=True).clip(EPS)

    # If normals are given, orient using the given normals
    if orientation is not None:
        normal = np.where(batch_dot(normal, orientation) < 0, -normal, normal)

    # The x basis corresponds to the largest singular vector; normalize it
    x_basis = U[:, :, 0]
    x_basis = x_basis / LA.norm(x_basis, axis=-1, keepdims=True).clip(EPS)

    # Create an orthonormal basis by taking the cross product
    y_basis = np.cross(normal, x_basis)
    y_basis = y_basis / LA.norm(y_basis, axis=-1, keepdims=True).clip(EPS)

    return normal, x_basis, y_basis


def build_tangent_basis(normal):
    """Constructs an orthonormal tangent basis, given a normal vector.

    Args:
        normal (ndarray): an [N, 3] array with normals per point.
    """

    # Pick an arbitrary basis vector that does not align too much with the normal
    testvec = np.tile(np.array([[1, 0, 0]]), (normal.shape[0], 1))
    testvec_alt = np.tile(np.array([[0, 1, 0]]), (normal.shape[0], 1))
    testvec = np.where(np.abs(batch_dot(normal, testvec)) > 0.9, testvec_alt, testvec)

    # Derive the x basis using the cross product and normalize
    x_basis = np.cross(testvec, normal)
    x_basis = x_basis / LA.norm(x_basis, axis=-1, keepdims=True).clip(EPS)

    # Derive the y basis using the cross product and normalize
    y_basis = np.cross(normal, x_basis)
    y_basis = y_basis / LA.norm(y_basis, axis=-1, keepdims=True).clip(EPS)
    return x_basis, y_basis
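
# Example (editor's sketch, not part of the original module): the SVD leaves
# the sign of each estimated normal ambiguous. For a roughly star-shaped
# cloud, a consistent outward orientation can be obtained by passing the
# direction away from the centroid through the `orientation` argument:
#
#     edge_index = knn_graph(pos, 20)
#     normal, x_basis, y_basis = estimate_basis(
#         pos, edge_index, orientation=pos - pos.mean(axis=0))
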
def coords_projected(pos, normal, x_basis, y_basis, edge_index, k=None):
    """Projects neighboring points to the tangent basis
    and returns the local coordinates.

    Args:
        pos (ndarray): an [N, 3] array with the point positions.
        normal (ndarray): an [N, 3] array with normals per point.
        x_basis (ndarray): an [N, 3] array with the x basis per point.
        y_basis (ndarray): an [N, 3] array with the y basis per point.
        edge_index (ndarray): indices of the adjacency matrix of the k-nn graph [2, N * k].
        k (int, optional): the number of neighbors per point,
            is derived from edge_index when no k is provided (default: None).
    """
    row, col = edge_index
    k = (row == 0).sum() if k is None else k

    # Compute coords
    normal = np.tile(normal[:, None], (1, k, 1)).reshape(-1, 3)
    x_basis = np.tile(x_basis[:, None], (1, k, 1)).reshape(-1, 3)
    y_basis = np.tile(y_basis[:, None], (1, k, 1)).reshape(-1, 3)
    local_pos = pos[col] - pos[row]
    local_pos = local_pos - normal * batch_dot(local_pos, normal)
    x_pos = batch_dot(local_pos, x_basis).flatten()
    y_pos = batch_dot(local_pos, y_basis).flatten()
    coords = np.stack([x_pos, y_pos], axis=1)

    return coords


def gaussian_weights(dist, k, kernel_width=1):
    """Computes Gaussian weights per edge and normalizes the sum per neighborhood.

    Args:
        dist (ndarray): an [N * k] array with the length of each edge
            (build_grad_div uses the Euclidean distance between the endpoints).
        k (int): the number of neighbors per point.
        kernel_width (float, optional): the size of the kernel,
            relative to the average edge length in each shape (default: 1).
    """
    dist = dist.reshape(-1, k)
    avg_dist = dist.mean(axis=1, keepdims=True)
    weights = np.exp(- dist ** 2 / (kernel_width * avg_dist) ** 2)
    weights = weights / weights.sum(axis=1, keepdims=True).clip(EPS)

    return weights.flatten()


def weighted_least_squares(coords, weights, k, regularizer, shape_regularizer=None):
    r"""Solves a weighted least squares equation (see http://www.nealen.net/projects/mls/asapmls.pdf).
    In practice, we compute the inverse of the left-hand side of a weighted least squares problem:
        B^T B c = B^T f(x).

    This inverse can be multiplied with the right-hand side to find the coefficients
    of a second-order polynomial that approximates f(x):
        c = (B^T B)^{-1} B^T f(x).

    The weighted least squares problem is regularized by adding a small value \lambda
    to the diagonal of the matrix on the left-hand side of the equation:
        B^T B + \lambda I.
    """
    # Set up the polynomial basis
    coords_const = np.concatenate([np.ones((coords.shape[0], 1)), coords], axis=1)
    B = np.matmul(np.expand_dims(coords_const, -1), np.expand_dims(coords_const, -2))
    triu = np.triu_indices(3)
    B = B[:, triu[0], triu[1]]
    B = B.reshape(-1, k, 6)  # [1, x, y, x**2, xy, y**2]

    # Compute weighted least squares
    lI = regularizer * np.eye(6, 6)[None]
    BT = (weights.reshape(-1, k, 1) * B).transpose(0, 2, 1)
    BTB = BT @ B + lI
    BTB_inv = LA.inv(BTB)
    wls = (BTB_inv @ BT).transpose(0, 2, 1).reshape(-1, 6)

    if shape_regularizer is not None:
        lI = shape_regularizer * np.eye(6, 6)[None]
        BTB = BT @ B + lI
        BTB_inv = LA.inv(BTB)
        wls_shape = (BTB_inv @ BT).transpose(0, 2, 1).reshape(-1, 6)
        return wls, wls_shape
    return wls
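
# Example (editor's sketch, mirroring test/test_grad_div_mls.py): given local
# coordinates `coords` of shape [N * k, 2] and a signal `f` of shape [N, k, 1]
# sampled at the k neighbors, the six polynomial coefficients per point are
# recovered with:
#
#     dist = LA.norm(coords, axis=1)
#     weights = gaussian_weights(dist, k)
#     wls = weighted_least_squares(coords, weights, k, regularizer=1e-8)
#     coefficients = (wls.reshape(-1, k, 6) * f).sum(axis=1)  # [N, 6]
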
155 | """ 156 | row, col = edge_index 157 | k = (row == 0).sum() 158 | 159 | # Compute the height over the patch by projecting the relative positions onto the normal 160 | patch_f = batch_dot(normal[row], pos[col] - pos[row]) 161 | coefficients = np.sum((wls * patch_f).reshape(-1, k, 6), axis=1) 162 | if coefficients.shape[0] < row.max(): 163 | coefficients = np.repeat(coefficients, k, axis=0) 164 | 165 | # Equation (3) and (4) from supplement 166 | h_x = (coefficients[row, 1] + 2 * coefficients[row, 3] * coords[:, 0] + coefficients[row, 4] * coords[:, 1]) 167 | h_y = (coefficients[row, 2] + coefficients[row, 4] * coords[:, 0] + 2 * coefficients[row, 5] * coords[:, 1]) 168 | 169 | # Push forward bases to p_j 170 | # In equation (15): \Gamma(u_j, v_j) 171 | gamma_x = x_basis[row] + normal[row] * h_x[..., None] 172 | gamma_y = y_basis[row] + normal[row] * h_y[..., None] 173 | 174 | # Determine inverse metric for mapping 175 | # Inverse metric is given in equation (9) of supplement 176 | det_metric = (1 + h_x ** 2 + h_y ** 2) 177 | E, F, G = 1 + h_x ** 2, h_x * h_y, 1 + h_y ** 2 178 | inverse_metric = np.stack([ 179 | G, -F, 180 | -F, E 181 | ], axis=-1).reshape(-1, 2, 2) 182 | inverse_metric = inverse_metric / det_metric.reshape(-1, 1, 1) 183 | basis_transformation = np.concatenate([ 184 | batch_dot(gamma_x, x_basis[col]), 185 | batch_dot(gamma_x, y_basis[col]), 186 | batch_dot(gamma_y, x_basis[col]), 187 | batch_dot(gamma_y, y_basis[col]) 188 | ], axis=1).reshape(-1, 2, 2) 189 | 190 | # Compute mapping of vectors 191 | return inverse_metric @ basis_transformation # [N, 2, 2] 192 | 193 | 194 | def build_grad_div(pos, normal, x_basis, y_basis, edge_index, kernel_width=1, regularizer=1e-8, shape_regularizer=None): 195 | """Builds a gradient and divergence operators using Weighted Least Squares (WLS). 196 | Note: this function is only faster if used on the GPU. 197 | Use pointcloud-ops when applying transforms on the CPU. 198 | 199 | Args: 200 | pos (Tensor): an [N, 3] tensor with the point positions. 201 | normal (Tensor): an [N, 3] tensor with normals per point. 202 | x_basis (Tensor): an [N, 3] tensor with x basis per point. 203 | y_basis (Tensor): an [N, 3] tensor with y basis per point. 204 | edge_index (Tensor): indices of the adjacency matrix of the k-nn graph [2, N * k]. 205 | batch (Tensor): an [N] tensor denoting which batch each shape belongs to (default: None). 206 | kernel_width (float, optional): the size of the kernel, 207 | relative to the average edge length in each shape (default: 1). 208 | regularizer (float: optional): the regularizer parameter 209 | for weighted least squares fitting (default: 1e-8). 210 | normalized (bool: optional): Normalizes the operators by the 211 | infinity norm if set to True (default: True): 212 | G = G / |G|_{\inf} 213 | shape_regularizer (float: optional): sets the regularizer parameter 214 | for weighted least squares fitting of the surface, rather than the signal on the surface. 215 | By default, this is set to None and the same value is used for the surface and the signal. 
216 | """ 217 | 218 | row, col = edge_index 219 | k = (row == 0).sum() 220 | 221 | # Get coordinates in tangent plane by projecting along the normal of the plane 222 | coords = coords_projected(pos, normal, x_basis, y_basis, edge_index, k) 223 | 224 | # Compute weights based on distance in euclidean space 225 | dist = LA.norm(pos[col] - pos[row], axis=1) 226 | weights = gaussian_weights(dist, k, kernel_width) 227 | 228 | # Get weighted least squares result 229 | # wls multiplied with a function f at k neighbors will give the coefficients c0-c5 230 | # for the surface f(x, y) = [x, y, c0 + c1*x + c2*y + c3*x**2 + c4*xy + c5*y**2] 231 | # defined on a neighborhood of each point. 232 | if shape_regularizer is None: 233 | wls = weighted_least_squares(coords, weights, k, regularizer) 234 | else: 235 | wls, wls_shape = weighted_least_squares(coords, weights, k, regularizer, shape_regularizer) 236 | 237 | # Format as sparse matrix 238 | 239 | # The gradient of f at (0, 0) will be 240 | # df/dx|(0, 0) = [1, 0, c1 + 2*c3*0 + c4*0] = [1, 0, c1] 241 | # df/dy|(0, 0) = [0, 1, c2 + c4*0 + 2*c5*0] = [0, 1, c2] 242 | # Hence, we can use the row in wls that outputs c1 and c2 for the gradient 243 | # in x direction and y direction, respectively 244 | grad_row = np.stack([row * 2, row * 2 + 1], axis=1).flatten() 245 | grad_col = np.stack([col]*2, axis=1).flatten() 246 | grad_values = np.stack([wls[:, 1], wls[:, 2]], axis=1).flatten() 247 | 248 | # Create gradient matrix 249 | grad = coo_matrix((grad_values, (grad_row, grad_col)), shape=(pos.shape[0] * 2, pos.shape[0])) 250 | 251 | # Divergence 252 | if shape_regularizer is not None: 253 | wls = wls_shape 254 | vector_mapping = fit_vector_mapping(pos, normal, x_basis, y_basis, (row, col), wls, coords) 255 | 256 | # Store as sparse tensor 257 | grad_vec = grad_values.reshape(-1, 1, 2) 258 | div_vec = (grad_vec @ vector_mapping).flatten() 259 | div_row = np.stack([row] * 2, axis=1).flatten() 260 | div_col = np.stack([col * 2, col * 2 + 1], axis=1).flatten() 261 | div = coo_matrix((div_vec, (div_row, div_col)), shape=(pos.shape[0], pos.shape[0] * 2)) 262 | 263 | return grad, div 264 | -------------------------------------------------------------------------------- /pcdiff/operators.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import numpy.linalg as LA 3 | 4 | def norm(v): 5 | """Computes the norm of a vector field.""" 6 | _, C = v.shape 7 | return LA.norm(v.reshape(-1, 2, C), axis=1) 8 | 9 | def J(v): 10 | """Rotates a vector field by 90-degrees counter-clockwise.""" 11 | N, C = v.shape 12 | v = v.reshape(-1, 2, C) 13 | J_v = np.zeros_like(v) 14 | J_v[:, 0] = -v[:, 1] 15 | J_v[:, 1] = v[:, 0] 16 | J_v = J_v.reshape(N, C) 17 | return J_v 18 | 19 | def I_J(v): 20 | """Concatenates a vector field and its 90-degree rotated counterpart.""" 21 | return np.concatenate([v, J(v)], axis=1) 22 | 23 | def curl(v, div): 24 | """Computes the curl of a vector field using divergence: 25 | curl = - div J V. 26 | """ 27 | return - (div @ J(v)) 28 | 29 | def laplacian(x, grad, div): 30 | """Computes the laplacian of a function using gradient and divergence: 31 | laplacian = - div grad X. 32 | """ 33 | return - (div @ (grad @ x)) 34 | 35 | def hodge_laplacian(v, grad, div): 36 | """Computes the Hodge-Laplacian of a vector field using gradient and divergence: 37 | hodge-laplacian = - (grad div + J grad curl) V. 
38 | """ 39 | # Compute - G G.T v (grad div) 40 | grad_div_v = grad @ (div @ v) 41 | 42 | # Compute J G G.T J v (J grad curl) 43 | J_grad_curl_v = J(grad @ curl(v, div)) 44 | 45 | # Combine 46 | return - (grad_div_v + J_grad_curl_v) 47 | -------------------------------------------------------------------------------- /pcdiff/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from sklearn.neighbors import KDTree 3 | 4 | def batch_dot(a, b): 5 | return np.matmul(a[:, None], b[..., None]).squeeze(-1) 6 | 7 | def knn_graph(pos, k=20): 8 | return (np.repeat(np.arange(pos.shape[0]), k), KDTree(pos, leaf_size=20).query(pos, k=k)[1].flatten()) -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = [ 3 | "setuptools>=42", 4 | "wheel" 5 | ] 6 | 7 | build-backend = "setuptools.build_meta" -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os, sys, re, subprocess, platform 2 | 3 | import setuptools 4 | from setuptools import setup, Extension 5 | from setuptools.command.build_ext import build_ext 6 | from distutils.version import LooseVersion 7 | 8 | 9 | class CMakeExtension(Extension): 10 | def __init__(self, name, sourcedir=''): 11 | Extension.__init__(self, name, sources=[]) 12 | self.sourcedir = os.path.abspath(sourcedir) 13 | 14 | 15 | class CMakeBuild(build_ext): 16 | def run(self): 17 | try: 18 | out = subprocess.check_output(['cmake', '--version']) 19 | except OSError: 20 | raise RuntimeError("CMake must be installed to build the following extensions: " + 21 | ", ".join(e.name for e in self.extensions)) 22 | 23 | if platform.system() == "Windows": 24 | cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)', out.decode()).group(1)) 25 | if cmake_version < '3.1.0': 26 | raise RuntimeError("CMake >= 3.1.0 is required on Windows") 27 | 28 | for ext in self.extensions: 29 | self.build_extension(ext) 30 | 31 | def build_extension(self, ext): 32 | extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name))) 33 | cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir, 34 | '-DPYTHON_EXECUTABLE=' + sys.executable] 35 | 36 | cfg = 'Debug' if self.debug else 'Release' 37 | build_args = ['--config', cfg] 38 | 39 | if platform.system() == "Windows": 40 | cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)] 41 | if sys.maxsize > 2**32: 42 | cmake_args += ['-A', 'x64'] 43 | build_args += ['--', '/m'] 44 | else: 45 | cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg] 46 | build_args += ['--', '-j3'] 47 | 48 | env = os.environ.copy() 49 | env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''), 50 | self.distribution.get_version()) 51 | if not os.path.exists(self.build_temp): 52 | os.makedirs(self.build_temp) 53 | subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env) 54 | subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp) 55 | 56 | 57 | with open("README.md", "r", encoding="utf-8") as fh: 58 | long_description = fh.read() 59 | 60 | setuptools.setup( 61 | name='pcdiff', 62 | version='1.0.1', 63 | author='Ruben Wiersma', 64 | author_email="rubenwiersma@gmail.com", 65 | description='Simple and small library to compute 
    long_description=long_description,
    long_description_content_type="text/markdown",
    license="MIT",
    url="https://github.com/rubenwiersma/pcdiff",
    project_urls={
        "Bug Tracker": "https://github.com/rubenwiersma/pcdiff",
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    packages=setuptools.find_packages(where=".", include=["pcdiff", "pcdiff.*"]),
    python_requires='>=3.6',
    install_requires=['numpy', 'scipy', 'scikit-learn'],
    zip_safe=False,
    test_suite="test"
)
--------------------------------------------------------------------------------
/test/test_grad_div_mls.py:
--------------------------------------------------------------------------------
import numpy as np
import numpy.linalg as LA

from pcdiff.grad_div_mls import *
from pcdiff.utils import batch_dot, knn_graph
from pcdiff.operators import curl, hodge_laplacian, laplacian, J

def test_build_tangent_basis():
    normal = np.random.rand(100, 3)
    normal = normal / LA.norm(normal, axis=1, keepdims=True).clip(1e-8)
    x_basis, y_basis = build_tangent_basis(normal)

    # 1. The basis must be orthonormal
    basis = np.stack([normal, x_basis, y_basis], axis=-1)
    basisTbasis = basis.transpose(0, 2, 1) @ basis
    identity = np.stack([np.eye(3)]*100, 0)
    assert np.allclose(basisTbasis, identity, atol=1e-7)

    # 2. The basis must be right-handed
    assert (batch_dot(np.cross(x_basis, y_basis, axis=1), normal) < 0).sum() == 0


def test_estimate_basis():
    # Generate random points in a plane
    pos = np.concatenate([np.random.rand(100, 2), np.zeros((100, 1))], axis=1)

    # Generate a random normal
    normal = np.random.rand(1, 3)
    normal = normal / LA.norm(normal, axis=1, keepdims=True).clip(1e-8)
    # And compute an orthonormal basis around the normal
    xy_basis = build_tangent_basis(normal)

    # Transform points with the new basis
    T = np.stack([*xy_basis, normal], axis=-1).squeeze(0)
    pos = pos @ T.T

    # Estimate bases with SVD
    edge_index = knn_graph(pos, 20)
    out_normal, out_x_basis, out_y_basis = estimate_basis(pos, edge_index)

    # 1. The basis must be orthonormal
    basis = np.stack([out_normal, out_x_basis, out_y_basis], axis=-1)
    basisTbasis = basis.transpose(0, 2, 1) @ basis
    identity = np.stack([np.eye(3)]*100, 0)
    assert np.allclose(basisTbasis, identity, atol=1e-5)

    # 2. The basis must be right-handed
    assert (batch_dot(np.cross(out_x_basis, out_y_basis, axis=1), out_normal) < 0).sum() == 0

    # 3. The normal should align with the ground-truth normal
    assert np.allclose(np.abs((normal * out_normal).sum(axis=1)), np.ones(100))


def test_coords_projected():
    # Set up a simple surface f(x, y) = [x, y, x^2 + y^2]
    x, y = np.random.rand(2, 100) * 2 - 1
    x[0] = y[0] = 0
    z = x**2 + y**2

    pos = np.stack([x, y, z], axis=1) + np.random.rand(3)

    # And rotate the surface in 3D

    # Generate a random normal
    normal = np.random.rand(1, 3)
    normal = normal / LA.norm(normal, axis=1, keepdims=True).clip(1e-8)
    # And compute an orthonormal basis around the normal
    x_basis, y_basis = build_tangent_basis(normal)

    # Transform points with the new basis
    T = np.stack([x_basis, y_basis, normal], axis=-1).squeeze(0)
    pos = pos @ T.T

    # Compute coordinates by projection
    edge_index = knn_graph(pos, 20)
    out_coords = coords_projected(pos, np.tile(normal, (100, 1)), np.tile(x_basis, (100, 1)), np.tile(y_basis, (100, 1)), edge_index)

    # 1. The coordinates should be equal to the original x, y coordinates
    true_coords = np.stack([x[edge_index[1][:20]], y[edge_index[1][:20]]], axis=1)
    assert np.allclose(out_coords[:20], true_coords)


def test_gaussian_weights():
    # Random distances
    dist = np.random.rand(1000)

    out_weights = gaussian_weights(dist, 20)

    # 1. No NaNs
    assert np.isnan(out_weights).sum() == 0
    # 2. The sum of weights is 1 for every neighborhood
    assert np.allclose(out_weights.reshape(-1, 20).sum(axis=1), np.ones(50))

    # 3. Points at a closer distance have a higher weight
    dist = np.array([0.1, 0.5, 1., 1.5, 2.])
    out_weights = gaussian_weights(dist, 5)
    assert out_weights[0] > out_weights[1]
    assert out_weights[1] > out_weights[2]
    assert out_weights[2] > out_weights[3]
    assert out_weights[3] > out_weights[4]


def test_weighted_least_squares():
    N = 1000
    k = 20
    # Set up a simple surface f(x, y) = [x, y, c0 + c1*x + c2*y + c3*x**2 + c4*xy + c5*y**2]
    coords = np.random.rand(N, k, 2) * 2 - 1
    # Always add the center point
    coords[:, 0] = 0
    coords = coords.reshape(N * k, 2)

    # Compute XTX, so we can create a quadratic function
    coords_const = np.concatenate([np.ones((coords.shape[0], 1)), coords], axis=1)
    B = np.expand_dims(coords_const, -1) @ np.expand_dims(coords_const, -2)
    triu = np.triu_indices(3)
    B = B[:, triu[0], triu[1]]
    B = B.reshape(-1, k, 6)  # [1, x, y, x**2, xy, y**2]

    # Set random coefficients
    coefficients = np.random.rand(N, 6)
    # And compute a dummy function
    f = (B * coefficients[:, None]).sum(axis=-1, keepdims=True)  # [N, k, 1]

    dist = LA.norm(coords, axis=1)
    weights = gaussian_weights(dist, k)
    out_wls = weighted_least_squares(coords, weights, k, 0)

    out_coefficients = (out_wls.reshape(N, k, 6) * f).sum(axis=1)
    # 1. The recovered coefficients should be equal to the actual coefficients
    assert np.allclose(out_coefficients, coefficients, atol=1e-3)

    # 2. The coefficients should stay close when the regularizer is used
    out_wls = weighted_least_squares(coords, weights, k, 1e-5)
    out_coefficients = (out_wls.reshape(N, k, 6) * f).sum(axis=1)
    assert np.allclose(out_coefficients, coefficients, atol=5e-2)

    # 3. If we add noise to the function f, we want the derived coefficients to remain close
    f_noise = f + np.random.rand(N, k, 1) * 0.01 - 0.005
    out_coefficients = (out_wls.reshape(N, k, 6) * f_noise).sum(axis=1)
    assert np.allclose(out_coefficients, coefficients, atol=1e-1)
    # On average, the error is < 0.05
    assert np.abs(out_coefficients - coefficients).mean() < 5e-2

    # 4. If we add outliers to the function f, we want the derived coefficients to remain close
    # Outliers are added with a 5% chance
    f_noise = f + (np.random.rand(N, k, 1) > 0.95) * np.random.rand(N, k, 1) * 0.1
    out_coefficients = (out_wls.reshape(N, k, 6) * f_noise).sum(axis=1)
    # The error is bounded by a value < 0.5
    assert np.allclose(out_coefficients, coefficients, atol=5e-1)
    # On average, the error is < 0.05
    assert np.abs(out_coefficients - coefficients).mean() < 5e-2


def test_fit_vector_mapping():
    N = 1000
    k = 20
    # Testing strategy:
    # -----------------
    # 1. Create N separate patches with k points each.
    #    Each patch is a surface randomly sampled from a quadratic polynomial with random coefficients.
    # 2. We set up a basis for each point in the patch and rotate it.
    # 3. Then we check that the fit_vector_mapping function correctly transforms from
    #    a neighboring basis to the center point's basis.
    # 4. If the basis is transformed correctly, that means any vector expressed
    #    in this basis will also be transformed correctly.

    # Set up a simple surface f(x, y) = [x, y, c0 * x**2 + c1 * xy + c2 * y**2]
    # for all N patches. In the comments, we refer to coords as X.
    # ------------------
    coords = np.random.rand(N, k, 2) * 2 - 1
    # Always add the center point
    coords[:, 0] = 0

    # Set random coefficients
    coefficients = np.random.rand(N, 3)
    # And compute a dummy function
    x = coords[..., 0]
    y = coords[..., 1]
    f = coefficients[:, None, 0] * x**2 + coefficients[:, None, 1] * x * y + coefficients[:, None, 2] * y ** 2
    coords = coords.reshape(-1, 2)

    # Assemble positions
    pos = np.concatenate([coords, f.reshape(-1, 1)], axis=1)

    # Compute basis and randomize in-plane rotation
    # ---------------------------------------------

    # Set up a basis for each point
    # df/dx = [1, 0, 2*c0*x + c1*y]
    dfdx_z = 2 * coefficients[:, None, 0] * x + coefficients[:, None, 1] * y
    dfdx = np.stack([
        np.ones_like(coords[:, 0]),
        np.zeros_like(coords[:, 0]),
        dfdx_z.flatten()
    ], axis=1)  # [N * k, 3]
    # df/dy = [0, 1, c1*x + 2*c2*y]
    dfdy_z = coefficients[:, None, 1] * x + 2 * coefficients[:, None, 2] * y
    dfdy = np.stack([
        np.zeros_like(coords[:, 0]),
        np.ones_like(coords[:, 0]),
        dfdy_z.flatten()
    ], axis=1)  # [N * k, 3]
    # The normal is dfdx x dfdy
    normal = np.cross(dfdx, dfdy, axis=1)  # [N * k, 3]
    normal = normal / LA.norm(normal, axis=1, keepdims=True).clip(1e-8)

    # Add a random rotation to each basis
    # Mix the x- and y-basis with random, non-zero weights
    weights = np.random.rand(N * k, 2) + 1e-2  # [N * k, 2]
    # Flip some weights
    weights[:, 0] = np.where(np.random.rand(N * k) > 0.5, weights[:, 0], -weights[:, 0])
    weights[:, 1] = np.where(np.random.rand(N * k) > 0.5, weights[:, 1], -weights[:, 1])
    # Normalize weights
    weights = weights / LA.norm(weights, axis=1, keepdims=True).clip(1e-8)
    # Don't change the center points
    weights = weights.reshape(N, k, 2)
    weights[:, 0] = np.array([1, 0])
    weights = weights.reshape(N * k, 2)
    # Mix the x and y basis
    x_basis = weights[:, 0:1] * dfdx + weights[:, 1:] * dfdy
    x_basis = x_basis / LA.norm(x_basis, axis=1, keepdims=True).clip(1e-8)
    # Recompute the y-basis with a cross product between the x-basis and normal
    y_basis = np.cross(normal, x_basis)

    #
    #   ((
    #  c| |
    #   |__|
    #
    # Hey, nice to see you here! Glad to see someone reads tests :)
    # Come say hi on twitter (@rtwiersma)
    # As a token of my appreciation, here's a way to easily visualize the whole test surface we just made:
    # first, install polyscope (pip install polyscope), then uncomment the next few lines
    # and make sure you set N=1 at the start of this test.
    # ----------------------
    # import polyscope as ps
    # ps.init()
    # cloud = ps.register_point_cloud('cloud', pos)
    # cloud.add_vector_quantity('dfdx', dfdx, enabled=True)
    # cloud.add_vector_quantity('dfdy', dfdy)
    # cloud.add_vector_quantity('normal', normal, enabled=True)
    # cloud.add_vector_quantity('x_basis', x_basis, enabled=False)
    # cloud.add_vector_quantity('y_basis', y_basis, enabled=False)
    # ps.set_ground_plane_mode('none')
    # ps.show()

    # Compute the vector mapping
    # --------------------------
    edge_index = (
        np.repeat(np.arange(N), k) * k,
        np.arange(N * k)
    )
    # The WLS and fit_vector_mapping code assumes that we projected
    # the coordinates of each neighbor to the tangent plane of the center point.
    dist = LA.norm(coords, axis=1)
    wls_weights = gaussian_weights(dist, k)
    wls = weighted_least_squares(coords, wls_weights, k, regularizer=0)

    out_vector_mapping = fit_vector_mapping(pos, normal, x_basis, y_basis, edge_index, wls, coords)

    # 0. Check some preliminaries
    assert out_vector_mapping.shape == (N * k, 2, 2)
    assert np.isnan(out_vector_mapping).sum() == 0

    # The vector fitting procedure solves
    # a_0^x dfdx|j + a_0^y dfdy|j = a_j^x e_j^x + a_j^y e_j^y
    # So, for a_j = [1, 0], we want [dfdx dfdy] a_0 == e_j^x
    # and for a_j = [0, 1], we want [dfdx dfdy] a_0 == e_j^y
    assert np.allclose(out_vector_mapping[:, None, 0, 0] * dfdx + out_vector_mapping[:, None, 1, 0] * dfdy, x_basis, atol=1e-6)
    assert np.allclose(out_vector_mapping[:, None, 0, 1] * dfdx + out_vector_mapping[:, None, 1, 1] * dfdy, y_basis, atol=1e-6)
    # That's it!


def test_build_grad_div():
    # Testing strategy
    # ----------------
    # 1. Sample points from a parametric surface.
    # 2. Compute analytical derivatives.
    # 3. Compare the result of the gradient matrix with the analytical derivatives.
    N = 1000
    k = 20

    np.random.seed(42)

    # Set up a simple surface f(x, y) = [x, y, c0 + c1*x + c2*y + c3*x**2 + c4*xy + c5*y**2]
    coords = np.random.rand(N, 2) * 2 - 1

    # Compute XTX, so we can create a quadratic function
    coords_const = np.concatenate([np.ones((coords.shape[0], 1)), coords], axis=1)
    B = np.expand_dims(coords_const, -1) @ np.expand_dims(coords_const, -2)
    triu = np.triu_indices(3)
    B = B[:, triu[0], triu[1]]
    B = B.reshape(-1, 6)  # [1, x, y, x**2, xy, y**2]

    # Set random coefficients
    coefficients = np.random.rand(6)
    # And compute a dummy function
    f = (B * coefficients[None]).sum(axis=1, keepdims=True)  # [N, 1]

    # Simple surface with boundary
    pos = np.concatenate([coords, f], axis=1)

    # Compute the coordinate frame
    x, y = coords.T
    dfdx_z = coefficients[1] + 2 * coefficients[3] * x + coefficients[4] * y
    dfdx = np.stack([
        np.ones_like(coords[:, 0]),
        np.zeros_like(coords[:, 0]),
        dfdx_z.flatten()
    ], axis=1)  # [N, 3]
    dfdy_z = coefficients[2] + coefficients[4] * x + 2 * coefficients[5] * y
    dfdy = np.stack([
        np.zeros_like(coords[:, 0]),
        np.ones_like(coords[:, 0]),
        dfdy_z.flatten()
    ], axis=1)  # [N, 3]
    # The normal is dfdx x dfdy
    normal = np.cross(dfdx, dfdy, axis=1)  # [N, 3]
    normal = normal / LA.norm(normal, axis=1, keepdims=True).clip(1e-8)
    # Normalize x_basis
    x_basis = dfdx / LA.norm(dfdx, axis=1, keepdims=True).clip(1e-8)
    y_basis = np.cross(normal, x_basis)

    edge_index = knn_graph(pos, k)
    out_grad, out_div = build_grad_div(pos, normal, x_basis, y_basis, edge_index, regularizer=1e-8)

    # 1. Size checking
    # ----------------
    # 1a. Grad size must be [N*2, N]
    assert out_grad.shape[0] == N*2
    assert out_grad.shape[1] == N
    # 1b. Div size must be [N, N*2]
    assert out_div.shape[0] == N
    assert out_div.shape[1] == N*2

    # 2. Checking output for NaNs
    # ---------------------------
    # 2a. We shouldn't get NaNs from applying grad
    assert np.isnan(out_grad @ np.random.rand(N, 1)).sum() == 0
    # 2b. No NaNs from applying div
    assert np.isnan(out_div @ np.random.rand(N*2, 1)).sum() == 0

    # 3. De Rham complex properties
    # -----------------------------
    # 3a. The result of applying grad to a constant function should be 0
    assert np.allclose(out_grad @ np.ones((N, 1)), np.zeros((N*2, 1)), atol=1e-2)
    # 3b. The result of applying the laplacian (div grad) to a constant function should be 0
    #     We check this with the normalized L1 norm (mean of absolute values)
    assert np.abs(laplacian(np.ones((N, 1)), out_grad, out_div)).mean() < 1e-2
    # 3c. The L1 norm of div grad x should be > 0 for a random function
    assert LA.norm(laplacian(np.random.rand(N, 1), out_grad, out_div), ord=1) > 0
    # 3d. Applying curl grad x should return all 0 for any function
    #     We will have some non-zero outliers, e.g., on boundaries, so we check the mean
    #     of the squared L2 norm (emphasizes lower values).
    assert np.power(curl(out_grad @ pos[:, 0:1], out_div), 2).mean() < 1e-2
    assert np.median(np.power(curl(out_grad @ pos[:, 0:1], out_div), 2)) < 1e-2
    # 3e. Applying div co-grad x should return all 0 for any function
    assert np.power(out_div @ J(out_grad @ pos[:, 0:1]), 2).mean() < 1e-2
    assert np.median(np.power(out_div @ J(out_grad @ pos[:, 0:1]), 2)) < 1e-2

    # 4. Correct gradients/divergences
    # --------------------------------
    # We set up f as a height map over the x, y plane.
    # Therefore, the gradient in the x- and y-direction of f
    # should be the projection of [0, 0, 1] onto the two tangent vectors.
    grad_x_f, grad_y_f = (out_grad @ f).reshape(N, 2).T
    assert np.allclose(grad_x_f, x_basis[:, 2], atol=1e-2)
    assert np.allclose(grad_y_f, y_basis[:, 2], atol=1e-2)

    # Applying div grad to the point positions should
    # result in the mean curvature vector, pointing roughly along the normal.
    mean_curvature = laplacian(pos, out_grad, out_div)
    assert np.allclose(-batch_dot(mean_curvature, normal), LA.norm(mean_curvature, axis=1, keepdims=True), atol=1e-2)
--------------------------------------------------------------------------------
/test/test_operators.py:
--------------------------------------------------------------------------------
import numpy as np

from pcdiff import norm, J, I_J, curl, laplacian, hodge_laplacian


def random_v(N=1024, C=16, return_components=False):
    v_norm = np.random.rand(N, C) * 5
    v_angles = np.random.rand(N, C) * 2 * np.pi
    v_x = v_norm * np.cos(v_angles)
    v_y = v_norm * np.sin(v_angles)

    v = np.stack([v_x, v_y], axis=1).reshape(-1, C)
    if return_components:
        return v, v_norm, v_angles, v_x, v_y
    return v


def test_norm():
    v, v_norm, _, _, _ = random_v(1024, 16, True)
    out = norm(v)
    assert np.allclose(out, v_norm)


def test_J():
    N = 1024
    C = 16
    v, _, _, v_x, v_y = random_v(N, C, True)

    J_v = np.stack([-v_y, v_x], axis=1).reshape(-1, C)
    out = J(v)
    assert np.allclose(out, J_v)
    dot_v_J_v = (v.reshape(-1, 2, C) * out.reshape(-1, 2, C)).sum(axis=1)
    assert np.allclose(dot_v_J_v, np.zeros_like(v_x))


def test_I_J():
    N = 1024
    C = 16
    v = random_v(N, C)
    out = I_J(v)
    assert out.shape[1] == v.shape[1] * 2
    assert np.allclose(out[:, :C], v)
    assert np.allclose(out[:, C:], J(v))

# Curl, Laplacian, and Hodge-Laplacian are tested in test_build_grad_div (test/test_grad_div_mls.py)
--------------------------------------------------------------------------------
/test/test_utils.py:
--------------------------------------------------------------------------------
import numpy as np

from pcdiff import batch_dot


def test_batch_dot():
    a = np.random.rand(1024, 10)
    b = np.random.rand(1024, 10)

    a_dot_b = (a * b).sum(axis=1, keepdims=True)
    out = batch_dot(a, b)

    assert np.allclose(out, a_dot_b)
--------------------------------------------------------------------------------