├── .gitignore ├── .vscode ├── launch.json ├── settings.json └── tasks.json ├── Dockerfile ├── Dockerfile20 ├── Dockerfile20110 ├── LICENSE ├── Makefile ├── README.md ├── ablations ├── complex_tall.txt ├── network.py └── pcc.py ├── base_utils ├── file_utils.py ├── mesh_utils.py ├── mp_utils.py ├── point_cloud.py └── utils.py ├── config_a2p_il.py ├── config_p2m.py ├── config_pcc.py ├── dataset.py ├── dataset_pcc.py ├── docker-compose.debug.yml ├── docker-compose.yml ├── generate_data.py ├── loss_pcc.py ├── main.py ├── main_pcc.py ├── models ├── layers │ ├── mesh.py │ ├── mesh_conv.py │ ├── mesh_pool.py │ ├── mesh_union.py │ └── mesh_unpool.py ├── losses.py ├── networks.py └── networks_p2m.py ├── network_pcc.py ├── original.py ├── p2p_loss.py ├── point_ops ├── Chamfer3D │ ├── chamfer3D.cu │ ├── chamfer_cuda.cpp │ ├── dist_chamfer_3D.py │ └── setup.py ├── compile_chamfer.sh ├── compile_emd.sh ├── earth_movers_distance │ ├── emd.cpp │ ├── emd.py │ ├── emd_kernel.cu │ └── setup.py ├── pointnet2_ops │ ├── __init__.py │ ├── _ext-src │ │ ├── include │ │ │ ├── ball_query.h │ │ │ ├── cuda_utils.h │ │ │ ├── group_points.h │ │ │ ├── interpolate.h │ │ │ ├── sampling.h │ │ │ └── utils.h │ │ └── src │ │ │ ├── ball_query.cpp │ │ │ ├── ball_query_gpu.cu │ │ │ ├── bindings.cpp │ │ │ ├── group_points.cpp │ │ │ ├── group_points_gpu.cu │ │ │ ├── interpolate.cpp │ │ │ ├── interpolate_gpu.cu │ │ │ ├── sampling.cpp │ │ │ └── sampling_gpu.cu │ ├── _version.py │ ├── pointnet2_modules.py │ └── pointnet2_utils.py └── setup.py ├── possible_normal.txt ├── post_subnets ├── evals.py └── post_ops.py ├── pp_conv.py ├── pytorch3d-0.7.1-cp38-cp38-linux_x86_64.whl ├── requirements.txt ├── sample.txt ├── sdf_try.py ├── sdf_try_2040.py ├── test_pcc.py ├── tmp_daacfd2c-f017-48a2-8c2f-54e457d7c6e6.obj └── trials.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 92 | #Pipfile.lock 93 | 94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow 95 | __pypackages__/ 96 | 97 | # Celery stuff 98 | celerybeat-schedule 99 | celerybeat.pid 100 | 101 | # SageMath parsed files 102 | *.sage.py 103 | 104 | # Environments 105 | .env 106 | .venv 107 | env/ 108 | venv/ 109 | ENV/ 110 | env.bak/ 111 | venv.bak/ 112 | 113 | # Spyder project settings 114 | .spyderproject 115 | .spyproject 116 | 117 | # Rope project settings 118 | .ropeproject 119 | 120 | # mkdocs documentation 121 | /site 122 | 123 | # mypy 124 | .mypy_cache/ 125 | .dmypy.json 126 | dmypy.json 127 | 128 | # Pyre type checker 129 | .pyre/ 130 | 131 | # manual additions 132 | seedformer 133 | results/experiments 134 | models/networks.py 135 | docker-compose.yml 136 | docker-compose.debug.yml 137 | Dockerfile 138 | Dockerfile20110 139 | trials.py -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | "configurations": [ 3 | { 4 | "name": "Docker: Python-General", 5 | "type": "docker", 6 | "request": "launch", 7 | "preLaunchTask": "docker-run: debug", 8 | "python": { 9 | "pathMappings": [ 10 | { 11 | "localRoot": "${workspaceFolder}", 12 | "remoteRoot": "/app" 13 | } 14 | ], 15 | "projectType": "general" 16 | }, 17 | // "justMyCode": false 18 | } 19 | ] 20 | } -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "workbench.editorLargeFileConfirmation": 15 3 | } -------------------------------------------------------------------------------- /.vscode/tasks.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "2.0.0", 3 | "tasks": [ 4 | { 5 | "type": "docker-build", 6 | "label": "docker-build", 7 | "platform": "python", 8 | "dockerBuild": { 9 | "tag": "finerdocker:latest", 10 | "buildArgs":{"IMAGE_NAME":"nvidia/cuda", 11 | "TARGETARCH":"x86_64" 12 | }, 13 | "dockerfile": "${workspaceFolder}/Dockerfile20", 14 | "context": "${workspaceFolder}", 15 | "pull":true 16 | } 17 | }, 18 | { 19 | "type": "docker-run", 20 | "label": "docker-run: debug", 21 | "dependsOn": ["docker-build"], 22 | "dockerRun":{ 23 | "customOptions":"--gpus all", 24 | "volumes": [ 25 | {"localPath":"/mnt/data/rec_data","containerPath":"/data"}, 26 | // {"localPath":"${PWD}/data","containerPath":"/data"}, 27 | {"localPath":"/home/hope/reconstruction/results","containerPath":"/outputs"} 28 | ] 29 | }, 30 | "python": { 31 | "file": "test_pcc.py" 32 | } 33 | } 34 | ] 35 | } -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG IMAGE_NAME 2 | FROM ${IMAGE_NAME}:version0 3 | 4 | WORKDIR /app 5 | COPY . 
/app 6 | 7 | RUN adduser -u 5678 --disabled-password --gecos "" appuser && chown -R appuser /a2p 8 | USER appuser 9 | 10 | CMD ["python3", "main.py"] -------------------------------------------------------------------------------- /Dockerfile20: -------------------------------------------------------------------------------- 1 | ARG IMAGE_NAME 2 | FROM ${IMAGE_NAME}:11.3.1-runtime-ubuntu20.04 as base 3 | 4 | ENV NV_CUDA_LIB_VERSION "11.3.1-1" 5 | 6 | FROM base as base-amd64 7 | 8 | ENV NV_CUDA_CUDART_DEV_VERSION 11.3.109-1 9 | ENV NV_NVML_DEV_VERSION 11.3.58-1 10 | ENV NV_LIBCUSPARSE_DEV_VERSION 11.6.0.109-1 11 | ENV NV_LIBNPP_DEV_VERSION 11.3.3.95-1 12 | ENV NV_LIBNPP_DEV_PACKAGE libnpp-dev-11-3=${NV_LIBNPP_DEV_VERSION} 13 | 14 | ENV NV_LIBCUBLAS_DEV_VERSION 11.5.1.109-1 15 | ENV NV_LIBCUBLAS_DEV_PACKAGE_NAME libcublas-dev-11-3 16 | ENV NV_LIBCUBLAS_DEV_PACKAGE ${NV_LIBCUBLAS_DEV_PACKAGE_NAME}=${NV_LIBCUBLAS_DEV_VERSION} 17 | 18 | ENV NV_NVPROF_VERSION 11.3.111-1 19 | ENV NV_NVPROF_DEV_PACKAGE cuda-nvprof-11-3=${NV_NVPROF_VERSION} 20 | 21 | 22 | ENV NV_LIBNCCL_DEV_PACKAGE_NAME libnccl-dev 23 | ENV NV_LIBNCCL_DEV_PACKAGE_VERSION 2.9.9-1 24 | ENV NCCL_VERSION 2.9.9-1 25 | ENV NV_LIBNCCL_DEV_PACKAGE ${NV_LIBNCCL_DEV_PACKAGE_NAME}=${NV_LIBNCCL_DEV_PACKAGE_VERSION}+cuda11.3 26 | 27 | ARG TARGETARCH 28 | FROM base-amd64 29 | 30 | LABEL maintainer "NVIDIA CORPORATION " 31 | RUN apt-get update && apt-get install -y --no-install-recommends \ 32 | libtinfo5 libncursesw5 \ 33 | cuda-cudart-dev-11-3=${NV_CUDA_CUDART_DEV_VERSION} \ 34 | cuda-command-line-tools-11-3=${NV_CUDA_LIB_VERSION} \ 35 | cuda-minimal-build-11-3=${NV_CUDA_LIB_VERSION} \ 36 | cuda-libraries-dev-11-3=${NV_CUDA_LIB_VERSION} \ 37 | cuda-nvml-dev-11-3=${NV_NVML_DEV_VERSION} \ 38 | # ${NV_NVPROF_DEV_PACKAGE} \ 39 | ${NV_LIBNPP_DEV_PACKAGE} \ 40 | libcusparse-dev-11-3=${NV_LIBCUSPARSE_DEV_VERSION} \ 41 | ${NV_LIBCUBLAS_DEV_PACKAGE} \ 42 | ${NV_LIBNCCL_DEV_PACKAGE} \ 43 | # ${NV_CUDA_NSIGHT_COMPUTE_DEV_PACKAGE} \ 44 | && rm -rf /var/lib/apt/lists/* 45 | 46 | # Keep apt from auto upgrading the cublas and nccl packages. 
See https://gitlab.com/nvidia/container-images/cuda/-/issues/88 47 | RUN apt-mark hold ${NV_LIBCUBLAS_DEV_PACKAGE_NAME} ${NV_LIBNCCL_DEV_PACKAGE_NAME} 48 | 49 | ENV LIBRARY_PATH /usr/local/cuda/lib64/stubs 50 | 51 | 52 | # FROM ${IMAGE_NAME}:11.6.0-base-ubuntu20.04 as base 53 | 54 | # ENV NV_CUDA_LIB_VERSION 11.6.0-1 55 | 56 | # FROM base as base-amd64 57 | 58 | # ENV NV_NVTX_VERSION 11.6.55-1 59 | # ENV NV_LIBNPP_VERSION 11.6.0.55-1 60 | # ENV NV_LIBNPP_PACKAGE libnpp-11-6=${NV_LIBNPP_VERSION} 61 | # ENV NV_LIBCUSPARSE_VERSION 11.7.1.55-1 62 | # # ENV NV_NVML_VERSION 11.0.167-1 63 | 64 | # ENV NV_LIBCUBLAS_VERSION 11.8.1.74-1 65 | # ENV NV_LIBCUBLAS_PACKAGE_NAME libcublas-11-6 66 | # ENV NV_LIBCUBLAS_PACKAGE ${NV_LIBCUBLAS_PACKAGE_NAME}=${NV_LIBCUBLAS_VERSION} 67 | 68 | # # ENV NV_NVPROF_VERSION 11.0.221-1 69 | # # ENV NV_NVPROF_DEV_PACKAGE cuda-nvprof-11-0=${NV_NVPROF_VERSION} 70 | 71 | # ENV NV_LIBNCCL_PACKAGE_NAME libnccl2 72 | # ENV NV_LIBNCCL_PACKAGE_VERSION 2.12.10-1 73 | # ENV NCCL_VERSION 2.12.10-1 74 | # ENV NV_LIBNCCL_PACKAGE ${NV_LIBNCCL_PACKAGE_NAME}=${NV_LIBNCCL_PACKAGE_VERSION}+cuda11.6 75 | 76 | # ARG TARGETARCH 77 | # FROM base-amd64 78 | # LABEL maintainer "NVIDIA CORPORATION " 79 | # RUN apt-get update && apt-get install -y --no-install-recommends \ 80 | # cuda-libraries-11-6=${NV_CUDA_LIB_VERSION} \ 81 | # ${NV_LIBNPP_PACKAGE} \ 82 | # cuda-nvtx-11-6=${NV_NVTX_VERSION} \ 83 | # libcusparse-11-6=${NV_LIBCUSPARSE_VERSION} \ 84 | # ${NV_LIBCUBLAS_PACKAGE} \ 85 | # ${NV_LIBNCCL_PACKAGE} \ 86 | # # libtinfo5 libncursesw5 \ 87 | # # cuda-cudart-dev-11-6=${NV_CUDA_CUDART_DEV_VERSION} \ 88 | # # cuda-command-line-tools-11-0=${NV_CUDA_LIB_VERSION} \ 89 | # # cuda-minimal-build-11-0=${NV_CUDA_LIB_VERSION} \ 90 | # # cuda-nvml-dev-11-6=${NV_NVML_DEV_VERSION} \ 91 | # # cuda-nvprof-11-6=${NV_NVPROF_VERSION} \ 92 | # && rm -rf /var/lib/apt/lists/* 93 | 94 | # # Keep apt from auto upgrading the cublas and nccl packages. 
See https://gitlab.com/nvidia/container-images/cuda/-/issues/88 95 | # RUN apt-mark hold ${NV_LIBCUBLAS_PACKAGE_NAME} ${NV_LIBNCCL_PACKAGE_NAME} 96 | 97 | # ENV LIBRARY_PATH /usr/local/cuda/lib64/stubs 98 | 99 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \ 100 | PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \ 101 | GIT_CLONE="git clone --depth 10" && \ 102 | apt-get update && apt-get install -y --no-install-recommends &&\ 103 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \ 104 | build-essential \ 105 | ca-certificates \ 106 | cmake \ 107 | wget \ 108 | git \ 109 | vim \ 110 | nano \ 111 | libx11-dev \ 112 | fish \ 113 | libsparsehash-dev \ 114 | sqlite3 \ 115 | libsqlite3-dev \ 116 | curl \ 117 | libcurl4-openssl-dev \ 118 | # libosmesa6-dev \ 119 | # llvm-6.0 \ 120 | # llvm-6.0-tools \ 121 | # freeglut3 \ 122 | # freeglut3-dev \ 123 | # libglfw3-dev \ 124 | # libgles2-mesa-dev \ 125 | python3-opengl \ 126 | pkg-config \ 127 | && \ 128 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \ 129 | software-properties-common \ 130 | && \ 131 | add-apt-repository ppa:deadsnakes/ppa && \ 132 | apt-get update && \ 133 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \ 134 | python3.8 \ 135 | python3.8-dev \ 136 | && \ 137 | wget -O ~/get-pip.py \ 138 | https://bootstrap.pypa.io/get-pip.py && \ 139 | python3.8 ~/get-pip.py && \ 140 | ln -s /usr/bin/python3.8 /usr/local/bin/python3 && \ 141 | ln -s /usr/bin/python3.8 /usr/local/bin/python && \ 142 | $PIP_INSTALL \ 143 | setuptools \ 144 | && \ 145 | # ================================================================== 146 | # python 147 | # ------------------------------------------------------------------ 148 | $PIP_INSTALL \ 149 | numpy \ 150 | torch==1.12.1+cu113 torchvision==0.13.1+cu113 -f https://download.pytorch.org/whl/torch_stable.html \ 151 | scipy \ 152 | matplotlib \ 153 | Cython \ 154 | tqdm \ 155 | provider \ 156 | imageio \ 157 | # tfrecord \ 158 | natsort \ 159 | joblib \ 160 | tensorboard \ 161 | # coolname \ 162 | tabulate \ 163 | # runx \ 164 | ninja \ 165 | # nose \ 166 | # memcnn \ 167 | # dominate \ 168 | # cffi \ 169 | # piexif \ 170 | scikit-image \ 171 | jupyter \ 172 | scikit-learn \ 173 | numba \ 174 | einops \ 175 | opencv-python \ 176 | open3d \ 177 | torchsummary \ 178 | pytictoc \ 179 | # gdown \ 180 | # timm \ 181 | h5py \ 182 | # bz2file \ 183 | # hdf5storage \ 184 | pandas \ 185 | PyYAML \ 186 | Pillow \ 187 | plyfile \ 188 | pyntcloud \ 189 | # pycocotools \ 190 | pickleshare \ 191 | trimesh \ 192 | pyrender \ 193 | # p2j \ 194 | && \ 195 | ldconfig && \ 196 | apt-get clean && \ 197 | apt-get autoremove && \ 198 | rm -rf /var/lib/apt/lists/* /tmp/* ~/* 199 | 200 | # RUN wget https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py38_cu113_pyt1121/pytorch3d-0.7.1-cp38-cp38-linux_x86_64.whl 201 | # RUN git submodule add https://github.com/ThibaultGROUEIX/ChamferDistancePytorch 202 | ENV SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL True 203 | RUN PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \ 204 | $PIP_INSTALL \ 205 | mesh-to-sdf \ 206 | # torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.1+cu113.html \ 207 | # torch-sparse -f https://pytorch-geometric.com/whl/torch-1.12.1+cu113.html \ 208 | # torch-geometric \ 209 | # torch-cluster -f https://pytorch-geometric.com/whl/torch-1.12.1+cu113.html \ 210 | # pytorch3d-0.7.1-cp38-cp38-linux_x86_64.whl \ 211 | torch-summary \ 212 | pytorch_warmup 213 | 214 | 215 | # # RUN MESA_HOME="/mesa-18.3.3" 216 | # 
ENV LIBRARY_PATH="$LIBRARY_PATH:/mesa-18.3.3/lib" 217 | # ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$/mesa-18.3.3/lib" 218 | # ENV C_INCLUDE_PATH="$C_INCLUDE_PATH:$/mesa-18.3.3/include/" 219 | # ENV CPLUS_INCLUDE_PATH="$CPLUS_INCLUDE_PATH:/mesa-18.3.3/include/" 220 | 221 | # RUN GIT_CLONE="git clone --recursive" && \ 222 | # $GIT_CLONE \ 223 | # https://github.com/ThibaultGROUEIX/ChamferDistancePytorch.git && \ 224 | # cd ChamferDistancePytorch/chamfer3D && \ 225 | # python3 setup.py install && \ 226 | # cd ../.. 227 | 228 | RUN GIT_CLONE="git clone --recursive" && \ 229 | $GIT_CLONE \ 230 | https://github.com/hjwdzh/Manifold.git && \ 231 | cd Manifold && \ 232 | mkdir build && \ 233 | cd build && \ 234 | cmake .. -DCMAKE_BUILD_TYPE=Release && \ 235 | make && \ 236 | cd ../.. 237 | 238 | WORKDIR /app 239 | COPY pytorch3d-0.7.1-cp38-cp38-linux_x86_64.whl pytorch3d-0.7.1-cp38-cp38-linux_x86_64.whl 240 | RUN pip install pytorch3d-0.7.1-cp38-cp38-linux_x86_64.whl 241 | # workdir is where u work in dockers 242 | # copy . /app copies content of ur supposed working dir to the docker wk dir 243 | 244 | # comment out the following lines if u want to build image and commit it to dockerhub 245 | COPY . . 246 | 247 | # RUN bash point_ops/compile_chamfer.sh 248 | # RUN bash point_ops/compile_emd.sh 249 | 250 | # if this works, try setup of pointnet2ops to reduce compilation time. 251 | 252 | RUN adduser -u 5678 --disabled-password --gecos "" appuser && chown -R appuser /app 253 | USER appuser 254 | 255 | CMD ["python", "test_pcc.py"] 256 | -------------------------------------------------------------------------------- /Dockerfile20110: -------------------------------------------------------------------------------- 1 | ARG IMAGE_NAME 2 | FROM ${IMAGE_NAME}:11.0.3-base-ubuntu20.04 as base 3 | 4 | ENV NV_CUDA_LIB_VERSION 11.0.3-1 5 | 6 | FROM base as base-amd64 7 | 8 | ENV NV_NVTX_VERSION 11.0.167-1 9 | ENV NV_LIBNPP_VERSION 11.1.0.245-1 10 | ENV NV_LIBNPP_PACKAGE libnpp-11-0=${NV_LIBNPP_VERSION} 11 | ENV NV_LIBCUSPARSE_VERSION 11.1.1.245-1 12 | 13 | ENV NV_LIBCUBLAS_PACKAGE_NAME libcublas-11-0 14 | ENV NV_LIBCUBLAS_VERSION 11.2.0.252-1 15 | ENV NV_LIBCUBLAS_PACKAGE ${NV_LIBCUBLAS_PACKAGE_NAME}=${NV_LIBCUBLAS_VERSION} 16 | 17 | ENV NV_LIBNCCL_PACKAGE_NAME libnccl2 18 | ENV NV_LIBNCCL_PACKAGE_VERSION 2.15.5-1 19 | ENV NCCL_VERSION 2.15.5-1 20 | ENV NV_LIBNCCL_PACKAGE ${NV_LIBNCCL_PACKAGE_NAME}=${NV_LIBNCCL_PACKAGE_VERSION}+cuda11.0 21 | 22 | FROM base-amd64 23 | 24 | ARG TARGETARCH 25 | 26 | LABEL maintainer "NVIDIA CORPORATION " 27 | 28 | RUN apt-get update && apt-get install -y --no-install-recommends \ 29 | cuda-libraries-11-0=${NV_CUDA_LIB_VERSION} \ 30 | ${NV_LIBNPP_PACKAGE} \ 31 | cuda-nvtx-11-0=${NV_NVTX_VERSION} \ 32 | libcusparse-11-0=${NV_LIBCUSPARSE_VERSION} \ 33 | ${NV_LIBCUBLAS_PACKAGE} \ 34 | ${NV_LIBNCCL_PACKAGE} \ 35 | && rm -rf /var/lib/apt/lists/* 36 | 37 | # Keep apt from auto upgrading the cublas and nccl packages. 
See https://gitlab.com/nvidia/container-images/cuda/-/issues/88 38 | RUN apt-mark hold ${NV_LIBCUBLAS_PACKAGE_NAME} ${NV_LIBNCCL_PACKAGE_NAME} 39 | 40 | #--upgrade 41 | RUN APT_INSTALL="apt-get install -y --no-install-recommends" && \ 42 | PIP_INSTALL="python -m pip --no-cache-dir install" && \ 43 | GIT_CLONE="git clone --depth 10" && \ 44 | apt-get update && apt-get install -y --no-install-recommends &&\ 45 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \ 46 | build-essential \ 47 | ca-certificates \ 48 | cmake \ 49 | wget \ 50 | git \ 51 | vim \ 52 | nano \ 53 | libx11-dev \ 54 | fish \ 55 | libsparsehash-dev \ 56 | sqlite3 \ 57 | libsqlite3-dev \ 58 | curl \ 59 | libcurl4-openssl-dev \ 60 | python3-opengl \ 61 | pkg-config \ 62 | && \ 63 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \ 64 | software-properties-common \ 65 | && \ 66 | add-apt-repository ppa:deadsnakes/ppa && \ 67 | apt-get update && \ 68 | DEBIAN_FRONTEND=noninteractive $APT_INSTALL \ 69 | python3.8 \ 70 | python3.8-dev \ 71 | python3.8-distutils \ 72 | # python3-pip \ 73 | # python-wheel \ 74 | && \ 75 | wget -O ~/get-pip.py \ 76 | https://bootstrap.pypa.io/get-pip.py && \ 77 | python3.8 ~/get-pip.py && \ 78 | ln -s /usr/bin/python3.8 /usr/local/bin/python3 && \ 79 | ln -s /usr/bin/python3.8 /usr/local/bin/python && \ 80 | $PIP_INSTALL \ 81 | setuptools \ 82 | numpy \ 83 | scipy \ 84 | matplotlib \ 85 | Cython \ 86 | tqdm \ 87 | provider \ 88 | imageio \ 89 | # tfrecord \ 90 | natsort \ 91 | joblib \ 92 | tensorboard \ 93 | coolname \ 94 | tabulate \ 95 | runx \ 96 | ninja \ 97 | nose \ 98 | # memcnn \ 99 | dominate \ 100 | # cffi \ 101 | # piexif \ 102 | scikit-image \ 103 | jupyter \ 104 | sklearn \ 105 | # numba \ 106 | einops \ 107 | opencv-python \ 108 | open3d \ 109 | torchsummary \ 110 | # pytictoc \ 111 | gdown \ 112 | timm \ 113 | h5py \ 114 | bz2file \ 115 | hdf5storage \ 116 | pandas \ 117 | PyYAML \ 118 | Pillow \ 119 | plyfile \ 120 | pyntcloud \ 121 | # pycocotools \ 122 | pickleshare \ 123 | trimesh \ 124 | pyrender \ 125 | # p2j \ 126 | mesh-to-sdf \ 127 | && \ 128 | ldconfig && \ 129 | apt-get clean && \ 130 | apt-get autoremove && \ 131 | rm -rf /var/lib/apt/lists/* /tmp/* ~/* 132 | 133 | RUN PIP_INSTALL="python -m pip --no-cache-dir install" && \ 134 | $PIP_INSTALL \ 135 | torch==1.7.1 torchvision==0.8.2 -f https://download.pytorch.org/whl/torch_stable.html \ 136 | # torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html \ 137 | # torch-scatter -f https://pytorch-geometric.com/whl/torch-1.6.0+cu101.html \ 138 | # torch-sparse -f https://pytorch-geometric.com/whl/torch-1.6.0+cu101.html \ 139 | # torch-geometric \ 140 | # torch-cluster -f https://pytorch-geometric.com/whl/torch-1.6.0+cu101.html \ 141 | # RUN PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \ 142 | # GIT_CLONE="git clone --recurse-submodules" && \ 143 | # $GIT_CLONE \ 144 | # https://github.com/rubenwiersma/deltaconv.git && \ 145 | # cd deltaconv && \ 146 | # $PIP_INSTALL \ 147 | # ./ \ 148 | # # pyOpenGL_accelerate 149 | && \ 150 | curl -LO https://github.com/NVIDIA/cub/archive/1.10.0.tar.gz && \ 151 | tar xzf 1.10.0.tar.gz 152 | 153 | # workdir is where u work in dockers 154 | # copy . /app copies content of ur supposed working dir to the docker wk dir 155 | WORKDIR /app 156 | COPY . 
/app 157 | 158 | ENV CUB_HOME=$PWD/cub-1.10.0 159 | RUN PIP_INSTALL="python -m pip --no-cache-dir install --upgrade" && \ 160 | $PIP_INSTALL \ 161 | "git+https://github.com/facebookresearch/pytorch3d.git@stable" 162 | 163 | # ENV PATH="$PATH:/usr/lib/llvm-6.0/bin" 164 | # ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/usr/lib/llvm-6.0/lib" 165 | 166 | RUN adduser -u 5678 --disabled-password --gecos "" appuser && chown -R appuser /app 167 | USER appuser 168 | 169 | RUN echo "CUB_HOME env path: ${CUB_HOME}" 170 | RUN python -c "import torch; print(torch.__version__)" 171 | RUN python -c "import torch; print(torch.version.cuda)" 172 | RUN echo "PATH: ${PATH}" 173 | # RUN echo "CPATH: ${CPATH}" 174 | 175 | CMD ["python", "dataset.py"] 176 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | clean: 2 | docker rm -f $$(docker ps -qa) 3 | build: 4 | docker build --build-arg IMAGE_NAME=nvidia/cuda -t sampledocker . 5 | 6 | cleandangling: 7 | docker rmi -f $$(docker images -f "dangling=true" -q) 8 | 9 | run: 10 | docker run -it \ 11 | --runtime=nvidia \ 12 | --net=host \ 13 | --privileged=true \ 14 | --ipc=host \ 15 | --volume="/home/hope/docker_debug:/app" \ 16 | --volume="/mnt/data/rec_data:/data" \ 17 | --env="DISPLAY" \ 18 | --env="QT_X11_NO_MITSHM=1" \ 19 | --hostname="inside-DOCKER" \ 20 | --name="sampledocker" \ 21 | sampledocker bash 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # APC2Mesh 2 | 3 | __*APC2Mesh*__ contains the implementation of our paper "*[APC2Mesh: Bridging the gap from occluded building façades to full 3D models](https://www.sciencedirect.com/science/article/pii/S0924271624001692)*". 4 | 5 | > [!NOTE] 6 | > This [Building3D benchmark dataset](https://building3d.ucalgary.ca/reconstruction.php) is still undergoing curation and revisions. For now, the website only has data for Tallinn City. [Here](https://drive.google.com/file/d/17Wdi3ceJxMuyhHVmBjMeEyHod04p6QoQ/view?usp=sharing) is a subset of the Building3D dataset used in APC2Mesh. The `sample-data` folder has three sub-folders, `mesh`, `xyz`, and `wframe`, which contain the ground-truth meshes, partial point sets, and wireframes of buildings from different cities. 7 | 8 | 9 | ### Important project files to highlight 10 | ``` bash 11 | 12 | APC2Mesh/ 13 | │ 14 | ├── README.md # Project documentation 15 | ├── sdf_try.py # The python script for pre-processing the files in `sample-data` 16 | ├── dataset_pcc.py # The python script for the custom dataloader for point completion task 17 | ├── .vscode/ # VS Code Docker build/run configuration 18 | │ └── tasks.json 19 | ├── ablations/ # Ablation experiments 20 | │ └── pcc.py # Script used to train the point completion model 21 | ├── main.py # Script used to run the reconstruction phase of the project 22 | └── Dockerfile20 # Used in conjunction with `.vscode/tasks.json` to run the project in Docker 23 | 24 | ``` 25 | 26 | ### Prerequisites 27 | The Python packages and environment settings used to build this project are listed in `Dockerfile20`. 28 | To run this repo, either: 29 | 1. Use the packages and environment specifications from the Dockerfile to create your own local workspace, OR 30 | 2. Build the image from the Dockerfile and run the project in a Docker container (see the build/run sketch below).
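For option 2, the commands below are a minimal sketch of that workflow; the image tag, build arguments, and volume mounts mirror `.vscode/tasks.json` and the `Makefile`, and the host paths are examples from those files that should be replaced with your own data and results directories:

``` bash
# Build the image from Dockerfile20 (CUDA base image and target arch passed as build args)
docker build -f Dockerfile20 \
    --build-arg IMAGE_NAME=nvidia/cuda \
    --build-arg TARGETARCH=x86_64 \
    -t finerdocker:latest .

# Run the container with GPU access and the data/output mounts used during development
# (requires the NVIDIA Container Toolkit for --gpus)
docker run -it --gpus all \
    -v /mnt/data/rec_data:/data \
    -v /home/hope/reconstruction/results:/outputs \
    finerdocker:latest bash
```

Inside the container, the entry points highlighted above (`ablations/pcc.py` for point-completion training, `main.py` for the reconstruction phase, `test_pcc.py` for testing) can then be run directly with `python`.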
31 | 32 | ### Citation 33 | If you use APC2Mesh in a scientific work, please consider citing the paper: 34 | 35 | ``` bibtex 36 | @article{akwensi2024apc2mesh, 37 | title = {APC2Mesh: Bridging the gap from occluded building façades to full 3D models}, 38 | journal = {ISPRS Journal of Photogrammetry and Remote Sensing}, 39 | volume = {211}, 40 | pages = {438-451}, 41 | year = {2024}, 42 | issn = {0924-2716}, 43 | doi = {https://doi.org/10.1016/j.isprsjprs.2024.04.009}, 44 | url = {https://www.sciencedirect.com/science/article/pii/S0924271624001692}, 45 | author = {Perpetual Hope Akwensi and Akshay Bharadwaj and Ruisheng Wang} 46 | } 47 | ``` 48 | -------------------------------------------------------------------------------- /ablations/complex_tall.txt: -------------------------------------------------------------------------------- 1 | Sillamae_111.obj 2 | Sillamae_774.obj 3 | Sillamae_1105.obj 4 | Sillamae_1107.obj 5 | Sillamae_1155.obj 6 | Sillamae_1183.obj 7 | Sillamae_1185.obj 8 | Sillamae_1222.obj 9 | Sillamae_1274.obj 10 | Sillamae_1498.obj 11 | Sillamae_1706.obj 12 | Sillamae_1728.obj 13 | Sillamae_1734.obj 14 | Sillamae_1738.obj 15 | Sillamae_1747.obj 16 | Sillamae_1752.obj 17 | Sillamae_1768.obj 18 | Sillamae_1782.obj 19 | Sillamae_1803.obj 20 | Sillamae_1806.obj 21 | Sillamae_1814.obj 22 | Sillamae_1877.obj 23 | Sillamae_2014.obj 24 | Paide_655.obj 25 | Paide_866.obj 26 | Paide_1442.obj 27 | Paide_1529.obj 28 | Paide_1628.obj 29 | Paide_1789.obj 30 | Paide_1887.obj 31 | Paide_1959.obj 32 | Paide_2170.obj 33 | Paide_2484.obj 34 | Paide_2687.obj 35 | Paide_2723.obj 36 | Loksa_101.obj 37 | Loksa_477.obj 38 | Loksa_796.obj 39 | Loksa_839.obj -------------------------------------------------------------------------------- /ablations/pcc.py: -------------------------------------------------------------------------------- 1 | import os, sys 2 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 3 | import torch, random 4 | from dataset_pcc import CustomDataset 5 | from torch.utils import data 6 | from network import PCCNet, validate 7 | from point_ops.pointnet2_ops import pointnet2_utils as p2u 8 | # from torch.utils.tensorboard import SummaryWriter 9 | import pytorch_warmup as warmup 10 | from loss_pcc import chamfer_loss_sqrt, l2_normal_loss, density_cd #, emd_loss 11 | from config_pcc import Args as args 12 | from config_pcc import start_logger 13 | from base_utils.file_utils import save_args 14 | from pytictoc import TicToc 15 | 16 | 17 | seed_value = 42 18 | random.seed(seed_value) 19 | torch.manual_seed(seed_value) 20 | torch.cuda.manual_seed(seed_value) 21 | 22 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') 23 | 24 | tr_dataset = CustomDataset(split='ablation_tr', npoints=args.npoints, device=device) 25 | tr_loader = data.DataLoader(tr_dataset, batch_size=args.bs, shuffle=True) 26 | 27 | ts_dataset = CustomDataset(split='ablation_ts', npoints=args.npoints, device=device) 28 | ts_loader = data.DataLoader(ts_dataset, batch_size=args.bs, shuffle=False) 29 | 30 | pcc_model = PCCNet(kmax=20, code_dim=1024, use_nmls=True, 31 | multi_scale=True, attn_pool=True, fps_crsovr=True).to(device) 32 | 33 | # sinkhorn_loss = SinkhornDistance(eps=1.0, max_iter=6, thresh=1e-5, reduction='mean') 34 | 35 | if args.load_chkpnt: 36 | optimizer = torch.optim.AdamW(pcc_model.parameters(), lr=args.fix_lr, betas=(0.9, 0.999), 37 | weight_decay=args.wd) #eps=1e-08, 38 | scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', 
factor=0.93, patience=1, verbose=True) 39 | else: 40 | optimizer = torch.optim.AdamW(pcc_model.parameters(), lr=args.lr, betas=(0.9, 0.999), 41 | weight_decay=args.wd) #eps=1e-08, 42 | 43 | num_steps = len(tr_loader) * args.max_epoch 44 | lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_steps, eta_min=args.eta_min) #TODO: try steplr &eponentiallr too 45 | warmup_scheduler = warmup.UntunedLinearWarmup(optimizer) 46 | 47 | save_args(args) 48 | 49 | # loss init 50 | t = TicToc() #create instance of class 51 | llr_logger = start_logger(log_dir=args.log_dir, fname='lr_loss') 52 | if args.load_chkpnt: 53 | pcc_model.load_state_dict(torch.load(args.chkpnt_path)) 54 | llr_logger.info('model loaded and training continues from: %s'%(args.chkpnt_path)) 55 | llr_logger.info('ms:[10,20,30] | 256-trsf | 4heads | scale: x4 | bs: 8 | #ep: 140 |lr: 0.0006 | eta_min: 1e-07 | #tr/ts: 2k/40 | tr_loss: %s' %(args.tr_loss)) 56 | llr_logger.info('fps_crsovr: %s | attn_pool: %s | multiscale: %s | use_nmls: %s ' %(True, True, True, True)) 57 | # tb = SummaryWriter(comment=f'ms:[10,20,30] | 256-trsf | 4heads | scale: [x4, +ppconv]') 58 | 59 | init_epoch = int(args.chkpnt_path[-23:-20]) if args.load_chkpnt else 1 60 | max_epoch = init_epoch + 60 if args.load_chkpnt else args.max_epoch 61 | best_val_cdp = 20.0 62 | best_val_cdt = 20.0 63 | coarse_list, fine_list, total_list = [], [], [] 64 | for epoch in range(init_epoch, max_epoch+1): 65 | avg_time_per_iter = [] 66 | for i, data in enumerate(tr_loader, 0): 67 | t.tic() #Start timer 68 | optimizer.zero_grad() 69 | xyz = data[0][:, :, :6].to(device).float() # partial: [B 2048, 6] include normals 70 | if not pcc_model.use_nmls: 71 | xyz = xyz[:, :, :3] 72 | pcc_model.train() 73 | coarse, fine, finer = pcc_model(xyz) 74 | 75 | # Loss (xyz) 76 | gt_xyz = data[1][:, :, :6].to(device).float() # partial: [B 16348, 6] include normals 77 | 78 | gt_fine = p2u.gather_operation(gt_xyz.permute(0,2,1).contiguous(), p2u.furthest_point_sample(gt_xyz[:,:,:3].contiguous(), fine.size(1))).permute(0,2,1) 79 | if args.tr_loss == 'dcd': 80 | loss_fine = density_cd(fine[:, :, :3], gt_fine[:, :, :3], alpha=args.t_alpha, n_lambda=args.n_lambda) 81 | elif args.tr_loss == 'cdp': 82 | loss_fine = chamfer_loss_sqrt(fine[:, :, :3], gt_fine[:, :, :3]) * 10 #inputs shd be BNC 83 | 84 | gt_coarse = p2u.gather_operation(gt_fine.permute(0,2,1).contiguous(), p2u.furthest_point_sample(gt_fine[:,:,:3].contiguous(), coarse.size(1))).permute(0,2,1) 85 | if args.tr_loss == 'dcd': 86 | loss_coarse = density_cd(coarse[:, :, :3], gt_coarse[:, :, :3], alpha=args.t_alpha, n_lambda=args.n_lambda) 87 | elif args.tr_loss == 'cdp': 88 | loss_coarse = chamfer_loss_sqrt(coarse[:, :, :3], gt_coarse[:, :, :3]) 89 | 90 | loss = loss_coarse + loss_fine 91 | if pcc_model.use_nmls: 92 | normal_loss = l2_normal_loss(gt_fine, fine) 93 | else: 94 | normal_loss = 0.0 95 | 96 | coarse_list.append(loss_coarse) 97 | fine_list.append(loss_fine) 98 | 99 | # Loss (normals) 100 | # nbr_pnt_loss, nbr_norm_loss = nbrhood_uniformity_loss(fine, 10, 10) 101 | loss = loss + normal_loss #0.1* # + nbr_pnt_loss + nbr_norm_loss 102 | 103 | # if args.p2p and (epoch % args.p2p_interval) == 0: 104 | # loss += p2p_dist(gt_fine, fine) * 0.1 105 | 106 | total_list.append(loss) 107 | 108 | if i % args.record_step == 0: 109 | 110 | iter_time = 0.0 if i == 0 else sum(avg_time_per_iter)/len(avg_time_per_iter) 111 | llr_logger.info('Epoch %.3d | iter %.3d/%d, %.5f secs | l_coarse: %.6f | l_fine: %.6f | l_total: %.6f | lrs: 
%.10f | c_lr: %.10f' % (epoch, i, 112 | len(tr_dataset)/args.bs, 113 | iter_time, 114 | (sum(coarse_list)/len(coarse_list)).item(), 115 | (sum(fine_list)/len(fine_list)).item(), 116 | (sum(total_list)/len(total_list)).item(), 117 | 0.0 if args.load_chkpnt else warmup_scheduler.lrs[0], 118 | optimizer.param_groups[0]['lr'])) 119 | coarse_list, fine_list, total_list = [], [], [] 120 | #TODO: push record_step info it to tb for graphing or retrieve n plot from log files 121 | 122 | loss.backward() 123 | optimizer.step() 124 | 125 | if not args.load_chkpnt: 126 | with warmup_scheduler.dampening(): 127 | lr_scheduler.step() 128 | 129 | avg_time_per_iter.append(t.tocvalue()) # t.tocvalue: time elapsed since t.tic() 130 | 131 | if (epoch % args.val_interval == 0) or (epoch == args.max_epoch): # bcoz max_epoch above is +1 132 | val_losses = validate(pcc_model, ts_loader, epoch, args, device=loss.device, rand_save=True) 133 | '''first two values after vEpoch will be zero if args.tr_loss == 'cd' ''' 134 | llr_logger.info('vEpoch %.3d | %s_fine: %.6f | %s_coarse: %.6f |cdp_fine: %.6f | cdt_fine: %.6f | cdp_coarse: %.6f | cdt_coarse: %.6f' %(epoch, 135 | args.tr_loss, 136 | val_losses['fine_s'], 137 | args.tr_loss, 138 | val_losses['coarse_s'], 139 | val_losses['fine_p'], 140 | val_losses['fine_t'], 141 | val_losses['coarse_p'], 142 | val_losses['coarse_t'])) 143 | if args.load_chkpnt: 144 | scheduler.step(val_losses['fine_p']) 145 | # if args.tr_loss == 'dcd' and (val_losses['fine_d'] < best_val_cdp) or (val_losses['coarse_d'] < best_val_cdt): 146 | # best_val_cdp = val_losses['fine_d'] 147 | # best_val_cdt = val_losses['coarse_d'] 148 | # torch.save(pcc_model.state_dict(), '%s/pccnet_%.3d_%.5f_%.5f.pth' % (str(args.ckpts_dir), epoch, best_val_cdp, best_val_cdt)) 149 | 150 | if (val_losses['fine_p'] < best_val_cdp) or (val_losses['fine_t'] < best_val_cdt): 151 | best_val_cdp = val_losses['fine_p'] 152 | best_val_cdt = val_losses['fine_t'] 153 | torch.save(pcc_model.state_dict(), '%s/pccnet_%.3d_%.5f_%.5f.pth' % (str(args.ckpts_dir), epoch, best_val_cdp, best_val_cdt)) 154 | print("Saving model...") 155 | 156 | print('done ...') 157 | 158 | -------------------------------------------------------------------------------- /base_utils/file_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os 3 | 4 | def save_args(args): 5 | file = open(os.path.join(args.log_dir, 'args.txt'), "w") 6 | for k, v in vars(args).items(): 7 | if k in ['__dict__', '__weakref__', '__doc__']: 8 | continue 9 | file.write(f"{k}:\t {v}\n") 10 | file.close() 11 | 12 | def read_transformation(denorm_file_abs): 13 | with open(denorm_file_abs) as f: 14 | trans = f.read().splitlines() 15 | 16 | scale = int(trans[0]) 17 | centroid = np.array(trans[1]) 18 | 19 | # print("scale: ", scale) 20 | # print("centroid: ", centroid) 21 | return scale, centroid 22 | 23 | def filename_to_hash(file_path): 24 | import hashlib 25 | if not os.path.isfile(file_path): 26 | raise ValueError('Path does not point to a file: {}'.format(file_path)) 27 | hash_input = os.path.basename(file_path).split('.')[0] 28 | hash = int(hashlib.md5(hash_input.encode()).hexdigest(), 16) % (2**32 - 1) 29 | return hash 30 | 31 | def make_dir_for_file(file): 32 | file_dir = os.path.dirname(file) 33 | if file_dir != '': 34 | if not os.path.exists(file_dir): 35 | try: 36 | os.makedirs(os.path.dirname(file)) 37 | except OSError as exc: # Guard against race condition 38 | raise 39 | 40 | def 
call_necessary(file_in, file_out, min_file_size=0): 41 | """ 42 | Check if all input files exist and at least one output file does not exist or is invalid. 43 | :param file_in: list of str or str 44 | :param file_out: list of str or str 45 | :param min_file_size: int 46 | :return: 47 | """ 48 | 49 | if isinstance(file_in, str): 50 | file_in = [file_in] 51 | elif isinstance(file_in, list): 52 | pass 53 | else: 54 | raise ValueError('Wrong input type') 55 | 56 | if isinstance(file_out, str): 57 | file_out = [file_out] 58 | elif isinstance(file_out, list): 59 | pass 60 | else: 61 | raise ValueError('Wrong output type') 62 | 63 | inputs_missing = [f for f in file_in if not os.path.isfile(f)] 64 | if len(inputs_missing) > 0: 65 | print('WARNING: Input file are missing: {}'.format(inputs_missing)) 66 | return False 67 | 68 | outputs_missing = [f for f in file_out if not os.path.isfile(f)] 69 | if len(outputs_missing) > 0: 70 | if len(outputs_missing) < len(file_out): 71 | print("WARNING: Only some output files are missing: {}".format(outputs_missing)) 72 | return True 73 | 74 | min_output_file_size = min([os.path.getsize(f) for f in file_out]) 75 | if min_output_file_size < min_file_size: 76 | return True 77 | 78 | oldest_input_file_mtime = max([os.path.getmtime(f) for f in file_in]) 79 | youngest_output_file_mtime = min([os.path.getmtime(f) for f in file_out]) 80 | 81 | if oldest_input_file_mtime >= youngest_output_file_mtime: 82 | # debug 83 | import time 84 | input_file_mtime_arg_max = np.argmax(np.array([os.path.getmtime(f) for f in file_in])) 85 | output_file_mtime_arg_min = np.argmin(np.array([os.path.getmtime(f) for f in file_out])) 86 | input_file_mtime_max = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(oldest_input_file_mtime)) 87 | output_file_mtime_min = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(youngest_output_file_mtime)) 88 | print('Input file {} \nis newer than output file {}: \n{} >= {}'.format( 89 | file_in[input_file_mtime_arg_max], file_out[output_file_mtime_arg_min], 90 | input_file_mtime_max, output_file_mtime_min)) 91 | return True 92 | 93 | return False 94 | 95 | -------------------------------------------------------------------------------- /base_utils/mesh_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import uuid 3 | import trimesh 4 | import torch, os, glob 5 | from typing import Tuple 6 | 7 | 8 | MANIFOLD_SOFTWARE_DIR = "/Manifold/build" 9 | 10 | 11 | def random_file_name(ext, prefix="tmp_"): 12 | return f"{prefix}{uuid.uuid4()}.{ext}" 13 | 14 | def remesh(hull_mesh, num_faces): 15 | 16 | # Write the original mesh as OBJ. 17 | original_file = random_file_name("obj") 18 | with open(original_file, "w") as f: 19 | mesh = trimesh.Trimesh(vertices=hull_mesh.vertices, faces=hull_mesh.faces) 20 | f.write(trimesh.exchange.obj.export_obj(mesh)) 21 | 22 | # Create a manifold of the original file. 23 | manifold_file = random_file_name("obj") 24 | manifold_script_path = os.path.join(MANIFOLD_SOFTWARE_DIR, "manifold") 25 | cmd = f"{manifold_script_path} {original_file} {manifold_file}" 26 | os.system(cmd) 27 | 28 | # Simplify the manifold. 29 | simplified_file = random_file_name("obj") 30 | simplify_script_path = os.path.join(MANIFOLD_SOFTWARE_DIR, "simplify") 31 | cmd = ( 32 | f"{simplify_script_path} -i {manifold_file} -o {simplified_file} -f {num_faces}" 33 | ) 34 | os.system(cmd) 35 | 36 | # Read the simplified manifold. 
37 | with open(simplified_file, "r") as f: 38 | mesh = trimesh.exchange.obj.load_obj(f) 39 | 40 | # Prevent file spam. 41 | os.remove(original_file) 42 | os.remove(manifold_file) 43 | os.remove(simplified_file) 44 | 45 | return mesh["vertices"], mesh["faces"] 46 | 47 | def manifold_upsample(mesh, save_path, curr_fname, Mesh, num_faces=2000, res=3000, simplify=True): 48 | # export before upsample 49 | fname = os.path.join(save_path, '{}_{}.obj'.format(curr_fname,len(mesh.faces))) 50 | mesh.export(fname) 51 | 52 | temp_file = os.path.join(save_path, random_file_name('obj')) 53 | opts = ' ' + str(res) if res is not None else '' 54 | 55 | manifold_script_path = os.path.join(MANIFOLD_SOFTWARE_DIR, 'manifold') 56 | if not os.path.exists(manifold_script_path): 57 | raise FileNotFoundError(f'{manifold_script_path} not found') 58 | cmd = "{} {} {}".format(manifold_script_path, fname, temp_file + opts) 59 | os.system(cmd) 60 | 61 | if simplify: 62 | cmd = "{} -i {} -o {} -f {}".format(os.path.join(MANIFOLD_SOFTWARE_DIR, 'simplify'), temp_file, 63 | temp_file, num_faces) 64 | os.system(cmd) 65 | 66 | m_out = Mesh(temp_file, hold_history=True, device=mesh.device) 67 | fname = os.path.join(save_path, '{}_{}.obj'.format(curr_fname,len(m_out.faces))) 68 | m_out.export(fname) 69 | [os.remove(_) for _ in list(glob.glob(os.path.splitext(temp_file)[0] + '*'))] 70 | return m_out 71 | 72 | #*************************** LOAD/SAVE FILES *****************************# 73 | def save(file_name: str, vertices: np.ndarray, faces: np.ndarray) -> None: 74 | os.makedirs(os.path.dirname(file_name), exist_ok=True) 75 | with open(file_name, "w") as f: 76 | mesh = trimesh.Trimesh(vertices=vertices, faces=faces) 77 | f.write(trimesh.exchange.obj.export_obj(mesh)) 78 | 79 | 80 | def export(file, vs, faces, vn=None, color=None): 81 | with open(file, 'w+') as f: 82 | for vi, v in enumerate(vs): 83 | if color is None: 84 | f.write("v %f %f %f\n" % (v[0], v[1], v[2])) 85 | else: 86 | f.write("v %f %f %f %f %f %f\n" % (v[0], v[1], v[2], color[vi][0], color[vi][1], color[vi][2])) 87 | if vn is not None: 88 | f.write("vn %f %f %f\n" % (vn[vi, 0], vn[vi, 1], vn[vi, 2])) 89 | for face in faces: 90 | f.write("f %d %d %d\n" % (face[0] + 1, face[1] + 1, face[2] + 1)) 91 | 92 | 93 | def load(file_name: str): 94 | with open(file_name, "r") as f: 95 | mesh = trimesh.exchange.obj.load_obj(f) 96 | return np.float32(mesh["vertices"]), mesh["faces"] 97 | 98 | 99 | def load_obj(file): 100 | vs, faces = [], [] 101 | f = open(file) 102 | for line in f: 103 | line = line.strip() 104 | splitted_line = line.split() 105 | if not splitted_line: 106 | continue 107 | elif splitted_line[0] == 'v': 108 | vs.append([float(v) for v in splitted_line[1:4]]) 109 | elif splitted_line[0] == 'f': 110 | face_vertex_ids = [int(c.split('/')[0]) for c in splitted_line[1:]] 111 | assert len(face_vertex_ids) == 3 112 | face_vertex_ids = [(ind - 1) if (ind >= 0) else (len(vs) + ind) 113 | for ind in face_vertex_ids] 114 | faces.append(face_vertex_ids) 115 | f.close() 116 | vs = np.asarray(vs) 117 | faces = np.asarray(faces, dtype=int) 118 | assert np.logical_and(faces >= 0, faces < len(vs)).all() 119 | return vs, faces 120 | 121 | #*************************** HELPER FUNCTIONS *****************************# 122 | def populate_e(meshes, verts=None): 123 | mesh = meshes[0] 124 | if verts is None: 125 | verts = torch.rand(len(meshes), mesh.vs.shape[0], 3).to(mesh.vs.device) 126 | x = verts[:, mesh.edges, :] 127 | return x.view(len(meshes), mesh.edges_count, -1).permute(0, 2, 
1).type(torch.float32) 128 | 129 | def build_v(x, meshes): 130 | # mesh.edges[mesh.ve[2], mesh.vei[2]] 131 | mesh = meshes[0] # b/c all meshes in batch are same 132 | x = x.reshape(len(meshes), 2, 3, -1) 133 | vs_to_sum = torch.zeros([len(meshes), len(mesh.vs_in), mesh.max_nvs, 3], dtype=x.dtype, device=x.device) 134 | x = x[:, mesh.vei, :, mesh.ve_in].transpose(0, 1) 135 | vs_to_sum[:, mesh.nvsi, mesh.nvsin, :] = x 136 | vs_sum = torch.sum(vs_to_sum, dim=2) 137 | nvs = mesh.nvs 138 | vs = vs_sum / nvs[None, :, None] 139 | return vs 140 | 141 | def face_areas_normals(faces, vs): 142 | face_normals = torch.cross(vs[:, faces[:, 1], :] - vs[:, faces[:, 0], :], 143 | vs[:, faces[:, 2], :] - vs[:, faces[:, 1], :], dim=2) 144 | face_areas = torch.norm(face_normals, dim=2) 145 | face_normals = face_normals / face_areas[:, :, None] 146 | face_areas = 0.5*face_areas 147 | return face_areas, face_normals 148 | 149 | def sample_surface(faces, vs, count): # sample from a surf., like in trimesh.sample --> points & their normals 150 | """ 151 | sample mesh surface 152 | sample method: 153 | http://mathworld.wolfram.com/TrianglePointPicking.html 154 | 155 | Args 156 | --------- 157 | vs: vertices 158 | faces: triangle faces (torch.long) 159 | count: number of samples 160 | Return 161 | --------- 162 | samples: (count, 3) points in space on the surface of mesh 163 | normals: (count, 3) corresponding face normals for points 164 | """ 165 | bsize, nvs, _ = vs.shape 166 | weights, normal = face_areas_normals(faces, vs) 167 | weights_sum = torch.sum(weights, dim=1) 168 | dist = torch.distributions.categorical.Categorical(probs=weights / weights_sum[:, None]) 169 | face_index = dist.sample((count,)) 170 | 171 | # pull triangles into the form of an origin + 2 vectors 172 | tri_origins = vs[:, faces[:, 0], :] 173 | tri_vectors = vs[:, faces[:, 1:], :].clone() 174 | tri_vectors -= tri_origins.repeat(1, 1, 2).reshape((bsize, len(faces), 2, 3)) 175 | 176 | # pull the vectors for the faces we are going to sample from 177 | face_index = face_index.transpose(0, 1) 178 | face_index = face_index[:, :, None].expand((bsize, count, 3)) 179 | tri_origins = torch.gather(tri_origins, dim=1, index=face_index) 180 | face_index2 = face_index[:, :, None, :].expand((bsize, count, 2, 3)) 181 | tri_vectors = torch.gather(tri_vectors, dim=1, index=face_index2) 182 | 183 | # randomly generate two 0-1 scalar components to multiply edge vectors by 184 | random_lengths = torch.rand(count, 2, 1, device=vs.device, dtype=tri_vectors.dtype) 185 | 186 | # points will be distributed on a quadrilateral if we use 2x [0-1] samples 187 | # if the two scalar components sum less than 1.0 the point will be 188 | # inside the triangle, so we find vectors longer than 1.0 and 189 | # transform them to be inside the triangle 190 | random_test = random_lengths.sum(dim=1).reshape(-1) > 1.0 191 | random_lengths[random_test] -= 1.0 192 | random_lengths = torch.abs(random_lengths) 193 | 194 | # multiply triangle edge vectors by the random lengths and sum 195 | sample_vector = (tri_vectors * random_lengths[None, :]).sum(dim=2) 196 | 197 | # finally, offset by the origin to generate 198 | # (n,3) points in space on the triangle 199 | samples = sample_vector + tri_origins 200 | 201 | normals = torch.gather(normal, dim=1, index=face_index) 202 | 203 | return samples, normals 204 | 205 | def mesh_area(mesh): 206 | vs = mesh.vs 207 | faces = mesh.faces 208 | v1 = vs[faces[:, 1]] - vs[faces[:, 0]] 209 | v2 = vs[faces[:, 2]] - vs[faces[:, 0]] 210 | area = 
torch.cross(v1, v2, dim=-1).norm(dim=-1) 211 | return area 212 | 213 | def local_nonuniform_penalty(mesh): #it has to do with the local variations in neighboring faces properties & penalizing that diff 214 | # non-uniform penalty 215 | area = mesh_area(mesh) 216 | diff = area[mesh.gfmm][:, 0:1] - area[mesh.gfmm][:, 1:] 217 | penalty = torch.norm(diff, dim=1, p=1) 218 | loss = penalty.sum() / penalty.numel() 219 | return loss 220 | -------------------------------------------------------------------------------- /base_utils/mp_utils.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | import multiprocessing 3 | 4 | 5 | def mp_worker(call): 6 | """ 7 | Small function that starts a new thread with a system call. Used for thread pooling. 8 | :param call: 9 | :return: 10 | """ 11 | call = call.split(' ') 12 | verbose = call[-1] == '--verbose' 13 | if verbose: 14 | call = call[:-1] 15 | subprocess.run(call) 16 | else: 17 | #subprocess.run(call, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) # suppress outputs 18 | subprocess.run(call, stdout=subprocess.DEVNULL) 19 | 20 | 21 | def start_process_pool(worker_function, parameters, num_processes, timeout=None): 22 | 23 | if len(parameters) > 0: 24 | if num_processes <= 1: 25 | print('Running loop for {} with {} calls on {} workers'.format( 26 | str(worker_function), len(parameters), num_processes)) 27 | results = [] 28 | for c in parameters: 29 | results.append(worker_function(*c)) 30 | return results 31 | print('Running loop for {} with {} calls on {} subprocess workers'.format( 32 | str(worker_function), len(parameters), num_processes)) 33 | with multiprocessing.Pool(processes=num_processes, maxtasksperchild=1) as pool: 34 | results = pool.starmap(worker_function, parameters) 35 | return results 36 | else: 37 | return None 38 | 39 | -------------------------------------------------------------------------------- /base_utils/point_cloud.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from plyfile import PlyData 3 | from base_utils import file_utils 4 | 5 | 6 | def load_xyz(file_path): 7 | data = np.loadtxt(file_path).astype('float32') 8 | nan_lines = np.isnan(data).any(axis=1) 9 | num_nan_lines = np.sum(nan_lines) 10 | if num_nan_lines > 0: 11 | data = data[~nan_lines] # filter rows with nan values 12 | print('Ignored {} points containing NaN coordinates in point cloud {}'.format(num_nan_lines, file_path)) 13 | return data 14 | 15 | def load_ply(file_path): 16 | plydata = PlyData.read(file_path) 17 | assert plydata.elements 18 | data = (np.vstack((plydata['vertex']['x'], plydata['vertex']['y'], plydata['vertex']['z']))).T 19 | return data 20 | 21 | def write_xyz(file_path, points: np.ndarray, normals=None, colors=None): 22 | """ 23 | Write point cloud file. 
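Each point is written as one whitespace-separated text line: x y z [nx ny nz] [r g b].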
24 | :param file_path: 25 | :param points: 26 | :param normals: 27 | :param colors: 28 | :return: None 29 | """ 30 | 31 | file_utils.make_dir_for_file(file_path) 32 | 33 | if points.shape == (3,): 34 | points = np.expand_dims(points, axis=0) 35 | 36 | if points.shape[0] == 3 and points.shape[1] != 3: 37 | points = points.transpose([1, 0]) 38 | 39 | if colors is not None and colors.shape[0] == 3 and colors.shape[1] != 3: 40 | colors = colors.transpose([1, 0]) 41 | 42 | if normals is not None and normals.shape[0] == 3 and normals.shape[1] != 3: 43 | normals = normals.transpose([1, 0]) 44 | 45 | with open(file_path, 'w') as fp: 46 | 47 | # convert 2d points to 3d 48 | if points.shape[1] == 2: 49 | vertices_2p5d = np.zeros((points.shape[0], 3)) 50 | vertices_2p5d[:, :2] = points 51 | vertices_2p5d[:, 2] = 0.0 52 | points = vertices_2p5d 53 | 54 | # write points 55 | # meshlab doesn't like colors, only using normals. try cloud compare instead. 56 | for vi, v in enumerate(points): 57 | line_vertex = str(v[0]) + " " + str(v[1]) + " " + str(v[2]) + " " 58 | if normals is not None: 59 | line_vertex += str(normals[vi][0]) + " " + str(normals[vi][1]) + " " + str(normals[vi][2]) + " " 60 | if colors is not None: 61 | line_vertex += str(colors[vi][0]) + " " + str(colors[vi][1]) + " " + str(colors[vi][2]) + " " 62 | fp.write(line_vertex + "\n") 63 | 64 | 65 | def load_pcd(file_in): 66 | # PCD: http://pointclouds.org/documentation/tutorials/pcd_file_format.php 67 | # PCD RGB: http://docs.pointclouds.org/trunk/structpcl_1_1_r_g_b.html#a4ad91ab9726a3580e6dfc734ab77cd18 68 | 69 | def read_header(lines_header): 70 | header_info = dict() 71 | 72 | def add_line_to_header_dict(header_dict, line, expected_field): 73 | line_parts = line.split(sep=' ') 74 | assert (line_parts[0] == expected_field), \ 75 | ('Warning: "' + expected_field + '" expected but not found in pcd header!') 76 | header_dict[expected_field] = (' '.join(line_parts[1:])).replace('\n', '') 77 | 78 | add_line_to_header_dict(header_info, lines_header[0], '#') 79 | add_line_to_header_dict(header_info, lines_header[1], 'VERSION') 80 | add_line_to_header_dict(header_info, lines_header[2], 'FIELDS') 81 | add_line_to_header_dict(header_info, lines_header[3], 'SIZE') 82 | add_line_to_header_dict(header_info, lines_header[4], 'TYPE') 83 | add_line_to_header_dict(header_info, lines_header[5], 'COUNT') 84 | add_line_to_header_dict(header_info, lines_header[6], 'WIDTH') 85 | add_line_to_header_dict(header_info, lines_header[7], 'HEIGHT') 86 | add_line_to_header_dict(header_info, lines_header[8], 'VIEWPOINT') 87 | add_line_to_header_dict(header_info, lines_header[9], 'POINTS') 88 | add_line_to_header_dict(header_info, lines_header[10], 'DATA') 89 | 90 | # TODO: lift limitations 91 | assert header_info['VERSION'] == '0.7' 92 | assert header_info['FIELDS'] == 'x y z rgb label' 93 | assert header_info['SIZE'] == '4 4 4 4 4' 94 | assert header_info['TYPE'] == 'F F F F U' 95 | assert header_info['COUNT'] == '1 1 1 1 1' 96 | # assert header_info['HEIGHT'] == '1' 97 | assert header_info['DATA'] == 'ascii' 98 | # assert header_info['WIDTH'] == header_info['POINTS'] 99 | 100 | return header_info 101 | 102 | f = open(file_in, "r") 103 | f_lines = f.readlines() 104 | f_lines_header = f_lines[:11] 105 | f_lines_points = f_lines[11:] 106 | header_info = read_header(f_lines_header) 107 | header_info['_file_'] = file_in 108 | 109 | num_points = int(header_info['POINTS']) 110 | point_data_list_str_ = [l.split(sep=' ')[:3] for l in f_lines_points] 111 | point_data_list = 
[[float(l[0]), float(l[1]), float(l[2])] for l in point_data_list_str_] 112 | 113 | # filter nan points that appear through the blensor kinect sensor 114 | point_data_list = [p for p in point_data_list if 115 | (not np.isnan(p[0]) and not np.isnan(p[1]) and not np.isnan(p[2]))] 116 | 117 | point_data = np.array(point_data_list) 118 | 119 | f.close() 120 | 121 | return point_data, header_info -------------------------------------------------------------------------------- /base_utils/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def right_handed_to_left_handed(pts: np.ndarray): 4 | pts_res = np.zeros_like(pts) 5 | if pts.shape[0] > 0: 6 | pts_res[:, 0] = pts[:, 0] 7 | pts_res[:, 1] = -pts[:, 2] 8 | pts_res[:, 2] = pts[:, 1] 9 | return pts_res -------------------------------------------------------------------------------- /config_a2p_il.py: -------------------------------------------------------------------------------- 1 | import os, logging 2 | import numpy as np 3 | import torch 4 | 5 | MANIFOLD_DIR = '/Manifold/build' # path to manifold software (https://github.com/hjwdzh/Manifold) 6 | 7 | class Args(object): 8 | 9 | # HParams - files 10 | fix_sample_cnt = 2048 # for now [2048, 4096] from sdf_try.py 11 | data_path = "/data/processed/%s/net_outputs/pcc_out/a2p_il_fine" %(fix_sample_cnt) # ; /fine_cmplx 12 | os.makedirs(data_path, exist_ok=True) 13 | # pcc_npz_dir = "/outputs/experiments/testing/2023-10-04_23-14" #2023-11-03_01-03: cmplx; 2023-10-04_23-14: _no_nmls; 2023-03-29_23-26: original 14 | p2m_logs = "/data/processed/2048/net_outputs/p2m_logs" 15 | os.makedirs(p2m_logs, exist_ok=True) 16 | save_path = "/data/processed/%s/net_outputs/p2m_rec_obj" %(fix_sample_cnt) 17 | os.makedirs(save_path, exist_ok=True) 18 | 19 | # HParams - Rec 20 | torch_seed = 5 21 | samples = 5100 # number of points to sample reconstruction with ??? 22 | initial_mesh = None # if available, replace this with path 23 | initial_num_faces = 3000 24 | init_samples = 4000 25 | attention = 'None' #'KVQ L2 normalization' # [KVQ div(sqrt(cv))] attention type 26 | iterations = 998 27 | upsamp = 500 # upsample each {upsamp}th iteration 28 | max_faces = 4000 # maximum number of faces to upsample to 29 | faces_to_part = [8000, 16000, 20000] # after how many faces to split 30 | 31 | # HParams - net 32 | gpu = 0 33 | lr = 1.1e-4 34 | ang_wt = 1e-1 # weight of the cosine loss for normals 35 | res_blks = 3 36 | lrelu_alpha = 0.01 37 | local_non_uniform = 0.1 # weight of local non uniform loss 38 | convs = [16, 32, 64, 64, 128] 39 | pools = [0.0, 0.0, 0.0, 0.0] # percent to pool from orig. 
resolution in each layer') 40 | transfer_data = True 41 | overlap = 0 # overlap for bfs 42 | global_step = False #perform the optimization step after all the parts are forwarded (only matters if nparts > 2) 43 | manifold_res = 20000 # resolution for manifold upsampling 44 | unoriented = True # take the normals loss term without any preferred orientation 45 | init_weights = 0.002 46 | export_interval = 100 47 | beamgap_iterations = 0 # the num of iters to which the beamgap loss will be calculated 48 | beamgap_modulo = 1 # skip iterations with beamgap loss, calc beamgap when: iter % (beamgap-modulo) == 0 49 | manifold_always = True # always run manifold even when the maximum number of faces is reached 50 | 51 | # if not os.path.exists(save_path): 52 | # os.makedirs(save_path) 53 | 54 | def get_num_parts(Args, num_faces): 55 | lookup_num_parts = [1, 2, 4, 8] 56 | num_parts = lookup_num_parts[np.digitize(num_faces, Args.faces_to_part, right=True)] 57 | return num_parts 58 | 59 | def dtype(): 60 | return torch.float32 61 | 62 | def get_num_samples(Args, cur_iter): 63 | slope = (Args.samples - Args.init_samples) / int(0.8 * Args.upsamp) 64 | return int(slope * min(cur_iter, 0.8 * Args.upsamp)) + Args.init_samples 65 | 66 | def start_logger(log_dir, fname): 67 | logger = logging.getLogger() 68 | logger.setLevel(logging.INFO) 69 | 70 | # logging to file 71 | file_handler = logging.FileHandler(str(log_dir) + '/%s.txt'%(fname)) 72 | file_handler.setLevel(logging.INFO) 73 | file_handler.setFormatter(logging.Formatter('%(message)s')) # %(asctime)s - %(levelname)s - 74 | logger.addHandler(file_handler) 75 | 76 | # logging to console 77 | stream_handler = logging.StreamHandler() 78 | stream_handler.setFormatter(logging.Formatter('\t\t %(message)s')) 79 | logger.addHandler(stream_handler) 80 | 81 | return logger 82 | -------------------------------------------------------------------------------- /config_p2m.py: -------------------------------------------------------------------------------- 1 | import os, logging 2 | import numpy as np 3 | import torch 4 | 5 | MANIFOLD_DIR = '/Manifold/build' # path to manifold software (https://github.com/hjwdzh/Manifold) 6 | 7 | class Args(object): 8 | 9 | # HParams - files 10 | fix_sample_cnt = 2048 # for now [2048, 4096] from sdf_try.py 11 | data_path = "/data/processed/%s/net_outputs/pcc_out/fine_no_nmls" %(fix_sample_cnt) # ; /fine_cmplx 12 | os.makedirs(data_path, exist_ok=True) 13 | pcc_npz_dir = "/outputs/experiments/testing/2023-10-04_23-14" #2023-11-03_01-03: cmplx; 2023-10-04_23-14: _no_nmls; 2023-03-29_23-26: original 14 | p2m_logs = "/data/processed/2048/net_outputs/p2m_logs" 15 | os.makedirs(p2m_logs, exist_ok=True) 16 | save_path = "/data/processed/%s/net_outputs/p2m_rec_obj" %(fix_sample_cnt) 17 | os.makedirs(save_path, exist_ok=True) 18 | 19 | # HParams - Rec 20 | torch_seed = 5 21 | samples = 5100 # number of points to sample reconstruction with ??? 
22 | initial_mesh = None # if available, replace this with path 23 | initial_num_faces = 3000 24 | init_samples = 4000 25 | attention = 'None' #'KVQ L2 normalization' # [KVQ div(sqrt(cv))] attention type 26 | iterations = 998 27 | upsamp = 500 # upsample each {upsamp}th iteration 28 | max_faces = 4000 # maximum number of faces to upsample to 29 | faces_to_part = [8000, 16000, 20000] # after how many faces to split 30 | 31 | # HParams - net 32 | gpu = 0 33 | lr = 1.1e-4 34 | ang_wt = 1e-1 # weight of the cosine loss for normals 35 | res_blks = 3 36 | lrelu_alpha = 0.01 37 | local_non_uniform = 0.1 # weight of local non uniform loss 38 | convs = [16, 32, 64, 64, 128] 39 | pools = [0.0, 0.0, 0.0, 0.0] # percent to pool from orig. resolution in each layer') 40 | transfer_data = True 41 | overlap = 0 # overlap for bfs 42 | global_step = False #perform the optimization step after all the parts are forwarded (only matters if nparts > 2) 43 | manifold_res = 20000 # resolution for manifold upsampling 44 | unoriented = True # take the normals loss term without any preferred orientation 45 | init_weights = 0.002 46 | export_interval = 100 47 | beamgap_iterations = 0 # the num of iters to which the beamgap loss will be calculated 48 | beamgap_modulo = 1 # skip iterations with beamgap loss, calc beamgap when: iter % (beamgap-modulo) == 0 49 | manifold_always = True # always run manifold even when the maximum number of faces is reached 50 | 51 | # if not os.path.exists(save_path): 52 | # os.makedirs(save_path) 53 | 54 | def get_num_parts(Args, num_faces): 55 | lookup_num_parts = [1, 2, 4, 8] 56 | num_parts = lookup_num_parts[np.digitize(num_faces, Args.faces_to_part, right=True)] 57 | return num_parts 58 | 59 | def dtype(): 60 | return torch.float32 61 | 62 | def get_num_samples(Args, cur_iter): 63 | slope = (Args.samples - Args.init_samples) / int(0.8 * Args.upsamp) 64 | return int(slope * min(cur_iter, 0.8 * Args.upsamp)) + Args.init_samples 65 | 66 | def start_logger(log_dir, fname): 67 | logger = logging.getLogger() 68 | logger.setLevel(logging.INFO) 69 | 70 | # logging to file 71 | file_handler = logging.FileHandler(str(log_dir) + '/%s.txt'%(fname)) 72 | file_handler.setLevel(logging.INFO) 73 | file_handler.setFormatter(logging.Formatter('%(message)s')) # %(asctime)s - %(levelname)s - 74 | logger.addHandler(file_handler) 75 | 76 | # logging to console 77 | stream_handler = logging.StreamHandler() 78 | stream_handler.setFormatter(logging.Formatter('\t\t %(message)s')) 79 | logger.addHandler(stream_handler) 80 | 81 | return logger 82 | -------------------------------------------------------------------------------- /config_pcc.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from datetime import datetime 3 | from pathlib import Path 4 | 5 | class Args(object): 6 | 7 | # HParams - files 8 | experiment_dir = Path('/outputs/experiments/ISPRS_R1') 9 | experiment_dir.mkdir(exist_ok=True) 10 | file_dir = Path(str(experiment_dir) + '/' + str(datetime.now().strftime('%Y-%m-%d_%H-%M'))) 11 | file_dir.mkdir(exist_ok=True) 12 | ckpts_dir = file_dir.joinpath('checkpoints/') 13 | ckpts_dir.mkdir(exist_ok=True) 14 | log_dir = file_dir.joinpath('logs/') 15 | log_dir.mkdir(exist_ok=True) 16 | 17 | record_step = 2 # print/log the model info/metrics every ... 18 | # save_interval = 10 # save every ... 19 | val_interval = 2 # run the validation model every ... 
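    # Checkpoint-naming note (relevant to chkpnt_path further down): main_pcc.py writes weights as
    # pccnet_<epoch>_<best_cdp>_<best_cdt>.pth, with a zero-padded 3-digit epoch and 5-decimal losses,
    # e.g. pccnet_128_0.01709_0.00082.pth -> epoch 128, best fine CD-p 0.01709, best fine CD-t 0.00082.
    # When resuming, main_pcc.py recovers the epoch via int(chkpnt_path[-23:-20]), which only works if
    # the filename follows exactly this pattern.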
20 | # p2p_interval = 10 21 | # p2p = True 22 | tr_loss = 'cdp' # cdp, dcd, emd 23 | # t_alpha = 200 24 | # n_lambda = 0.5 25 | 26 | # HParams - net 27 | gpu = 0 28 | lr = 0.0006 29 | eta_min = 0.0000001 30 | wd = 0.01 # weight decay (AdamW default) 31 | max_epoch = 140 32 | bs = 8 # batch_size 33 | npoints = 2048 # number of input points 34 | 35 | #HParams - chkpnting 36 | load_chkpnt = False 37 | chkpnt_path = '/outputs/experiments/2023-02-01_07-58/checkpoints/pccnet_128_0.01709_0.00082.pth' 38 | fix_lr = 0.00007 39 | if load_chkpnt: 40 | val_interval = 1 41 | 42 | def start_logger(log_dir, fname): 43 | logger = logging.getLogger() 44 | logger.setLevel(logging.INFO) 45 | 46 | # logging to file 47 | file_handler = logging.FileHandler(str(log_dir) + '/%s_log.txt'%(fname)) 48 | file_handler.setLevel(logging.INFO) 49 | file_handler.setFormatter(logging.Formatter('%(message)s')) # %(asctime)s - %(levelname)s - 50 | logger.addHandler(file_handler) 51 | 52 | # logging to console 53 | stream_handler = logging.StreamHandler() 54 | stream_handler.setFormatter(logging.Formatter('\t\t %(message)s')) 55 | logger.addHandler(stream_handler) 56 | 57 | return logger 58 | 59 | 60 | 61 | -------------------------------------------------------------------------------- /dataset.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | import trimesh 4 | import uuid 5 | import torch 6 | 7 | MANIFOLD_SOFTWARE_DIR = "/Manifold/build" 8 | 9 | 10 | def random_file_name(ext, prefix="tmp_"): 11 | return f"{prefix}{uuid.uuid4()}.{ext}" 12 | 13 | 14 | def remesh(hull_mesh, num_faces): 15 | 16 | # Write the original mesh as OBJ. 17 | original_file = random_file_name("obj") 18 | with open(original_file, "w") as f: 19 | mesh = trimesh.Trimesh(vertices=hull_mesh.vertices, faces=hull_mesh.faces) 20 | f.write(trimesh.exchange.obj.export_obj(mesh)) 21 | 22 | # Create a manifold of the original file. 23 | manifold_file = random_file_name("obj") 24 | manifold_script_path = os.path.join(MANIFOLD_SOFTWARE_DIR, "manifold") 25 | cmd = f"{manifold_script_path} {original_file} {manifold_file}" 26 | os.system(cmd) 27 | 28 | # Simplify the manifold. 29 | simplified_file = random_file_name("obj") 30 | simplify_script_path = os.path.join(MANIFOLD_SOFTWARE_DIR, "simplify") 31 | cmd = ( 32 | f"{simplify_script_path} -i {manifold_file} -o {simplified_file} -f {num_faces}" 33 | ) 34 | os.system(cmd) 35 | 36 | # Read the simplified manifold. 37 | with open(simplified_file, "r") as f: 38 | mesh = trimesh.exchange.obj.load_obj(f) 39 | 40 | # Prevent file spam. 
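    # The calls above wrote three temporary, randomly named OBJ files; delete them now that the
    # simplified manifold mesh has been read back into memory.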
41 | os.remove(original_file) 42 | os.remove(manifold_file) 43 | os.remove(simplified_file) 44 | 45 | return mesh["vertices"], mesh["faces"] 46 | 47 | 48 | data_path = '/data/processed/%d' %(4096) 49 | complete_path = os.path.join(data_path, '04_query_npz') 50 | partial_path = os.path.join(data_path, '05_als_npz') 51 | 52 | # complete_filelist = os.listdir(complete_path) 53 | partial_filelist = os.listdir(partial_path) 54 | 55 | trial_filelist = partial_filelist[:10] 56 | 57 | datalist = [] 58 | num_faces = 2000 59 | 60 | for i in range(len(trial_filelist)): 61 | part_data = np.load(os.path.join(partial_path, trial_filelist[i])) 62 | xyz = part_data['unit_als'] 63 | hull_mesh = trimesh.convex.convex_hull(xyz) 64 | # vs, faces = m.vertices, m.faces 65 | mesh = remesh(hull_mesh, num_faces) -------------------------------------------------------------------------------- /docker-compose.debug.yml: -------------------------------------------------------------------------------- 1 | version: '3.4' 2 | 3 | services: 4 | testdocker: 5 | image: testdocker 6 | build: 7 | context: . 8 | dockerfile: ./Dockerfile 9 | volumes: 10 | - /mnt/data/rec_data:/data:rw 11 | command: ["sh", "-c", "pip install debugpy -t /tmp && python /tmp/debugpy --wait-for-client --listen 0.0.0.0:5678 main.py "] 12 | ports: 13 | - 5678:5678 14 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3.4' 2 | 3 | services: 4 | sampledocker: 5 | image: sampledocker 6 | build: 7 | context: . 8 | dockerfile: ./Dockerfile 9 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import time, os, glob 2 | import numpy as np 3 | 4 | from config_a2p_il import Args as args 5 | # from Config import Args as args 6 | from config_p2m import dtype, get_num_parts, get_num_samples, start_logger 7 | from post_subnets.post_ops import get_complete_files, get_dist_losses, get_per_instance_errors, config3_per_instance_error 8 | 9 | import trimesh 10 | import torch 11 | from torch import optim 12 | 13 | from base_utils import mesh_utils 14 | from models.layers.mesh import Mesh, PartMesh 15 | from models.networks_p2m import PartNet, get_scheduler 16 | from models.losses import BeamGapLoss, chamfer_distance, point_mesh_loss 17 | from pytorch3d.ops.points_normals import estimate_pointcloud_normals 18 | 19 | 20 | def compute_normals(pc): 21 | pc = pc.reshape(-1,3) 22 | pc = torch.from_numpy(pc).to(device) 23 | pc = pc.unsqueeze(0) 24 | pc = pc.type(dtype()) 25 | normals = estimate_pointcloud_normals(pc, 12) 26 | normals = normals.squeeze().cpu().numpy() 27 | return normals 28 | 29 | 30 | torch.manual_seed(args.torch_seed) 31 | device = torch.device('cuda:{}'.format(args.gpu) if torch.cuda.is_available() else torch.device('cpu')) 32 | print('device: {}'.format(device)) 33 | 34 | # files and dirs prep 35 | exp_num = '0' 36 | # subdirs = ['als', 'twentyFivePCC', 'fiftyPCC'] # 'seventyFivePCC' 37 | # for subdir in subdirs: 38 | # if subdir == subdirs[0]: 39 | # exp_num = 11 40 | # elif subdir == subdirs[1]: 41 | # exp_num = 10 42 | # elif subdir == subdirs[2]: 43 | # exp_num = 9 44 | 45 | os.makedirs(os.path.join(args.save_path,'config-a2pil{}'.format(exp_num)), exist_ok=True) # config details are in matching loss_log file. 
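# Expected input format (as used below): each file in args.data_path is a plain-text point cloud with
# one point per row, either 'x y z' or 'x y z nx ny nz'. When only xyz is present, compute_normals()
# above estimates per-point normals from the 12 nearest neighbours via pytorch3d's
# estimate_pointcloud_normals before reconstruction starts.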
46 | 47 | # start logger 48 | p2m_logger = start_logger(log_dir=args.p2m_logs, fname='loss_log-a2pil{}'.format(exp_num)) 49 | p2m_logger.info('description: For comparative analysis in A2P_IL work') # - {}'.format(subdir)) 50 | p2m_logger.info(f'Attention: {args.attention}') # @ feat_dim {96}') 51 | p2m_logger.info('init#faces: {}'.format(args.initial_num_faces)) 52 | p2m_logger.info('init_samples: {}'.format(args.init_samples)) 53 | p2m_logger.info('iterations & upsampling: {} & {}'.format(args.iterations, args.upsamp)) 54 | p2m_logger.info('normals ang wt: {} | local non-uniform weight: {}'.format(args.ang_wt, args.local_non_uniform)) 55 | p2m_logger.info('beamgap iterations & modulo: {} | {}'.format(args.beamgap_iterations, args.beamgap_modulo)) 56 | p2m_logger.info('normals orientation: {} \n\n'.format(args.unoriented)) 57 | 58 | #retrieve individual completed point files from the batched .npz files 59 | # if not os.path.exists(f'{args.data_path}/als') or not os.listdir(f'{args.data_path}/als'): 60 | # pcc_blist = glob.glob(f'{args.pcc_npz_dir}/*.npz') 61 | # get_complete_files(blist=pcc_blist, rt_dir=args.pcc_npz_dir, save_dir=args.data_path) 62 | 63 | # get all the files in the data_path 64 | rebt_files = glob.glob(f'{args.data_path}/*.txt') 65 | for i, pc_filepath in enumerate(rebt_files): 66 | # pc_folder = pc_filepath.split('/')[-2] 67 | # if not pc_folder == 'fine': # if pc_folder == 'gt': 68 | # continue 69 | rebt_files[i] = pc_filepath.split('/')[-1] # pc_folder +'/'+pc_filepath.split('/')[-1] 70 | 71 | # for pc_filename in os.listdir(f'{args.data_path}/{subdir}'): # fine, als, subdir 72 | for pc_filename in rebt_files: 73 | # pc_filename = 'Tartu1_4770.txt' 74 | # pc_folder = pc_filename.split('/')[0] 75 | pc_fname_no_ext = pc_filename.split('.')[0] 76 | file_chk = os.path.join(args.save_path, f'config-a2pil{exp_num}', f'last_rec_{pc_fname_no_ext}.obj') 77 | 78 | if os.path.exists(file_chk): 79 | print(f'file {pc_filename} already done, skipping...') 80 | continue 81 | 82 | p2m_logger.info('\n** ' + pc_fname_no_ext) 83 | # initial_data = np.loadtxt(os.path.join(f'{args.data_path}/{subdir}', pc_filename)) #TODO:save normals for pcc_out files in main_pcc.py 84 | initial_data = np.loadtxt(os.path.join(f'{args.data_path}', pc_filename)) 85 | # initial_data = np.loadtxt(os.path.join(f'{args.data_path}/fine', pc_filename)) #TODO:save normals for pcc_out files in main_pcc.py 86 | if initial_data.shape[1] == 3: 87 | initial_data = np.hstack((initial_data, compute_normals(initial_data))) 88 | input_normals = initial_data[:, 3:] 89 | # input_normals = pcc_as_gt[:, 3:] 90 | # pcc_xyz_as_gt = pcc_as_gt[:, :3] 91 | initial_xyz = initial_data[:, :3] 92 | del initial_data 93 | 94 | # Create the mesh. 
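    # If no initial mesh is supplied, the convex hull of the input points is remeshed to roughly
    # args.initial_num_faces faces (presumably via the Manifold tool pointed to by MANIFOLD_DIR in the
    # config), written to <save_path>/config-a2pil<exp_num>/<name>_initial_mesh.obj, and then loaded as
    # a Mesh with hold_history=True so that the edge history needed by pooling/unpooling is kept.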
95 | if args.initial_mesh: 96 | remeshed_vertices, remeshed_faces = mesh_utils.load_obj(args.initial_mesh) 97 | else: 98 | convex_hull = trimesh.convex.convex_hull(initial_xyz) 99 | remeshed_vertices, remeshed_faces = mesh_utils.remesh(convex_hull, #TODO: remove normalization, to ensure chamfer dist correctness 100 | args.initial_num_faces) 101 | mesh_utils.save("%s/%s_initial_mesh.obj"%(os.path.join(args.save_path, f'config-a2pil{exp_num}'), pc_fname_no_ext), remeshed_vertices, remeshed_faces) 102 | 103 | # initial mesh 104 | mesh = Mesh("%s/%s_initial_mesh.obj"%(os.path.join(args.save_path, f'config-a2pil{exp_num}'), pc_fname_no_ext), device=device, hold_history=True) 105 | 106 | # normalize point cloud based on initial mesh 107 | initial_xyz /= mesh.scale 108 | initial_xyz += mesh.translations[None, :] 109 | input_xyz = torch.Tensor(initial_xyz).type(dtype()).to(device)[None, :, :] 110 | 111 | # pcc_xyz_as_gt /= mesh.scale 112 | # pcc_xyz_as_gt += mesh.translations[None, :] 113 | # pcc_xyz_as_gt = torch.Tensor(pcc_xyz_as_gt).type(dtype()).to(device)[None, :, :] 114 | 115 | input_normals = torch.Tensor(input_normals).type(dtype()).to(device)[None, :, :] 116 | 117 | part_mesh = PartMesh(mesh, num_parts=get_num_parts(args,len(mesh.faces)), bfs_depth=args.overlap) 118 | print(f'number of parts {part_mesh.n_submeshes}') 119 | 120 | # initialize network, weights, and random input tensor 121 | init_verts = mesh.vs.clone().detach() 122 | model = PartNet(init_part_mesh=part_mesh, convs=args.convs, 123 | pool=args.pools, res_blocks=args.res_blks, 124 | init_verts=init_verts, transfer_data=args.transfer_data, 125 | leaky=args.lrelu_alpha, init_weights_size=args.init_weights).to(device) 126 | 127 | optimizer = optim.Adam(model.parameters(), lr=args.lr) 128 | scheduler = get_scheduler(args.iterations, optimizer) 129 | rand_verts = mesh_utils.populate_e([mesh]) # totally random vertices re-indexed with mesh edges.
NB: 6 = a pair 3D random coords of an egde 130 | 131 | beamgap_loss = BeamGapLoss(device) 132 | if args.beamgap_iterations > 0: 133 | print('beamgap on') 134 | beamgap_loss.update_pm(part_mesh, torch.cat([input_xyz, input_normals], dim=-1)) 135 | 136 | for i in range(args.iterations): 137 | num_samples = get_num_samples(args, i % args.upsamp) 138 | if args.global_step: # only matters if part_mesh.n_submeshes > 2 139 | optimizer.zero_grad() 140 | start_time = time.time() 141 | for part_i, est_verts in enumerate(model(rand_verts, part_mesh)): 142 | if not args.global_step: 143 | optimizer.zero_grad() 144 | part_mesh.update_verts(est_verts[0], part_i) 145 | num_samples = get_num_samples(args, i % args.upsamp) 146 | recon_xyz, recon_normals = mesh_utils.sample_surface(part_mesh.main_mesh.faces, part_mesh.main_mesh.vs.unsqueeze(0), num_samples) 147 | 148 | # calc chamfer loss w/ normals 149 | recon_xyz, recon_normals = recon_xyz.type(dtype()), recon_normals.type(dtype()) 150 | xyz_chamfer_loss, normals_chamfer_loss = chamfer_distance(recon_xyz, input_xyz, # replaced input_xyz with pcc_xyz_as_gt 151 | x_normals=recon_normals, y_normals=input_normals, 152 | unoriented=args.unoriented) 153 | # # calc point to mesh loss 154 | # pnt_face_loss = point_mesh_loss(input_xyz, part_mesh.main_mesh) 155 | # loss = ((args.ang_wt * normals_chamfer_loss) + pnt_face_loss) 156 | if (i < args.beamgap_iterations) and (i % args.beamgap_modulo == 0): 157 | loss = beamgap_loss(part_mesh, part_i) 158 | else: 159 | loss = (xyz_chamfer_loss + (args.ang_wt * normals_chamfer_loss)) # this n the next loss component might be related to normal consistency loss 160 | if args.local_non_uniform > 0: 161 | loss += args.local_non_uniform * mesh_utils.local_nonuniform_penalty(part_mesh.main_mesh).float() 162 | loss.backward() 163 | 164 | if not args.global_step: 165 | optimizer.step() 166 | scheduler.step() 167 | part_mesh.main_mesh.vs.detach_() 168 | 169 | if args.global_step: 170 | optimizer.step() 171 | scheduler.step() 172 | 173 | end_time = time.time() 174 | 175 | if i % 4 == 0: 176 | p2m_logger.info(f'{pc_filename}; iter: {i:4d} out of: {args.iterations}; loss: {loss.item():.4f};' 177 | f' sample count: {num_samples}; time: {end_time - start_time:.2f}') 178 | 179 | if i % args.export_interval == 0 and i > 0: 180 | print('exporting reconstruction... 
current LR: {}'.format(optimizer.param_groups[0]['lr'])) 181 | with torch.no_grad(): 182 | part_mesh.export(os.path.join(args.save_path, f'config-a2pil{exp_num}', f'rec_{pc_fname_no_ext}.obj')) 183 | mesh_path = os.path.join(args.save_path, f'config-a2pil{exp_num}', f'rec_{pc_fname_no_ext}.obj') 184 | # get_per_instance_errors(f'{args.data_path}/gt', pc_fname_no_ext, mesh_path) 185 | # perr, derr = config3_per_instance_error(pc_fname_no_ext) 186 | 187 | if (i > 0 and (i + 1) % args.upsamp == 0): 188 | mesh = part_mesh.main_mesh 189 | num_faces = int(np.clip(len(mesh.faces) * 1.5, len(mesh.faces), args.max_faces)) 190 | 191 | if num_faces > len(mesh.faces) or args.manifold_always: 192 | # up-sample mesh 193 | mesh = mesh_utils.manifold_upsample(mesh, f'{args.save_path}/config-a2pil{exp_num}', pc_fname_no_ext, Mesh, 194 | num_faces=min(num_faces, args.max_faces), 195 | res=args.manifold_res, simplify=True) 196 | 197 | part_mesh = PartMesh(mesh, num_parts=get_num_parts(args,len(mesh.faces)), bfs_depth=args.overlap) 198 | print(f'upsampled to {len(mesh.faces)} faces; number of parts {part_mesh.n_submeshes}') 199 | 200 | # re-initialize the network and it params to re-fit the upsampled mesh 201 | init_verts = mesh.vs.clone().detach() 202 | model = PartNet(init_part_mesh=part_mesh, convs=args.convs, 203 | pool=args.pools, res_blocks=args.res_blks, 204 | init_verts=init_verts, transfer_data=args.transfer_data, 205 | leaky=args.lrelu_alpha, init_weights_size=args.init_weights).to(device) 206 | 207 | optimizer = optim.Adam(model.parameters(), lr=args.lr) 208 | scheduler = get_scheduler(args.iterations, optimizer) 209 | rand_verts = mesh_utils.populate_e([mesh]) # totally random verticies re-indexed with mesh edges. NB: 6 = a pair 3D random coords of an egde 210 | 211 | if i < args.beamgap_iterations: 212 | print('beamgap updated') 213 | beamgap_loss.update_pm(part_mesh, input_xyz) 214 | 215 | p2m_logger.info(f'{pc_filename}; final chamfer xyz loss: {xyz_chamfer_loss.item():.7f};') 216 | with torch.no_grad(): 217 | mesh.export(os.path.join(args.save_path, f'config-a2pil{exp_num}', f'last_rec_{pc_fname_no_ext}.obj')) 218 | 219 | print('p2m done...!') 220 | 221 | # compute final losses 222 | rlist = glob.glob(os.path.join(args.save_path, f'config-a2pil{exp_num}/rec_*.obj')) 223 | glist = glob.glob(f'{args.data_path}/gt/*.txt') 224 | get_dist_losses(rec_list=rlist, gt_list=glist) #FIXME: check for scale/translation consistency 225 | 226 | print('losses done...!') 227 | -------------------------------------------------------------------------------- /main_pcc.py: -------------------------------------------------------------------------------- 1 | import torch, random 2 | from dataset_pcc import CustomDataset 3 | from torch.utils import data 4 | from network_pcc import PCCNet, validate 5 | from point_ops.pointnet2_ops import pointnet2_utils as p2u 6 | # from torch.utils.tensorboard import SummaryWriter 7 | import pytorch_warmup as warmup 8 | from loss_pcc import chamfer_loss_sqrt, l2_normal_loss, density_cd 9 | from config_pcc import Args as args 10 | from config_pcc import start_logger 11 | from pytictoc import TicToc 12 | 13 | 14 | seed_value = 42 15 | random.seed(seed_value) 16 | torch.manual_seed(seed_value) 17 | torch.cuda.manual_seed(seed_value) 18 | 19 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') 20 | 21 | tr_dataset = CustomDataset(split='train', npoints=args.npoints, device=device) 22 | tr_loader = data.DataLoader(tr_dataset, batch_size=args.bs, shuffle=True) 23 | 24 
| ts_dataset = CustomDataset(split='test', npoints=args.npoints, device=device) 25 | ts_loader = data.DataLoader(ts_dataset, batch_size=args.bs, shuffle=False) 26 | 27 | pcc_model = PCCNet(kmax=20, code_dim=1024).to(device) 28 | 29 | if args.load_chkpnt: 30 | optimizer = torch.optim.AdamW(pcc_model.parameters(), lr=args.fix_lr, betas=(0.9, 0.999), 31 | weight_decay=args.wd) #eps=1e-08, 32 | scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.93, patience=1, verbose=True) 33 | else: 34 | optimizer = torch.optim.AdamW(pcc_model.parameters(), lr=args.lr, betas=(0.9, 0.999), 35 | weight_decay=args.wd) #eps=1e-08, 36 | 37 | num_steps = len(tr_loader) * args.max_epoch 38 | lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=num_steps, eta_min=args.eta_min) #TODO: try steplr &eponentiallr too 39 | warmup_scheduler = warmup.UntunedLinearWarmup(optimizer) 40 | 41 | # loss init 42 | t = TicToc() #create instance of class 43 | llr_logger = start_logger(log_dir=args.log_dir, fname='lr_loss') 44 | if args.load_chkpnt: 45 | pcc_model.load_state_dict(torch.load(args.chkpnt_path)) 46 | llr_logger.info('model loaded and training continues from: %s'%(args.chkpnt_path)) 47 | llr_logger.info('ms:[10,20,30] | 256-trsf | 4heads | scale: x4 | bs: 8 | #ep: 140 |lr: 0.0006 | eta_min: 1e-07 | normals: Yes | #tr/ts: 2k/40 | tr_loss: %s' %(args.tr_loss)) 48 | # tb = SummaryWriter(comment=f'ms:[10,20,30] | 256-trsf | 4heads | scale: [x4, +ppconv]') 49 | 50 | init_epoch = int(args.chkpnt_path[-23:-20]) if args.load_chkpnt else 1 51 | max_epoch = init_epoch + 60 if args.load_chkpnt else args.max_epoch 52 | best_val_cdp = 20.0 53 | best_val_cdt = 20.0 54 | coarse_list, fine_list, total_list = [], [], [] 55 | for epoch in range(init_epoch, max_epoch+1): 56 | avg_time_per_iter = [] 57 | for i, data in enumerate(tr_loader, 0): 58 | t.tic() #Start timer 59 | optimizer.zero_grad() 60 | xyz = data[0][:, :, :6].to(device).float() # partial: [B 2048, 6] include normals 61 | 62 | pcc_model.train() 63 | coarse, fine, finer = pcc_model(xyz) 64 | 65 | # Loss (xyz) 66 | gt_xyz = data[1][:, :, :6].to(device).float() # partial: [B 16348, 6] include normals 67 | 68 | gt_fine = p2u.gather_operation(gt_xyz.permute(0,2,1).contiguous(), p2u.furthest_point_sample(gt_xyz[:,:,:3].contiguous(), fine.size(1))).permute(0,2,1) 69 | if args.tr_loss == 'dcd': 70 | loss_fine = density_cd(fine[:, :, :3], gt_fine[:, :, :3], alpha=args.t_alpha, n_lambda=args.n_lambda) 71 | elif args.tr_loss == 'cd': 72 | loss_fine = chamfer_loss_sqrt(fine[:, :, :3], gt_fine[:, :, :3]) * 10 #inputs shd be BNC 73 | 74 | gt_coarse = p2u.gather_operation(gt_fine.permute(0,2,1).contiguous(), p2u.furthest_point_sample(gt_fine[:,:,:3].contiguous(), coarse.size(1))).permute(0,2,1) 75 | if args.tr_loss == 'dcd': 76 | loss_coarse = density_cd(coarse[:, :, :3], gt_coarse[:, :, :3], alpha=args.t_alpha, n_lambda=args.n_lambda) 77 | elif args.tr_loss == 'cd': 78 | loss_coarse = chamfer_loss_sqrt(coarse[:, :, :3], gt_coarse[:, :, :3]) * 10 79 | 80 | loss = loss_coarse + loss_fine 81 | normal_loss = l2_normal_loss(gt_fine, fine) 82 | 83 | coarse_list.append(loss_coarse) 84 | fine_list.append(loss_fine) 85 | 86 | # Loss (normals) 87 | # nbr_pnt_loss, nbr_norm_loss = nbrhood_uniformity_loss(fine, 10, 10) 88 | loss = loss + normal_loss #0.1* # + nbr_pnt_loss + nbr_norm_loss 89 | 90 | # if args.p2p and (epoch % args.p2p_interval) == 0: 91 | # loss += p2p_dist(gt_fine, fine) * 0.1 92 | 93 | total_list.append(loss) 94 | 95 | if i % 
args.record_step == 0: 96 | 97 | iter_time = 0.0 if i == 0 else sum(avg_time_per_iter)/len(avg_time_per_iter) 98 | llr_logger.info('Epoch %.3d | iter %.3d/%d, %.5f secs | l_coarse: %.6f | l_fine: %.6f | l_total: %.6f | lrs: %.10f | c_lr: %.10f' % (epoch, i, 99 | len(tr_dataset)/args.bs, 100 | iter_time, 101 | (sum(coarse_list)/len(coarse_list)).item(), 102 | (sum(fine_list)/len(fine_list)).item(), 103 | (sum(total_list)/len(total_list)).item(), 104 | 0.0 if args.load_chkpnt else warmup_scheduler.lrs[0], 105 | optimizer.param_groups[0]['lr'])) 106 | coarse_list, fine_list, total_list = [], [], [] 107 | #TODO: push record_step info it to tb for graphing or retrieve n plot from log files 108 | 109 | loss.backward() 110 | optimizer.step() 111 | 112 | if not args.load_chkpnt: 113 | with warmup_scheduler.dampening(): 114 | lr_scheduler.step() 115 | 116 | avg_time_per_iter.append(t.tocvalue()) # t.tocvalue: time elapsed since t.tic() 117 | 118 | if (epoch % args.val_interval == 0) or (epoch == args.max_epoch): # bcoz max_epoch above is +1 119 | val_losses = validate(pcc_model, ts_loader, epoch, args, device=loss.device, rand_save=True) 120 | '''first two values after vEpoch will be zero if args.tr_loss == 'cd' ''' 121 | llr_logger.info('vEpoch %.3d | %s_fine: %.6f | %s_coarse: %.6f |cdp_fine: %.6f | cdt_fine: %.6f | cdp_coarse: %.6f | cdt_coarse: %.6f' %(epoch, 122 | args.tr_loss, 123 | val_losses['fine_d'], 124 | args.tr_loss, 125 | val_losses['coarse_d'], 126 | val_losses['fine_p'], 127 | val_losses['fine_t'], 128 | val_losses['coarse_p'], 129 | val_losses['coarse_t'])) 130 | if args.load_chkpnt: 131 | scheduler.step(val_losses['fine_p']) 132 | # if args.tr_loss == 'dcd' and (val_losses['fine_d'] < best_val_cdp) or (val_losses['coarse_d'] < best_val_cdt): 133 | # best_val_cdp = val_losses['fine_d'] 134 | # best_val_cdt = val_losses['coarse_d'] 135 | # torch.save(pcc_model.state_dict(), '%s/pccnet_%.3d_%.5f_%.5f.pth' % (str(args.ckpts_dir), epoch, best_val_cdp, best_val_cdt)) 136 | 137 | if (val_losses['fine_p'] < best_val_cdp) or (val_losses['fine_t'] < best_val_cdt): 138 | best_val_cdp = val_losses['fine_p'] 139 | best_val_cdt = val_losses['fine_t'] 140 | torch.save(pcc_model.state_dict(), '%s/pccnet_%.3d_%.5f_%.5f.pth' % (str(args.ckpts_dir), epoch, best_val_cdp, best_val_cdt)) 141 | print("Saving model...") 142 | 143 | print('done ...') 144 | 145 | #TODO: ideas 146 | ''' 147 | (*) point2plane in terms of loss (3 point to compute plane... kinda like a face of a mesh with normal) 148 | (*) increase training data 149 | (*) attention on points where weights r assigned according to normal cosine similarity. weak weights 150 | to points on a diff. plane than the query point. (DONE, 2023-01-15_05-26... needs some tweaking for better losses) 151 | (*) introduce pointconv 152 | (*) introduce one more trsf_layer after refine_1 [2023-01-05_18-07] (DONE, has potential, but will require a lot more training epochs) 153 | (*) (10,20,30) (20,30) (20) --> (10,20,30) (10,20) (20) [2023-01-05_12-47] (DONE, 154 | visual diffs not very pronounced, will have to test both in the surf. 
rec to see) 155 | (*) (10,20,30) --> (8,16,24) (DONE, thighter to some planes but a bit buffered from some) [2023-01-03_14-44] 156 | (*) add a second refine step (DONE with 2x,2x, expensive, COMMENTED, could be better with 4x,4x) [2023-01-03_20-01] 157 | (*) increase training epochs (DONE) 158 | (*) currently, c_lr/lrs goes all the way to 0, find a way to stick to a fix mininum (DONE) 159 | (*) include normal data n loss (maybe this will boost edge/corner awareness) (DONE) 160 | (*) test more normal losses (DONE, smoothening effect on corners & edges, COMMENTED) 161 | ''' 162 | 163 | -------------------------------------------------------------------------------- /models/layers/mesh_conv.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import torch.nn.functional as F 4 | 5 | class MeshConv(nn.Module): 6 | """ Computes convolution between edges and 4 incident (1-ring) edge neighbors 7 | in the forward pass takes: 8 | x: edge features (Batch x Features x Edges) 9 | mesh: list of mesh data-structure (len(mesh) == Batch) 10 | and applies convolution 11 | """ 12 | def __init__(self, in_channels, out_channels, k=5, bias=True): 13 | super(MeshConv, self).__init__() 14 | self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=(1, k), bias=bias) 15 | self.k = k 16 | 17 | def __call__(self, edge_f, mesh): 18 | return self.forward(edge_f, mesh) 19 | 20 | def forward(self, x, mesh): 21 | x = x.squeeze(-1) 22 | G = torch.cat([self.pad_gemm(i, x.shape[2], x.device) for i in mesh], 0) 23 | # build 'neighborhood image' and apply convolution 24 | G = self.create_GeMM(x, G) 25 | x = self.conv(G) 26 | return x 27 | 28 | def flatten_gemm_inds(self, Gi): 29 | (b, ne, nn) = Gi.shape 30 | ne += 1 31 | batch_n = torch.floor(torch.arange(b * ne, device=Gi.device).float() / ne).view(b, ne) 32 | add_fac = batch_n * ne 33 | add_fac = add_fac.view(b, ne, 1) 34 | add_fac = add_fac.repeat(1, 1, nn) 35 | # flatten Gi 36 | Gi = Gi.float() + add_fac[:, 1:, :] 37 | return Gi 38 | 39 | def create_GeMM(self, x, Gi): 40 | """ gathers the edge features (x) with from the 1-ring indices (Gi) 41 | applys symmetric functions to handle order invariance 42 | returns a 'fake image' which can use 2d convolution on 43 | output dimensions: Batch x Channels x Edges x 5 44 | """ 45 | Gishape = Gi.shape 46 | # pad the first row of every sample in batch with zeros 47 | padding = torch.zeros((x.shape[0], x.shape[1], 1), requires_grad=True, device=x.device) 48 | # padding = padding.to(x.device) 49 | x = torch.cat((padding, x), dim=2) 50 | Gi = Gi + 1 #shift 51 | 52 | # first flatten indices 53 | Gi_flat = self.flatten_gemm_inds(Gi) 54 | Gi_flat = Gi_flat.view(-1).long() 55 | # 56 | odim = x.shape 57 | x = x.permute(0, 2, 1).contiguous() 58 | x = x.view(odim[0] * odim[2], odim[1]) 59 | 60 | f = torch.index_select(x, dim=0, index=Gi_flat) 61 | f = f.view(Gishape[0], Gishape[1], Gishape[2], -1) 62 | f = f.permute(0, 3, 1, 2) 63 | 64 | # apply the symmetric functions for an equivariant conv 65 | x_1 = f[:, :, :, 1] + f[:, :, :, 3] 66 | x_2 = f[:, :, :, 2] + f[:, :, :, 4] 67 | x_3 = torch.abs(f[:, :, :, 1] - f[:, :, :, 3]) 68 | x_4 = torch.abs(f[:, :, :, 2] - f[:, :, :, 4]) 69 | f = torch.stack([f[:, :, :, 0], x_1, x_2, x_3, x_4], dim=3) 70 | return f 71 | 72 | def pad_gemm(self, m, xsz, device): 73 | """ extracts one-ring neighbors (4x) -> m.gemm_edges 74 | which is of size #edges x 4 75 | add the edge_id itself to make #edges x 5 76 | 
then pad to desired size e.g., xsz x 5 77 | """ 78 | padded_gemm = torch.tensor(m.gemm_edges, device=device).float() 79 | padded_gemm = padded_gemm.requires_grad_() 80 | padded_gemm = torch.cat((torch.arange(m.edges_count, device=device).float().unsqueeze(1), padded_gemm), dim=1) 81 | # pad using F 82 | padded_gemm = F.pad(padded_gemm, (0, 0, 0, xsz - m.edges_count), "constant", 0) 83 | padded_gemm = padded_gemm.unsqueeze(0) 84 | return padded_gemm -------------------------------------------------------------------------------- /models/layers/mesh_pool.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from models.layers.mesh_union import MeshUnion 4 | import numpy as np 5 | 6 | 7 | class MeshPool(nn.Module): 8 | 9 | def __init__(self, target): 10 | super(MeshPool, self).__init__() 11 | self.__out_target = target 12 | self.__fe = None 13 | self.__updated_fe = None 14 | self.__meshes = None 15 | 16 | def __call__(self, fe, meshes): 17 | return self.forward(fe, meshes) 18 | 19 | def forward(self, fe, meshes): 20 | self.__updated_fe = [[] for _ in range(len(meshes))] 21 | self.__fe = fe 22 | self.__meshes = meshes 23 | # iterate over batch 24 | for mesh_index in range(len(meshes)): 25 | self.__pool_main(mesh_index) 26 | out_features = torch.cat(self.__updated_fe).view(len(meshes), -1, self.__out_target) 27 | return out_features 28 | 29 | def __pool_main(self, mesh_index): 30 | mesh = self.__meshes[mesh_index] 31 | fe = self.__fe[mesh_index, :, :mesh.edges_count] 32 | in_fe_sq = torch.sum(fe ** 2, dim=0) 33 | sorted, edge_ids = torch.sort(in_fe_sq, descending=True) 34 | edge_ids = edge_ids.tolist() 35 | mask = np.ones(mesh.edges_count, dtype=bool) 36 | edge_groups = MeshUnion(mesh.edges_count, self.__fe.device) 37 | while mesh.edges_count > self.__out_target: 38 | edge_id = edge_ids.pop() 39 | if mask[edge_id]: 40 | self.__pool_edge(mesh, edge_id, mask, edge_groups) 41 | mesh.clean(mask, edge_groups) 42 | fe = edge_groups.rebuild_features(self.__fe[mesh_index], mask, self.__out_target) 43 | self.__updated_fe[mesh_index] = fe 44 | 45 | def __pool_edge(self, mesh, edge_id, mask, edge_groups): 46 | if self.has_boundaries(mesh, edge_id): 47 | return False 48 | elif self.__clean_side(mesh, edge_id, mask, edge_groups, 0) \ 49 | and self.__clean_side(mesh, edge_id, mask, edge_groups, 2) \ 50 | and self.__is_one_ring_valid(mesh, edge_id): 51 | self.__pool_side(mesh, edge_id, mask, edge_groups, 0) 52 | self.__pool_side(mesh, edge_id, mask, edge_groups, 2) 53 | mesh.merge_vertices(edge_id) 54 | mask[edge_id] = False 55 | MeshPool.__remove_group(edge_groups, edge_id) 56 | mesh.edges_count -= 1 57 | return True 58 | else: 59 | return False 60 | 61 | def __clean_side(self, mesh, edge_id, mask, edge_groups, side): 62 | if mesh.edges_count <= self.__out_target: 63 | return False 64 | invalid_edges = MeshPool.__get_invalids(mesh, edge_id, edge_groups, side) 65 | while len(invalid_edges) != 0 and mesh.edges_count > self.__out_target: 66 | self.__remove_triplete(mesh, mask, edge_groups, invalid_edges) 67 | if mesh.edges_count <= self.__out_target: 68 | return False 69 | if self.has_boundaries(mesh, edge_id): 70 | return False 71 | invalid_edges = self.__get_invalids(mesh, edge_id, edge_groups, side) 72 | return True 73 | 74 | @staticmethod 75 | def has_boundaries(mesh, edge_id): 76 | for edge in mesh.gemm_edges[edge_id]: 77 | if edge == -1 or -1 in mesh.gemm_edges[edge]: 78 | return True 79 | return False 80 | 81 | @staticmethod 82 
| def __is_one_ring_valid(mesh, edge_id): 83 | v_a = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 0]]].reshape(-1)) 84 | v_b = set(mesh.edges[mesh.ve[mesh.edges[edge_id, 1]]].reshape(-1)) 85 | shared = v_a & v_b - set(mesh.edges[edge_id]) 86 | return len(shared) == 2 87 | 88 | def __pool_side(self, mesh, edge_id, mask, edge_groups, side): 89 | info = MeshPool.__get_face_info(mesh, edge_id, side) 90 | key_a, key_b, side_a, side_b, _, other_side_b, _, other_keys_b = info 91 | self.__redirect_edges(mesh, key_a, side_a - side_a % 2, other_keys_b[0], mesh.sides[key_b, other_side_b]) 92 | self.__redirect_edges(mesh, key_a, side_a - side_a % 2 + 1, other_keys_b[1], 93 | mesh.sides[key_b, other_side_b + 1]) 94 | MeshPool.__union_groups(edge_groups, key_b, key_a) 95 | MeshPool.__union_groups(edge_groups, edge_id, key_a) 96 | mask[key_b] = False 97 | MeshPool.__remove_group(edge_groups, key_b) 98 | mesh.remove_edge(key_b) 99 | mesh.edges_count -= 1 100 | return key_a 101 | 102 | @staticmethod 103 | def __get_invalids(mesh, edge_id, edge_groups, side): 104 | info = MeshPool.__get_face_info(mesh, edge_id, side) 105 | key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b = info 106 | shared_items = MeshPool.__get_shared_items(other_keys_a, other_keys_b) 107 | if len(shared_items) == 0: 108 | return [] 109 | else: 110 | assert (len(shared_items) == 2) 111 | middle_edge = other_keys_a[shared_items[0]] 112 | update_key_a = other_keys_a[1 - shared_items[0]] 113 | update_key_b = other_keys_b[1 - shared_items[1]] 114 | update_side_a = mesh.sides[key_a, other_side_a + 1 - shared_items[0]] 115 | update_side_b = mesh.sides[key_b, other_side_b + 1 - shared_items[1]] 116 | MeshPool.__redirect_edges(mesh, edge_id, side, update_key_a, update_side_a) 117 | MeshPool.__redirect_edges(mesh, edge_id, side + 1, update_key_b, update_side_b) 118 | MeshPool.__redirect_edges(mesh, update_key_a, MeshPool.__get_other_side(update_side_a), update_key_b, 119 | MeshPool.__get_other_side(update_side_b)) 120 | MeshPool.__union_groups(edge_groups, key_a, edge_id) 121 | MeshPool.__union_groups(edge_groups, key_b, edge_id) 122 | MeshPool.__union_groups(edge_groups, key_a, update_key_a) 123 | MeshPool.__union_groups(edge_groups, middle_edge, update_key_a) 124 | MeshPool.__union_groups(edge_groups, key_b, update_key_b) 125 | MeshPool.__union_groups(edge_groups, middle_edge, update_key_b) 126 | return [key_a, key_b, middle_edge] 127 | 128 | @staticmethod 129 | def __redirect_edges(mesh, edge_a_key, side_a, edge_b_key, side_b): 130 | mesh.gemm_edges[edge_a_key, side_a] = edge_b_key 131 | mesh.gemm_edges[edge_b_key, side_b] = edge_a_key 132 | mesh.sides[edge_a_key, side_a] = side_b 133 | mesh.sides[edge_b_key, side_b] = side_a 134 | 135 | @staticmethod 136 | def __get_shared_items(list_a, list_b): 137 | shared_items = [] 138 | for i in range(len(list_a)): 139 | for j in range(len(list_b)): 140 | if list_a[i] == list_b[j]: 141 | shared_items.extend([i, j]) 142 | return shared_items 143 | 144 | @staticmethod 145 | def __get_other_side(side): 146 | return side + 1 - 2 * (side % 2) 147 | 148 | @staticmethod 149 | def __get_face_info(mesh, edge_id, side): 150 | key_a = mesh.gemm_edges[edge_id, side] 151 | key_b = mesh.gemm_edges[edge_id, side + 1] 152 | side_a = mesh.sides[edge_id, side] 153 | side_b = mesh.sides[edge_id, side + 1] 154 | other_side_a = (side_a - (side_a % 2) + 2) % 4 155 | other_side_b = (side_b - (side_b % 2) + 2) % 4 156 | other_keys_a = [mesh.gemm_edges[key_a, other_side_a], mesh.gemm_edges[key_a, 
other_side_a + 1]] 157 | other_keys_b = [mesh.gemm_edges[key_b, other_side_b], mesh.gemm_edges[key_b, other_side_b + 1]] 158 | return key_a, key_b, side_a, side_b, other_side_a, other_side_b, other_keys_a, other_keys_b 159 | 160 | @staticmethod 161 | def __remove_triplete(mesh, mask, edge_groups, invalid_edges): 162 | vertex = set(mesh.edges[invalid_edges[0]]) 163 | for edge_key in invalid_edges: 164 | vertex &= set(mesh.edges[edge_key]) 165 | mask[edge_key] = False 166 | MeshPool.__remove_group(edge_groups, edge_key) 167 | mesh.edges_count -= 3 168 | vertex = list(vertex) 169 | assert (len(vertex) == 1) 170 | mesh.remove_vertex(vertex[0]) 171 | 172 | @staticmethod 173 | def __union_groups(edge_groups, source, target): 174 | edge_groups.union(source, target) 175 | 176 | @staticmethod 177 | def __remove_group(edge_groups, index): 178 | edge_groups.remove_group(index) -------------------------------------------------------------------------------- /models/layers/mesh_union.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.nn import ConstantPad2d 3 | 4 | 5 | class MeshUnion: 6 | def __init__(self, n, device=torch.device('cpu')): 7 | self.__size = n 8 | self.rebuild_features = self.rebuild_features_average 9 | self.groups = torch.eye(n, device=device) 10 | 11 | def union(self, source, target): 12 | self.groups[target, :] += self.groups[source, :] 13 | 14 | def remove_group(self, index): 15 | return 16 | 17 | def get_group(self, edge_key): 18 | return self.groups[edge_key, :] 19 | 20 | def get_occurrences(self): 21 | return torch.sum(self.groups, 0) 22 | 23 | def get_groups(self, tensor_mask): 24 | self.groups = torch.clamp(self.groups, 0, 1) 25 | return self.groups[tensor_mask, :] 26 | 27 | def rebuild_features_average(self, features, mask, target_edges): 28 | self.prepare_groups(features, mask) 29 | fe = torch.matmul(features.squeeze(-1), self.groups) 30 | occurrences = torch.sum(self.groups, 0).expand(fe.shape) 31 | fe = fe / occurrences 32 | padding_b = target_edges - fe.shape[1] 33 | if padding_b > 0: 34 | padding_b = ConstantPad2d((0, padding_b, 0, 0), 0) 35 | fe = padding_b(fe) 36 | return fe 37 | 38 | def prepare_groups(self, features, mask): 39 | tensor_mask = torch.from_numpy(mask) 40 | self.groups = torch.clamp(self.groups[tensor_mask, :], 0, 1).transpose_(1, 0) 41 | padding_a = features.shape[1] - self.groups.shape[0] 42 | if padding_a > 0: 43 | padding_a = ConstantPad2d((0, 0, 0, padding_a), 0) 44 | self.groups = padding_a(self.groups) -------------------------------------------------------------------------------- /models/layers/mesh_unpool.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | 6 | class MeshUnpool(nn.Module): 7 | def __init__(self, unroll_target): 8 | super(MeshUnpool, self).__init__() 9 | self.unroll_target = unroll_target 10 | 11 | def __call__(self, features, meshes): 12 | return self.forward(features, meshes) 13 | 14 | def pad_groups(self, group, unroll_start): 15 | start, end = group.shape 16 | padding_rows = unroll_start - start 17 | padding_cols = self.unroll_target - end 18 | if padding_rows != 0 or padding_cols !=0: 19 | padding = nn.ConstantPad2d((0, padding_cols, 0, padding_rows), 0) 20 | group = padding(group) 21 | return group 22 | 23 | def pad_occurrences(self, occurrences): 24 | padding = self.unroll_target - occurrences.shape[0] 25 | if padding != 0: 26 | padding = nn.ConstantPad1d((0, padding), 1) 
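            # pad occurrences with ones (not zeros): forward() divides unroll_mat by occurrences,
            # so padded slots must not cause a division by zero and should leave those columns unchanged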
27 | occurrences = padding(occurrences) 28 | return occurrences 29 | 30 | def forward(self, features, meshes): 31 | batch_size, nf, edges = features.shape 32 | groups = [self.pad_groups(mesh.get_groups(), edges) for mesh in meshes] 33 | unroll_mat = torch.cat(groups, dim=0).view(batch_size, edges, -1) 34 | occurrences = [self.pad_occurrences(mesh.get_occurrences()) for mesh in meshes] 35 | occurrences = torch.cat(occurrences, dim=0).view(batch_size, 1, -1) 36 | occurrences = occurrences.expand(unroll_mat.shape) 37 | unroll_mat = unroll_mat / occurrences 38 | unroll_mat = unroll_mat.to(features.device) 39 | for mesh in meshes: 40 | mesh.unroll_gemm() 41 | return torch.matmul(features, unroll_mat) -------------------------------------------------------------------------------- /models/losses.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from pytorch3d.structures import Pointclouds, Meshes 3 | from pytorch3d.ops.knn import knn_gather, knn_points 4 | from pytorch3d.loss import point_mesh_face_distance 5 | from typing import Union 6 | import torch.nn.functional as F 7 | 8 | def point_mesh_loss(points, mesh): 9 | """ 10 | Args: 11 | points: FloatTensor of shape (N, P, 3) giving a batch of N pointclouds 12 | each with at most P points. 13 | mesh: Meshes object representing a batch of N meshes. 14 | Returns: 15 | loss: Tensor of shape (N,) giving the mean distance from each point in 16 | the batch to the nearest face in the corresponding mesh. 17 | """ 18 | # convert points to Pointclouds 19 | if points.ndim == 2: 20 | points = points[None] 21 | points = Pointclouds(points) 22 | # convert mesh to Meshes 23 | v = mesh.vs if mesh.vs.ndim == 3 else mesh.vs[None] 24 | f = mesh.faces if mesh.faces.ndim == 3 else mesh.faces[None] 25 | mesh = Meshes(verts=v, faces=f) 26 | 27 | return point_mesh_face_distance(mesh, points) # you can try mean reduction by adding '/2' here 28 | 29 | def _validate_chamfer_reduction_inputs( 30 | batch_reduction: Union[str, None], point_reduction: str 31 | ): 32 | """Check the requested reductions are valid. 33 | Args: 34 | batch_reduction: Reduction operation to apply for the loss across the 35 | batch, can be one of ["mean", "sum"] or None. 36 | point_reduction: Reduction operation to apply for the loss across the 37 | points, can be one of ["mean", "sum"]. 38 | """ 39 | if batch_reduction is not None and batch_reduction not in ["mean", "sum"]: 40 | raise ValueError('batch_reduction must be one of ["mean", "sum"] or None') 41 | if point_reduction not in ["mean", "sum"]: 42 | raise ValueError('point_reduction must be one of ["mean", "sum"]') 43 | 44 | 45 | def _handle_pointcloud_input( 46 | points: Union[torch.Tensor, Pointclouds], 47 | lengths: Union[torch.Tensor, None], 48 | normals: Union[torch.Tensor, None], 49 | ): 50 | """ 51 | If points is an instance of Pointclouds, retrieve the padded points tensor 52 | along with the number of points per batch and the padded normals. 53 | Otherwise, return the input points (and normals) with the number of points per cloud 54 | set to the size of the second dimension of `points`. 
55 | """ 56 | if isinstance(points, Pointclouds): 57 | X = points.points_padded() 58 | lengths = points.num_points_per_cloud() 59 | normals = points.normals_padded() # either a tensor or None 60 | elif torch.is_tensor(points): 61 | if points.ndim != 3: 62 | raise ValueError("Expected points to be of shape (N, P, D)") 63 | X = points 64 | if lengths is not None and ( 65 | lengths.ndim != 1 or lengths.shape[0] != X.shape[0] 66 | ): 67 | raise ValueError("Expected lengths to be of shape (N,)") 68 | if lengths is None: 69 | lengths = torch.full( 70 | (X.shape[0],), X.shape[1], dtype=torch.int64, device=points.device 71 | ) 72 | if normals is not None and normals.ndim != 3: 73 | raise ValueError("Expected normals to be of shape (N, P, 3") 74 | else: 75 | raise ValueError( 76 | "The input pointclouds should be either " 77 | + "Pointclouds objects or torch.Tensor of shape " 78 | + "(minibatch, num_points, 3)." 79 | ) 80 | return X, lengths, normals 81 | 82 | def chamfer_distance( 83 | x, 84 | y, 85 | x_lengths=None, 86 | y_lengths=None, 87 | x_normals=None, 88 | y_normals=None, 89 | weights=None, 90 | batch_reduction: Union[str, None] = "mean", 91 | point_reduction: str = "mean", 92 | unoriented=False, 93 | ): 94 | """ 95 | Chamfer distance between two pointclouds x and y. 96 | Args: 97 | x: FloatTensor of shape (N, P1, D) or a Pointclouds object representing 98 | a batch of point clouds with at most P1 points in each batch element, 99 | batch size N and feature dimension D. 100 | y: FloatTensor of shape (N, P2, D) or a Pointclouds object representing 101 | a batch of point clouds with at most P2 points in each batch element, 102 | batch size N and feature dimension D. 103 | x_lengths: Optional LongTensor of shape (N,) giving the number of points in each 104 | cloud in x. 105 | y_lengths: Optional LongTensor of shape (N,) giving the number of points in each 106 | cloud in x. 107 | x_normals: Optional FloatTensor of shape (N, P1, D). 108 | y_normals: Optional FloatTensor of shape (N, P2, D). 109 | weights: Optional FloatTensor of shape (N,) giving weights for 110 | batch elements for reduction operation. 111 | batch_reduction: Reduction operation to apply for the loss across the 112 | batch, can be one of ["mean", "sum"] or None. 113 | point_reduction: Reduction operation to apply for the loss across the 114 | points, can be one of ["mean", "sum"]. 115 | Returns: 116 | 2-element tuple containing 117 | - **loss**: Tensor giving the reduced distance between the pointclouds 118 | in x and the pointclouds in y. 119 | - **loss_normals**: Tensor giving the reduced cosine distance of normals 120 | between pointclouds in x and pointclouds in y. Returns None if 121 | x_normals and y_normals are None. 122 | """ 123 | _validate_chamfer_reduction_inputs(batch_reduction, point_reduction) 124 | 125 | x, x_lengths, x_normals = _handle_pointcloud_input(x, x_lengths, x_normals) 126 | y, y_lengths, y_normals = _handle_pointcloud_input(y, y_lengths, y_normals) 127 | 128 | return_normals = x_normals is not None and y_normals is not None 129 | 130 | N, P1, D = x.shape 131 | P2 = y.shape[1] 132 | 133 | # Check if inputs are heterogeneous and create a lengths mask. 
134 | is_x_heterogeneous = (x_lengths != P1).any() 135 | is_y_heterogeneous = (y_lengths != P2).any() 136 | x_mask = ( 137 | torch.arange(P1, device=x.device)[None] >= x_lengths[:, None] 138 | ) # shape [N, P1] 139 | y_mask = ( 140 | torch.arange(P2, device=y.device)[None] >= y_lengths[:, None] 141 | ) # shape [N, P2] 142 | 143 | if y.shape[0] != N or y.shape[2] != D: 144 | raise ValueError("y does not have the correct shape.") 145 | if weights is not None: 146 | if weights.size(0) != N: 147 | raise ValueError("weights must be of shape (N,).") 148 | if not (weights >= 0).all(): 149 | raise ValueError("weights cannot be negative.") 150 | if weights.sum() == 0.0: 151 | weights = weights.view(N, 1) 152 | if batch_reduction in ["mean", "sum"]: 153 | return ( 154 | (x.sum((1, 2)) * weights).sum() * 0.0, 155 | (x.sum((1, 2)) * weights).sum() * 0.0, 156 | ) 157 | return ((x.sum((1, 2)) * weights) * 0.0, (x.sum((1, 2)) * weights) * 0.0) 158 | 159 | cham_norm_x = x.new_zeros(()) 160 | cham_norm_y = x.new_zeros(()) 161 | 162 | x_nn = knn_points(x, y, lengths1=x_lengths, lengths2=y_lengths, K=1) 163 | y_nn = knn_points(y, x, lengths1=y_lengths, lengths2=x_lengths, K=1) 164 | 165 | cham_x = x_nn.dists[..., 0] # (N, P1) 166 | cham_y = y_nn.dists[..., 0] # (N, P2) 167 | 168 | eps = 0 169 | cham_x = torch.sqrt(cham_x + eps) 170 | cham_y = torch.sqrt(cham_y + eps) 171 | 172 | if is_x_heterogeneous: 173 | cham_x[x_mask] = 0.0 174 | if is_y_heterogeneous: 175 | cham_y[y_mask] = 0.0 176 | 177 | if weights is not None: 178 | cham_x *= weights.view(N, 1) 179 | cham_y *= weights.view(N, 1) 180 | 181 | if return_normals: 182 | # Gather the normals using the indices and keep only value for k=0 183 | x_normals_near = knn_gather(y_normals, x_nn.idx, y_lengths)[..., 0, :] 184 | y_normals_near = knn_gather(x_normals, y_nn.idx, x_lengths)[..., 0, :] 185 | 186 | # cham_norm_x = 1 - torch.abs( 187 | # F.cosine_similarity(x_normals, x_normals_near, dim=2, eps=1e-6) 188 | # ) 189 | # cham_norm_y = 1 - torch.abs( 190 | # F.cosine_similarity(y_normals, y_normals_near, dim=2, eps=1e-6) 191 | # ) 192 | cham_norm_x = F.cosine_similarity(x_normals, x_normals_near, dim=2, eps=1e-6) 193 | cham_norm_y = F.cosine_similarity(y_normals, y_normals_near, dim=2, eps=1e-6) 194 | if unoriented: 195 | cham_norm_x = abs(cham_norm_x) 196 | cham_norm_y = abs(cham_norm_y) 197 | 198 | cham_norm_x = -1*cham_norm_x 199 | cham_norm_y = -1*cham_norm_y 200 | 201 | if is_x_heterogeneous: 202 | cham_norm_x[x_mask] = 0.0 203 | if is_y_heterogeneous: 204 | cham_norm_y[y_mask] = 0.0 205 | 206 | if weights is not None: 207 | cham_norm_x *= weights.view(N, 1) 208 | cham_norm_y *= weights.view(N, 1) 209 | 210 | # Apply point reduction 211 | cham_x = cham_x.sum(1) # (N,) 212 | cham_y = cham_y.sum(1) # (N,) 213 | if return_normals: 214 | cham_norm_x = cham_norm_x.sum(1) # (N,) 215 | cham_norm_y = cham_norm_y.sum(1) # (N,) 216 | if point_reduction == "mean": 217 | cham_x /= x_lengths 218 | cham_y /= y_lengths 219 | if return_normals: 220 | cham_norm_x /= x_lengths 221 | cham_norm_y /= y_lengths 222 | 223 | if batch_reduction is not None: 224 | # batch_reduction == "sum" 225 | cham_x = cham_x.sum() 226 | cham_y = cham_y.sum() 227 | if return_normals: 228 | cham_norm_x = cham_norm_x.sum() 229 | cham_norm_y = cham_norm_y.sum() 230 | if batch_reduction == "mean": 231 | div = weights.sum() if weights is not None else N 232 | cham_x /= div 233 | cham_y /= div 234 | if return_normals: 235 | cham_norm_x /= div 236 | cham_norm_y /= div 237 | 238 | cham_dist = cham_x + 
cham_y 239 | cham_normals = cham_norm_x + cham_norm_y if return_normals else None 240 | 241 | return cham_dist, cham_normals 242 | 243 | 244 | class ZeroNanGrad(torch.autograd.Function): 245 | @staticmethod 246 | def forward(ctx, x): 247 | return x 248 | 249 | @staticmethod 250 | def backward(ctx, grad): 251 | grad[grad != grad] = 0 252 | return grad 253 | 254 | 255 | class BeamGapLoss: 256 | def __init__(self, device): 257 | self.device = device 258 | self.points, self.masks = None, None 259 | 260 | def update_pm(self, pmesh, target_pc): 261 | points, masks = [], [] 262 | target_pc.to(self.device) 263 | total_mask = torch.zeros(pmesh.main_mesh.vs.shape[0]) 264 | for i, m in enumerate(pmesh): 265 | p, mask = m.discrete_project(target_pc, thres=0.99, cpu=True) 266 | p, mask = p.to(target_pc.device), mask.to(target_pc.device) 267 | points.append(p[:, :3]) 268 | masks.append(mask) 269 | temp = torch.zeros(m.vs.shape[0]) 270 | if (mask != False).any(): 271 | temp[m.faces[mask]] = 1 272 | total_mask[pmesh.sub_mesh_index[i]] += temp 273 | self.points, self.masks = points, masks 274 | 275 | def __call__(self, pmesh, j): 276 | losses = self.points[j] - pmesh[j].vs[pmesh[j].faces].mean(dim=1) 277 | losses = ZeroNanGrad.apply(losses) 278 | losses = torch.norm(losses, dim=1)[self.masks[j]] 279 | l2 = losses.mean().float() 280 | return l2 * 1e1 -------------------------------------------------------------------------------- /original.py: -------------------------------------------------------------------------------- 1 | import torch, os, sys 2 | import numpy as np 3 | import trimesh 4 | from mesh_to_sdf import get_surface_point_cloud 5 | from point_ops.pointnet2_ops import pointnet2_utils 6 | 7 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') 8 | 9 | data_path = '/data/processed/2048' 10 | save_path = '/data/under70' 11 | mesh_path = '/data/processed/2048/03_nnt_obj' 12 | partial_path = '/data/processed/2048/fixed_als_txt' 13 | 14 | mesh_flist = os.listdir(mesh_path) 15 | partial_flist = os.listdir(partial_path) 16 | 17 | omesh_list = [i for i in mesh_flist if i.startswith('original')] 18 | opartial_list = [i for i in partial_flist if i.startswith('original')] 19 | 20 | for i in range(len(omesh_list)): 21 | mesh_file = omesh_list[i] 22 | if mesh_file.replace('.obj', '.txt.npz') in opartial_list: 23 | partial_file = mesh_file.replace('.obj', '.txt.npz') 24 | else: 25 | print('partial file not found for: ', mesh_file) 26 | continue 27 | print(partial_file, " | ", mesh_file) 28 | 29 | partial_pc = np.load(os.path.join(partial_path, partial_file))['unit_als'] # has normals 30 | np.savez(os.path.join(save_path, 'xyz', mesh_file[:-4] + '.npz'), unit_als=partial_pc) 31 | 32 | mesh = trimesh.load(os.path.join(mesh_path, mesh_file)) 33 | 34 | # complete_pc = trimesh.sample 35 | '''surf_instance contains various data, e.g. 
points, kd_tree, etc.''' 36 | surf_instance = get_surface_point_cloud(mesh, 37 | surface_point_method='sample', 38 | bounding_radius=1, 39 | scan_count=30, 40 | scan_resolution=200, 41 | sample_point_count=50000, 42 | calculate_normals=True) 43 | 44 | # use fps to reduce points to fixed number 45 | surf_pnt_samples = torch.from_numpy(surf_instance.points).float()[None, :, :].to(device) # BN3 46 | surf_pnt_normals = torch.from_numpy(surf_instance.normals).float()[None, :, :].to(device) # BN3 47 | fps_idx = pointnet2_utils.furthest_point_sample(surf_pnt_samples, 16348) # xyz: torch.Tensor 48 | 49 | surf_pnt_samples = surf_pnt_samples.permute(0,2,1).contiguous() 50 | complete_pc = pointnet2_utils.gather_operation(surf_pnt_samples, fps_idx.int()) 51 | complete_normals = pointnet2_utils.gather_operation(surf_pnt_normals.permute(0,2,1).contiguous(), fps_idx.int()) 52 | 53 | complete_pc = torch.cat((torch.squeeze(complete_pc), torch.squeeze(complete_normals)),0).permute(1,0) # add .copy() to this line and change variable name if previous line has more use down the line 54 | 55 | # save complete_pc as .txt 56 | np.savetxt(os.path.join(save_path, 'gt_xyz', mesh_file[:-4] + '.txt'), complete_pc.cpu().numpy(), fmt='%.6f %.6f %.6f %.6f %.6f %.6f') 57 | 58 | # os copy partial_pc file from partial_path to save_path 59 | # os.system('cp ' + os.path.join(partial_path, partial_file) + ' ' + os.path.join(save_path, 'xyz', partial_file)) 60 | 61 | # print counts 62 | print(len(os.listdir(os.path.join(save_path, 'gt_xyz')))) 63 | print(len(os.listdir(os.path.join(save_path, 'xyz')))) 64 | 65 | 66 | -------------------------------------------------------------------------------- /p2p_loss.py: -------------------------------------------------------------------------------- 1 | import random, torch 2 | from dataset_pcc import CustomDataset 3 | import numpy as np 4 | from pytorch3d.ops.knn import knn_gather, knn_points 5 | from pytictoc import TicToc 6 | 7 | def get_data_without_normals(fpath): 8 | npz_data = np.load(fpath) 9 | fine = npz_data['final_pnts'] 10 | # gt = npz_data['gt_pnts'] 11 | del npz_data 12 | return torch.from_numpy(fine).float() #, torch.from_numpy(gt).float() 13 | 14 | 15 | def get_gt_with_normals(device): 16 | from torch.utils import data 17 | 18 | bs = 8 # batch_size 19 | npoints = 2048 20 | 21 | seed_value = 42 22 | random.seed(seed_value) 23 | torch.manual_seed(seed_value) 24 | torch.cuda.manual_seed(seed_value) 25 | 26 | tr_dataset = CustomDataset(split='train', npoints=npoints, device=device) 27 | tr_loader = data.DataLoader(tr_dataset, batch_size=bs, shuffle=True) 28 | 29 | ts_dataset = CustomDataset(split='test', npoints=npoints, device=device) 30 | ts_loader = data.DataLoader(ts_dataset, batch_size=bs, shuffle=False) 31 | 32 | for i, data in enumerate(ts_loader): 33 | #gt data 34 | gt_xyz = data[1][:, :, :6].to(device).float() # partial: [B 16348, 6] 35 | break 36 | 37 | return gt_xyz 38 | 39 | 40 | def computeCandidateNormals(gt, predicted, k=20): 41 | pred = predicted.detach().clone() 42 | B, N, C = gt.shape 43 | _, S, _ = pred.shape 44 | 45 | # get indices of gt which has a minimum distance from pred 46 | knn = knn_points(pred[:, :, :3], gt[:, :, :3], K=k) # input shd be B N/S C; dist, k_idx: BSK 47 | query_gt_grps = knn_gather(gt, knn.idx)[:,:,:,3:] # BSKC 48 | 49 | query_grp_cnters = query_gt_grps[:, :, 0, :] # i.e., the smallest dist to the pred query 50 | 51 | # cosine similarity 52 | dot = torch.matmul(query_gt_grps, query_grp_cnters.view(B, S, 1, 
3).permute(0,1,3,2)).squeeze() #BSK 53 | cos_sim = dot / (torch.linalg.norm(query_gt_grps, dim=-1) * torch.linalg.norm(query_grp_cnters, dim=-1).unsqueeze(-1)) #BSK/(BSK * BS1) 54 | 55 | # cos_sim = torch.where(cos_sim < 0.75, cos_sim*0.0, cos_sim) #BSK, C is one so squeezed 56 | trunc_idx = torch.where(cos_sim < 0.75) 57 | query_gt_grps[trunc_idx[0], trunc_idx[1], trunc_idx[2], :] = float('nan') # : or 3: 'Nan' bcoz normals parallel to the z-axiz will hv a 0.0 z-value 58 | query_grp_normal = torch.nanmean(query_gt_grps, axis=2) #BN3 non-zero mean 59 | 60 | return query_grp_normal, query_grp_cnters 61 | 62 | 63 | def computePlaneWithNormalAndPoint(grp_nmls, query_pnts): 64 | '''a, b, c are normals which corresponds to the last 3 columns of cloud''' 65 | 66 | a = grp_nmls[:,:,0] #BN 67 | b = grp_nmls[:,:,1] 68 | c = grp_nmls[:,:,2] 69 | d = torch.diagonal(torch.matmul(query_pnts, grp_nmls.permute(0, 2, 1)), offset=0, dim1=1, dim2=2) * -1.0 # Multiplication by -1 preserves the sign (+) of D on the LHS 70 | # d = np.diag(np.dot(test_nbr[:,:3], test_nbr[:,3:].T)) * -1.0 # Multiplication by -1 preserves the sign (+) of D on the LHS 71 | normalizationFactor = torch.sqrt((a * a) + (b * b) + (c * c)) 72 | 73 | # if normalizationFactor == 0: 74 | # return None 75 | a /= normalizationFactor 76 | b /= normalizationFactor 77 | c /= normalizationFactor 78 | d /= normalizationFactor 79 | 80 | return (a, b, c, d) 81 | 82 | 83 | def p2p_dist(pl_coeffs, points): 84 | dist = torch.abs(pl_coeffs[0]*points[:,:,0] + pl_coeffs[1]*points[:,:,1] + pl_coeffs[2]*points[:,:,2] + pl_coeffs[3]) 85 | nml_len = torch.sqrt((pl_coeffs[0] * pl_coeffs[0]) + (pl_coeffs[1] * pl_coeffs[1]) + (pl_coeffs[2] * pl_coeffs[2])) 86 | return dist/nml_len 87 | 88 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') 89 | fpath = '/outputs/experiments/2023-01-17_22-29/rand_outs.npz' 90 | pred = get_data_without_normals(fpath) 91 | gt = get_gt_with_normals(device) 92 | t = TicToc() 93 | t.tic() #Start timer 94 | grp_normals, query_pnts = computeCandidateNormals(gt, pred.to(device).float()) 95 | plane_coeffs = computePlaneWithNormalAndPoint(grp_normals, query_pnts) 96 | dist = p2p_dist(plane_coeffs, pred.to(device).float()) 97 | t.toc() -------------------------------------------------------------------------------- /point_ops/Chamfer3D/chamfer3D.cu: -------------------------------------------------------------------------------- 1 | 2 | #include 3 | #include 4 | 5 | #include 6 | #include 7 | 8 | #include 9 | 10 | 11 | 12 | __global__ void NmDistanceKernel(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i){ 13 | const int batch=512; 14 | __shared__ float buf[batch*3]; 15 | for (int i=blockIdx.x;ibest){ 127 | result[(i*n+j)]=best; 128 | result_i[(i*n+j)]=best_i; 129 | } 130 | } 131 | __syncthreads(); 132 | } 133 | } 134 | } 135 | // int chamfer_cuda_forward(int b,int n,const float * xyz,int m,const float * xyz2,float * result,int * result_i,float * result2,int * result2_i, cudaStream_t stream){ 136 | int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2){ 137 | 138 | const auto batch_size = xyz1.size(0); 139 | const auto n = xyz1.size(1); //num_points point cloud A 140 | const auto m = xyz2.size(1); //num_points point cloud B 141 | 142 | NmDistanceKernel<<>>(batch_size, n, xyz1.data(), m, xyz2.data(), dist1.data(), idx1.data()); 143 | NmDistanceKernel<<>>(batch_size, m, xyz2.data(), n, xyz1.data(), dist2.data(), 
idx2.data()); 144 | 145 | cudaError_t err = cudaGetLastError(); 146 | if (err != cudaSuccess) { 147 | printf("error in nnd updateOutput: %s\n", cudaGetErrorString(err)); 148 | //THError("aborting"); 149 | return 0; 150 | } 151 | return 1; 152 | 153 | 154 | } 155 | __global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){ 156 | for (int i=blockIdx.x;i>>(batch_size,n,xyz1.data(),m,xyz2.data(),graddist1.data(),idx1.data(),gradxyz1.data(),gradxyz2.data()); 185 | NmDistanceGradKernel<<>>(batch_size,m,xyz2.data(),n,xyz1.data(),graddist2.data(),idx2.data(),gradxyz2.data(),gradxyz1.data()); 186 | 187 | cudaError_t err = cudaGetLastError(); 188 | if (err != cudaSuccess) { 189 | printf("error in nnd get grad: %s\n", cudaGetErrorString(err)); 190 | //THError("aborting"); 191 | return 0; 192 | } 193 | return 1; 194 | 195 | } 196 | 197 | -------------------------------------------------------------------------------- /point_ops/Chamfer3D/chamfer_cuda.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | ///TMP 5 | //#include "common.h" 6 | /// NOT TMP 7 | 8 | 9 | int chamfer_cuda_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2); 10 | 11 | 12 | int chamfer_cuda_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2); 13 | 14 | 15 | 16 | 17 | int chamfer_forward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor dist1, at::Tensor dist2, at::Tensor idx1, at::Tensor idx2) { 18 | return chamfer_cuda_forward(xyz1, xyz2, dist1, dist2, idx1, idx2); 19 | } 20 | 21 | 22 | int chamfer_backward(at::Tensor xyz1, at::Tensor xyz2, at::Tensor gradxyz1, at::Tensor gradxyz2, at::Tensor graddist1, 23 | at::Tensor graddist2, at::Tensor idx1, at::Tensor idx2) { 24 | 25 | return chamfer_cuda_backward(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2); 26 | } 27 | 28 | 29 | 30 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 31 | m.def("forward", &chamfer_forward, "chamfer forward (CUDA)"); 32 | m.def("backward", &chamfer_backward, "chamfer backward (CUDA)"); 33 | } -------------------------------------------------------------------------------- /point_ops/Chamfer3D/dist_chamfer_3D.py: -------------------------------------------------------------------------------- 1 | from torch import nn 2 | from torch.autograd import Function 3 | import torch 4 | import importlib 5 | import os 6 | chamfer_found = importlib.find_loader("chamfer_3D") is not None 7 | if not chamfer_found: 8 | ## Cool trick from https://github.com/chrdiller 9 | print("Jitting Chamfer 3D") 10 | 11 | from torch.utils.cpp_extension import load 12 | chamfer_3D = load(name="chamfer_3D", 13 | sources=[ 14 | "/".join(os.path.abspath(__file__).split('/')[:-1] + ["chamfer_cuda.cpp"]), 15 | "/".join(os.path.abspath(__file__).split('/')[:-1] + ["chamfer3D.cu"]), 16 | ]) 17 | print("Loaded JIT 3D CUDA chamfer distance") 18 | 19 | else: 20 | import chamfer_3D 21 | print("Loaded compiled 3D CUDA chamfer distance") 22 | 23 | 24 | # Chamfer's distance module @thibaultgroueix 25 | # GPU tensors only 26 | class chamfer_3DFunction(Function): 27 | @staticmethod 28 | def forward(ctx, xyz1, xyz2): 29 | batchsize, n, _ = xyz1.size() 30 | _, m, _ = xyz2.size() 31 | device = xyz1.device 32 | 33 | dist1 = torch.zeros(batchsize, n) 34 | 
dist2 = torch.zeros(batchsize, m) 35 | 36 | idx1 = torch.zeros(batchsize, n).type(torch.IntTensor) 37 | idx2 = torch.zeros(batchsize, m).type(torch.IntTensor) 38 | 39 | dist1 = dist1.to(device) 40 | dist2 = dist2.to(device) 41 | idx1 = idx1.to(device) 42 | idx2 = idx2.to(device) 43 | torch.cuda.set_device(device) 44 | 45 | chamfer_3D.forward(xyz1, xyz2, dist1, dist2, idx1, idx2) 46 | ctx.save_for_backward(xyz1, xyz2, idx1, idx2) 47 | return dist1, dist2, idx1, idx2 48 | 49 | @staticmethod 50 | def backward(ctx, graddist1, graddist2, gradidx1, gradidx2): 51 | xyz1, xyz2, idx1, idx2 = ctx.saved_tensors 52 | graddist1 = graddist1.contiguous() 53 | graddist2 = graddist2.contiguous() 54 | device = graddist1.device 55 | 56 | gradxyz1 = torch.zeros(xyz1.size()) 57 | gradxyz2 = torch.zeros(xyz2.size()) 58 | 59 | gradxyz1 = gradxyz1.to(device) 60 | gradxyz2 = gradxyz2.to(device) 61 | chamfer_3D.backward( 62 | xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2 63 | ) 64 | return gradxyz1, gradxyz2 65 | 66 | 67 | class chamfer_3DDist(nn.Module): 68 | def __init__(self): 69 | super(chamfer_3DDist, self).__init__() 70 | 71 | def forward(self, input1, input2): 72 | input1 = input1.contiguous() 73 | input2 = input2.contiguous() 74 | return chamfer_3DFunction.apply(input1, input2) 75 | 76 | -------------------------------------------------------------------------------- /point_ops/Chamfer3D/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 3 | 4 | setup( 5 | name='chamfer_3D', 6 | ext_modules=[ 7 | CUDAExtension('chamfer_3D', [ 8 | "/".join(__file__.split('/')[:-1] + ['chamfer_cuda.cpp']), 9 | "/".join(__file__.split('/')[:-1] + ['chamfer3D.cu']), 10 | ]), 11 | ], 12 | cmdclass={ 13 | 'build_ext': BuildExtension 14 | }) -------------------------------------------------------------------------------- /point_ops/compile_chamfer.sh: -------------------------------------------------------------------------------- 1 | cd point_ops/Chamfer3D 2 | python3 setup.py install 3 | cd ../.. -------------------------------------------------------------------------------- /point_ops/compile_emd.sh: -------------------------------------------------------------------------------- 1 | cd point_ops/earth_movers_distance 2 | python3 setup.py install 3 | cd ../.. 
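A minimal usage sketch (not part of the repository files) for the Chamfer module defined in point_ops/Chamfer3D/dist_chamfer_3D.py above. It assumes the chamfer_3D extension has been built (e.g. via compile_chamfer.sh) or can be JIT-compiled on first import, that the repository root is on PYTHONPATH, and that a CUDA device is available; batch and point counts are illustrative only.

import torch
from point_ops.Chamfer3D.dist_chamfer_3D import chamfer_3DDist

chamfer = chamfer_3DDist()                    # GPU-only module; forward launches the CUDA kernels
pred = torch.rand(2, 2048, 3, device='cuda')  # (B, N, 3) predicted points (illustrative shapes)
gt = torch.rand(2, 16384, 3, device='cuda')   # (B, M, 3) reference points
dist1, dist2, idx1, idx2 = chamfer(pred, gt)  # squared nearest-neighbour distances and indices, both directions
loss = dist1.mean() + dist2.mean()            # symmetric (squared-L2) Chamfer loss

dist1 and dist2 have shapes (B, N) and (B, M) respectively, matching the tensors allocated in chamfer_3DFunction.forward.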
-------------------------------------------------------------------------------- /point_ops/earth_movers_distance/emd.cpp: -------------------------------------------------------------------------------- 1 | #ifndef _EMD 2 | #define _EMD 3 | 4 | #include 5 | #include 6 | 7 | //CUDA declarations 8 | at::Tensor ApproxMatchForward( 9 | const at::Tensor xyz1, 10 | const at::Tensor xyz2); 11 | 12 | at::Tensor MatchCostForward( 13 | const at::Tensor xyz1, 14 | const at::Tensor xyz2, 15 | const at::Tensor match); 16 | 17 | std::vector MatchCostBackward( 18 | const at::Tensor grad_cost, 19 | const at::Tensor xyz1, 20 | const at::Tensor xyz2, 21 | const at::Tensor match); 22 | 23 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 24 | m.def("approxmatch_forward", &ApproxMatchForward,"ApproxMatch forward (CUDA)"); 25 | m.def("matchcost_forward", &MatchCostForward,"MatchCost forward (CUDA)"); 26 | m.def("matchcost_backward", &MatchCostBackward,"MatchCost backward (CUDA)"); 27 | } 28 | 29 | #endif 30 | -------------------------------------------------------------------------------- /point_ops/earth_movers_distance/emd.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import os 4 | import importlib 5 | 6 | chamfer_found = importlib.find_loader("emd_cuda") is not None 7 | if not chamfer_found: 8 | ## Cool trick from https://github.com/chrdiller 9 | print("Jitting emd_cuda") 10 | 11 | from torch.utils.cpp_extension import load 12 | emd_cuda = load(name="emd_cuda", 13 | sources=[ 14 | "/".join(os.path.abspath(__file__).split('/')[:-1] + ["emd.cpp"]), 15 | "/".join(os.path.abspath(__file__).split('/')[:-1] + ["emd_kernel.cu"]), 16 | ]) 17 | print("Loaded JIT 3D CUDA earth mover distance") 18 | 19 | else: 20 | import emd_cuda 21 | print("Loaded compiled 3D CUDA earth mover distance") 22 | 23 | 24 | class EarthMoverDistanceFunction(torch.autograd.Function): 25 | @staticmethod 26 | def forward(ctx, xyz1, xyz2): 27 | xyz1 = xyz1.contiguous() 28 | xyz2 = xyz2.contiguous() 29 | assert xyz1.is_cuda and xyz2.is_cuda, "Only support cuda currently." 
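# Note (added comment, not in the original file): approxmatch_forward computes an approximate
# matching between xyz1 and xyz2, matchcost_forward evaluates the total cost of that matching,
# and gradients w.r.t. both point sets are produced by matchcost_backward in backward() below.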
30 | match = emd_cuda.approxmatch_forward(xyz1, xyz2) 31 | cost = emd_cuda.matchcost_forward(xyz1, xyz2, match) 32 | ctx.save_for_backward(xyz1, xyz2, match) 33 | return cost 34 | 35 | @staticmethod 36 | def backward(ctx, grad_cost): 37 | xyz1, xyz2, match = ctx.saved_tensors 38 | grad_cost = grad_cost.contiguous() 39 | grad_xyz1, grad_xyz2 = emd_cuda.matchcost_backward(grad_cost, xyz1, xyz2, match) 40 | return grad_xyz1, grad_xyz2 41 | 42 | 43 | class EarthMoverDistance(nn.Module): 44 | def __init__(self): 45 | super().__init__() 46 | 47 | def forward(self, xyz1, xyz2): 48 | """ 49 | Args: 50 | xyz1 (torch.Tensor): (b, N1, 3) 51 | xyz2 (torch.Tensor): (b, N2, 3) 52 | 53 | Returns: 54 | cost (torch.Tensor): (b) 55 | """ 56 | if xyz1.dim() == 2: 57 | xyz1 = xyz1.unsqueeze(0) 58 | if xyz2.dim() == 2: 59 | xyz2 = xyz2.unsqueeze(0) 60 | cost = EarthMoverDistanceFunction.apply(xyz1, xyz2) 61 | return cost 62 | -------------------------------------------------------------------------------- /point_ops/earth_movers_distance/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 3 | import os 4 | 5 | os.environ["TORCH_CUDA_ARCH_LIST"] = "3.7+PTX;5.0;6.0;6.1;6.2;7.0;7.5" 6 | setup( 7 | name='emd_cuda', 8 | ext_modules=[ 9 | CUDAExtension( 10 | name='emd_cuda', 11 | sources=[ 12 | "/".join(__file__.split('/')[:-1] + ['emd.cpp']), 13 | "/".join(__file__.split('/')[:-1] + ['emd_kernel.cu']), 14 | ], 15 | extra_compile_args={'cxx': ['-g'], 'nvcc': ['-O2']} 16 | # extra_compile_args={ 17 | # "cxx": ["-O3"], 18 | # "nvcc": ["-O3", "-Xfatbin", "-compress-all"], 19 | # }, 20 | ), 21 | ], 22 | cmdclass={ 23 | 'build_ext': BuildExtension 24 | }) 25 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/__init__.py: -------------------------------------------------------------------------------- 1 | # import pointnet2_ops.pointnet2_modules 2 | from point_ops.pointnet2_ops import pointnet2_utils 3 | from point_ops.pointnet2_ops._version import __version__ 4 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/_ext-src/include/ball_query.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius, 5 | const int nsample); 6 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/_ext-src/include/cuda_utils.h: -------------------------------------------------------------------------------- 1 | #ifndef _CUDA_UTILS_H 2 | #define _CUDA_UTILS_H 3 | 4 | #include 5 | #include 6 | #include 7 | 8 | #include 9 | #include 10 | 11 | #include 12 | 13 | #define TOTAL_THREADS 512 14 | 15 | inline int opt_n_threads(int work_size) { 16 | const int pow_2 = std::log(static_cast(work_size)) / std::log(2.0); 17 | 18 | return max(min(1 << pow_2, TOTAL_THREADS), 1); 19 | } 20 | 21 | inline dim3 opt_block_config(int x, int y) { 22 | const int x_threads = opt_n_threads(x); 23 | const int y_threads = 24 | max(min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1); 25 | dim3 block_config(x_threads, y_threads, 1); 26 | 27 | return block_config; 28 | } 29 | 30 | #define CUDA_CHECK_ERRORS() \ 31 | do { \ 32 | cudaError_t err = cudaGetLastError(); \ 33 | if (cudaSuccess != err) { \ 34 | fprintf(stderr, "CUDA 
kernel failed : %s\n%s at L:%d in %s\n", \ 35 | cudaGetErrorString(err), __PRETTY_FUNCTION__, __LINE__, \ 36 | __FILE__); \ 37 | exit(-1); \ 38 | } \ 39 | } while (0) 40 | 41 | #endif 42 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/_ext-src/include/group_points.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | at::Tensor group_points(at::Tensor points, at::Tensor idx); 5 | at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n); 6 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/_ext-src/include/interpolate.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | 3 | #include 4 | #include 5 | 6 | std::vector three_nn(at::Tensor unknowns, at::Tensor knows); 7 | at::Tensor three_interpolate(at::Tensor points, at::Tensor idx, 8 | at::Tensor weight); 9 | at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx, 10 | at::Tensor weight, const int m); 11 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/_ext-src/include/sampling.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | 4 | at::Tensor gather_points(at::Tensor points, at::Tensor idx); 5 | at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx, const int n); 6 | at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples); 7 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/_ext-src/include/utils.h: -------------------------------------------------------------------------------- 1 | #pragma once 2 | #include 3 | #include 4 | 5 | #define CHECK_CUDA(x) \ 6 | do { \ 7 | AT_ASSERT(x.is_cuda(), #x " must be a CUDA tensor"); \ 8 | } while (0) 9 | 10 | #define CHECK_CONTIGUOUS(x) \ 11 | do { \ 12 | AT_ASSERT(x.is_contiguous(), #x " must be a contiguous tensor"); \ 13 | } while (0) 14 | 15 | #define CHECK_IS_INT(x) \ 16 | do { \ 17 | AT_ASSERT(x.scalar_type() == at::ScalarType::Int, \ 18 | #x " must be an int tensor"); \ 19 | } while (0) 20 | 21 | #define CHECK_IS_FLOAT(x) \ 22 | do { \ 23 | AT_ASSERT(x.scalar_type() == at::ScalarType::Float, \ 24 | #x " must be a float tensor"); \ 25 | } while (0) 26 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/_ext-src/src/ball_query.cpp: -------------------------------------------------------------------------------- 1 | #include "ball_query.h" 2 | #include "utils.h" 3 | 4 | void query_ball_point_kernel_wrapper(int b, int n, int m, float radius, 5 | int nsample, const float *new_xyz, 6 | const float *xyz, int *idx); 7 | 8 | at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius, 9 | const int nsample) { 10 | CHECK_CONTIGUOUS(new_xyz); 11 | CHECK_CONTIGUOUS(xyz); 12 | CHECK_IS_FLOAT(new_xyz); 13 | CHECK_IS_FLOAT(xyz); 14 | 15 | if (new_xyz.is_cuda()) { 16 | CHECK_CUDA(xyz); 17 | } 18 | 19 | at::Tensor idx = 20 | torch::zeros({new_xyz.size(0), new_xyz.size(1), nsample}, 21 | at::device(new_xyz.device()).dtype(at::ScalarType::Int)); 22 | 23 | if (new_xyz.is_cuda()) { 24 | query_ball_point_kernel_wrapper(xyz.size(0), xyz.size(1), new_xyz.size(1), 25 | radius, nsample, new_xyz.data_ptr(), 26 | xyz.data_ptr(), idx.data_ptr()); 27 | } else { 28 | 
AT_ASSERT(false, "CPU not supported"); 29 | } 30 | 31 | return idx; 32 | } 33 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/_ext-src/src/ball_query_gpu.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "cuda_utils.h" 6 | 7 | // input: new_xyz(b, m, 3) xyz(b, n, 3) 8 | // output: idx(b, m, nsample) 9 | __global__ void query_ball_point_kernel(int b, int n, int m, float radius, 10 | int nsample, 11 | const float *__restrict__ new_xyz, 12 | const float *__restrict__ xyz, 13 | int *__restrict__ idx) { 14 | int batch_index = blockIdx.x; 15 | xyz += batch_index * n * 3; 16 | new_xyz += batch_index * m * 3; 17 | idx += m * nsample * batch_index; 18 | 19 | int index = threadIdx.x; 20 | int stride = blockDim.x; 21 | 22 | float radius2 = radius * radius; 23 | for (int j = index; j < m; j += stride) { 24 | float new_x = new_xyz[j * 3 + 0]; 25 | float new_y = new_xyz[j * 3 + 1]; 26 | float new_z = new_xyz[j * 3 + 2]; 27 | for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) { 28 | float x = xyz[k * 3 + 0]; 29 | float y = xyz[k * 3 + 1]; 30 | float z = xyz[k * 3 + 2]; 31 | float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + 32 | (new_z - z) * (new_z - z); 33 | if (d2 < radius2) { 34 | if (cnt == 0) { 35 | for (int l = 0; l < nsample; ++l) { 36 | idx[j * nsample + l] = k; 37 | } 38 | } 39 | idx[j * nsample + cnt] = k; 40 | ++cnt; 41 | } 42 | } 43 | } 44 | } 45 | 46 | void query_ball_point_kernel_wrapper(int b, int n, int m, float radius, 47 | int nsample, const float *new_xyz, 48 | const float *xyz, int *idx) { 49 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 50 | query_ball_point_kernel<<>>( 51 | b, n, m, radius, nsample, new_xyz, xyz, idx); 52 | 53 | CUDA_CHECK_ERRORS(); 54 | } 55 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/_ext-src/src/bindings.cpp: -------------------------------------------------------------------------------- 1 | #include "ball_query.h" 2 | #include "group_points.h" 3 | #include "interpolate.h" 4 | #include "sampling.h" 5 | 6 | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { 7 | m.def("gather_points", &gather_points); 8 | m.def("gather_points_grad", &gather_points_grad); 9 | m.def("furthest_point_sampling", &furthest_point_sampling); 10 | 11 | m.def("three_nn", &three_nn); 12 | m.def("three_interpolate", &three_interpolate); 13 | m.def("three_interpolate_grad", &three_interpolate_grad); 14 | 15 | m.def("ball_query", &ball_query); 16 | 17 | m.def("group_points", &group_points); 18 | m.def("group_points_grad", &group_points_grad); 19 | } 20 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/_ext-src/src/group_points.cpp: -------------------------------------------------------------------------------- 1 | #include "group_points.h" 2 | #include "utils.h" 3 | 4 | void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample, 5 | const float *points, const int *idx, 6 | float *out); 7 | 8 | void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints, 9 | int nsample, const float *grad_out, 10 | const int *idx, float *grad_points); 11 | 12 | at::Tensor group_points(at::Tensor points, at::Tensor idx) { 13 | CHECK_CONTIGUOUS(points); 14 | CHECK_CONTIGUOUS(idx); 15 | CHECK_IS_FLOAT(points); 16 | CHECK_IS_INT(idx); 17 | 18 | if (points.is_cuda()) { 19 | CHECK_CUDA(idx); 20 
| } 21 | 22 | at::Tensor output = 23 | torch::zeros({points.size(0), points.size(1), idx.size(1), idx.size(2)}, 24 | at::device(points.device()).dtype(at::ScalarType::Float)); 25 | 26 | if (points.is_cuda()) { 27 | group_points_kernel_wrapper(points.size(0), points.size(1), points.size(2), 28 | idx.size(1), idx.size(2), 29 | points.data_ptr(), idx.data_ptr(), 30 | output.data_ptr()); 31 | } else { 32 | AT_ASSERT(false, "CPU not supported"); 33 | } 34 | 35 | return output; 36 | } 37 | 38 | at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n) { 39 | CHECK_CONTIGUOUS(grad_out); 40 | CHECK_CONTIGUOUS(idx); 41 | CHECK_IS_FLOAT(grad_out); 42 | CHECK_IS_INT(idx); 43 | 44 | if (grad_out.is_cuda()) { 45 | CHECK_CUDA(idx); 46 | } 47 | 48 | at::Tensor output = 49 | torch::zeros({grad_out.size(0), grad_out.size(1), n}, 50 | at::device(grad_out.device()).dtype(at::ScalarType::Float)); 51 | 52 | if (grad_out.is_cuda()) { 53 | group_points_grad_kernel_wrapper( 54 | grad_out.size(0), grad_out.size(1), n, idx.size(1), idx.size(2), 55 | grad_out.data_ptr(), idx.data_ptr(), 56 | output.data_ptr()); 57 | } else { 58 | AT_ASSERT(false, "CPU not supported"); 59 | } 60 | 61 | return output; 62 | } 63 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/_ext-src/src/group_points_gpu.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "cuda_utils.h" 5 | 6 | // input: points(b, c, n) idx(b, npoints, nsample) 7 | // output: out(b, c, npoints, nsample) 8 | __global__ void group_points_kernel(int b, int c, int n, int npoints, 9 | int nsample, 10 | const float *__restrict__ points, 11 | const int *__restrict__ idx, 12 | float *__restrict__ out) { 13 | int batch_index = blockIdx.x; 14 | points += batch_index * n * c; 15 | idx += batch_index * npoints * nsample; 16 | out += batch_index * npoints * nsample * c; 17 | 18 | const int index = threadIdx.y * blockDim.x + threadIdx.x; 19 | const int stride = blockDim.y * blockDim.x; 20 | for (int i = index; i < c * npoints; i += stride) { 21 | const int l = i / npoints; 22 | const int j = i % npoints; 23 | for (int k = 0; k < nsample; ++k) { 24 | int ii = idx[j * nsample + k]; 25 | out[(l * npoints + j) * nsample + k] = points[l * n + ii]; 26 | } 27 | } 28 | } 29 | 30 | void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample, 31 | const float *points, const int *idx, 32 | float *out) { 33 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 34 | 35 | group_points_kernel<<>>( 36 | b, c, n, npoints, nsample, points, idx, out); 37 | 38 | CUDA_CHECK_ERRORS(); 39 | } 40 | 41 | // input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample) 42 | // output: grad_points(b, c, n) 43 | __global__ void group_points_grad_kernel(int b, int c, int n, int npoints, 44 | int nsample, 45 | const float *__restrict__ grad_out, 46 | const int *__restrict__ idx, 47 | float *__restrict__ grad_points) { 48 | int batch_index = blockIdx.x; 49 | grad_out += batch_index * npoints * nsample * c; 50 | idx += batch_index * npoints * nsample; 51 | grad_points += batch_index * n * c; 52 | 53 | const int index = threadIdx.y * blockDim.x + threadIdx.x; 54 | const int stride = blockDim.y * blockDim.x; 55 | for (int i = index; i < c * npoints; i += stride) { 56 | const int l = i / npoints; 57 | const int j = i % npoints; 58 | for (int k = 0; k < nsample; ++k) { 59 | int ii = idx[j * nsample + k]; 60 | atomicAdd(grad_points + l * 
n + ii, 61 | grad_out[(l * npoints + j) * nsample + k]); 62 | } 63 | } 64 | } 65 | 66 | void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints, 67 | int nsample, const float *grad_out, 68 | const int *idx, float *grad_points) { 69 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 70 | 71 | group_points_grad_kernel<<>>( 72 | b, c, n, npoints, nsample, grad_out, idx, grad_points); 73 | 74 | CUDA_CHECK_ERRORS(); 75 | } 76 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/_ext-src/src/interpolate.cpp: -------------------------------------------------------------------------------- 1 | #include "interpolate.h" 2 | #include "utils.h" 3 | 4 | void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown, 5 | const float *known, float *dist2, int *idx); 6 | void three_interpolate_kernel_wrapper(int b, int c, int m, int n, 7 | const float *points, const int *idx, 8 | const float *weight, float *out); 9 | void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m, 10 | const float *grad_out, 11 | const int *idx, const float *weight, 12 | float *grad_points); 13 | 14 | std::vector three_nn(at::Tensor unknowns, at::Tensor knows) { 15 | CHECK_CONTIGUOUS(unknowns); 16 | CHECK_CONTIGUOUS(knows); 17 | CHECK_IS_FLOAT(unknowns); 18 | CHECK_IS_FLOAT(knows); 19 | 20 | if (unknowns.is_cuda()) { 21 | CHECK_CUDA(knows); 22 | } 23 | 24 | at::Tensor idx = 25 | torch::zeros({unknowns.size(0), unknowns.size(1), 3}, 26 | at::device(unknowns.device()).dtype(at::ScalarType::Int)); 27 | at::Tensor dist2 = 28 | torch::zeros({unknowns.size(0), unknowns.size(1), 3}, 29 | at::device(unknowns.device()).dtype(at::ScalarType::Float)); 30 | 31 | if (unknowns.is_cuda()) { 32 | three_nn_kernel_wrapper(unknowns.size(0), unknowns.size(1), knows.size(1), 33 | unknowns.data_ptr(), knows.data_ptr(), 34 | dist2.data_ptr(), idx.data_ptr()); 35 | } else { 36 | AT_ASSERT(false, "CPU not supported"); 37 | } 38 | 39 | return {dist2, idx}; 40 | } 41 | 42 | at::Tensor three_interpolate(at::Tensor points, at::Tensor idx, 43 | at::Tensor weight) { 44 | CHECK_CONTIGUOUS(points); 45 | CHECK_CONTIGUOUS(idx); 46 | CHECK_CONTIGUOUS(weight); 47 | CHECK_IS_FLOAT(points); 48 | CHECK_IS_INT(idx); 49 | CHECK_IS_FLOAT(weight); 50 | 51 | if (points.is_cuda()) { 52 | CHECK_CUDA(idx); 53 | CHECK_CUDA(weight); 54 | } 55 | 56 | at::Tensor output = 57 | torch::zeros({points.size(0), points.size(1), idx.size(1)}, 58 | at::device(points.device()).dtype(at::ScalarType::Float)); 59 | 60 | if (points.is_cuda()) { 61 | three_interpolate_kernel_wrapper( 62 | points.size(0), points.size(1), points.size(2), idx.size(1), 63 | points.data_ptr(), idx.data_ptr(), weight.data_ptr(), 64 | output.data_ptr()); 65 | } else { 66 | AT_ASSERT(false, "CPU not supported"); 67 | } 68 | 69 | return output; 70 | } 71 | at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx, 72 | at::Tensor weight, const int m) { 73 | CHECK_CONTIGUOUS(grad_out); 74 | CHECK_CONTIGUOUS(idx); 75 | CHECK_CONTIGUOUS(weight); 76 | CHECK_IS_FLOAT(grad_out); 77 | CHECK_IS_INT(idx); 78 | CHECK_IS_FLOAT(weight); 79 | 80 | if (grad_out.is_cuda()) { 81 | CHECK_CUDA(idx); 82 | CHECK_CUDA(weight); 83 | } 84 | 85 | at::Tensor output = 86 | torch::zeros({grad_out.size(0), grad_out.size(1), m}, 87 | at::device(grad_out.device()).dtype(at::ScalarType::Float)); 88 | 89 | if (grad_out.is_cuda()) { 90 | three_interpolate_grad_kernel_wrapper( 91 | grad_out.size(0), grad_out.size(1), grad_out.size(2), m, 92 | 
grad_out.data_ptr(), idx.data_ptr(), 93 | weight.data_ptr(), output.data_ptr()); 94 | } else { 95 | AT_ASSERT(false, "CPU not supported"); 96 | } 97 | 98 | return output; 99 | } 100 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/_ext-src/src/interpolate_gpu.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | 5 | #include "cuda_utils.h" 6 | 7 | // input: unknown(b, n, 3) known(b, m, 3) 8 | // output: dist2(b, n, 3), idx(b, n, 3) 9 | __global__ void three_nn_kernel(int b, int n, int m, 10 | const float *__restrict__ unknown, 11 | const float *__restrict__ known, 12 | float *__restrict__ dist2, 13 | int *__restrict__ idx) { 14 | int batch_index = blockIdx.x; 15 | unknown += batch_index * n * 3; 16 | known += batch_index * m * 3; 17 | dist2 += batch_index * n * 3; 18 | idx += batch_index * n * 3; 19 | 20 | int index = threadIdx.x; 21 | int stride = blockDim.x; 22 | for (int j = index; j < n; j += stride) { 23 | float ux = unknown[j * 3 + 0]; 24 | float uy = unknown[j * 3 + 1]; 25 | float uz = unknown[j * 3 + 2]; 26 | 27 | double best1 = 1e40, best2 = 1e40, best3 = 1e40; 28 | int besti1 = 0, besti2 = 0, besti3 = 0; 29 | for (int k = 0; k < m; ++k) { 30 | float x = known[k * 3 + 0]; 31 | float y = known[k * 3 + 1]; 32 | float z = known[k * 3 + 2]; 33 | float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z); 34 | if (d < best1) { 35 | best3 = best2; 36 | besti3 = besti2; 37 | best2 = best1; 38 | besti2 = besti1; 39 | best1 = d; 40 | besti1 = k; 41 | } else if (d < best2) { 42 | best3 = best2; 43 | besti3 = besti2; 44 | best2 = d; 45 | besti2 = k; 46 | } else if (d < best3) { 47 | best3 = d; 48 | besti3 = k; 49 | } 50 | } 51 | dist2[j * 3 + 0] = best1; 52 | dist2[j * 3 + 1] = best2; 53 | dist2[j * 3 + 2] = best3; 54 | 55 | idx[j * 3 + 0] = besti1; 56 | idx[j * 3 + 1] = besti2; 57 | idx[j * 3 + 2] = besti3; 58 | } 59 | } 60 | 61 | void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown, 62 | const float *known, float *dist2, int *idx) { 63 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 64 | three_nn_kernel<<>>(b, n, m, unknown, known, 65 | dist2, idx); 66 | 67 | CUDA_CHECK_ERRORS(); 68 | } 69 | 70 | // input: points(b, c, m), idx(b, n, 3), weight(b, n, 3) 71 | // output: out(b, c, n) 72 | __global__ void three_interpolate_kernel(int b, int c, int m, int n, 73 | const float *__restrict__ points, 74 | const int *__restrict__ idx, 75 | const float *__restrict__ weight, 76 | float *__restrict__ out) { 77 | int batch_index = blockIdx.x; 78 | points += batch_index * m * c; 79 | 80 | idx += batch_index * n * 3; 81 | weight += batch_index * n * 3; 82 | 83 | out += batch_index * n * c; 84 | 85 | const int index = threadIdx.y * blockDim.x + threadIdx.x; 86 | const int stride = blockDim.y * blockDim.x; 87 | for (int i = index; i < c * n; i += stride) { 88 | const int l = i / n; 89 | const int j = i % n; 90 | float w1 = weight[j * 3 + 0]; 91 | float w2 = weight[j * 3 + 1]; 92 | float w3 = weight[j * 3 + 2]; 93 | 94 | int i1 = idx[j * 3 + 0]; 95 | int i2 = idx[j * 3 + 1]; 96 | int i3 = idx[j * 3 + 2]; 97 | 98 | out[i] = points[l * m + i1] * w1 + points[l * m + i2] * w2 + 99 | points[l * m + i3] * w3; 100 | } 101 | } 102 | 103 | void three_interpolate_kernel_wrapper(int b, int c, int m, int n, 104 | const float *points, const int *idx, 105 | const float *weight, float *out) { 106 | cudaStream_t stream = 
at::cuda::getCurrentCUDAStream(); 107 | three_interpolate_kernel<<>>( 108 | b, c, m, n, points, idx, weight, out); 109 | 110 | CUDA_CHECK_ERRORS(); 111 | } 112 | 113 | // input: grad_out(b, c, n), idx(b, n, 3), weight(b, n, 3) 114 | // output: grad_points(b, c, m) 115 | 116 | __global__ void three_interpolate_grad_kernel( 117 | int b, int c, int n, int m, const float *__restrict__ grad_out, 118 | const int *__restrict__ idx, const float *__restrict__ weight, 119 | float *__restrict__ grad_points) { 120 | int batch_index = blockIdx.x; 121 | grad_out += batch_index * n * c; 122 | idx += batch_index * n * 3; 123 | weight += batch_index * n * 3; 124 | grad_points += batch_index * m * c; 125 | 126 | const int index = threadIdx.y * blockDim.x + threadIdx.x; 127 | const int stride = blockDim.y * blockDim.x; 128 | for (int i = index; i < c * n; i += stride) { 129 | const int l = i / n; 130 | const int j = i % n; 131 | float w1 = weight[j * 3 + 0]; 132 | float w2 = weight[j * 3 + 1]; 133 | float w3 = weight[j * 3 + 2]; 134 | 135 | int i1 = idx[j * 3 + 0]; 136 | int i2 = idx[j * 3 + 1]; 137 | int i3 = idx[j * 3 + 2]; 138 | 139 | atomicAdd(grad_points + l * m + i1, grad_out[i] * w1); 140 | atomicAdd(grad_points + l * m + i2, grad_out[i] * w2); 141 | atomicAdd(grad_points + l * m + i3, grad_out[i] * w3); 142 | } 143 | } 144 | 145 | void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m, 146 | const float *grad_out, 147 | const int *idx, const float *weight, 148 | float *grad_points) { 149 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 150 | three_interpolate_grad_kernel<<>>( 151 | b, c, n, m, grad_out, idx, weight, grad_points); 152 | 153 | CUDA_CHECK_ERRORS(); 154 | } 155 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/_ext-src/src/sampling.cpp: -------------------------------------------------------------------------------- 1 | #include "sampling.h" 2 | #include "utils.h" 3 | 4 | void gather_points_kernel_wrapper(int b, int c, int n, int npoints, 5 | const float *points, const int *idx, 6 | float *out); 7 | void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints, 8 | const float *grad_out, const int *idx, 9 | float *grad_points); 10 | 11 | void furthest_point_sampling_kernel_wrapper(int b, int n, int m, 12 | const float *dataset, float *temp, 13 | int *idxs); 14 | 15 | at::Tensor gather_points(at::Tensor points, at::Tensor idx) { 16 | CHECK_CONTIGUOUS(points); 17 | CHECK_CONTIGUOUS(idx); 18 | CHECK_IS_FLOAT(points); 19 | CHECK_IS_INT(idx); 20 | 21 | if (points.is_cuda()) { 22 | CHECK_CUDA(idx); 23 | } 24 | 25 | at::Tensor output = 26 | torch::zeros({points.size(0), points.size(1), idx.size(1)}, 27 | at::device(points.device()).dtype(at::ScalarType::Float)); 28 | 29 | if (points.is_cuda()) { 30 | gather_points_kernel_wrapper(points.size(0), points.size(1), points.size(2), 31 | idx.size(1), points.data_ptr(), 32 | idx.data_ptr(), output.data_ptr()); 33 | } else { 34 | AT_ASSERT(false, "CPU not supported"); 35 | } 36 | 37 | return output; 38 | } 39 | 40 | at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx, 41 | const int n) { 42 | CHECK_CONTIGUOUS(grad_out); 43 | CHECK_CONTIGUOUS(idx); 44 | CHECK_IS_FLOAT(grad_out); 45 | CHECK_IS_INT(idx); 46 | 47 | if (grad_out.is_cuda()) { 48 | CHECK_CUDA(idx); 49 | } 50 | 51 | at::Tensor output = 52 | torch::zeros({grad_out.size(0), grad_out.size(1), n}, 53 | at::device(grad_out.device()).dtype(at::ScalarType::Float)); 54 | 55 | if 
(grad_out.is_cuda()) { 56 | gather_points_grad_kernel_wrapper(grad_out.size(0), grad_out.size(1), n, 57 | idx.size(1), grad_out.data_ptr(), 58 | idx.data_ptr(), 59 | output.data_ptr()); 60 | } else { 61 | AT_ASSERT(false, "CPU not supported"); 62 | } 63 | 64 | return output; 65 | } 66 | at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples) { 67 | CHECK_CONTIGUOUS(points); 68 | CHECK_IS_FLOAT(points); 69 | 70 | at::Tensor output = 71 | torch::zeros({points.size(0), nsamples}, 72 | at::device(points.device()).dtype(at::ScalarType::Int)); 73 | 74 | at::Tensor tmp = 75 | torch::full({points.size(0), points.size(1)}, 1e10, 76 | at::device(points.device()).dtype(at::ScalarType::Float)); 77 | 78 | if (points.is_cuda()) { 79 | furthest_point_sampling_kernel_wrapper( 80 | points.size(0), points.size(1), nsamples, points.data_ptr(), 81 | tmp.data_ptr(), output.data_ptr()); 82 | } else { 83 | AT_ASSERT(false, "CPU not supported"); 84 | } 85 | 86 | return output; 87 | } 88 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/_ext-src/src/sampling_gpu.cu: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | #include "cuda_utils.h" 5 | 6 | // input: points(b, c, n) idx(b, m) 7 | // output: out(b, c, m) 8 | __global__ void gather_points_kernel(int b, int c, int n, int m, 9 | const float *__restrict__ points, 10 | const int *__restrict__ idx, 11 | float *__restrict__ out) { 12 | for (int i = blockIdx.x; i < b; i += gridDim.x) { 13 | for (int l = blockIdx.y; l < c; l += gridDim.y) { 14 | for (int j = threadIdx.x; j < m; j += blockDim.x) { 15 | int a = idx[i * m + j]; 16 | out[(i * c + l) * m + j] = points[(i * c + l) * n + a]; 17 | } 18 | } 19 | } 20 | } 21 | 22 | void gather_points_kernel_wrapper(int b, int c, int n, int npoints, 23 | const float *points, const int *idx, 24 | float *out) { 25 | gather_points_kernel<<>>(b, c, n, npoints, 27 | points, idx, out); 28 | 29 | CUDA_CHECK_ERRORS(); 30 | } 31 | 32 | // input: grad_out(b, c, m) idx(b, m) 33 | // output: grad_points(b, c, n) 34 | __global__ void gather_points_grad_kernel(int b, int c, int n, int m, 35 | const float *__restrict__ grad_out, 36 | const int *__restrict__ idx, 37 | float *__restrict__ grad_points) { 38 | for (int i = blockIdx.x; i < b; i += gridDim.x) { 39 | for (int l = blockIdx.y; l < c; l += gridDim.y) { 40 | for (int j = threadIdx.x; j < m; j += blockDim.x) { 41 | int a = idx[i * m + j]; 42 | atomicAdd(grad_points + (i * c + l) * n + a, 43 | grad_out[(i * c + l) * m + j]); 44 | } 45 | } 46 | } 47 | } 48 | 49 | void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints, 50 | const float *grad_out, const int *idx, 51 | float *grad_points) { 52 | gather_points_grad_kernel<<>>( 54 | b, c, n, npoints, grad_out, idx, grad_points); 55 | 56 | CUDA_CHECK_ERRORS(); 57 | } 58 | 59 | __device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, 60 | int idx1, int idx2) { 61 | const float v1 = dists[idx1], v2 = dists[idx2]; 62 | const int i1 = dists_i[idx1], i2 = dists_i[idx2]; 63 | dists[idx1] = max(v1, v2); 64 | dists_i[idx1] = v2 > v1 ? 
i2 : i1; 65 | } 66 | 67 | // Input dataset: (b, n, 3), tmp: (b, n) 68 | // Ouput idxs (b, m) 69 | template 70 | __global__ void furthest_point_sampling_kernel( 71 | int b, int n, int m, const float *__restrict__ dataset, 72 | float *__restrict__ temp, int *__restrict__ idxs) { 73 | if (m <= 0) return; 74 | __shared__ float dists[block_size]; 75 | __shared__ int dists_i[block_size]; 76 | 77 | int batch_index = blockIdx.x; 78 | dataset += batch_index * n * 3; 79 | temp += batch_index * n; 80 | idxs += batch_index * m; 81 | 82 | int tid = threadIdx.x; 83 | const int stride = block_size; 84 | 85 | int old = 0; 86 | if (threadIdx.x == 0) idxs[0] = old; 87 | 88 | __syncthreads(); 89 | for (int j = 1; j < m; j++) { 90 | int besti = 0; 91 | float best = -1; 92 | float x1 = dataset[old * 3 + 0]; 93 | float y1 = dataset[old * 3 + 1]; 94 | float z1 = dataset[old * 3 + 2]; 95 | for (int k = tid; k < n; k += stride) { 96 | float x2, y2, z2; 97 | x2 = dataset[k * 3 + 0]; 98 | y2 = dataset[k * 3 + 1]; 99 | z2 = dataset[k * 3 + 2]; 100 | float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); 101 | if (mag <= 1e-3) continue; 102 | 103 | float d = 104 | (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); 105 | 106 | float d2 = min(d, temp[k]); 107 | temp[k] = d2; 108 | besti = d2 > best ? k : besti; 109 | best = d2 > best ? d2 : best; 110 | } 111 | dists[tid] = best; 112 | dists_i[tid] = besti; 113 | __syncthreads(); 114 | 115 | if (block_size >= 512) { 116 | if (tid < 256) { 117 | __update(dists, dists_i, tid, tid + 256); 118 | } 119 | __syncthreads(); 120 | } 121 | if (block_size >= 256) { 122 | if (tid < 128) { 123 | __update(dists, dists_i, tid, tid + 128); 124 | } 125 | __syncthreads(); 126 | } 127 | if (block_size >= 128) { 128 | if (tid < 64) { 129 | __update(dists, dists_i, tid, tid + 64); 130 | } 131 | __syncthreads(); 132 | } 133 | if (block_size >= 64) { 134 | if (tid < 32) { 135 | __update(dists, dists_i, tid, tid + 32); 136 | } 137 | __syncthreads(); 138 | } 139 | if (block_size >= 32) { 140 | if (tid < 16) { 141 | __update(dists, dists_i, tid, tid + 16); 142 | } 143 | __syncthreads(); 144 | } 145 | if (block_size >= 16) { 146 | if (tid < 8) { 147 | __update(dists, dists_i, tid, tid + 8); 148 | } 149 | __syncthreads(); 150 | } 151 | if (block_size >= 8) { 152 | if (tid < 4) { 153 | __update(dists, dists_i, tid, tid + 4); 154 | } 155 | __syncthreads(); 156 | } 157 | if (block_size >= 4) { 158 | if (tid < 2) { 159 | __update(dists, dists_i, tid, tid + 2); 160 | } 161 | __syncthreads(); 162 | } 163 | if (block_size >= 2) { 164 | if (tid < 1) { 165 | __update(dists, dists_i, tid, tid + 1); 166 | } 167 | __syncthreads(); 168 | } 169 | 170 | old = dists_i[0]; 171 | if (tid == 0) idxs[j] = old; 172 | } 173 | } 174 | 175 | void furthest_point_sampling_kernel_wrapper(int b, int n, int m, 176 | const float *dataset, float *temp, 177 | int *idxs) { 178 | unsigned int n_threads = opt_n_threads(n); 179 | 180 | cudaStream_t stream = at::cuda::getCurrentCUDAStream(); 181 | 182 | switch (n_threads) { 183 | case 512: 184 | furthest_point_sampling_kernel<512> 185 | <<>>(b, n, m, dataset, temp, idxs); 186 | break; 187 | case 256: 188 | furthest_point_sampling_kernel<256> 189 | <<>>(b, n, m, dataset, temp, idxs); 190 | break; 191 | case 128: 192 | furthest_point_sampling_kernel<128> 193 | <<>>(b, n, m, dataset, temp, idxs); 194 | break; 195 | case 64: 196 | furthest_point_sampling_kernel<64> 197 | <<>>(b, n, m, dataset, temp, idxs); 198 | break; 199 | case 32: 200 | furthest_point_sampling_kernel<32> 
201 | <<>>(b, n, m, dataset, temp, idxs); 202 | break; 203 | case 16: 204 | furthest_point_sampling_kernel<16> 205 | <<>>(b, n, m, dataset, temp, idxs); 206 | break; 207 | case 8: 208 | furthest_point_sampling_kernel<8> 209 | <<>>(b, n, m, dataset, temp, idxs); 210 | break; 211 | case 4: 212 | furthest_point_sampling_kernel<4> 213 | <<>>(b, n, m, dataset, temp, idxs); 214 | break; 215 | case 2: 216 | furthest_point_sampling_kernel<2> 217 | <<>>(b, n, m, dataset, temp, idxs); 218 | break; 219 | case 1: 220 | furthest_point_sampling_kernel<1> 221 | <<>>(b, n, m, dataset, temp, idxs); 222 | break; 223 | default: 224 | furthest_point_sampling_kernel<512> 225 | <<>>(b, n, m, dataset, temp, idxs); 226 | } 227 | 228 | CUDA_CHECK_ERRORS(); 229 | } 230 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/_version.py: -------------------------------------------------------------------------------- 1 | __version__ = "3.0.0" 2 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/pointnet2_modules.py: -------------------------------------------------------------------------------- 1 | from typing import List, Optional, Tuple 2 | 3 | import torch 4 | import torch.nn as nn 5 | import torch.nn.functional as F 6 | from pointnet2_ops import pointnet2_utils 7 | 8 | 9 | def build_shared_mlp(mlp_spec: List[int], bn: bool = True): 10 | layers = [] 11 | for i in range(1, len(mlp_spec)): 12 | layers.append( 13 | nn.Conv2d(mlp_spec[i - 1], mlp_spec[i], kernel_size=1, bias=not bn) 14 | ) 15 | if bn: 16 | layers.append(nn.BatchNorm2d(mlp_spec[i])) 17 | layers.append(nn.ReLU(True)) 18 | 19 | return nn.Sequential(*layers) 20 | 21 | 22 | class _PointnetSAModuleBase(nn.Module): 23 | def __init__(self): 24 | super(_PointnetSAModuleBase, self).__init__() 25 | self.npoint = None 26 | self.groupers = None 27 | self.mlps = None 28 | 29 | def forward( 30 | self, xyz: torch.Tensor, features: Optional[torch.Tensor] 31 | ) -> Tuple[torch.Tensor, torch.Tensor]: 32 | r""" 33 | Parameters 34 | ---------- 35 | xyz : torch.Tensor 36 | (B, N, 3) tensor of the xyz coordinates of the features 37 | features : torch.Tensor 38 | (B, C, N) tensor of the descriptors of the the features 39 | 40 | Returns 41 | ------- 42 | new_xyz : torch.Tensor 43 | (B, npoint, 3) tensor of the new features' xyz 44 | new_features : torch.Tensor 45 | (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors 46 | """ 47 | 48 | new_features_list = [] 49 | 50 | xyz_flipped = xyz.transpose(1, 2).contiguous() 51 | new_xyz = ( 52 | pointnet2_utils.gather_operation( 53 | xyz_flipped, pointnet2_utils.furthest_point_sample(xyz, self.npoint) 54 | ) 55 | .transpose(1, 2) 56 | .contiguous() 57 | if self.npoint is not None 58 | else None 59 | ) 60 | 61 | for i in range(len(self.groupers)): 62 | new_features = self.groupers[i]( 63 | xyz, new_xyz, features 64 | ) # (B, C, npoint, nsample) 65 | 66 | new_features = self.mlps[i](new_features) # (B, mlp[-1], npoint, nsample) 67 | new_features = F.max_pool2d( 68 | new_features, kernel_size=[1, new_features.size(3)] 69 | ) # (B, mlp[-1], npoint, 1) 70 | new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint) 71 | 72 | new_features_list.append(new_features) 73 | 74 | return new_xyz, torch.cat(new_features_list, dim=1) 75 | 76 | 77 | class PointnetSAModuleMSG(_PointnetSAModuleBase): 78 | r"""Pointnet set abstrction layer with multiscale grouping 79 | 80 | Parameters 81 | ---------- 82 | npoint : int 83 | 
Number of features 84 | radii : list of float32 85 | list of radii to group with 86 | nsamples : list of int32 87 | Number of samples in each ball query 88 | mlps : list of list of int32 89 | Spec of the pointnet before the global max_pool for each scale 90 | bn : bool 91 | Use batchnorm 92 | """ 93 | 94 | def __init__(self, npoint, radii, nsamples, mlps, bn=True, use_xyz=True): 95 | # type: (PointnetSAModuleMSG, int, List[float], List[int], List[List[int]], bool, bool) -> None 96 | super(PointnetSAModuleMSG, self).__init__() 97 | 98 | assert len(radii) == len(nsamples) == len(mlps) 99 | 100 | self.npoint = npoint 101 | self.groupers = nn.ModuleList() 102 | self.mlps = nn.ModuleList() 103 | for i in range(len(radii)): 104 | radius = radii[i] 105 | nsample = nsamples[i] 106 | self.groupers.append( 107 | pointnet2_utils.QueryAndGroup(radius, nsample, use_xyz=use_xyz) 108 | if npoint is not None 109 | else pointnet2_utils.GroupAll(use_xyz) 110 | ) 111 | mlp_spec = mlps[i] 112 | if use_xyz: 113 | mlp_spec[0] += 3 114 | 115 | self.mlps.append(build_shared_mlp(mlp_spec, bn)) 116 | 117 | 118 | class PointnetSAModule(PointnetSAModuleMSG): 119 | r"""Pointnet set abstrction layer 120 | 121 | Parameters 122 | ---------- 123 | npoint : int 124 | Number of features 125 | radius : float 126 | Radius of ball 127 | nsample : int 128 | Number of samples in the ball query 129 | mlp : list 130 | Spec of the pointnet before the global max_pool 131 | bn : bool 132 | Use batchnorm 133 | """ 134 | 135 | def __init__( 136 | self, mlp, npoint=None, radius=None, nsample=None, bn=True, use_xyz=True 137 | ): 138 | # type: (PointnetSAModule, List[int], int, float, int, bool, bool) -> None 139 | super(PointnetSAModule, self).__init__( 140 | mlps=[mlp], 141 | npoint=npoint, 142 | radii=[radius], 143 | nsamples=[nsample], 144 | bn=bn, 145 | use_xyz=use_xyz, 146 | ) 147 | 148 | 149 | class PointnetFPModule(nn.Module): 150 | r"""Propigates the features of one set to another 151 | 152 | Parameters 153 | ---------- 154 | mlp : list 155 | Pointnet module parameters 156 | bn : bool 157 | Use batchnorm 158 | """ 159 | 160 | def __init__(self, mlp, bn=True): 161 | # type: (PointnetFPModule, List[int], bool) -> None 162 | super(PointnetFPModule, self).__init__() 163 | self.mlp = build_shared_mlp(mlp, bn=bn) 164 | 165 | def forward(self, unknown, known, unknow_feats, known_feats): 166 | # type: (PointnetFPModule, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor 167 | r""" 168 | Parameters 169 | ---------- 170 | unknown : torch.Tensor 171 | (B, n, 3) tensor of the xyz positions of the unknown features 172 | known : torch.Tensor 173 | (B, m, 3) tensor of the xyz positions of the known features 174 | unknow_feats : torch.Tensor 175 | (B, C1, n) tensor of the features to be propigated to 176 | known_feats : torch.Tensor 177 | (B, C2, m) tensor of features to be propigated 178 | 179 | Returns 180 | ------- 181 | new_features : torch.Tensor 182 | (B, mlp[-1], n) tensor of the features of the unknown features 183 | """ 184 | 185 | if known is not None: 186 | dist, idx = pointnet2_utils.three_nn(unknown, known) 187 | dist_recip = 1.0 / (dist + 1e-8) 188 | norm = torch.sum(dist_recip, dim=2, keepdim=True) 189 | weight = dist_recip / norm 190 | 191 | interpolated_feats = pointnet2_utils.three_interpolate( 192 | known_feats, idx, weight 193 | ) 194 | else: 195 | interpolated_feats = known_feats.expand( 196 | *(known_feats.size()[0:2] + [unknown.size(1)]) 197 | ) 198 | 199 | if unknow_feats is not None: 200 | 
new_features = torch.cat( 201 | [interpolated_feats, unknow_feats], dim=1 202 | ) # (B, C2 + C1, n) 203 | else: 204 | new_features = interpolated_feats 205 | 206 | new_features = new_features.unsqueeze(-1) 207 | new_features = self.mlp(new_features) 208 | 209 | return new_features.squeeze(-1) 210 | -------------------------------------------------------------------------------- /point_ops/pointnet2_ops/pointnet2_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | import warnings 4 | from torch.autograd import Function 5 | from typing import * 6 | 7 | try: 8 | import pointnet2_ops._ext as _ext 9 | except ImportError: 10 | from torch.utils.cpp_extension import load 11 | import glob 12 | import os.path as osp 13 | import os 14 | 15 | warnings.warn("Unable to load pointnet2_ops cpp extension. JIT Compiling.") 16 | 17 | _ext_src_root = osp.join(osp.dirname(__file__), "_ext-src") 18 | _ext_sources = glob.glob(osp.join(_ext_src_root, "src", "*.cpp")) + glob.glob( 19 | osp.join(_ext_src_root, "src", "*.cu") 20 | ) 21 | _ext_headers = glob.glob(osp.join(_ext_src_root, "include", "*")) 22 | 23 | os.environ["TORCH_CUDA_ARCH_LIST"] = "3.7+PTX;5.0;6.0;6.1;6.2;7.0;7.5;8.0" 24 | _ext = load( 25 | "_ext", 26 | sources=_ext_sources, 27 | extra_include_paths=[osp.join(_ext_src_root, "include")], 28 | extra_cflags=["-O3"], 29 | extra_cuda_cflags=["-O3", "-Xfatbin", "-compress-all"], 30 | with_cuda=True, 31 | ) 32 | 33 | 34 | class FurthestPointSampling(Function): 35 | @staticmethod 36 | def forward(ctx, xyz, npoint): 37 | # type: (Any, torch.Tensor, int) -> torch.Tensor 38 | r""" 39 | Uses iterative furthest point sampling to select a set of npoint features that have the largest 40 | minimum distance 41 | 42 | Parameters 43 | ---------- 44 | xyz : torch.Tensor 45 | (B, N, 3) tensor where N > npoint 46 | npoint : int32 47 | number of features in the sampled set 48 | 49 | Returns 50 | ------- 51 | torch.Tensor 52 | (B, npoint) tensor containing the set 53 | """ 54 | out = _ext.furthest_point_sampling(xyz, npoint) 55 | 56 | ctx.mark_non_differentiable(out) 57 | 58 | return out 59 | 60 | @staticmethod 61 | def backward(ctx, grad_out): 62 | return () 63 | 64 | 65 | furthest_point_sample = FurthestPointSampling.apply 66 | 67 | 68 | class GatherOperation(Function): 69 | @staticmethod 70 | def forward(ctx, features, idx): 71 | # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor 72 | r""" 73 | 74 | Parameters 75 | ---------- 76 | features : torch.Tensor 77 | (B, C, N) tensor 78 | 79 | idx : torch.Tensor 80 | (B, npoint) tensor of the features to gather 81 | 82 | Returns 83 | ------- 84 | torch.Tensor 85 | (B, C, npoint) tensor 86 | """ 87 | 88 | ctx.save_for_backward(idx, features) 89 | 90 | return _ext.gather_points(features, idx) 91 | 92 | @staticmethod 93 | def backward(ctx, grad_out): 94 | idx, features = ctx.saved_tensors 95 | N = features.size(2) 96 | 97 | grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N) 98 | return grad_features, None 99 | 100 | 101 | gather_operation = GatherOperation.apply 102 | 103 | 104 | class ThreeNN(Function): 105 | @staticmethod 106 | def forward(ctx, unknown, known): 107 | # type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] 108 | r""" 109 | Find the three nearest neighbors of unknown in known 110 | Parameters 111 | ---------- 112 | unknown : torch.Tensor 113 | (B, n, 3) tensor of known features 114 | known : torch.Tensor 115 | (B, m, 3) tensor of 
unknown features 116 | 117 | Returns 118 | ------- 119 | dist : torch.Tensor 120 | (B, n, 3) l2 distance to the three nearest neighbors 121 | idx : torch.Tensor 122 | (B, n, 3) index of 3 nearest neighbors 123 | """ 124 | dist2, idx = _ext.three_nn(unknown, known) 125 | dist = torch.sqrt(dist2) 126 | 127 | ctx.mark_non_differentiable(dist, idx) 128 | 129 | return dist, idx 130 | 131 | @staticmethod 132 | def backward(ctx, grad_dist, grad_idx): 133 | return () 134 | 135 | 136 | three_nn = ThreeNN.apply 137 | 138 | 139 | class ThreeInterpolate(Function): 140 | @staticmethod 141 | def forward(ctx, features, idx, weight): 142 | # type(Any, torch.Tensor, torch.Tensor, torch.Tensor) -> Torch.Tensor 143 | r""" 144 | Performs weight linear interpolation on 3 features 145 | Parameters 146 | ---------- 147 | features : torch.Tensor 148 | (B, c, m) Features descriptors to be interpolated from 149 | idx : torch.Tensor 150 | (B, n, 3) three nearest neighbors of the target features in features 151 | weight : torch.Tensor 152 | (B, n, 3) weights 153 | 154 | Returns 155 | ------- 156 | torch.Tensor 157 | (B, c, n) tensor of the interpolated features 158 | """ 159 | ctx.save_for_backward(idx, weight, features) 160 | 161 | return _ext.three_interpolate(features, idx, weight) 162 | 163 | @staticmethod 164 | def backward(ctx, grad_out): 165 | # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor] 166 | r""" 167 | Parameters 168 | ---------- 169 | grad_out : torch.Tensor 170 | (B, c, n) tensor with gradients of ouputs 171 | 172 | Returns 173 | ------- 174 | grad_features : torch.Tensor 175 | (B, c, m) tensor with gradients of features 176 | 177 | None 178 | 179 | None 180 | """ 181 | idx, weight, features = ctx.saved_tensors 182 | m = features.size(2) 183 | 184 | grad_features = _ext.three_interpolate_grad( 185 | grad_out.contiguous(), idx, weight, m 186 | ) 187 | 188 | return grad_features, torch.zeros_like(idx), torch.zeros_like(weight) 189 | 190 | 191 | three_interpolate = ThreeInterpolate.apply 192 | 193 | 194 | class GroupingOperation(Function): 195 | @staticmethod 196 | def forward(ctx, features, idx): 197 | # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor 198 | r""" 199 | 200 | Parameters 201 | ---------- 202 | features : torch.Tensor 203 | (B, C, N) tensor of features to group 204 | idx : torch.Tensor 205 | (B, npoint, nsample) tensor containing the indicies of features to group with 206 | 207 | Returns 208 | ------- 209 | torch.Tensor 210 | (B, C, npoint, nsample) tensor 211 | """ 212 | ctx.save_for_backward(idx, features) 213 | 214 | return _ext.group_points(features, idx) 215 | 216 | @staticmethod 217 | def backward(ctx, grad_out): 218 | # type: (Any, torch.tensor) -> Tuple[torch.Tensor, torch.Tensor] 219 | r""" 220 | 221 | Parameters 222 | ---------- 223 | grad_out : torch.Tensor 224 | (B, C, npoint, nsample) tensor of the gradients of the output from forward 225 | 226 | Returns 227 | ------- 228 | torch.Tensor 229 | (B, C, N) gradient of the features 230 | None 231 | """ 232 | idx, features = ctx.saved_tensors 233 | N = features.size(2) 234 | 235 | grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N) 236 | 237 | return grad_features, torch.zeros_like(idx) 238 | 239 | 240 | grouping_operation = GroupingOperation.apply 241 | 242 | 243 | class BallQuery(Function): 244 | @staticmethod 245 | def forward(ctx, radius, nsample, xyz, new_xyz): 246 | # type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor 247 | r""" 248 | 249 | Parameters 250 | 
---------- 251 | radius : float 252 | radius of the balls 253 | nsample : int 254 | maximum number of features in the balls 255 | xyz : torch.Tensor 256 | (B, N, 3) xyz coordinates of the features 257 | new_xyz : torch.Tensor 258 | (B, npoint, 3) centers of the ball query 259 | 260 | Returns 261 | ------- 262 | torch.Tensor 263 | (B, npoint, nsample) tensor with the indicies of the features that form the query balls 264 | """ 265 | output = _ext.ball_query(new_xyz, xyz, radius, nsample) 266 | 267 | ctx.mark_non_differentiable(output) 268 | 269 | return output 270 | 271 | @staticmethod 272 | def backward(ctx, grad_out): 273 | return () 274 | 275 | 276 | ball_query = BallQuery.apply 277 | 278 | 279 | class QueryAndGroup(nn.Module): 280 | r""" 281 | Groups with a ball query of radius 282 | 283 | Parameters 284 | --------- 285 | radius : float32 286 | Radius of ball 287 | nsample : int32 288 | Maximum number of features to gather in the ball 289 | """ 290 | 291 | def __init__(self, radius, nsample, use_xyz=True): 292 | # type: (QueryAndGroup, float, int, bool) -> None 293 | super(QueryAndGroup, self).__init__() 294 | self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz 295 | 296 | def forward(self, xyz, new_xyz, features=None): 297 | # type: (QueryAndGroup, torch.Tensor. torch.Tensor, torch.Tensor) -> Tuple[Torch.Tensor] 298 | r""" 299 | Parameters 300 | ---------- 301 | xyz : torch.Tensor 302 | xyz coordinates of the features (B, N, 3) 303 | new_xyz : torch.Tensor 304 | centriods (B, npoint, 3) 305 | features : torch.Tensor 306 | Descriptors of the features (B, C, N) 307 | 308 | Returns 309 | ------- 310 | new_features : torch.Tensor 311 | (B, 3 + C, npoint, nsample) tensor 312 | """ 313 | 314 | idx = ball_query(self.radius, self.nsample, xyz, new_xyz) 315 | xyz_trans = xyz.transpose(1, 2).contiguous() 316 | grouped_xyz = grouping_operation(xyz_trans, idx) # (B, 3, npoint, nsample) 317 | grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1) 318 | 319 | if features is not None: 320 | grouped_features = grouping_operation(features, idx) 321 | if self.use_xyz: 322 | new_features = torch.cat( 323 | [grouped_xyz, grouped_features], dim=1 324 | ) # (B, C + 3, npoint, nsample) 325 | else: 326 | new_features = grouped_features 327 | else: 328 | assert ( 329 | self.use_xyz 330 | ), "Cannot have not features and not use xyz as a feature!" 
331 | new_features = grouped_xyz 332 | 333 | return new_features 334 | 335 | 336 | class GroupAll(nn.Module): 337 | r""" 338 | Groups all features 339 | 340 | Parameters 341 | --------- 342 | """ 343 | 344 | def __init__(self, use_xyz=True): 345 | # type: (GroupAll, bool) -> None 346 | super(GroupAll, self).__init__() 347 | self.use_xyz = use_xyz 348 | 349 | def forward(self, xyz, new_xyz, features=None): 350 | # type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor] 351 | r""" 352 | Parameters 353 | ---------- 354 | xyz : torch.Tensor 355 | xyz coordinates of the features (B, N, 3) 356 | new_xyz : torch.Tensor 357 | Ignored 358 | features : torch.Tensor 359 | Descriptors of the features (B, C, N) 360 | 361 | Returns 362 | ------- 363 | new_features : torch.Tensor 364 | (B, C + 3, 1, N) tensor 365 | """ 366 | 367 | grouped_xyz = xyz.transpose(1, 2).unsqueeze(2) 368 | if features is not None: 369 | grouped_features = features.unsqueeze(2) 370 | if self.use_xyz: 371 | new_features = torch.cat( 372 | [grouped_xyz, grouped_features], dim=1 373 | ) # (B, 3 + C, 1, N) 374 | else: 375 | new_features = grouped_features 376 | else: 377 | new_features = grouped_xyz 378 | 379 | return new_features 380 | -------------------------------------------------------------------------------- /point_ops/setup.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import os.path as osp 4 | 5 | from setuptools import find_packages, setup 6 | from torch.utils.cpp_extension import BuildExtension, CUDAExtension 7 | 8 | this_dir = osp.dirname(osp.abspath(__file__)) 9 | _ext_src_root = osp.join("pointnet2_ops", "_ext-src") 10 | _ext_sources = glob.glob(osp.join(_ext_src_root, "src", "*.cpp")) + glob.glob( 11 | osp.join(_ext_src_root, "src", "*.cu") 12 | ) 13 | _ext_headers = glob.glob(osp.join(_ext_src_root, "include", "*")) 14 | 15 | requirements = ["torch>=1.4"] 16 | 17 | exec(open(osp.join("pointnet2_ops", "_version.py")).read()) 18 | 19 | os.environ["TORCH_CUDA_ARCH_LIST"] = "3.7+PTX;5.0;6.0;6.1;6.2;7.0;7.5" 20 | setup( 21 | name="pointnet2_ops", 22 | version=__version__, 23 | author="Erik Wijmans", 24 | packages=find_packages(), 25 | install_requires=requirements, 26 | ext_modules=[ 27 | CUDAExtension( 28 | name="pointnet2_ops._ext", 29 | sources=_ext_sources, 30 | extra_compile_args={ 31 | "cxx": ["-O3"], 32 | "nvcc": ["-O3", "-Xfatbin", "-compress-all"], 33 | }, 34 | include_dirs=[osp.join(this_dir, _ext_src_root, "include")], 35 | ) 36 | ], 37 | cmdclass={"build_ext": BuildExtension}, 38 | include_package_data=True, 39 | ) -------------------------------------------------------------------------------- /post_subnets/evals.py: -------------------------------------------------------------------------------- 1 | from post_ops import * 2 | from glob import glob 3 | 4 | # trsf_path = "/data/processed/2048/03_trsf_npz" 5 | # rt_dir = "/outputs/experiments/testing/2023-11-08_21-05" #2023-04-20_01-33 2023-03-29_23-26 6 | # save_dir = f"{rt_dir}/complete_txt" 7 | # save_dir = "/data/processed/2048/net_outputs/pcc_out/p2p_train" 8 | 9 | # pcc_blist = glob(f'{rt_dir}/*.npz') 10 | 11 | # # retrieve individual completed point files from the batched .npz files 12 | # if not os.path.exists(f'{save_dir}/als') or not os.listdir(f'{save_dir}/als'): 13 | # get_complete_files(pcc_blist, rt_dir, save_dir) 14 | 15 | # # since point/meshes were normalized to [-1, +1] before learning, denormalize to original geo-coords 16 | # 
denormalize(save_dir, trsf_path) 17 | 18 | data_path = "/data/processed/2048/net_outputs/pcc_out/fine_cmplx" 19 | save_path = "/data/processed/2048/net_outputs/p2m_rec_obj" 20 | rlist = glob(os.path.join(save_path, f'config-r2/rec_*.obj')) 21 | glist = glob(f'{data_path}/gt/*.txt') 22 | #TODO: ensure the sequences of the two lists are in sync. 23 | 24 | # since the pcc test computed metrics over the whole test set, now compute them from individual instances 25 | get_dist_losses(rlist, glist) 26 | 27 | # # compute mesh scene level errors 28 | # get_per_scene_errors(save_path) 29 | 30 | print('done !') -------------------------------------------------------------------------------- /post_subnets/post_ops.py: -------------------------------------------------------------------------------- 1 | '''classes and functions for pcc and p2m net evaluations''' 2 | import os, sys, glob 3 | 4 | sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) 5 | import numpy as np 6 | import open3d as o3d 7 | from scipy.spatial import cKDTree 8 | import trimesh 9 | from models.layers.mesh import Mesh 10 | from base_utils import mesh_utils 11 | 12 | def get_list_per_batch(bs, bseq): 13 | for i in range(0, len(bseq), bs): 14 | yield bseq[i : i+bs] 15 | 16 | def get_complete_files(blist, rt_dir, save_dir): 17 | try: 18 | with open(f"{rt_dir}/ts_fileseq.txt", 'r') as f: 19 | bfiles_seq = f.readlines() 20 | except FileNotFoundError: 21 | print(f'File {rt_dir}/ts_fileseq.txt not found!', file=sys.stderr) 22 | return 23 | 24 | if not os.path.exists(f"{save_dir}/als"): 25 | os.makedirs(f"{save_dir}/gt") 26 | os.makedirs(f"{save_dir}/fine") 27 | os.makedirs(f"{save_dir}/als") 28 | 29 | list_per_batch = list(get_list_per_batch(bs=8, bseq=bfiles_seq)) 30 | 31 | for bname in blist: 32 | pcc_out = np.load(bname) # 8 files per batch 33 | fine = pcc_out['final_pnts'] 34 | gt = pcc_out['gt_pnts'] 35 | als = pcc_out['als_pnts'] 36 | 37 | del pcc_out 38 | b_num = int(os.path.splitext(os.path.basename(bname))[0].split('_')[-1]) 39 | 40 | for i in range(len(fine)): # which is equal to the batch size of 8 41 | temp_c = np.squeeze(gt[i, :, :]) 42 | np.savetxt(f"{save_dir}/gt/" + list_per_batch[b_num][i].strip()[:-4] + '.txt', temp_c) 43 | temp_f = np.squeeze(fine[i, :, :]) 44 | np.savetxt(f"{save_dir}/fine/" + list_per_batch[b_num][i].strip()[:-4] + '.txt', temp_f) 45 | temp_als = np.squeeze(als[i, :, :]) 46 | np.savetxt(f"{save_dir}/als/" + list_per_batch[b_num][i].strip()[:-4] + '.txt', temp_als) 47 | 48 | 49 | def normalize_mesh(file_in, file_out=None): 50 | 51 | mesh = trimesh.load(file_in) 52 | 53 | # Calculate centroid 54 | centroid = mesh.vertices.mean(0) 55 | 56 | # translate to origin 57 | recenteredData = mesh.vertices - centroid 58 | 59 | # scale to unit sphere 60 | scale = np.abs(recenteredData).max() # multiply by 2 if you want range [-0.5, 0.5] 61 | normalized_data = np.divide(recenteredData, scale) 62 | outmesh = trimesh.Trimesh(vertices=normalized_data, faces=mesh.faces) 63 | outmesh.export(file_out) 64 | 65 | def normalize_txt(data): 66 | 67 | # Calculate centroid 68 | centroid = data[:, :3].mean(0) 69 | 70 | # translate to origin 71 | recenteredData = data[:, :3] - centroid 72 | 73 | # scale to unit sphere 74 | scale = np.abs(recenteredData).max() # multiply by 2 if you want range [-0.5, 0.5] 75 | normalized_data = np.divide(recenteredData, scale) 76 | 77 | return normalized_data 78 | 79 | 80 | def denormalize(data_dir, trsf_path): 81 | 82 | # als_list = os.listdir(f'{data_dir}/als') 83 | fine_list =
os.listdir(f'{data_dir}/fine') 84 | trsf_list = os.listdir(trsf_path) 85 | 86 | for file in fine_list: 87 | als_abs = f'{data_dir}/als/{file}' 88 | fine_abs = f'{data_dir}/fine/{file}' 89 | if file[:-4]+'.npz' in trsf_list: 90 | trsf_abs = trsf_path + '/' + file[:-4] + '.npz' 91 | 92 | als_pnts = np.loadtxt(als_abs) 93 | fine_pnts = np.loadtxt(fine_abs) 94 | trsf_data = np.load(trsf_abs) 95 | 96 | # re-normalize fine_pnts, because some completed points could be out of the original [-1,+1] bounds 97 | re_norm_xyz = normalize_txt(fine_pnts) 98 | 99 | # un-normalize the data 100 | rescaled_als = np.multiply(als_pnts, trsf_data['scale']) 101 | rescaled_finexyz = np.multiply(re_norm_xyz, trsf_data['scale']) 102 | 103 | translated_als = rescaled_als + trsf_data['centroid'] 104 | translated_fine = rescaled_finexyz + trsf_data['centroid'] 105 | fine_pnts[:,:3] = translated_fine 106 | 107 | #TODO: check correctness of denormalized data by comparing to the original 108 | # print(np.allclose(de_normdata,orig_data)) 109 | 110 | # create dir and save files 111 | denorm_dir = os.path.join(os.path.split(data_dir)[0], 'denorm_txt') 112 | if not os.path.exists(denorm_dir): 113 | os.makedirs(f"{denorm_dir}/fine") 114 | os.makedirs(f"{denorm_dir}/als") 115 | 116 | np.savetxt(f"{denorm_dir}/fine/{file}", fine_pnts, fmt='%.4f %.4f %.4f %.6f %.6f %.6f') 117 | np.savetxt(f"{denorm_dir}/als/{file}", translated_als, fmt='%.4f %.4f %.4f') 118 | 119 | def get_pcc_errors(): 120 | pass 121 | 122 | def get_dist_losses(rec_list, gt_list): 123 | xyz_errs, nml_errs = [], [] 124 | rec_list_dir = os.path.dirname(rec_list[0]) 125 | for gt_file in gt_list: 126 | 127 | rec_file = 'rec_'+ os.path.basename(gt_file)[:-4] +'.obj' 128 | rec_file = os.path.join(rec_list_dir, rec_file) 129 | if rec_file not in rec_list: 130 | print(f'{rec_file} not found, skipping...') 131 | continue 132 | 133 | gt = np.loadtxt(gt_file) 134 | # Pass gt's xyz to Open3D.o3d.geometry.PointCloud 135 | # gt_pcd = o3d.geometry.PointCloud() 136 | # gt_pcd.points = o3d.utility.Vector3dVector(gt[:,:3]) 137 | # o3d.visualization.draw_geometries([gt_pcd]) 138 | 139 | # load mesh and sample from it 140 | mesh = Mesh(rec_file, nml=False) 141 | xyz, normals = mesh_utils.sample_surface(mesh.faces, mesh.vs.unsqueeze(0), 16348) 142 | xyz = xyz.squeeze(0) 143 | normals = normals.squeeze(0) 144 | # convert to numpy array 145 | xyz = xyz.cpu().numpy() 146 | normals = normals.cpu().numpy() 147 | 148 | # re-normalize, because some completed points could be out of the original [-1,+1] bounds 149 | # xyz = normalize_txt(xyz) 150 | 151 | tree = cKDTree(gt[:,:3]) 152 | dist, idx = tree.query(np.asarray(xyz), k=1) 153 | delta_sq = np.square(np.asarray(xyz) - gt[idx,:3]) 154 | euc_dist = np.sum(delta_sq, axis=1) 155 | delta_sq = np.square(np.asarray(normals) - gt[idx,3:]) 156 | nml_dist = np.sum(delta_sq, axis=1) 157 | euc_dist = np.sqrt(euc_dist).reshape(-1,1) 158 | nml_dist = np.sqrt(nml_dist).reshape(-1,1) 159 | 160 | xyz_errs.append(np.mean(euc_dist)) 161 | nml_errs.append(np.mean(nml_dist)) 162 | 163 | # dist_err_pcd = o3d.geometry.PointCloud() 164 | # dist_err_pcd.points = o3d.utility.Vector3dVector(gt) 165 | # o3d.visualization.draw_geometries([dist_err_pcd]) 166 | 167 | dist_errs = np.column_stack([xyz,euc_dist,nml_dist]) #derr: dist_err 168 | fname = rec_file[:-4]+'.txt' 169 | np.savetxt(fname, dist_errs, fmt='%.6f %.6f %.6f %.6f %.6f') 170 | 171 | out = ['distance error: {}'.format(np.mean(xyz_errs)), 'normal error: {}'.format(np.mean(nml_errs))] 172 | fpath =
os.path.dirname(rec_list[0]) 173 | with open(f'{fpath}/error_summary.txt', 'w') as f: 174 | f.write('\n'.join(out)) 175 | 176 | def get_per_instance_errors(gt_path, pcc_name, mesh_path): 177 | gt_file = os.path.join(gt_path, f'{pcc_name}.txt') 178 | gt = np.loadtxt(gt_file) 179 | 180 | # load mesh and sample from it 181 | mesh = Mesh(mesh_path, nml=False) 182 | xyz, normals = mesh_utils.sample_surface(mesh.faces, mesh.vs.unsqueeze(0), 16348) 183 | xyz = xyz.squeeze(0) 184 | normals = normals.squeeze(0) 185 | # convert to numpy array 186 | xyz = xyz.cpu().numpy() 187 | normals = normals.cpu().numpy() 188 | 189 | tree = cKDTree(gt[:,:3]) 190 | dist, idx = tree.query(np.asarray(xyz), k=1) 191 | delta_sq = np.square(np.asarray(xyz) - gt[idx,:3]) 192 | euc_dist = np.sum(delta_sq, axis=1) 193 | delta_sq = np.square(np.asarray(normals) - gt[idx,3:]) 194 | nml_dist = np.sum(delta_sq, axis=1) 195 | euc_dist = np.sqrt(euc_dist).reshape(-1,1) 196 | nml_dist = np.sqrt(nml_dist).reshape(-1,1) 197 | 198 | print('distance error: ', np.mean(euc_dist)) 199 | print('normal error: ', np.mean(nml_dist)) 200 | 201 | def get_per_scene_errors(mesh_err_dir): 202 | mesh_err_files = glob.glob(f'{mesh_err_dir}/*.txt') 203 | mesh_err_files = [os.path.basename(f) for f in mesh_err_files] 204 | scenes = ['rec_Tartu1', 'rec_Tartu2', 'rec_Tartu3'] 205 | t1d, t1n, t2d, t2n, t3d, t3n = [], [], [], [], [], [] 206 | def read_mesh_err(mesh_err_file): 207 | mesh_err = np.loadtxt(mesh_err_file) 208 | xyz_err = (np.mean(mesh_err[:,3])) 209 | nml_err = (np.mean(mesh_err[:,4])) 210 | return xyz_err, nml_err 211 | 212 | for mesh_err_file in mesh_err_files: 213 | if mesh_err_file.startswith(scenes[0]): 214 | mesh_err_file = os.path.join(mesh_err_dir, mesh_err_file) 215 | d1, n1 = read_mesh_err(mesh_err_file) 216 | t1d.append(d1) 217 | t1n.append(n1) 218 | elif mesh_err_file.startswith(scenes[1]): 219 | mesh_err_file = os.path.join(mesh_err_dir, mesh_err_file) 220 | d2, n2 = read_mesh_err(mesh_err_file) 221 | t2d.append(d2) 222 | t2n.append(n2) 223 | elif mesh_err_file.startswith(scenes[2]): 224 | mesh_err_file = os.path.join(mesh_err_dir, mesh_err_file) 225 | d3, n3 = read_mesh_err(mesh_err_file) 226 | t3d.append(d3) 227 | t3n.append(n3) 228 | del mesh_err_files 229 | print('Tartu1 distance error: ', np.mean(t1d), 'normal error: ', np.mean(t1n)) 230 | print('Tartu2 distance error: ', np.mean(t2d), 'normal error: ', np.mean(t2n)) 231 | print('Tartu3 distance error: ', np.mean(t3d), 'normal error: ', np.mean(t3n)) 232 | 233 | 234 | def config3_per_instance_error(fname): 235 | cfg_dir = '/data/processed/2048/net_outputs/p2m_rec_obj/config-f3' 236 | data = np.loadtxt(f'{cfg_dir}/rec_{fname}.txt') 237 | perr = np.mean(data[:,3]) 238 | derr = np.mean(data[:,4]) 239 | return perr, derr 240 | 241 | 242 | print('Done!!') -------------------------------------------------------------------------------- /pp_conv.py: -------------------------------------------------------------------------------- 1 | '''Plane-Point refinement module 2 | using cosine similarity of point normals as weighting criterion 3 | thinking weighting is suppose to happen at the nbrhood level 4 | unlike pointconv that computed the mean of all points to all other points 5 | ''' 6 | 7 | import torch 8 | from pytorch3d.ops.knn import knn_gather, knn_points 9 | from torch import nn 10 | from torch.nn import functional as F 11 | 12 | 13 | def cosine_similarity(x, k): 14 | """ 15 | Parameters 16 | ---------- 17 | x: input cloud [B N 6] 18 | 19 | Returns 20 | ---------- 21 | cosine 
similarity of query points to their k-neighborhood points [B N K] 22 | NB: [-1 to 1] weight space shifted to [0 to 1] in CSBlk 23 | """ 24 | B, N, C = x.shape 25 | # get indices of cloud1 which has a minimum distance from cloud2 26 | knn = knn_points(x[:, :, :3], x[:, :, :3], K=k) # input should be BNC; dist, k_idx: BNK 27 | # dist = knn.dists 28 | 29 | grouped_x = knn_gather(x, knn.idx) 30 | 31 | dot = torch.matmul(grouped_x[:,:,:,3:], x[:,:,3:].view(B, N, 1, 3).permute(0,1,3,2)).squeeze() #BNK 32 | cos_sim = dot / (torch.linalg.norm(grouped_x[:,:,:,3:], dim=-1) * torch.linalg.norm(x[:,:,3:], dim=-1).unsqueeze(-1)) #BNK/(BNK * BN1) 33 | 34 | delta_xyz = grouped_x[:,:,:,:3] - x[:,:,:3].view(B, N, 1, 3) 35 | return cos_sim, delta_xyz 36 | 37 | 38 | class CSBlock(nn.Module): 39 | '''cosine similarity block''' 40 | def __init__(self, c_in, feat_dims): 41 | super(CSBlock, self).__init__() 42 | 43 | self.mlp_convs = nn.ModuleList() 44 | self.mlp_bns = nn.ModuleList() 45 | 46 | last_channel = c_in 47 | for c_out in feat_dims: # [32, 32, 64] 48 | self.mlp_convs.append(nn.Conv2d(last_channel, c_out, 1)) 49 | self.mlp_bns.append(nn.BatchNorm2d(c_out)) 50 | last_channel = c_out 51 | 52 | def forward(self, cos_sim): 53 | # cos_sim: should be [BCKN], so permute(0, 2, 1).unsqueeze(1) because cos_sim is [BNK] 54 | cos_sim = cos_sim.permute(0, 2, 1).unsqueeze(1) 55 | B, C, K, N = cos_sim.shape 56 | for i, conv in enumerate(self.mlp_convs): 57 | bn = self.mlp_bns[i] 58 | cos_sim = bn(conv(cos_sim)) 59 | if i == len(self.mlp_convs)-1: # this takes care of the negative cos_sim values (check) 60 | cos_sim = torch.sigmoid(cos_sim) 61 | else: 62 | cos_sim = F.gelu(cos_sim) 63 | 64 | return cos_sim 65 | 66 | 67 | class SharedMLPBlock(nn.Module): 68 | def __init__(self, c_in, feat_dims): 69 | super(SharedMLPBlock, self).__init__() 70 | 71 | self.mlp_convs = nn.ModuleList() 72 | self.mlp_bns = nn.ModuleList() 73 | 74 | last_channel = c_in 75 | for c_out in feat_dims: # [32, 32, 64] 76 | self.mlp_convs.append(nn.Conv2d(last_channel, c_out, 1)) 77 | self.mlp_bns.append(nn.BatchNorm2d(c_out)) 78 | last_channel = c_out 79 | 80 | def forward(self, grouped_xyz): 81 | # grouped_xyz: should be [BCKN] 82 | B, C, K, N = grouped_xyz.shape 83 | for i, conv in enumerate(self.mlp_convs): 84 | bn = self.mlp_bns[i] 85 | grouped_xyz = F.gelu(bn(conv(grouped_xyz))) 86 | 87 | return grouped_xyz 88 | 89 | 90 | class PPConv(nn.Module): 91 | '''Point-Plane refinement module''' 92 | def __init__(self, c_in, feat_dims, k): 93 | super(PPConv, self).__init__() 94 | 95 | self.k = k 96 | self.mlp_convs = nn.ModuleList() 97 | self.mlp_bns = nn.ModuleList() 98 | 99 | last_channel = c_in 100 | for c_out in feat_dims: # [32, 64, 64] 101 | self.mlp_convs.append(nn.Conv2d(last_channel, c_out, 1)) 102 | self.mlp_bns.append(nn.BatchNorm2d(c_out)) 103 | last_channel = c_out 104 | 105 | # self.smlp = SharedMLPBlock(c_in=3, feat_dims=[32, 64, 64]) 106 | self.csblk = CSBlock(c_in=1, feat_dims=[32,64]) 107 | self.mlp = nn.Sequential( 108 | nn.Conv1d(feat_dims[2], feat_dims[0], kernel_size=1), 109 | nn.GELU(), 110 | nn.Conv1d(feat_dims[0], feat_dims[0], kernel_size=1), 111 | nn.GELU(), 112 | nn.Conv1d(feat_dims[0], 6, kernel_size=1) 113 | ) 114 | 115 | def forward(self, fine_out): 116 | """fine_out: should have 6 channels, 3 coords and 3 normals""" 117 | 118 | grouped_cs, grouped_deltas = cosine_similarity(fine_out, k=self.k) # takes in both xyz & normals; out [BNK, BNKC] 119 | grouped_deltas = grouped_deltas.permute(0,3,2,1) # for conv, grouped_xyz has to be BCKN 120 | 121 | for i, conv in
enumerate(self.mlp_convs): 122 | bn = self.mlp_bns[i] 123 | grouped_deltas = F.gelu(bn(conv(grouped_deltas))) 124 | 125 | #TODO: (1) no max-scaling, (2) max-scaling w/ sigmoid (3) max-scaling w/ softmax 126 | max_cs = grouped_cs.max(dim = 2, keepdim=True)[0] 127 | cs_weight = grouped_cs / max_cs 128 | cs_weight = self.csblk(cs_weight) # [BCKN] 129 | 130 | grouped_deltas = torch.sum(grouped_deltas * cs_weight, dim=-2) # TODO: matmul or mul 131 | 132 | #TODO:the sharedmlp blk can be applied on self.conv1 (from network.py) & results torch.mul with grp deltas 133 | grouped_deltas = self.mlp(grouped_deltas) 134 | 135 | return grouped_deltas 136 | 137 | 138 | x = torch.rand(2, 20, 6) * 2 - 1 #BN3 139 | net = PPConv(c_in=3, feat_dims=[32, 64, 64], k=10) 140 | output = net(x) 141 | print('done !') -------------------------------------------------------------------------------- /pytorch3d-0.7.1-cp38-cp38-linux_x86_64.whl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/geospatial-lab/APC2Mesh/6cb68e6dbf15e2aa69463440d1153f9abd1d7b8b/pytorch3d-0.7.1-cp38-cp38-linux_x86_64.whl -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # To ensure app dependencies are ported from your virtual environment/host machine into your container, run 'pip freeze > requirements.txt' in the terminal to overwrite this file 2 | -------------------------------------------------------------------------------- /sample.txt: -------------------------------------------------------------------------------- 1 | 2 4 5 2 | 6 2 6 3 | 5 6 2 -------------------------------------------------------------------------------- /test_pcc.py: -------------------------------------------------------------------------------- 1 | import torch, random 2 | from dataset_pcc import CustomDataset 3 | from torch.utils import data 4 | from ablations.network import PCCNet 5 | # from point_ops.pointnet2_ops import pointnet2_utils as p2u 6 | from loss_pcc import chamfer_loss_sqrt, chamfer_loss 7 | from pytictoc import TicToc 8 | import numpy as np 9 | from datetime import datetime 10 | from pathlib import Path 11 | import logging 12 | 13 | # Params - files 14 | chkpnt_path = '/outputs/experiments/2023-03-21_08-01/checkpoints/pccnet_112_0.01676_0.00083.pth' # full 10k dataset trained 15 | # chkpnt_path = '/outputs/experiments/2023-05-17_07-06/checkpoints/pccnet_136_0.01926_0.00111.pth' # ablation tr nmls: no 16 | # chkpnt_path = '/outputs/experiments/ISPRS_R1/2023-10-09_22-30/checkpoints/pccnet_134_0.01810_0.00099.pth' # ablation 1024 input 17 | 18 | experiment_dir = Path('/outputs/experiments/testing/') 19 | experiment_dir.mkdir(exist_ok=True) 20 | file_dir = Path(str(experiment_dir) + '/' + str(datetime.now().strftime('%Y-%m-%d_%H-%M'))) 21 | file_dir.mkdir(exist_ok=True) 22 | log_dir = file_dir.joinpath('logs/') 23 | log_dir.mkdir(exist_ok=True) 24 | 25 | def start_logger(log_dir, fname): 26 | logger = logging.getLogger() 27 | logger.setLevel(logging.INFO) 28 | 29 | # logging to file 30 | file_handler = logging.FileHandler(str(log_dir) + '/%s.txt'%(fname)) 31 | file_handler.setLevel(logging.INFO) 32 | file_handler.setFormatter(logging.Formatter('%(message)s')) # %(asctime)s - %(levelname)s - 33 | logger.addHandler(file_handler) 34 | 35 | # logging to console 36 | stream_handler = logging.StreamHandler() 37 | 
stream_handler.setFormatter(logging.Formatter('\t\t %(message)s')) 38 | logger.addHandler(stream_handler) 39 | 40 | return logger 41 | 42 | # Params - others 43 | bs = 8 # batch_size 44 | npoints = 2048 45 | 46 | seed_value = 42 47 | random.seed(seed_value) 48 | torch.manual_seed(seed_value) 49 | torch.cuda.manual_seed(seed_value) 50 | 51 | device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') 52 | 53 | ts_dataset = CustomDataset(split='a2p_il_ablation', npoints=npoints, device=device) #'full_test' 'test_cmplx' 'a2p_il_ablation' 54 | ts_loader = data.DataLoader(ts_dataset, batch_size=bs, shuffle=True) 55 | 56 | # ts_dataset = CustomDataset(split='custom', npoints=npoints, device=device) 57 | # ts_loader = data.DataLoader(ts_dataset, batch_size=bs, shuffle=False) 58 | 59 | pcc_model = PCCNet(kmax=20, code_dim=1024, use_nmls=True, 60 | multi_scale=True, attn_pool=True, fps_crsovr=True).to(device) 61 | 62 | pcc_model.load_state_dict(torch.load(chkpnt_path)) 63 | 64 | t = TicToc() #create instance of class 65 | test_logger = start_logger(log_dir=log_dir, fname='test_log') 66 | # test_logger.info('Creating "pcc fine" as point2poly training data') 67 | test_logger.info('a2p_il ablation: 4096 pcc output for selected 2048 input files') 68 | test_logger.info('all optimal configs reported maintained') 69 | test_logger.info('loaded model: %s' % chkpnt_path) 70 | 71 | def testing(model, loader, file_dir, device, rand_save=False): 72 | print("Testing ...") 73 | model.eval() 74 | num_iters = len(loader) 75 | 76 | with torch.no_grad(): 77 | cdt_coarse, cdp_coarse, cdt_fine, cdp_fine, cdt_finer, cdp_finer = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 78 | for i, data in enumerate(loader): 79 | # print(i,'\n') 80 | # continue 81 | #data 82 | xyz = data[0][:, :, :6].to(device).float() # partial: [B 2048, 6] include normals 83 | if not pcc_model.use_nmls: 84 | xyz = xyz[:, :, :3] 85 | #model 86 | coarse, fine, finer = model(xyz) 87 | # coarse, fine = coarse[:, :, :3], fine[:, :, :3] 88 | 89 | #losses 90 | gt_xyz = data[1][:, :, :3].to(device).float() # partial: [B 16348, 6] 91 | if finer is not None: 92 | # finer = finer[:, :, :3] 93 | cdp_finer += chamfer_loss_sqrt(finer[:, :, :3], gt_xyz).item() #inputs shd be BNC; cd_p 94 | cdt_finer += chamfer_loss(finer[:, :, :3], gt_xyz).item() 95 | else: 96 | cdp_finer, cdt_finer = 0.0, 0.0 97 | cdp_fine += chamfer_loss_sqrt(fine[:, :, :3], gt_xyz).item() #inputs shd be BNC; cd_p 98 | cdp_coarse += chamfer_loss_sqrt(coarse[:, :, :3], gt_xyz).item() 99 | cdt_fine += chamfer_loss(fine[:, :, :3], gt_xyz).item() # cd_t 100 | cdt_coarse += chamfer_loss(coarse[:, :, :3], gt_xyz).item() 101 | 102 | if rand_save: 103 | # if finer is not None: 104 | # np.savez(str(file_dir) + '/rand_outs.npz', gt_pnts=gt_xyz.data.cpu().numpy(), 105 | # final_pnts=finer.data.cpu().numpy(), 106 | # fine_pnts=fine.data.cpu().numpy(), 107 | # coarse_pnts=coarse.data.cpu().numpy(), 108 | # als_pnts=xyz.data.cpu().numpy()[:, :, :3]) 109 | # else: 110 | np.savez(str(file_dir) + '/rand_outs_{}.npz'.format(i), gt_pnts=data[1].cpu().numpy(), 111 | final_pnts=fine.data.cpu().numpy(), 112 | coarse_pnts=coarse.data.cpu().numpy(), 113 | als_pnts=xyz.data.cpu().numpy()[:, :, :3]) 114 | 115 | return {'finer_p': cdp_finer/num_iters, 'fine_p': cdp_fine/num_iters, 'coarse_p': cdp_coarse/num_iters, 'finer_t': cdt_finer/num_iters, 'fine_t': cdt_fine/num_iters, 'coarse_t': cdt_coarse/num_iters} 116 | 117 | 118 | test_losses = testing(pcc_model, ts_loader, file_dir=file_dir, device=device, rand_save=True) 119 | 
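# NB: with rand_save=True, testing() writes one batched 'rand_outs_{i}.npz' per test batch
# (arrays: gt_pnts, final_pnts, coarse_pnts, als_pnts); together with the 'ts_fileseq.txt'
# written below, these are the files that post_subnets/post_ops.get_complete_files() later
# unpacks into per-instance gt/fine/als .txt point files (it assumes the same batch size of 8).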
120 | test_logger.info('cdp_finer: %.6f | cdt_finer: %.6f |cdp_fine: %.6f | cdt_fine: %.6f | cdp_coarse: %.6f | cdt_coarse: %.6f' %( 121 | test_losses['finer_p'], 122 | test_losses['finer_t'], 123 | test_losses['fine_p'], 124 | test_losses['fine_t'], 125 | test_losses['coarse_p'], 126 | test_losses['coarse_t'])) 127 | # get file name and sequence for future de-normalization step. 128 | ts_fileseq = ts_loader.batch_sampler.sampler.data_source.mesh_list 129 | # open file in write mode 130 | with open(str(file_dir)+'/ts_fileseq.txt', 'w') as fp: 131 | for item in ts_fileseq: 132 | # write each item on a new line 133 | fp.write("%s\n" % item) 134 | # print('Done') 135 | print('done ...') 136 | 137 | -------------------------------------------------------------------------------- /trials.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import trimesh 3 | 4 | datapath = "sampling/data/noname1308907.obj" 5 | data = np.array([[1164.098500, -1258.009500, -20.479850], 6 | [1164.098500, -1258.009500, -23.374150], 7 | [1168.120500, -1257.953500, -20.479850], 8 | [1168.120500, -1257.953500, -23.374150], 9 | [1163.911500, -1244.585500, -20.571850], 10 | [1163.911500, -1244.585500, -23.374150], 11 | [1167.933500, -1244.529500, -20.571850], 12 | [1167.933500, -1244.529500, -23.374150]]) 13 | 14 | ''' 15 | 16 | def normalize(data, min_bound=-0.5, max_bound=0.5): 17 | xyzmin = np.min(data,axis=0) 18 | xyzmax = np.max(data, axis=0) 19 | 20 | ndata = (max_bound - min_bound) * (data - xyzmin) / (xyzmax - xyzmin) + min_bound 21 | return ndata, xyzmin, xyzmax 22 | 23 | def denormalize(data, xyzmin, xyzmax, min_bound=-0.5, max_bound=0.5): 24 | odata = (data - min_bound) * (xyzmax - xyzmin) / (max_bound - min_bound) + xyzmin 25 | return odata 26 | 27 | ''' 28 | def normalize(data): 29 | 30 | # Find the centroid 31 | print("Calculating centroid") 32 | centroid = data.mean(0) 33 | 34 | # subrtact centroid from the data 35 | recenteredData = data - centroid 36 | 37 | # Calculate Scale factor 38 | scale = np.abs(recenteredData).max()*2 39 | # Normalize 40 | normalized_data = np.divide(recenteredData,scale) 41 | 42 | return [normalized_data, scale, centroid] 43 | 44 | 45 | def denormalize(data, scale, centroid): 46 | # un-normalize the data 47 | recenteredData = np.multiply(data,scale) 48 | 49 | translatedPoints = recenteredData + centroid 50 | 51 | return translatedPoints 52 | 53 | 54 | def from_trimesh(file_in): 55 | mesh = trimesh.load(file_in) 56 | bounds = mesh.extents 57 | if bounds.min() == 0.0: 58 | return 59 | 60 | # translate to origin 61 | translation = (mesh.bounds[0] + mesh.bounds[1]) * 0.5 62 | translation = trimesh.transformations.translation_matrix(direction=-translation) 63 | mesh.apply_transform(translation) 64 | 65 | # scale to unit cube 66 | scale = 1.0/bounds.max() 67 | scale_trafo = trimesh.transformations.scale_matrix(factor=scale) 68 | mesh.apply_transform(scale_trafo) 69 | return mesh 70 | 71 | def plot_vertices(data): 72 | 73 | import matplotlib.pyplot as plt 74 | 75 | fig = plt.figure(figsize=(12, 12)) 76 | ax = fig.add_subplot(projection='3d') 77 | 78 | ax.scatter(data[:,0], data[:,1], data[:,2]) 79 | plt.show() 80 | 81 | 82 | [ndata, scale, centroid] = normalize(data) 83 | plot_vertices(ndata) 84 | nmesh = from_trimesh(file_in=datapath) 85 | plot_vertices(nmesh.vertices) 86 | odata = denormalize(ndata, scale, centroid) 87 | plot_vertices(odata) 88 | 89 | print(np.allclose(data,odata)) 
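# A minimal sketch of how the (scale, centroid) pair returned by normalize() above could be
# persisted and reused for later de-normalization, mirroring the transform .npz files that
# post_subnets/post_ops.denormalize() loads (it reads the keys 'scale' and 'centroid');
# the file name used here is only a placeholder.
np.savez('tmp_trsf.npz', scale=scale, centroid=centroid)
trsf = np.load('tmp_trsf.npz')
rdata = denormalize(ndata, trsf['scale'], trsf['centroid'])
print(np.allclose(data, rdata))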
--------------------------------------------------------------------------------