├── .gitignore ├── CMakeLists.txt ├── LICENSE ├── README.txt ├── docs ├── Makefile ├── gh_deploy.sh └── source │ ├── api │ ├── io.rst │ ├── mesh.rst │ ├── rendering.rst │ ├── sdf.rst │ └── stable_poses.rst │ ├── conf.py │ ├── index.rst │ └── install │ └── install.rst ├── docs_requirements.txt ├── examples └── render_images.py ├── install_meshrender.sh ├── meshpy ├── CMakeLists.txt ├── __init__.py ├── image_converter.py ├── lighting.py ├── mesh.py ├── mesh_renderer.py ├── meshrender.cpp ├── obj_file.py ├── off_file.py ├── random_variables.py ├── render_modes.py ├── sdf.py ├── sdf_file.py ├── stable_pose.py ├── stp_file.py └── urdf_writer.py ├── package.xml ├── requirements.txt ├── setup.py ├── test ├── __init__.py ├── data │ ├── bad_tetrahedron.obj │ └── tetrahedron.obj ├── mesh_test.py └── mesh_visualizer.py └── tools ├── convert_image_to_obj.py ├── convert_to_obj.py ├── mesh_to_urdf.py └── test_stable_pose.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | *~ 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *,cover 47 | .hypothesis/ 48 | 49 | # Translations 50 | *.mo 51 | *.pot 52 | 53 | # Django stuff: 54 | *.log 55 | local_settings.py 56 | 57 | # Flask stuff: 58 | instance/ 59 | .webassets-cache 60 | 61 | # Scrapy stuff: 62 | .scrapy 63 | 64 | # PyBuilder 65 | target/ 66 | 67 | # IPython Notebook 68 | .ipynb_checkpoints 69 | 70 | # pyenv 71 | .python-version 72 | 73 | # celery beat schedule file 74 | celerybeat-schedule 75 | 76 | # dotenv 77 | .env 78 | 79 | # virtualenv 80 | venv/ 81 | ENV/ 82 | 83 | # Spyder project settings 84 | .spyderproject 85 | 86 | # Rope project settings 87 | .ropeproject 88 | 89 | # Temp files 90 | *~ 91 | .#* 92 | #* -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | project(meshpy) 3 | 4 | FIND_PACKAGE(PythonInterp) 5 | FIND_PACKAGE(PythonLibs 2.7) 6 | find_package(OpenGL REQUIRED) 7 | find_package(GLUT REQUIRED) 8 | FIND_PACKAGE(Boost COMPONENTS python) 9 | 10 | set( CMAKE_VERBOSE_MAKEFILE on ) 11 | set( CMAKE_INSTALL_PREFIX ../) 12 | INCLUDE_DIRECTORIES(${Boost_INCLUDE_DIRS} ${PYTHON_INCLUDE_DIRS} ${OPENGL_INCLUDE_DIRS} ${GLUT_INCLUDE_DIRS}) 13 | LINK_LIBRARIES(${Boost_LIBRARIES} ${PYTHON_LIBRARIES} boost_numpy ${GLUT_LIBRARY} OSMesa) 14 | 15 | ADD_SUBDIRECTORY(meshpy) 16 | 17 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND 
DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 
179 | 
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 | 
189 | Copyright 2017 Berkeley AUTOLAB & University of California, Berkeley
190 | 
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 | 
195 | http://www.apache.org/licenses/LICENSE-2.0
196 | 
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
--------------------------------------------------------------------------------
/README.txt:
--------------------------------------------------------------------------------
1 | This is a 3D mesh processing library, courtesy of
2 | the Berkeley AutoLab and Jeff Mahler.
3 | 
4 | To run unit tests, from the top-level directory run
5 | >>> python -m unittest discover
6 | 
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 | 
4 | # You can set these variables from the command line.
5 | SPHINXOPTS = 
6 | SPHINXBUILD = sphinx-build
7 | PAPER = 
8 | BUILDDIR = build
9 | GH_PAGES_SOURCES = docs meshpy CMakeLists.txt install_meshrender.sh
10 | 
11 | # User-friendly check for sphinx-build
12 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
13 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
14 | endif
15 | 
16 | # Internal variables.
17 | PAPEROPT_a4 = -D latex_paper_size=a4
18 | PAPEROPT_letter = -D latex_paper_size=letter
19 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
20 | # the i18n builder cannot share the environment and doctrees with the others
21 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
22 | 
23 | .PHONY: help
24 | help:
25 | @echo "Please use \`make <target>' where <target> is one of"
26 | @echo " html to make standalone HTML files"
27 | @echo " dirhtml to make HTML files named index.html in directories"
28 | @echo " singlehtml to make a single large HTML file"
29 | @echo " pickle to make pickle files"
30 | @echo " json to make JSON files"
31 | @echo " htmlhelp to make HTML files and a HTML help project"
32 | @echo " qthelp to make HTML files and a qthelp project"
33 | @echo " applehelp to make an Apple Help Book"
34 | @echo " devhelp to make HTML files and a Devhelp project"
35 | @echo " epub to make an epub"
36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
37 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
39 | @echo " text to make text files"
40 | @echo " man to make manual pages"
41 | @echo " texinfo to make Texinfo files"
42 | @echo " info to make Texinfo files and run them through makeinfo"
43 | @echo " gettext to make PO message catalogs"
44 | @echo " changes to make an overview of all changed/added/deprecated items"
45 | @echo " xml to make Docutils-native XML files"
46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes"
47 | @echo " linkcheck to check all external links for integrity"
48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
49 | @echo " coverage to run coverage check of the documentation (if enabled)"
50 | 
51 | .PHONY: clean
52 | clean:
53 | rm -rf $(BUILDDIR)/*
54 | 
55 | .PHONY: gh-pages
56 | gh-pages:
57 | git checkout gh-pages && \
58 | cd .. && \
59 | git rm -rf . && git clean -fxd && \
60 | git checkout master $(GH_PAGES_SOURCES) && \
61 | git reset HEAD && \
62 | sh install_meshrender.sh && \
63 | cd docs && \
64 | make html && \
65 | cd .. && \
66 | mv -fv docs/build/html/* ./ && \
67 | touch .nojekyll && \
68 | rm -rf $(GH_PAGES_SOURCES) && \
69 | git add -A && \
70 | git commit -m "Generated gh-pages for `git log master -1 --pretty=short --abbrev-commit`" && \
71 | git push origin --delete gh-pages && \
72 | git push origin gh-pages ; \
73 | git checkout master
74 | 
75 | .PHONY: html
76 | html:
77 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
78 | @echo
79 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
80 | 
81 | .PHONY: dirhtml
82 | dirhtml:
83 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
84 | @echo
85 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
86 | 
87 | .PHONY: singlehtml
88 | singlehtml:
89 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
90 | @echo
91 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
92 | 
93 | .PHONY: pickle
94 | pickle:
95 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
96 | @echo
97 | @echo "Build finished; now you can process the pickle files."
98 | 
99 | .PHONY: json
100 | json:
101 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
102 | @echo
103 | @echo "Build finished; now you can process the JSON files."
104 | 105 | .PHONY: htmlhelp 106 | htmlhelp: 107 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 108 | @echo 109 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 110 | ".hhp project file in $(BUILDDIR)/htmlhelp." 111 | 112 | .PHONY: qthelp 113 | qthelp: 114 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 115 | @echo 116 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 117 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 118 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/meshpy.qhcp" 119 | @echo "To view the help file:" 120 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/meshpy.qhc" 121 | 122 | .PHONY: applehelp 123 | applehelp: 124 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 125 | @echo 126 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 127 | @echo "N.B. You won't be able to view it unless you put it in" \ 128 | "~/Library/Documentation/Help or install it in your application" \ 129 | "bundle." 130 | 131 | .PHONY: devhelp 132 | devhelp: 133 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 134 | @echo 135 | @echo "Build finished." 136 | @echo "To view the help file:" 137 | @echo "# mkdir -p $$HOME/.local/share/devhelp/meshpy" 138 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/meshpy" 139 | @echo "# devhelp" 140 | 141 | .PHONY: epub 142 | epub: 143 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 144 | @echo 145 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 146 | 147 | .PHONY: latex 148 | latex: 149 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 150 | @echo 151 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 152 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 153 | "(use \`make latexpdf' here to do that automatically)." 154 | 155 | .PHONY: latexpdf 156 | latexpdf: 157 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 158 | @echo "Running LaTeX files through pdflatex..." 159 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 160 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 161 | 162 | .PHONY: latexpdfja 163 | latexpdfja: 164 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 165 | @echo "Running LaTeX files through platex and dvipdfmx..." 166 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 167 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 168 | 169 | .PHONY: text 170 | text: 171 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 172 | @echo 173 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 174 | 175 | .PHONY: man 176 | man: 177 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 178 | @echo 179 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 180 | 181 | .PHONY: texinfo 182 | texinfo: 183 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 184 | @echo 185 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 186 | @echo "Run \`make' in that directory to run these through makeinfo" \ 187 | "(use \`make info' here to do that automatically)." 188 | 189 | .PHONY: info 190 | info: 191 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 192 | @echo "Running Texinfo files through makeinfo..." 193 | make -C $(BUILDDIR)/texinfo info 194 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 
195 | 196 | .PHONY: gettext 197 | gettext: 198 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 199 | @echo 200 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 201 | 202 | .PHONY: changes 203 | changes: 204 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 205 | @echo 206 | @echo "The overview file is in $(BUILDDIR)/changes." 207 | 208 | .PHONY: linkcheck 209 | linkcheck: 210 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 211 | @echo 212 | @echo "Link check complete; look for any errors in the above output " \ 213 | "or in $(BUILDDIR)/linkcheck/output.txt." 214 | 215 | .PHONY: doctest 216 | doctest: 217 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 218 | @echo "Testing of doctests in the sources finished, look at the " \ 219 | "results in $(BUILDDIR)/doctest/output.txt." 220 | 221 | .PHONY: coverage 222 | coverage: 223 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 224 | @echo "Testing of coverage in the sources finished, look at the " \ 225 | "results in $(BUILDDIR)/coverage/python.txt." 226 | 227 | .PHONY: xml 228 | xml: 229 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 230 | @echo 231 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 232 | 233 | .PHONY: pseudoxml 234 | pseudoxml: 235 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 236 | @echo 237 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 238 | -------------------------------------------------------------------------------- /docs/gh_deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | make gh-pages 3 | cd .. 4 | sh install_meshrender.sh 5 | -------------------------------------------------------------------------------- /docs/source/api/io.rst: -------------------------------------------------------------------------------- 1 | File I/O 2 | ======== 3 | 4 | ObjFile 5 | ~~~~~~~ 6 | .. autoclass:: meshpy.ObjFile 7 | 8 | OffFile 9 | ~~~~~~~ 10 | .. autoclass:: meshpy.OffFile 11 | 12 | SdfFile 13 | ~~~~~~~ 14 | .. autoclass:: meshpy.SdfFile 15 | 16 | StablePoseFile 17 | ~~~~~~~~~~~~~~ 18 | .. autoclass:: meshpy.StablePoseFile 19 | -------------------------------------------------------------------------------- /docs/source/api/mesh.rst: -------------------------------------------------------------------------------- 1 | Meshes 2 | ====== 3 | 4 | Mesh3D 5 | ~~~~~~ 6 | .. autoclass:: meshpy.Mesh3D 7 | -------------------------------------------------------------------------------- /docs/source/api/rendering.rst: -------------------------------------------------------------------------------- 1 | Rendering 2 | ========= 3 | Classes for rendering images of meshes. 4 | 5 | VirtualCamera 6 | ~~~~~~~~~~~~~ 7 | .. autoclass:: meshpy.VirtualCamera 8 | 9 | ViewsphereDiscretizer 10 | ~~~~~~~~~~~~~~~~~~~~~ 11 | .. autoclass:: meshpy.ViewsphereDiscretizer 12 | 13 | PlanarWorksurfaceDiscretizer 14 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 15 | .. autoclass:: meshpy.PlanarWorksurfaceDiscretizer 16 | -------------------------------------------------------------------------------- /docs/source/api/sdf.rst: -------------------------------------------------------------------------------- 1 | Signed Distance Fields 2 | ====================== 3 | 4 | Sdf3D 5 | ~~~~~ 6 | .. autoclass:: meshpy.Sdf3D 7 | 8 | Sdf 9 | ~~~ 10 | .. 
autoclass:: meshpy.Sdf
11 | 
12 | 
--------------------------------------------------------------------------------
/docs/source/api/stable_poses.rst:
--------------------------------------------------------------------------------
1 | Stable Poses
2 | ============
3 | The discrete set of poses in which a triangular mesh can rest on an infinite planar
4 | worksurface when dropped with a uniformly random initial orientation.
5 | 
6 | StablePose
7 | ~~~~~~~~~~
8 | .. autoclass:: meshpy.StablePose
9 | 
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # meshpy documentation build configuration file, created by
4 | # sphinx-quickstart on Wed Oct 12 02:07:20 2016.
5 | #
6 | # This file is execfile()d with the current directory set to its
7 | # containing dir.
8 | #
9 | # Note that not all possible configuration values are present in this
10 | # autogenerated file.
11 | #
12 | # All configuration values have a default; values that are commented out
13 | # serve to show the default.
14 | 
15 | import sys
16 | import os
17 | import sphinx_rtd_theme
18 | 
19 | # If extensions (or modules to document with autodoc) are in another directory,
20 | # add these directories to sys.path here. If the directory is relative to the
21 | # documentation root, use os.path.abspath to make it absolute, like shown here.
22 | sys.path.insert(0, os.path.abspath('../../'))
23 | 
24 | # -- General configuration ------------------------------------------------
25 | 
26 | # If your documentation needs a minimal Sphinx version, state it here.
27 | #needs_sphinx = '1.0'
28 | 
29 | # Add any Sphinx extension module names here, as strings. They can be
30 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
31 | # ones.
32 | extensions = [
33 |     'sphinx.ext.autodoc', 'sphinxcontrib.napoleon'
34 | ]
35 | autoclass_content = 'class'
36 | autodoc_member_order = 'bysource'
37 | autodoc_default_flags = ['members', 'show-inheritance']
38 | napoleon_include_special_with_doc = True
39 | napoleon_include_init_with_doc = True
40 | 
41 | # Add any paths that contain templates here, relative to this directory.
42 | templates_path = ['_templates']
43 | 
44 | # The suffix(es) of source filenames.
45 | # You can specify multiple suffix as a list of string:
46 | # source_suffix = ['.rst', '.md']
47 | source_suffix = '.rst'
48 | 
49 | # The encoding of source files.
50 | #source_encoding = 'utf-8-sig'
51 | 
52 | # The master toctree document.
53 | master_doc = 'index'
54 | 
55 | # General information about the project.
56 | project = u'meshpy'
57 | copyright = u'2016, Jeff Mahler'
58 | author = u'Jeff Mahler'
59 | 
60 | # The version info for the project you're documenting, acts as replacement for
61 | # |version| and |release|, also used in various other places throughout the
62 | # built documents.
63 | #
64 | # The short X.Y version.
65 | version = u'0.0.1'
66 | # The full version, including alpha/beta/rc tags.
67 | release = u'0.0.1'
68 | 
69 | # The language for content autogenerated by Sphinx. Refer to documentation
70 | # for a list of supported languages.
71 | #
72 | # This is also used if you do content translation via gettext catalogs.
73 | # Usually you set "language" from the command line for these cases.
74 | language = None
75 | 
76 | # There are two options for replacing |today|: either, you set today to some
77 | # non-false value, then it is used:
78 | #today = ''
79 | # Else, today_fmt is used as the format for a strftime call.
80 | #today_fmt = '%B %d, %Y'
81 | 
82 | # List of patterns, relative to source directory, that match files and
83 | # directories to ignore when looking for source files.
84 | exclude_patterns = []
85 | 
86 | # The reST default role (used for this markup: `text`) to use for all
87 | # documents.
88 | #default_role = None
89 | 
90 | # If true, '()' will be appended to :func: etc. cross-reference text.
91 | #add_function_parentheses = True
92 | 
93 | # If true, the current module name will be prepended to all description
94 | # unit titles (such as .. function::).
95 | #add_module_names = True
96 | 
97 | # If true, sectionauthor and moduleauthor directives will be shown in the
98 | # output. They are ignored by default.
99 | #show_authors = False
100 | 
101 | # The name of the Pygments (syntax highlighting) style to use.
102 | pygments_style = 'sphinx'
103 | 
104 | # A list of ignored prefixes for module index sorting.
105 | #modindex_common_prefix = []
106 | 
107 | # If true, keep warnings as "system message" paragraphs in the built documents.
108 | #keep_warnings = False
109 | 
110 | # If true, `todo` and `todoList` produce output, else they produce nothing.
111 | todo_include_todos = False
112 | 
113 | 
114 | # -- Options for HTML output ----------------------------------------------
115 | 
116 | # The theme to use for HTML and HTML Help pages. See the documentation for
117 | # a list of builtin themes.
118 | #html_theme = 'alabaster'
119 | html_theme = 'sphinx_rtd_theme'
120 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
121 | 
122 | # Theme options are theme-specific and customize the look and feel of a theme
123 | # further. For a list of options available for each theme, see the
124 | # documentation.
125 | #html_theme_options = {}
126 | 
127 | # Add any paths that contain custom themes here, relative to this directory.
128 | #html_theme_path = []
129 | 
130 | # The name for this set of Sphinx documents. If None, it defaults to
131 | # "<project> v<release> documentation".
132 | #html_title = None
133 | 
134 | # A shorter title for the navigation bar. Default is the same as html_title.
135 | #html_short_title = None
136 | 
137 | # The name of an image file (relative to this directory) to place at the top
138 | # of the sidebar.
139 | #html_logo = None
140 | 
141 | # The name of an image file (relative to this directory) to use as a favicon of
142 | # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
143 | # pixels large.
144 | #html_favicon = None
145 | 
146 | # Add any paths that contain custom static files (such as style sheets) here,
147 | # relative to this directory. They are copied after the builtin static files,
148 | # so a file named "default.css" will overwrite the builtin "default.css".
149 | html_static_path = ['_static']
150 | 
151 | # Add any extra paths that contain custom files (such as robots.txt or
152 | # .htaccess) here, relative to this directory. These files are copied
153 | # directly to the root of the documentation.
154 | #html_extra_path = []
155 | 
156 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
157 | # using the given strftime format.
158 | #html_last_updated_fmt = '%b %d, %Y'
159 | 
160 | # If true, SmartyPants will be used to convert quotes and dashes to
161 | # typographically correct entities.
162 | #html_use_smartypants = True
163 | 
164 | # Custom sidebar templates, maps document names to template names.
165 | #html_sidebars = {}
166 | 
167 | # Additional templates that should be rendered to pages, maps page names to
168 | # template names.
169 | #html_additional_pages = {}
170 | 
171 | # If false, no module index is generated.
172 | #html_domain_indices = True
173 | 
174 | # If false, no index is generated.
175 | #html_use_index = True
176 | 
177 | # If true, the index is split into individual pages for each letter.
178 | #html_split_index = False
179 | 
180 | # If true, links to the reST sources are added to the pages.
181 | html_show_sourcelink = True
182 | 
183 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
184 | #html_show_sphinx = True
185 | 
186 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
187 | #html_show_copyright = True
188 | 
189 | # If true, an OpenSearch description file will be output, and all pages will
190 | # contain a <link> tag referring to it. The value of this option must be the
191 | # base URL from which the finished HTML is served.
192 | #html_use_opensearch = ''
193 | 
194 | # This is the file name suffix for HTML files (e.g. ".xhtml").
195 | #html_file_suffix = None
196 | 
197 | # Language to be used for generating the HTML full-text search index.
198 | # Sphinx supports the following languages:
199 | #   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
200 | #   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
201 | #html_search_language = 'en'
202 | 
203 | # A dictionary with options for the search language support, empty by default.
204 | # Now only 'ja' uses this config value
205 | #html_search_options = {'type': 'default'}
206 | 
207 | # The name of a javascript file (relative to the configuration directory) that
208 | # implements a search results scorer. If empty, the default will be used.
209 | #html_search_scorer = 'scorer.js'
210 | 
211 | # Output file base name for HTML help builder.
212 | htmlhelp_basename = 'meshpydoc'
213 | 
214 | # -- Options for LaTeX output ---------------------------------------------
215 | 
216 | latex_elements = {
217 | # The paper size ('letterpaper' or 'a4paper').
218 | #'papersize': 'letterpaper',
219 | 
220 | # The font size ('10pt', '11pt' or '12pt').
221 | #'pointsize': '10pt',
222 | 
223 | # Additional stuff for the LaTeX preamble.
224 | #'preamble': '',
225 | 
226 | # Latex figure (float) alignment
227 | #'figure_align': 'htbp',
228 | }
229 | 
230 | # Grouping the document tree into LaTeX files. List of tuples
231 | # (source start file, target name, title,
232 | #  author, documentclass [howto, manual, or own class]).
233 | latex_documents = [
234 |     (master_doc, 'meshpy.tex', u'meshpy Documentation',
235 |      u'Jeff Mahler', 'manual'),
236 | ]
237 | 
238 | # The name of an image file (relative to this directory) to place at the top of
239 | # the title page.
240 | #latex_logo = None
241 | 
242 | # For "manual" documents, if this is true, then toplevel headings are parts,
243 | # not chapters.
244 | #latex_use_parts = False
245 | 
246 | # If true, show page references after internal links.
247 | #latex_show_pagerefs = False
248 | 
249 | # If true, show URL addresses after external links.
250 | #latex_show_urls = False
251 | 
252 | # Documents to append as an appendix to all manuals.
253 | #latex_appendices = []
254 | 
255 | # If false, no module index is generated.
256 | #latex_domain_indices = True
257 | 
258 | 
259 | # -- Options for manual page output ---------------------------------------
260 | 
261 | # One entry per manual page. List of tuples
262 | # (source start file, name, description, authors, manual section).
263 | man_pages = [
264 |     (master_doc, 'meshpy', u'meshpy Documentation',
265 |      [author], 1)
266 | ]
267 | 
268 | # If true, show URL addresses after external links.
269 | #man_show_urls = False
270 | 
271 | 
272 | # -- Options for Texinfo output -------------------------------------------
273 | 
274 | # Grouping the document tree into Texinfo files. List of tuples
275 | # (source start file, target name, title, author,
276 | #  dir menu entry, description, category)
277 | texinfo_documents = [
278 |     (master_doc, 'meshpy', u'meshpy Documentation',
279 |      author, 'meshpy', 'One line description of project.',
280 |      'Miscellaneous'),
281 | ]
282 | 
283 | # Documents to append as an appendix to all manuals.
284 | #texinfo_appendices = []
285 | 
286 | # If false, no module index is generated.
287 | #texinfo_domain_indices = True
288 | 
289 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
290 | #texinfo_show_urls = 'footnote'
291 | 
292 | # If true, do not generate a @detailmenu in the "Top" node's menu.
293 | #texinfo_no_detailmenu = False
294 | 
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. core documentation master file, created by
2 |    sphinx-quickstart on Sun Oct 16 14:33:48 2016.
3 |    You can adapt this file completely to your liking, but it should at least
4 |    contain the root `toctree` directive.
5 | 
6 | Berkeley AutoLab meshpy Documentation
7 | =====================================
8 | Welcome to the documentation for the Berkeley AutoLab's `meshpy` module!
9 | This module enables basic operations with 3-dimensional
10 | triangular meshes, such as reading, writing, editing, rendering,
11 | and computation of stable poses.
12 | The `meshpy` module depends on `numpy`_, `scipy`_, and
13 | `matplotlib`_, which you can install with pip.
14 | 
15 | .. _numpy: http://www.numpy.org/
16 | .. _scipy: https://www.scipy.org/
17 | .. _matplotlib: http://www.matplotlib.org/
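
For example, reading a mesh from a Wavefront .OBJ file and computing its
stable poses takes only a few lines (a minimal sketch; the mesh path is
hypothetical)::

    from meshpy import ObjFile

    mesh = ObjFile('data/meshes/table.obj').read()
    stable_poses = mesh.stable_poses()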
18 | 
19 | .. toctree::
20 |    :maxdepth: 2
21 |    :caption: Installation Guide
22 | 
23 |    install/install.rst
24 | 
25 | .. toctree::
26 |    :maxdepth: 2
27 |    :caption: API Documentation
28 |    :glob:
29 | 
30 |    api/*
31 | 
32 | Indices and tables
33 | ==================
34 | 
35 | * :ref:`genindex`
36 | * :ref:`modindex`
37 | * :ref:`search`
38 | 
39 | 
40 | 
--------------------------------------------------------------------------------
/docs/source/install/install.rst:
--------------------------------------------------------------------------------
1 | Installation Instructions
2 | =========================
3 | 
4 | Dependencies
5 | ~~~~~~~~~~~~
6 | The `meshpy` module depends on the Berkeley AutoLab's `autolab_core`_ and `perception`_ modules,
7 | which can be installed by following instructions in their respective
8 | repositories.
9 | 
10 | .. _autolab_core: https://github.com/BerkeleyAutomation/autolab_core
11 | .. _perception: https://github.com/BerkeleyAutomation/perception
12 | 
13 | Rendering using `meshpy` also depends on `OSMesa`_ and `Boost.NumPy`_, and
14 | compiling the renderer depends on `CMake`_.
15 | 
16 | .. _OSMesa: http://www.mesa3d.org/osmesa.html
17 | .. _Boost.NumPy: https://github.com/ndarray/Boost.NumPy
18 | .. _CMake: https://cmake.org/
19 | 
20 | Any other dependencies will be installed automatically when `meshpy` is
21 | installed with `pip`.
22 | 
23 | Cloning the Repository
24 | ~~~~~~~~~~~~~~~~~~~~~~
25 | You can clone or download our source code from `Github`_. ::
26 | 
27 |     $ git clone git@github.com:BerkeleyAutomation/meshpy.git
28 | 
29 | .. _Github: https://github.com/BerkeleyAutomation/meshpy
30 | 
31 | Installation
32 | ~~~~~~~~~~~~
33 | Install `OSMesa`_ by running: ::
34 | 
35 |     $ sudo apt-get install libosmesa6-dev
36 | 
37 | Install `Boost.NumPy`_ by cloning the latest stable repo: ::
38 | 
39 |     $ git clone https://github.com/ndarray/Boost.NumPy.git
40 | 
41 | and following `Boost.NumPy's installation instructions`_.
42 | 
43 | .. _OSMesa: http://www.mesa3d.org/osmesa.html
44 | .. _Boost.NumPy: https://github.com/ndarray/Boost.NumPy
45 | .. _Boost.NumPy's installation instructions: https://github.com/ndarray/Boost.NumPy
46 | 
47 | Then, to install `meshpy` in your current Python environment,
48 | change directories into the `meshpy` repository and run ::
49 | 
50 |     $ pip install -e .
51 | 
52 | Alternatively, you can run ::
53 | 
54 |     $ pip install /path/to/meshpy
55 | 
56 | to install `meshpy` from anywhere.
57 | 
58 | To visualize meshes, we highly recommend also installing
59 | the Berkeley AutoLab's `visualization`_ module, which uses `mayavi`_.
60 | This can be installed by cloning the repo: ::
61 | 
62 |     $ git clone git@github.com:BerkeleyAutomation/visualization.git
63 | 
64 | and following the `installation instructions`_.
65 | 
66 | .. _visualization: https://github.com/BerkeleyAutomation/visualization
67 | .. _mayavi: http://docs.enthought.com/mayavi/mayavi/
68 | .. _installation instructions: https://BerkeleyAutomation.github.io/visualization
69 | 
70 | Testing
71 | ~~~~~~~
72 | To test your installation, run ::
73 | 
74 |     $ python setup.py test
75 | 
76 | We highly recommend testing before using the module.
77 | 
78 | Building Documentation
79 | ~~~~~~~~~~~~~~~~~~~~~~
80 | Building `meshpy`'s documentation requires a few extra dependencies --
81 | specifically, `sphinx`_ and a few plugins.
82 | 
83 | .. _sphinx: http://www.sphinx-doc.org/en/1.4.8/
84 | 
85 | To install the required dependencies, run ::
86 | 
87 |     $ pip install -r docs_requirements.txt
88 | 
89 | Then, go to the `docs` directory and run ``make`` with the appropriate target.
90 | For example, ::
91 | 
92 |     $ cd docs/
93 |     $ make html
94 | 
95 | will generate a set of web pages. Any documentation files
96 | generated in this manner can be found in `docs/build`.
97 | 
98 | Deploying Documentation
99 | ~~~~~~~~~~~~~~~~~~~~~~~
100 | To deploy documentation to the Github Pages site for the repository,
101 | push any changes to the documentation source to master
102 | and then run ::
103 | 
104 |     $ . gh_deploy.sh
105 | 
106 | from the `docs` folder. This script will automatically check out the
107 | ``gh-pages`` branch, build the documentation from source, and push it
108 | to Github.
109 | -------------------------------------------------------------------------------- /docs_requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | sphinxcontrib-napoleon 3 | sphinx_rtd_theme 4 | 5 | -------------------------------------------------------------------------------- /examples/render_images.py: -------------------------------------------------------------------------------- 1 | """ 2 | Renders an image for a mesh in each stable pose to demo the rendering interface. 3 | Author: Jeff Mahler 4 | """ 5 | import argparse 6 | import copy 7 | import IPython 8 | import logging 9 | import numpy as np 10 | import os 11 | import sys 12 | import time 13 | 14 | import autolab_core.utils as utils 15 | from autolab_core import NormalCloud, PointCloud, RigidTransform 16 | from perception import CameraIntrinsics, ObjectRender, RenderMode 17 | from meshpy import MaterialProperties, LightingProperties, ObjFile, VirtualCamera, ViewsphereDiscretizer, SceneObject 18 | 19 | from visualization import Visualizer2D as vis 20 | from visualization import Visualizer3D as vis3d 21 | 22 | if __name__ == '__main__': 23 | # parse args 24 | logging.getLogger().setLevel(logging.INFO) 25 | parser = argparse.ArgumentParser() 26 | parser.add_argument('mesh_filename', type=str, help='filename for .OBJ mesh file to render') 27 | args = parser.parse_args() 28 | 29 | vis_normals = False 30 | 31 | # read data 32 | mesh_filename = args.mesh_filename 33 | _, mesh_ext = os.path.splitext(mesh_filename) 34 | if mesh_ext != '.obj': 35 | raise ValueError('Must provide mesh in Wavefront .OBJ format!') 36 | orig_mesh = ObjFile(mesh_filename).read() 37 | mesh = orig_mesh.subdivide(min_tri_length=0.01) 38 | mesh.compute_vertex_normals() 39 | stable_poses = mesh.stable_poses() 40 | 41 | if vis_normals: 42 | vis3d.figure() 43 | vis3d.mesh(mesh) 44 | vis3d.normals(NormalCloud(mesh.normals.T), PointCloud(mesh.vertices.T), subsample=10) 45 | vis3d.show() 46 | 47 | d = utils.sqrt_ceil(len(stable_poses)) 48 | vis.figure(size=(16,16)) 49 | 50 | table_mesh = ObjFile('data/meshes/table.obj').read() 51 | table_mesh = table_mesh.subdivide() 52 | table_mesh.compute_vertex_normals() 53 | table_mat_props = MaterialProperties(color=(0,255,0), 54 | ambient=0.5, 55 | diffuse=1.0, 56 | specular=1, 57 | shininess=0) 58 | 59 | for k, stable_pose in enumerate(stable_poses): 60 | logging.info('Rendering stable pose %d' %(k)) 61 | 62 | # set resting pose 63 | T_obj_world = mesh.get_T_surface_obj(stable_pose.T_obj_table).as_frames('obj', 'world') 64 | 65 | # load camera intrinsics 66 | camera_intr = CameraIntrinsics.load('data/camera_intr/primesense_carmine_108.intr') 67 | #camera_intr = camera_intr.resize(4) 68 | 69 | # create virtual camera 70 | virtual_camera = VirtualCamera(camera_intr) 71 | 72 | 73 | # create lighting props 74 | T_light_camera = RigidTransform(translation=[0,0,0], 75 | from_frame='light', 76 | to_frame=camera_intr.frame) 77 | light_props = LightingProperties(ambient=-0.25, 78 | diffuse=1, 79 | specular=0.25, 80 | T_light_camera=T_light_camera, 81 | cutoff=180) 82 | 83 | # create material props 84 | mat_props = MaterialProperties(color=(249,241,21), 85 | ambient=0.5, 86 | diffuse=1.0, 87 | specular=1, 88 | shininess=0) 89 | 90 | # create scene objects 91 | scene_objs = {'table': SceneObject(table_mesh, T_obj_world.inverse(), 92 | mat_props=table_mat_props)} 93 | for name, scene_obj in scene_objs.iteritems(): 94 | virtual_camera.add_to_scene(name, scene_obj) 95 | 96 | # camera 
pose
97 |         cam_dist = 0.3
98 |         T_camera_world = RigidTransform(rotation=np.array([[0, 1, 0],
99 |                                                            [1, 0, 0],
100 |                                                            [0, 0, -1]]),
101 |                                         translation=[0,0,cam_dist],
102 |                                         from_frame=camera_intr.frame,
103 |                                         to_frame='world')
104 | 
105 |         T_obj_camera = T_camera_world.inverse() * T_obj_world
106 | 
107 |         # show mesh
108 |         if False:
109 |             vis3d.figure()
110 |             vis3d.mesh(mesh, T_obj_camera)
111 |             vis3d.pose(RigidTransform(), alpha=0.1)
112 |             vis3d.pose(T_obj_camera, alpha=0.1)
113 |             vis3d.show()
114 | 
115 |         # render depth image
116 |         render_start = time.time()
117 |         #IPython.embed()
118 |         renders = virtual_camera.wrapped_images(mesh,
119 |                                                 [T_obj_camera],
120 |                                                 RenderMode.RGBD_SCENE,
121 |                                                 mat_props=mat_props,
122 |                                                 light_props=light_props,
123 |                                                 debug=False)
124 |         render_stop = time.time()
125 |         logging.info('Render took %.3f sec' %(render_stop-render_start))
126 | 
127 |         vis.subplot(d,d,k+1)
128 |         vis.imshow(renders[0].image.color)
129 |         #vis.imshow(renders[0].image.depth)
130 |     vis.show()
131 | 
--------------------------------------------------------------------------------
/install_meshrender.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | mkdir build
3 | cd build
4 | cmake ..
5 | make
6 | cp meshpy/meshrender.so ../meshpy
7 | cd ..
8 | rm -rf build
9 | 
--------------------------------------------------------------------------------
/meshpy/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | PYTHON_ADD_MODULE(meshrender meshrender.cpp)
2 | INSTALL(TARGETS meshrender DESTINATION meshpy)
--------------------------------------------------------------------------------
/meshpy/__init__.py:
--------------------------------------------------------------------------------
1 | try:
2 |     import meshrender
3 | except:
4 |     print 'Unable to import meshrender shared library! Rendering will not work. Likely due to missing Boost.Numpy'
5 |     print 'Boost.Numpy can be installed following the instructions in https://github.com/ndarray/Boost.NumPy'
6 | from mesh import Mesh3D
7 | from image_converter import ImageToMeshConverter
8 | from obj_file import ObjFile
9 | from off_file import OffFile
10 | from render_modes import RenderMode
11 | from sdf import Sdf, Sdf3D
12 | from sdf_file import SdfFile
13 | from stable_pose import StablePose
14 | from stp_file import StablePoseFile
15 | from urdf_writer import UrdfWriter, convex_decomposition
16 | from lighting import MaterialProperties, LightingProperties
17 | 
18 | from mesh_renderer import ViewsphereDiscretizer, PlanarWorksurfaceDiscretizer, VirtualCamera, SceneObject
19 | from random_variables import CameraSample, RenderSample, UniformViewsphereRandomVariable, UniformPlanarWorksurfaceRandomVariable, UniformPlanarWorksurfaceImageRandomVariable
20 | 
21 | __all__ = ['Mesh3D',
22 |            'ViewsphereDiscretizer', 'PlanarWorksurfaceDiscretizer', 'VirtualCamera', 'SceneObject',
23 |            'ImageToMeshConverter',
24 |            'ObjFile', 'OffFile',
25 |            'RenderMode',
26 |            'Sdf', 'Sdf3D',
27 |            'SdfFile',
28 |            'StablePose',
29 |            'StablePoseFile',
30 |            'CameraSample',
31 |            'RenderSample',
32 |            'UniformViewsphereRandomVariable',
33 |            'UniformPlanarWorksurfaceRandomVariable',
34 |            'UniformPlanarWorksurfaceImageRandomVariable',
35 |            'UrdfWriter', 'convex_decomposition',
36 |            'MaterialProperties', 'LightingProperties'
37 |            ]
38 | 
--------------------------------------------------------------------------------
/meshpy/image_converter.py:
--------------------------------------------------------------------------------
1 | """
2 | Classes to convert binary images to extruded meshes
3 | Author: Jeff Mahler
4 | """
5 | import IPython
6 | import logging
7 | import numpy as np
8 | import os
9 | from PIL import Image, ImageDraw
10 | import sklearn.decomposition
11 | import sys
12 | 
13 | import matplotlib.pyplot as plt
14 | import skimage.morphology as morph
15 | from skimage.transform import resize
16 | 
17 | from autolab_core import RigidTransform
18 | from meshpy import Mesh3D
19 | from perception import BinaryImage
20 | 
21 | class ImageToMeshConverter:
22 |     """ Namespace class for converting binary images to SDFs and meshes.
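
    Examples
    --------
    A minimal usage sketch (assumes `im` is a loaded :obj:`perception.BinaryImage`;
    the image and the scale factor are hypothetical):

    >>> mesh = ImageToMeshConverter.binary_image_to_mesh(im, scale_factor=1e-3)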
""" 23 | 24 | @staticmethod 25 | def binary_image_to_mesh(binary_im, extrusion=1000, scale_factor=1.0): 26 | """ 27 | Converts a binary image to a 3D extruded polygonal mesh 28 | 29 | Parameters 30 | ---------- 31 | binary_im : :obj:`perception.BinaryImage` 32 | binary image for silhouette 33 | extrusion : float 34 | amount to extrude the polygon in meters 35 | scale_factor : float 36 | amount to rescale the final mesh (from units of pixels to meters) 37 | 38 | Returns 39 | ------- 40 | :obj:`Mesh3D` 41 | the resulting mesh 42 | 43 | Raises 44 | ------ 45 | :obj:`ValueError` 46 | if the triangulation was not successful due to topology or other factors 47 | """ 48 | # check valid input 49 | if not isinstance(binary_im, BinaryImage): 50 | raise ValueError('Must provide perception.BinaryImage as input') 51 | 52 | # get occupied indices from binary image 53 | binary_data = binary_im.data 54 | occ_coords = binary_im.nonzero_pixels() 55 | 56 | # create mesh faces and concatenate 57 | front_face_depth = extrusion / 2.0 58 | back_face_depth = -extrusion / 2.0 59 | front_verts, front_tris, front_ind_map = ImageToMeshConverter.create_mesh_face(occ_coords, front_face_depth, binary_data.shape, cw=True) 60 | back_verts, back_tris, back_ind_map = ImageToMeshConverter.create_mesh_face(occ_coords, back_face_depth, binary_data.shape, cw=False) 61 | verts, tris = ImageToMeshConverter.join_vert_tri_lists(front_verts, front_tris, back_verts, back_tris) 62 | num_verts = len(front_verts) 63 | back_ind_map = back_ind_map + num_verts 64 | 65 | # connect boundaries 66 | boundary_im = binary_im.boundary_map() 67 | ImageToMeshConverter.add_boundary_tris(boundary_im, verts, tris, front_ind_map, back_ind_map) 68 | 69 | # convert to mesh and clean 70 | m = Mesh3D(verts, tris) 71 | m.remove_unreferenced_vertices() 72 | T_im_world = RigidTransform(rotation=np.array([[0, 1, 0], 73 | [-1, 0, 0], 74 | [0, 0, 1]]), 75 | from_frame='obj', 76 | to_frame='obj') 77 | m = m.transform(T_im_world) 78 | m.rescale_dimension(scale_factor, Mesh3D.ScalingTypeRelative) 79 | return m 80 | 81 | @staticmethod 82 | def join_vert_tri_lists(verts1, tris1, verts2, tris2): 83 | """ 84 | Concatenates two lists of vertices and triangles. 85 | 86 | Parameters 87 | ---------- 88 | verts1 : :obj:`list` of 3-:obj:`list` of float 89 | first list of vertices 90 | tris1 : :obj:`list` of 3-:obj`list` of int 91 | first list of triangles 92 | verts2 : :obj:`list` of 3-:obj:`list` of float 93 | second list of vertices 94 | tris2 : :obj:`list` of 3-:obj`list` of int 95 | second list of triangles 96 | 97 | Returns 98 | ------- 99 | verts : :obj:`list` of 3-:obj:`list` of float 100 | joined list of vertices 101 | tris : :obj:`list` of 3-:obj`list` of int 102 | joined list of triangles 103 | """ 104 | num_verts1 = len(verts1) 105 | 106 | # simple append for verts 107 | verts = list(verts1) 108 | verts.extend(verts2) 109 | 110 | # offset and append triangle (vertex indices) 111 | tris = list(tris1) 112 | tris2_offset = [[num_verts1 + t[0], num_verts1 + t[1], num_verts1 + t[2]] for t in tris2] 113 | tris.extend(tris2_offset) 114 | return verts, tris 115 | 116 | @staticmethod 117 | def add_boundary_tris(boundary_im, verts, tris, front_ind_map, back_ind_map): 118 | """ 119 | Connects front and back faces along the boundary, modifying tris IN PLACE 120 | NOTE: Right now this only works for points topologically equivalent to a sphere, eg. no holes! 121 | This can be extended by parsing back over untriangulated boundary points. 
122 | 
123 |         Parameters
124 |         ----------
125 |         boundary_im : :obj:`perception.BinaryImage`
126 |             binary image of the boundary
127 |         verts : :obj:`list` of 3-:obj:`list` of float
128 |             list of vertices
129 |         tris : :obj:`list` of 3-:obj:`list` of int
130 |             list of triangles
131 |         front_ind_map : :obj:`numpy.ndarray`
132 |             maps vertex coords to the indices of their front face vertex in list
133 |         back_ind_map : :obj:`numpy.ndarray`
134 |             maps vertex coords to the indices of their back face vertex in list
135 | 
136 |         Raises
137 |         ------
138 |         :obj:`ValueError`
139 |             triangulation failed
140 |         """
141 |         # TODO: fix multiple connected comps
142 | 
143 |         # setup variables for boundary coords
144 |         upper_bound = np.iinfo(np.uint8).max
145 |         remaining_boundary = boundary_im.data.copy()
146 |         boundary_ind = np.where(remaining_boundary == upper_bound)
147 |         boundary_coords = zip(boundary_ind[0], boundary_ind[1])
148 |         if len(boundary_coords) == 0:
149 |             raise ValueError('No boundary coordinates')
150 | 
151 |         # setup initial vars
152 |         tris_arr = np.array(tris)
153 |         visited_map = np.zeros(boundary_im.shape)
154 |         another_visit_avail = True
155 | 
156 |         # make sure to start with a reffed tri
157 |         visited_marker = 128
158 |         finished = False
159 |         it = 0
160 |         i = 0
161 |         coord_visits = []
162 | 
163 |         while not finished:
164 |             finished = True
165 |             logging.info('Boundary triangulation iter %d' %(it))
166 |             reffed = False
167 |             while not reffed and i < len(boundary_coords):
168 |                 cur_coord = boundary_coords[i]
169 |                 if visited_map[cur_coord[0], cur_coord[1]] == 0:
170 |                     visited_map[cur_coord[0], cur_coord[1]] = 1
171 |                     front_ind = front_ind_map[cur_coord[0], cur_coord[1]]
172 |                     back_ind = back_ind_map[cur_coord[0], cur_coord[1]]
173 |                     ref_tris = np.where(tris_arr == front_ind)
174 |                     ref_tris = ref_tris[0]
175 |                     reffed = (ref_tris.shape[0] > 0)
176 |                     remaining_boundary[cur_coord[0], cur_coord[1]] = visited_marker
177 |                 i = i+1
178 | 
179 |             coord_visits.extend([cur_coord])
180 |             cur_dir_angle = np.pi / 2 # start straight down
181 | 
182 |             # loop around boundary and add faces connecting front and back
183 |             while another_visit_avail:
184 |                 front_ind = front_ind_map[cur_coord[0], cur_coord[1]]
185 |                 back_ind = back_ind_map[cur_coord[0], cur_coord[1]]
186 |                 ref_tris = np.where(tris_arr == front_ind)
187 |                 ref_tris = ref_tris[0]
188 |                 num_reffing_tris = ref_tris.shape[0]
189 | 
190 |                 # get all possible candidates from neighboring tris
191 |                 another_visit_avail = False
192 |                 candidate_next_coords = []
193 |                 for t in xrange(num_reffing_tris): # 't', not 'i', to avoid clobbering the outer boundary index
194 |                     reffing_tri = tris[ref_tris[t]]
195 |                     for j in xrange(3):
196 |                         v = verts[reffing_tri[j]]
197 |                         if boundary_im[v[0], v[1]] == upper_bound and visited_map[v[0], v[1]] == 0:
198 |                             candidate_next_coords.append([v[0], v[1]])
199 |                             another_visit_avail = True
200 | 
201 |                 # get the "rightmost" next point
202 |                 num_candidates = len(candidate_next_coords)
203 |                 if num_candidates > 0:
204 |                     # calculate candidate directions
205 |                     directions = []
206 |                     next_dirs = np.array(candidate_next_coords) - np.array(cur_coord)
207 |                     dir_norms = np.linalg.norm(next_dirs, axis = 1)
208 |                     next_dirs = next_dirs / np.tile(dir_norms, [2, 1]).T
209 | 
210 |                     # calculate angles relative to positive x axis
211 |                     new_angles = np.arctan(next_dirs[:,0] / next_dirs[:,1])
212 |                     negative_ind = np.where(next_dirs[:,1] < 0)
213 |                     negative_ind = negative_ind[0]
214 |                     new_angles[negative_ind] = new_angles[negative_ind] + np.pi
215 | 
216 |                     # compute difference in angles
217 |                     angle_diff = new_angles - cur_dir_angle
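                    # both angles are only defined up to 2*pi, so the raw difference can
                    # fall outside (-pi, pi]; shift any difference <= -pi up by 2*pi so the
                    # max-angle test below reliably picks the rightmost (most clockwise) turn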
218 | correction_ind = np.where(angle_diff <= -np.pi) 219 | correction_ind = correction_ind[0] 220 | angle_diff[correction_ind] = angle_diff[correction_ind] + 2 * np.pi 221 | 222 | # choose the next coordinate with the maximum angle diff (rightmost) 223 | next_ind = np.where(angle_diff == np.max(angle_diff)) 224 | next_ind = next_ind[0] 225 | 226 | cur_coord = candidate_next_coords[next_ind[0]] 227 | cur_dir_angle = new_angles[next_ind[0]] 228 | 229 | # add triangles (only add if there is a new candidate) 230 | next_front_ind = front_ind_map[cur_coord[0], cur_coord[1]] 231 | next_back_ind = back_ind_map[cur_coord[0], cur_coord[1]] 232 | tris.append([int(front_ind), int(back_ind), int(next_front_ind)]) 233 | tris.append([int(back_ind), int(next_back_ind), int(next_front_ind)]) 234 | 235 | # mark coordinate as visited 236 | visited_map[cur_coord[0], cur_coord[1]] = 1 237 | coord_visits.append(cur_coord) 238 | remaining_boundary[cur_coord[0], cur_coord[1]] = visited_marker 239 | 240 | # add edge back to first coord 241 | cur_coord = coord_visits[0] 242 | next_front_ind = front_ind_map[cur_coord[0], cur_coord[1]] 243 | next_back_ind = back_ind_map[cur_coord[0], cur_coord[1]] 244 | tris.append([int(front_ind), int(back_ind), int(next_front_ind)]) 245 | tris.append([int(back_ind), int(next_back_ind), int(next_front_ind)]) 246 | 247 | # check success 248 | finished = (np.sum(remaining_boundary == upper_bound) == 0) or (i == len(boundary_coords)) 249 | it += 1 250 | 251 | @staticmethod 252 | def create_mesh_face(occ_coords, depth, index_shape, cw=True): 253 | """ 254 | Creates a 2D mesh face of vertices and triangles from the given coordinates at a specified depth. 255 | 256 | Parameters 257 | ---------- 258 | occ_coords : :obj:`list` of 3-:obj:`tuple 259 | the coordinates of vertices 260 | depth : float 261 | the depth at which to place the face 262 | index_shape : 2-:obj:`tuple` 263 | the shape of the numpy grid on which the vertices lie 264 | cw : bool 265 | clockwise or counterclockwise orientation 266 | 267 | Returns 268 | ------- 269 | verts : :obj:`list` of 3-:obj:`list` of float 270 | list of vertices 271 | tris : :obj:`list` of 3-:obj`list` of int 272 | list of triangles 273 | """ 274 | # get mesh vertices 275 | verts = [] 276 | tris = [] 277 | ind_map = -1 * np.ones(index_shape) # map vertices to indices in vert list 278 | for coord in occ_coords: 279 | verts.append([coord[0], coord[1], depth]) 280 | ind_map[coord[0], coord[1]] = len(verts) - 1 281 | 282 | # get mesh triangles 283 | # rule: vertex adds triangles that it is the 90 degree corner of 284 | for coord in occ_coords: 285 | coord_right = [coord[0] + 1, coord[1]] 286 | coord_left = [coord[0] - 1, coord[1]] 287 | coord_below = [coord[0], coord[1] + 1] 288 | coord_above = [coord[0], coord[1] - 1] 289 | cur_ind = ind_map[coord[0], coord[1]] 290 | 291 | # add tri above left 292 | if coord_left[0] >= 0 and coord_above[1] >= 0: 293 | left_ind = ind_map[coord_left[0], coord_left[1]] 294 | above_ind = ind_map[coord_above[0], coord_above[1]] 295 | 296 | # check if valid vertices and add 297 | if left_ind > -1 and above_ind > -1: 298 | if cw: 299 | tris.append([int(cur_ind), int(left_ind), int(above_ind)]) 300 | else: 301 | tris.append([int(cur_ind), int(above_ind), int(left_ind)]) 302 | elif above_ind > -1: 303 | # try to patch area 304 | coord_left_above = [coord[0] - 1, coord[1] - 1] 305 | if coord_left_above[0] > 0 and coord_left_above[1] > 0: 306 | left_above_ind = ind_map[coord_left_above[0], coord_left_above[1]] 307 | 308 | # check 
validity 309 | if left_above_ind > -1: 310 | if cw: 311 | tris.append([int(cur_ind), int(left_above_ind), int(above_ind)]) 312 | else: 313 | tris.append([int(cur_ind), int(above_ind), int(left_above_ind)]) 314 | 315 | # add tri below right 316 | if coord_right[0] < index_shape[1] and coord_below[1] < index_shape[0]: 317 | right_ind = ind_map[coord_right[0], coord_right[1]] 318 | below_ind = ind_map[coord_below[0], coord_below[1]] 319 | 320 | # check if valid vertices and add 321 | if right_ind > -1 and below_ind > -1: 322 | if cw: 323 | tris.append([int(cur_ind), int(right_ind), int(below_ind)]) 324 | else: 325 | tris.append([int(cur_ind), int(below_ind), int(right_ind)]) 326 | elif below_ind > -1: 327 | # try to patch area 328 | coord_right_below = [coord[0] + 1, coord[1] + 1] 329 | if coord_right_below[0] < index_shape[0] and coord_right_below[1] < index_shape[1]: 330 | right_below_ind = ind_map[coord_right_below[0], coord_right_below[1]] 331 | 332 | # check validity 333 | if right_below_ind > -1: 334 | if cw: 335 | tris.append([int(cur_ind), int(right_below_ind), int(below_ind)]) 336 | else: 337 | tris.append([int(cur_ind), int(below_ind), int(right_below_ind)]) 338 | 339 | return verts, tris, ind_map 340 | 341 | -------------------------------------------------------------------------------- /meshpy/lighting.py: -------------------------------------------------------------------------------- 1 | """ 2 | Classes for lighting in renderer 3 | Author: Jeff Mahler 4 | """ 5 | import numpy as np 6 | 7 | from autolab_core import RigidTransform 8 | 9 | class Color(object): 10 | WHITE = np.array([255, 255, 255]) 11 | BLACK = np.array([0, 0, 0]) 12 | RED = np.array([255, 0, 0]) 13 | GREEN = np.array([0, 255, 0]) 14 | BLUE = np.array([0, 0, 255]) 15 | 16 | class MaterialProperties(object): 17 | """ Struct to encapsulate material properties for 18 | OpenGL rendering. 19 | 20 | Attributes 21 | ---------- 22 | color : :obj:`numpy.ndarray` 23 | 3-array of integers between 0 and 255 24 | """ 25 | def __init__(self, color=Color.WHITE, 26 | ambient=0.2, 27 | diffuse=0.8, 28 | specular=0, 29 | shininess=0): 30 | # set params 31 | self.color = np.array(color).astype(np.uint8) 32 | self.ambient = ambient 33 | self.diffuse = diffuse 34 | self.specular = specular 35 | self.shininess = shininess 36 | 37 | def __str__(self): 38 | s = '' 39 | s += 'Color: %s\n' %(str(self.color)) 40 | s += 'Ambient: %f\n' %(self.ambient) 41 | s += 'Diffuse: %f\n' %(self.diffuse) 42 | s += 'Specular: %f\n' %(self.specular) 43 | s += 'Shininess: %f\n' %(self.shininess) 44 | return s 45 | 46 | @property 47 | def arr(self): 48 | """ Returns the material properties as a contiguous numpy array. """ 49 | return np.r_[self.color, 50 | self.ambient * np.ones(3), 1, 51 | self.diffuse * np.ones(3), 1, 52 | self.specular * np.ones(3), 1, 53 | self.shininess].astype(np.float64) 54 | 55 | class LightingProperties(object): 56 | """ Struct to encapsulate lighting properties for 57 | OpenGL rendering. 
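    The layout of the ``arr`` property mirrors the offsets read by the C++
    renderer in meshrender.cpp (ambient RGBA, diffuse RGBA, specular RGBA,
    position, spot direction, cutoff). A small sketch of the expected shape:

    >>> props = LightingProperties()
    >>> props.set_pose(RigidTransform(from_frame='obj', to_frame='camera'))
    >>> props.arr.shape
    (19,)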
58 | """ 59 | def __init__(self, ambient=0, 60 | diffuse=1, 61 | specular=1, 62 | T_light_camera=RigidTransform(rotation=np.eye(3), 63 | translation=np.zeros(3), 64 | from_frame='light', 65 | to_frame='camera'), 66 | cutoff=180.0): 67 | self.ambient = ambient 68 | self.diffuse = diffuse 69 | self.specular = specular 70 | self.T_light_camera = T_light_camera 71 | self.cutoff = cutoff 72 | self.T_light_obj = None 73 | 74 | def __str__(self): 75 | s = '' 76 | s += 'Ambient: %f\n' %(self.ambient) 77 | s += 'Diffuse: %f\n' %(self.diffuse) 78 | s += 'Specular: %f\n' %(self.specular) 79 | s += 'T_light_camera: %s\n' %(str(self.T_light_camera)) 80 | s += 'Cutoff: %f\n' %(self.cutoff) 81 | return s 82 | 83 | def set_pose(self, T_obj_camera): 84 | self.T_light_obj = T_obj_camera.inverse() * self.T_light_camera.as_frames('light', T_obj_camera.to_frame) 85 | 86 | @property 87 | def arr(self): 88 | """ Returns the lighting properties as a contiguous numpy array. """ 89 | if self.T_light_obj is None: 90 | raise ValueError('Need to set pose relative to object!') 91 | return np.r_[self.ambient * np.ones(3), 1, 92 | self.diffuse * np.ones(3), 1, 93 | self.specular * np.ones(3), 1, 94 | self.T_light_obj.translation, 95 | self.T_light_obj.z_axis, 96 | self.cutoff].astype(np.float64) 97 | 98 | -------------------------------------------------------------------------------- /meshpy/mesh_renderer.py: -------------------------------------------------------------------------------- 1 | """ 2 | Class to render a set of images for a graspable objects 3 | Author: Jeff Mahler 4 | """ 5 | import copy 6 | import IPython 7 | import logging 8 | import numpy as np 9 | import os 10 | import sys 11 | import time 12 | 13 | try: 14 | import meshrender 15 | except: 16 | pass 17 | 18 | from autolab_core import Point, RigidTransform 19 | from autolab_core.utils import sph2cart, cart2sph 20 | from perception import CameraIntrinsics, BinaryImage, ColorImage, DepthImage, RgbdImage, ObjectRender 21 | from meshpy import MaterialProperties, LightingProperties, RenderMode 22 | 23 | class ViewsphereDiscretizer(object): 24 | """Set of parameters for automatically rendering a set of images from virtual 25 | cameras placed around a viewing sphere. 26 | 27 | The view sphere indicates camera poses relative to the object. 28 | 29 | Attributes 30 | ---------- 31 | min_radius : float 32 | Minimum radius for viewing sphere. 33 | max_radius : float 34 | Maximum radius for viewing sphere. 35 | num_radii : int 36 | Number of radii between min_radius and max_radius. 37 | min_elev : float 38 | Minimum elevation (angle from z-axis) for camera position. 39 | max_elev : float 40 | Maximum elevation for camera position. 41 | num_elev : int 42 | Number of discrete elevations. 43 | min_az : float 44 | Minimum azimuth (angle from x-axis) for camera position. 45 | max_az : float 46 | Maximum azimuth for camera position. 47 | num_az : int 48 | Number of discrete azimuth locations. 49 | min_roll : float 50 | Minimum roll (rotation of camera about axis generated by azimuth and 51 | elevation) for camera. 52 | max_roll : float 53 | Maximum roll for camera. 54 | num_roll : int 55 | Number of discrete rolls. 56 | """ 57 | 58 | def __init__(self, min_radius, max_radius, num_radii, 59 | min_elev, max_elev, num_elev, 60 | min_az=0, max_az=2*np.pi, num_az=1, 61 | min_roll=0, max_roll=2*np.pi, num_roll=1): 62 | """Initialize a ViewsphereDiscretizer. 63 | 64 | Parameters 65 | ---------- 66 | min_radius : float 67 | Minimum radius for viewing sphere. 
68 | max_radius : float 69 | Maximum radius for viewing sphere. 70 | num_radii : int 71 | Number of radii between min_radius and max_radius. 72 | min_elev : float 73 | Minimum elevation (angle from z-axis) for camera position. 74 | max_elev : float 75 | Maximum elevation for camera position. 76 | num_elev : int 77 | Number of discrete elevations. 78 | min_az : float 79 | Minimum azimuth (angle from x-axis) for camera position. 80 | max_az : float 81 | Maximum azimuth for camera position. 82 | num_az : int 83 | Number of discrete azimuth locations. 84 | min_roll : float 85 | Minimum roll (rotation of camera about axis generated by azimuth and 86 | elevation) for camera. 87 | max_roll : float 88 | Maximum roll for camera. 89 | num_roll : int 90 | Number of discrete rolls. 91 | """ 92 | if num_radii < 1 or num_az < 1 or num_elev < 1: 93 | raise ValueError('Discretization must be at least one in each dimension') 94 | self.min_radius = min_radius 95 | self.max_radius = max_radius 96 | self.num_radii = num_radii 97 | self.min_az = min_az 98 | self.max_az = max_az 99 | self.num_az = num_az 100 | self.min_elev = min_elev 101 | self.max_elev = max_elev 102 | self.num_elev = num_elev 103 | self.min_roll = min_roll 104 | self.max_roll = max_roll 105 | self.num_roll = num_roll 106 | 107 | def object_to_camera_poses(self): 108 | """Turn the params into a set of object to camera transformations. 109 | 110 | Returns 111 | ------- 112 | :obj:`list` of :obj:`RigidTransform` 113 | A list of rigid transformations that transform from object space 114 | to camera space. 115 | """ 116 | # compute increments in radial coordinates 117 | if self.max_radius == self.min_radius: 118 | radius_inc = 1 119 | elif self.num_radii == 1: 120 | radius_inc = self.max_radius - self.min_radius + 1 121 | else: 122 | radius_inc = (self.max_radius - self.min_radius) / (self.num_radii - 1) 123 | az_inc = (self.max_az - self.min_az) / self.num_az 124 | if self.max_elev == self.min_elev: 125 | elev_inc = 1 126 | elif self.num_elev == 1: 127 | elev_inc = self.max_elev - self.min_elev + 1 128 | else: 129 | elev_inc = (self.max_elev - self.min_elev) / (self.num_elev - 1) 130 | roll_inc = (self.max_roll - self.min_roll) / self.num_roll 131 | 132 | # create a pose for each set of spherical coords 133 | object_to_camera_poses = [] 134 | radius = self.min_radius 135 | while radius <= self.max_radius: 136 | elev = self.min_elev 137 | while elev <= self.max_elev: 138 | az = self.min_az 139 | while az < self.max_az: #not inclusive due to topology (simplifies things) 140 | roll = self.min_roll 141 | while roll < self.max_roll: 142 | 143 | # generate camera center from spherical coords 144 | camera_center_obj = np.array([sph2cart(radius, az, elev)]).squeeze() 145 | camera_z_obj = -camera_center_obj / np.linalg.norm(camera_center_obj) 146 | 147 | # find the canonical camera x and y axes 148 | camera_x_par_obj = np.array([camera_z_obj[1], -camera_z_obj[0], 0]) 149 | if np.linalg.norm(camera_x_par_obj) == 0: 150 | camera_x_par_obj = np.array([1, 0, 0]) 151 | camera_x_par_obj = camera_x_par_obj / np.linalg.norm(camera_x_par_obj) 152 | camera_y_par_obj = np.cross(camera_z_obj, camera_x_par_obj) 153 | camera_y_par_obj = camera_y_par_obj / np.linalg.norm(camera_y_par_obj) 154 | if camera_y_par_obj[2] > 0: 155 | camera_x_par_obj = -camera_x_par_obj 156 | camera_y_par_obj = np.cross(camera_z_obj, camera_x_par_obj) 157 | camera_y_par_obj = camera_y_par_obj / np.linalg.norm(camera_y_par_obj) 158 | 159 | # rotate by the roll 160 | R_obj_camera_par = 
np.c_[camera_x_par_obj, camera_y_par_obj, camera_z_obj] 161 | R_camera_par_camera = np.array([[np.cos(roll), -np.sin(roll), 0], 162 | [np.sin(roll), np.cos(roll), 0], 163 | [0, 0, 1]]) 164 | R_obj_camera = R_obj_camera_par.dot(R_camera_par_camera) 165 | t_obj_camera = camera_center_obj 166 | 167 | # create final transform 168 | T_obj_camera = RigidTransform(R_obj_camera, t_obj_camera, 169 | from_frame='camera', to_frame='obj') 170 | object_to_camera_poses.append(T_obj_camera.inverse()) 171 | roll += roll_inc 172 | az += az_inc 173 | elev += elev_inc 174 | radius += radius_inc 175 | return object_to_camera_poses 176 | 177 | class PlanarWorksurfaceDiscretizer(object): 178 | """ 179 | Set of parameters for automatically rendering a set of images from virtual 180 | cameras placed around a viewsphere and translated along a planar worksurface. 181 | 182 | The view sphere indicates camera poses relative to the object. 183 | """ 184 | def __init__(self, min_radius, max_radius, num_radii, 185 | min_elev, max_elev, num_elev, 186 | min_az=0, max_az=2*np.pi, num_az=1, 187 | min_roll=0, max_roll=2*np.pi, num_roll=1, 188 | min_x=0, max_x=1, num_x=1, 189 | min_y=0, max_y=1, num_y=1): 190 | """Initialize a ViewsphereDiscretizer. 191 | 192 | Parameters 193 | ---------- 194 | min_radius : float 195 | Minimum radius for viewing sphere. 196 | max_radius : float 197 | Maximum radius for viewing sphere. 198 | num_radii : int 199 | Number of radii between min_radius and max_radius. 200 | min_elev : float 201 | Minimum elevation (angle from z-axis) for camera position. 202 | max_elev : float 203 | Maximum elevation for camera position. 204 | num_elev : int 205 | Number of discrete elevations. 206 | min_az : float 207 | Minimum azimuth (angle from x-axis) for camera position. 208 | max_az : float 209 | Maximum azimuth for camera position. 210 | num_az : int 211 | Number of discrete azimuth locations. 212 | min_roll : float 213 | Minimum roll (rotation of camera about axis generated by azimuth and 214 | elevation) for camera. 215 | max_roll : float 216 | Maximum roll for camera. 217 | num_roll : int 218 | Number of discrete rolls. 219 | min_x : float 220 | Minimum x value. 221 | max_x : float 222 | Maximum x value. 223 | num_x : int 224 | Number of points along x axis. 225 | min_y : float 226 | Minimum y value. 227 | may_y : float 228 | Mayimum y value. 229 | num_y : int 230 | Number of points along y axis. 231 | """ 232 | if num_radii < 1 or num_az < 1 or num_elev < 1: 233 | raise ValueError('Discretization must be at least one in each dimension') 234 | self.min_radius = min_radius 235 | self.max_radius = max_radius 236 | self.num_radii = num_radii 237 | self.min_az = min_az 238 | self.max_az = max_az 239 | self.num_az = num_az 240 | self.min_elev = min_elev 241 | self.max_elev = max_elev 242 | self.num_elev = num_elev 243 | self.min_roll = min_roll 244 | self.max_roll = max_roll 245 | self.num_roll = num_roll 246 | self.min_x = min_x 247 | self.max_x = max_x 248 | self.num_x = num_x 249 | self.min_y = min_y 250 | self.max_y = max_y 251 | self.num_y = num_y 252 | 253 | def object_to_camera_poses(self, camera_intr): 254 | """Turn the params into a set of object to camera transformations. 255 | 256 | Returns 257 | ------- 258 | :obj:`list` of :obj:`RigidTransform` 259 | A list of rigid transformations that transform from object space 260 | to camera space. 
261 | :obj:`list` of :obj:`RigidTransform` 262 | A list of rigid transformations that transform from object space 263 | to camera space without the translation in the plane 264 | :obj:`list` of :obj:`CameraIntrinsics` 265 | A list of camera intrinsics that project the translated object 266 | into the center pixel of the camera, simulating cropping 267 | """ 268 | # compute increments in radial coordinates 269 | if self.max_radius == self.min_radius: 270 | radius_inc = 1 271 | elif self.num_radii == 1: 272 | radius_inc = self.max_radius - self.min_radius + 1 273 | else: 274 | radius_inc = (self.max_radius - self.min_radius) / (self.num_radii - 1) 275 | az_inc = (self.max_az - self.min_az) / self.num_az 276 | if self.max_elev == self.min_elev: 277 | elev_inc = 1 278 | elif self.num_elev == 1: 279 | elev_inc = self.max_elev - self.min_elev + 1 280 | else: 281 | elev_inc = (self.max_elev - self.min_elev) / (self.num_elev - 1) 282 | roll_inc = (self.max_roll - self.min_roll) / self.num_roll 283 | 284 | if self.max_x == self.min_x: 285 | x_inc = 1 286 | elif self.num_x == 1: 287 | x_inc = self.max_x - self.min_x + 1 288 | else: 289 | x_inc = (self.max_x - self.min_x) / (self.num_x - 1) 290 | 291 | 292 | if self.max_y == self.min_y: 293 | y_inc = 1 294 | elif self.num_y == 1: 295 | y_inc = self.max_y - self.min_y + 1 296 | else: 297 | y_inc = (self.max_y - self.min_y) / (self.num_y - 1) 298 | 299 | # create a pose for each set of spherical coords 300 | object_to_camera_poses = [] 301 | object_to_camera_normalized_poses = [] 302 | camera_shifted_intrinsics = [] 303 | radius = self.min_radius 304 | while radius <= self.max_radius: 305 | elev = self.min_elev 306 | while elev <= self.max_elev: 307 | az = self.min_az 308 | while az < self.max_az: #not inclusive due to topology (simplifies things) 309 | roll = self.min_roll 310 | while roll < self.max_roll: 311 | x = self.min_x 312 | while x <= self.max_x: 313 | y = self.min_y 314 | while y <= self.max_y: 315 | num_poses = len(object_to_camera_poses) 316 | 317 | # generate camera center from spherical coords 318 | delta_t = np.array([x, y, 0]) 319 | camera_center_obj = np.array([sph2cart(radius, az, elev)]).squeeze() + delta_t 320 | camera_z_obj = -np.array([sph2cart(radius, az, elev)]).squeeze() 321 | camera_z_obj = camera_z_obj / np.linalg.norm(camera_z_obj) 322 | 323 | # find the canonical camera x and y axes 324 | camera_x_par_obj = np.array([camera_z_obj[1], -camera_z_obj[0], 0]) 325 | if np.linalg.norm(camera_x_par_obj) == 0: 326 | camera_x_par_obj = np.array([1, 0, 0]) 327 | camera_x_par_obj = camera_x_par_obj / np.linalg.norm(camera_x_par_obj) 328 | camera_y_par_obj = np.cross(camera_z_obj, camera_x_par_obj) 329 | camera_y_par_obj = camera_y_par_obj / np.linalg.norm(camera_y_par_obj) 330 | if camera_y_par_obj[2] > 0: 331 | print 'Flipping', num_poses 332 | camera_x_par_obj = -camera_x_par_obj 333 | camera_y_par_obj = np.cross(camera_z_obj, camera_x_par_obj) 334 | camera_y_par_obj = camera_y_par_obj / np.linalg.norm(camera_y_par_obj) 335 | 336 | # rotate by the roll 337 | R_obj_camera_par = np.c_[camera_x_par_obj, camera_y_par_obj, camera_z_obj] 338 | R_camera_par_camera = np.array([[np.cos(roll), -np.sin(roll), 0], 339 | [np.sin(roll), np.cos(roll), 0], 340 | [0, 0, 1]]) 341 | R_obj_camera = R_obj_camera_par.dot(R_camera_par_camera) 342 | t_obj_camera = camera_center_obj 343 | 344 | # create final transform 345 | T_obj_camera = RigidTransform(R_obj_camera, t_obj_camera, 346 | from_frame='camera', 347 | to_frame='obj') 348 | 349 | 
object_to_camera_poses.append(T_obj_camera.inverse()) 350 | 351 | # compute pose without the center offset because we can easily add in the object offset later 352 | t_obj_camera_normalized = camera_center_obj - delta_t 353 | T_obj_camera_normalized = RigidTransform(R_obj_camera, t_obj_camera_normalized, 354 | from_frame='camera', 355 | to_frame='obj') 356 | object_to_camera_normalized_poses.append(T_obj_camera_normalized.inverse()) 357 | 358 | # compute new camera center by projecting object 0,0,0 into the camera 359 | center_obj_obj = Point(np.zeros(3), frame='obj') 360 | center_obj_camera = T_obj_camera.inverse() * center_obj_obj 361 | u_center_obj = camera_intr.project(center_obj_camera) 362 | camera_shifted_intr = copy.deepcopy(camera_intr) 363 | camera_shifted_intr.cx = 2 * camera_intr.cx - float(u_center_obj.x) 364 | camera_shifted_intr.cy = 2 * camera_intr.cy - float(u_center_obj.y) 365 | camera_shifted_intrinsics.append(camera_shifted_intr) 366 | 367 | y += y_inc 368 | x += x_inc 369 | roll += roll_inc 370 | az += az_inc 371 | elev += elev_inc 372 | radius += radius_inc 373 | return object_to_camera_poses, object_to_camera_normalized_poses, camera_shifted_intrinsics 374 | 375 | class SceneObject(object): 376 | """ Struct to encapsulate objects added to a scene """ 377 | def __init__(self, mesh, T_mesh_world, 378 | mat_props=MaterialProperties()): 379 | self.mesh = mesh 380 | self.T_mesh_world = T_mesh_world 381 | self.mat_props = mat_props 382 | 383 | class VirtualCamera(object): 384 | """A virtualized camera for rendering virtual color and depth images of meshes. 385 | 386 | Rendering is performed by using OSMesa offscreen rendering and boost_numpy. 387 | """ 388 | def __init__(self, camera_intr): 389 | """Initialize a virtual camera. 390 | 391 | Parameters 392 | ---------- 393 | camera_intr : :obj:`CameraIntrinsics` 394 | The CameraIntrinsics object used to parametrize the virtual camera. 395 | 396 | Raises 397 | ------ 398 | ValueError 399 | When camera_intr is not a CameraIntrinsics object. 400 | """ 401 | if not isinstance(camera_intr, CameraIntrinsics): 402 | raise ValueError('Must provide camera intrinsics as a CameraIntrinsics object') 403 | self._camera_intr = camera_intr 404 | self._scene = {} 405 | 406 | def add_to_scene(self, name, scene_object): 407 | """ Add an object to the scene. 408 | 409 | Parameters 410 | --------- 411 | name : :obj:`str` 412 | name of object in the scene 413 | scene_object : :obj:`SceneObject` 414 | object to add to the scene 415 | """ 416 | self._scene[name] = scene_object 417 | 418 | def remove_from_scene(self, name): 419 | """ Remove an object to a from the scene. 420 | 421 | Parameters 422 | --------- 423 | name : :obj:`str` 424 | name of object to remove 425 | """ 426 | self._scene[name] = None 427 | 428 | def images(self, mesh, object_to_camera_poses, 429 | mat_props=None, light_props=None, enable_lighting=True, debug=False): 430 | """Render images of the given mesh at the list of object to camera poses. 431 | 432 | Parameters 433 | ---------- 434 | mesh : :obj:`Mesh3D` 435 | The mesh to be rendered. 436 | object_to_camera_poses : :obj:`list` of :obj:`RigidTransform` 437 | A list of object to camera transforms to render from. 438 | mat_props : :obj:`MaterialProperties` 439 | Material properties for the mesh 440 | light_props : :obj:`MaterialProperties` 441 | Lighting properties for the scene 442 | enable_lighting : bool 443 | Whether or not to enable lighting 444 | debug : bool 445 | Whether or not to debug the C++ meshrendering code. 
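        Notes
        -----
        For each pose, the renderer is handed the 3x4 projection matrix
        P = K [R | t], where K is the 3x3 camera intrinsics matrix and (R, t)
        is the object-to-camera rotation and translation (see the render loop
        below).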
446 | 447 | Returns 448 | ------- 449 | :obj:`tuple` of `numpy.ndarray` 450 | A 2-tuple of ndarrays. The first, which represents the color image, 451 | contains ints (0 to 255) and is of shape (height, width, 3). 452 | Each pixel is a 3-ndarray (red, green, blue) associated with a given 453 | y and x value. The second, which represents the depth image, 454 | contains floats and is of shape (height, width). Each pixel is a 455 | single float that represents the depth of the image. 456 | """ 457 | # get mesh spec as numpy arrays 458 | vertex_arr = mesh.vertices 459 | tri_arr = mesh.triangles.astype(np.int32) 460 | if mesh.normals is None: 461 | mesh.compute_vertex_normals() 462 | norms_arr = mesh.normals 463 | 464 | # set default material properties 465 | if mat_props is None: 466 | mat_props = MaterialProperties() 467 | mat_props_arr = mat_props.arr 468 | 469 | # set default light properties 470 | if light_props is None: 471 | light_props = LightingProperties() 472 | 473 | # render for each object to camera pose 474 | # TODO: clean up interface, use modelview matrix!!!! 475 | color_ims = [] 476 | depth_ims = [] 477 | render_start = time.time() 478 | for T_obj_camera in object_to_camera_poses: 479 | # form projection matrix 480 | R = T_obj_camera.rotation 481 | t = T_obj_camera.translation 482 | P = self._camera_intr.proj_matrix.dot(np.c_[R, t]) 483 | 484 | # form light props 485 | light_props.set_pose(T_obj_camera) 486 | light_props_arr = light_props.arr 487 | 488 | # render images for each 489 | c, d = meshrender.render_mesh([P], 490 | self._camera_intr.height, 491 | self._camera_intr.width, 492 | vertex_arr, 493 | tri_arr, 494 | norms_arr, 495 | mat_props_arr, 496 | light_props_arr, 497 | enable_lighting, 498 | debug) 499 | color_ims.extend(c) 500 | depth_ims.extend(d) 501 | render_stop = time.time() 502 | logging.debug('Rendering took %.3f sec' %(render_stop - render_start)) 503 | 504 | return color_ims, depth_ims 505 | 506 | def images_viewsphere(self, mesh, vs_disc, mat_props=None, light_props=None): 507 | """Render images of the given mesh around a view sphere. 508 | 509 | Parameters 510 | ---------- 511 | mesh : :obj:`Mesh3D` 512 | The mesh to be rendered. 513 | vs_disc : :obj:`ViewsphereDiscretizer` 514 | A discrete viewsphere from which we draw object to camera 515 | transforms. 516 | mat_props : :obj:`MaterialProperties` 517 | Material properties for the mesh 518 | light_props : :obj:`MaterialProperties` 519 | Lighting properties for the scene 520 | 521 | Returns 522 | ------- 523 | :obj:`tuple` of `numpy.ndarray` 524 | A 2-tuple of ndarrays. The first, which represents the color image, 525 | contains ints (0 to 255) and is of shape (height, width, 3). 526 | Each pixel is a 3-ndarray (red, green, blue) associated with a given 527 | y and x value. The second, which represents the depth image, 528 | contains floats and is of shape (height, width). Each pixel is a 529 | single float that represents the depth of the image. 530 | """ 531 | return self.images(mesh, vs_disc.object_to_camera_poses(), 532 | mat_props=mat_props, light_props=light_props) 533 | 534 | def wrapped_images(self, mesh, object_to_camera_poses, 535 | render_mode, stable_pose=None, mat_props=None, 536 | light_props=None,debug=False): 537 | """Create ObjectRender objects of the given mesh at the list of object to camera poses. 538 | 539 | Parameters 540 | ---------- 541 | mesh : :obj:`Mesh3D` 542 | The mesh to be rendered. 
543 | object_to_camera_poses : :obj:`list` of :obj:`RigidTransform` 544 | A list of object to camera transforms to render from. 545 | render_mode : int 546 | One of RenderMode.COLOR, RenderMode.DEPTH, or 547 | RenderMode.SCALED_DEPTH. 548 | stable_pose : :obj:`StablePose` 549 | A stable pose to render the object in. 550 | mat_props : :obj:`MaterialProperties` 551 | Material properties for the mesh 552 | light_props : :obj:`MaterialProperties` 553 | Lighting properties for the scene 554 | debug : bool 555 | Whether or not to debug the C++ meshrendering code. 556 | 557 | Returns 558 | ------- 559 | :obj:`list` of :obj:`ObjectRender` 560 | A list of ObjectRender objects generated from the given parameters. 561 | """ 562 | # pre-multiply the stable pose 563 | world_to_camera_poses = [T_obj_camera.as_frames('obj', 'camera') for T_obj_camera in object_to_camera_poses] 564 | if stable_pose is not None: 565 | t_obj_stp = np.array([0,0,-stable_pose.r.dot(stable_pose.x0)[2]]) 566 | T_obj_stp = RigidTransform(rotation=stable_pose.r, 567 | translation=t_obj_stp, 568 | from_frame='obj', 569 | to_frame='stp') 570 | stp_to_camera_poses = copy.copy(object_to_camera_poses) 571 | object_to_camera_poses = [] 572 | for T_stp_camera in stp_to_camera_poses: 573 | T_stp_camera.from_frame = 'stp' 574 | object_to_camera_poses.append(T_stp_camera.dot(T_obj_stp)) 575 | 576 | # set lighting mode 577 | enable_lighting = True 578 | if render_mode == RenderMode.SEGMASK or render_mode == RenderMode.DEPTH or \ 579 | render_mode == RenderMode.DEPTH_SCENE: 580 | enable_lighting = False 581 | 582 | # render both image types (doesn't really cost any time) 583 | color_ims, depth_ims = self.images(mesh, object_to_camera_poses, 584 | mat_props=mat_props, 585 | light_props=light_props, 586 | enable_lighting=enable_lighting, 587 | debug=debug) 588 | 589 | # convert to image wrapper classes 590 | images = [] 591 | if render_mode == RenderMode.SEGMASK: 592 | # wrap binary images 593 | for binary_im in color_ims: 594 | images.append(BinaryImage(binary_im[:,:,0], frame=self._camera_intr.frame, threshold=0)) 595 | 596 | elif render_mode == RenderMode.COLOR: 597 | # wrap color images 598 | for color_im in color_ims: 599 | images.append(ColorImage(color_im, frame=self._camera_intr.frame)) 600 | 601 | elif render_mode == RenderMode.COLOR_SCENE: 602 | # wrap color and depth images 603 | for color_im in color_ims: 604 | images.append(ColorImage(color_im, frame=self._camera_intr.frame)) 605 | 606 | # render images of scene objects 607 | color_scene_ims = {} 608 | for name, scene_obj in self._scene.iteritems(): 609 | scene_object_to_camera_poses = [] 610 | for world_to_camera_pose in world_to_camera_poses: 611 | scene_object_to_camera_poses.append(world_to_camera_pose * scene_obj.T_mesh_world) 612 | 613 | color_scene_ims[name] = self.wrapped_images(scene_obj.mesh, scene_object_to_camera_poses, RenderMode.COLOR, mat_props=scene_obj.mat_props, light_props=light_props, debug=debug) 614 | 615 | # combine with scene images 616 | # TODO: re-add farther 617 | for i in range(len(images)): 618 | for j, name in enumerate(color_scene_ims.keys()): 619 | zero_px = images[i].zero_pixels() 620 | images[i].data[zero_px[:,0], zero_px[:,1], :] = color_scene_ims[name][i].image.data[zero_px[:,0], zero_px[:,1], :] 621 | 622 | elif render_mode == RenderMode.DEPTH: 623 | # render depth image 624 | for depth_im in depth_ims: 625 | images.append(DepthImage(depth_im, frame=self._camera_intr.frame)) 626 | 627 | elif render_mode == RenderMode.DEPTH_SCENE: 628 | # create 
empty depth images 629 | for depth_im in depth_ims: 630 | images.append(DepthImage(depth_im, frame=self._camera_intr.frame)) 631 | 632 | # render images of scene objects 633 | depth_scene_ims = {} 634 | for name, scene_obj in self._scene.iteritems(): 635 | scene_object_to_camera_poses = [] 636 | for world_to_camera_pose in world_to_camera_poses: 637 | scene_object_to_camera_poses.append(world_to_camera_pose * scene_obj.T_mesh_world) 638 | depth_scene_ims[name] = self.wrapped_images(scene_obj.mesh, scene_object_to_camera_poses, RenderMode.DEPTH, mat_props=scene_obj.mat_props, light_props=light_props) 639 | 640 | # combine with scene images 641 | for i in range(len(images)): 642 | for j, name in enumerate(depth_scene_ims.keys()): 643 | images[i] = images[i].combine_with(depth_scene_ims[name][i].image) 644 | 645 | elif render_mode == RenderMode.RGBD: 646 | # create RGB-D images 647 | for color_im, depth_im in zip(color_ims, depth_ims): 648 | c = ColorImage(color_im, frame=self._camera_intr.frame) 649 | d = DepthImage(depth_im, frame=self._camera_intr.frame) 650 | images.append(RgbdImage.from_color_and_depth(c, d)) 651 | 652 | elif render_mode == RenderMode.RGBD_SCENE: 653 | # create RGB-D images 654 | for color_im, depth_im in zip(color_ims, depth_ims): 655 | c = ColorImage(color_im, frame=self._camera_intr.frame) 656 | d = DepthImage(depth_im, frame=self._camera_intr.frame) 657 | images.append(RgbdImage.from_color_and_depth(c, d)) 658 | 659 | # render images of scene objects 660 | rgbd_scene_ims = {} 661 | for name, scene_obj in self._scene.iteritems(): 662 | scene_object_to_camera_poses = [] 663 | for world_to_camera_pose in world_to_camera_poses: 664 | scene_object_to_camera_poses.append(world_to_camera_pose * scene_obj.T_mesh_world) 665 | 666 | rgbd_scene_ims[name] = self.wrapped_images(scene_obj.mesh, scene_object_to_camera_poses, RenderMode.RGBD, mat_props=scene_obj.mat_props, light_props=light_props, debug=debug) 667 | 668 | # combine with scene images 669 | for i in range(len(images)): 670 | for j, name in enumerate(rgbd_scene_ims.keys()): 671 | images[i] = images[i].combine_with(rgbd_scene_ims[name][i].image) 672 | 673 | elif render_mode == RenderMode.SCALED_DEPTH: 674 | # convert to color image 675 | for depth_im in depth_ims: 676 | d = DepthImage(depth_im, frame=self._camera_intr.frame) 677 | images.append(d.to_color()) 678 | else: 679 | raise ValueError('Render mode %s not supported' %(render_mode)) 680 | 681 | # create object renders 682 | if stable_pose is not None: 683 | object_to_camera_poses = copy.copy(stp_to_camera_poses) 684 | rendered_images = [] 685 | for image, T_obj_camera in zip(images, object_to_camera_poses): 686 | T_camera_obj = T_obj_camera.inverse() 687 | rendered_images.append(ObjectRender(image, T_camera_obj)) 688 | 689 | return rendered_images 690 | 691 | def wrapped_images_viewsphere(self, mesh, vs_disc, render_mode, stable_pose=None, mat_props=None, light_props=None): 692 | """Create ObjectRender objects of the given mesh around a viewsphere. 693 | 694 | Parameters 695 | ---------- 696 | mesh : :obj:`Mesh3D` 697 | The mesh to be rendered. 698 | vs_disc : :obj:`ViewsphereDiscretizer` 699 | A discrete viewsphere from which we draw object to camera 700 | transforms. 701 | render_mode : int 702 | One of RenderMode.COLOR, RenderMode.DEPTH, or 703 | RenderMode.SCALED_DEPTH. 704 | stable_pose : :obj:`StablePose` 705 | A stable pose to render the object in. 
706 | mat_props : :obj:`MaterialProperties` 707 | Material properties for the mesh 708 | light_props : :obj:`LightingProperties` 709 | Lighting properties for the scene 710 | 711 | Returns 712 | ------- 713 | :obj:`list` of :obj:`ObjectRender` 714 | A list of ObjectRender objects generated from the given parameters. 715 | """ 716 | return self.wrapped_images(mesh, vs_disc.object_to_camera_poses(), render_mode, stable_pose=stable_pose, mat_props=mat_props, light_props=light_props) 717 | 718 | def wrapped_images_planar_worksurface(self, mesh, ws_disc, render_mode, stable_pose=None, mat_props=None, light_props=None): 719 | """ Create ObjectRender objects of the given mesh around a viewsphere and 720 | a planar worksurface, where translated objects project into the center of 721 | the camera. 722 | 723 | Parameters 724 | ---------- 725 | mesh : :obj:`Mesh3D` 726 | The mesh to be rendered. 727 | ws_disc : :obj:`PlanarWorksurfaceDiscretizer` 728 | A discrete viewsphere and translations in the plane from which we draw 729 | object to camera transforms. 730 | render_mode : int 731 | One of the RenderMode constants, e.g. RenderMode.COLOR, 732 | RenderMode.DEPTH, or RenderMode.SCALED_DEPTH. 733 | stable_pose : :obj:`StablePose` 734 | A stable pose to render the object in. 735 | mat_props : :obj:`MaterialProperties` 736 | Material properties for the mesh 737 | light_props : :obj:`LightingProperties` 738 | Lighting properties for the scene 739 | 740 | Returns 741 | ------- 742 | :obj:`list` of :obj:`ObjectRender` 743 | A list of ObjectRender objects generated from the given parameters. 744 | :obj:`list` of :obj:`RigidTransform` 745 | A list of the transformations from object frame to camera frame used 746 | for rendering the images. 747 | :obj:`list` of :obj:`CameraIntrinsics` 748 | A list of the camera intrinsics used for rendering the images.
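        Examples
        --------
        A minimal usage sketch; the mesh path, intrinsics values, and
        discretization bounds below are hypothetical:

        >>> import numpy as np
        >>> from perception import CameraIntrinsics
        >>> from meshpy import ObjFile, RenderMode
        >>> mesh = ObjFile('data/tetrahedron.obj').read()
        >>> ci = CameraIntrinsics('camera', fx=525.0, fy=525.0, cx=320.0, cy=240.0,
        ...                       height=480, width=640)
        >>> cam = VirtualCamera(ci)
        >>> ws = PlanarWorksurfaceDiscretizer(0.5, 0.5, 1, 0.1, np.pi / 2, 2,
        ...                                   min_x=-0.05, max_x=0.05, num_x=2,
        ...                                   min_y=-0.05, max_y=0.05, num_y=2)
        >>> renders, poses, intrs = cam.wrapped_images_planar_worksurface(
        ...     mesh, ws, RenderMode.DEPTH)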
749 | """ 750 | # save original intrinsics 751 | old_camera_intr = copy.deepcopy(self._camera_intr) 752 | object_to_camera_poses, object_to_camera_normalized_poses, shifted_camera_intrinsics = ws_disc.object_to_camera_poses(old_camera_intr) 753 | logging.info('Rendering %d images' %(len(object_to_camera_poses))) 754 | 755 | images = [] 756 | for T_obj_camera, shifted_camera_intr in zip(object_to_camera_poses, shifted_camera_intrinsics): 757 | self._camera_intr = shifted_camera_intr 758 | images.extend(self.wrapped_images(mesh, [T_obj_camera], render_mode, stable_pose=stable_pose, mat_props=mat_props, light_props=light_props)) 759 | 760 | self._camera_intr = old_camera_intr 761 | return images, object_to_camera_poses, shifted_camera_intrinsics 762 | -------------------------------------------------------------------------------- /meshpy/meshrender.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include "boost/python/extract.hpp" 3 | #include "boost/python/numeric.hpp" 4 | #include 5 | #include 6 | 7 | #include "GL/osmesa.h" 8 | #include 9 | #include 10 | #include 11 | #define GLAPIENTRY 12 | 13 | // global rending constants 14 | float near = 0.05f; 15 | float far = 1e2f; 16 | float scale = (0x0001) << 0; 17 | 18 | // offsets for reading material buffers 19 | int mat_ambient_off = 3; 20 | int mat_diffuse_off = mat_ambient_off + 4; 21 | int mat_specular_off = mat_diffuse_off + 4; 22 | int mat_shininess_off = mat_specular_off + 4; 23 | 24 | // offsets for reading lighting buffers 25 | int light_ambient_off = 0; 26 | int light_diffuse_off = light_ambient_off + 4; 27 | int light_specular_off = light_diffuse_off + 4; 28 | int light_position_off = light_specular_off + 4; 29 | int light_direction_off = light_position_off + 3; 30 | int light_spot_cutoff_off = light_direction_off + 3; 31 | 32 | void uint2uchar(unsigned int in, unsigned char* out){ 33 | out[0] = (in & 0x00ff0000) >> 16; 34 | out[1] = (in & 0x0000ff00) >> 8; 35 | out[2] = in & 0x000000ff; 36 | } 37 | 38 | boost::python::tuple render_mesh(boost::python::list proj_matrices, 39 | unsigned int im_height, 40 | unsigned int im_width, 41 | boost::python::numeric::array verts, 42 | boost::python::numeric::array tris, 43 | boost::python::numeric::array norms, 44 | boost::python::numeric::array mat_props, 45 | boost::python::numeric::array light_props, 46 | bool enable_lighting = false, 47 | bool debug = false) 48 | { 49 | // init rendering vars 50 | OSMesaContext ctx; 51 | boost::python::list color_ims; 52 | boost::python::list depth_ims; 53 | void *buffer; 54 | unsigned char* color_result = NULL; 55 | float* depth_result = NULL; 56 | 57 | // parse input data 58 | int num_projections = boost::python::len(proj_matrices); 59 | long int verts_buflen; 60 | long int tris_buflen; 61 | long int norms_buflen; 62 | long int mat_props_buflen; 63 | long int light_props_buflen; 64 | void const *verts_raw_buffer; 65 | void const *tris_raw_buffer; 66 | void const *norms_raw_buffer; 67 | void const *mat_props_raw_buffer; 68 | void const *light_props_raw_buffer; 69 | 70 | // read numpy buffers 71 | bool verts_readbuf_success = !PyObject_AsReadBuffer(verts.ptr(), &verts_raw_buffer, &verts_buflen); 72 | bool tris_readbuf_success = !PyObject_AsReadBuffer(tris.ptr(), &tris_raw_buffer, &tris_buflen); 73 | bool norms_readbuf_success = !PyObject_AsReadBuffer(norms.ptr(), &norms_raw_buffer, &norms_buflen); 74 | bool mat_props_readbuf_success = !PyObject_AsReadBuffer(mat_props.ptr(), &mat_props_raw_buffer, 
&mat_props_buflen); 75 | bool light_props_readbuf_success = !PyObject_AsReadBuffer(light_props.ptr(), &light_props_raw_buffer, &light_props_buflen); 76 | 77 | // cast numpy buffers to C arrays 78 | const double* verts_buffer = reinterpret_cast(verts_raw_buffer); 79 | const unsigned int* tris_buffer = reinterpret_cast(tris_raw_buffer); 80 | const double* norms_buffer = reinterpret_cast(norms_raw_buffer); 81 | const double* mat_props_buffer = reinterpret_cast(mat_props_raw_buffer); 82 | const double* light_props_buffer = reinterpret_cast(light_props_raw_buffer); 83 | 84 | // read color 85 | double final_matrix[16]; 86 | unsigned char colorBytes[3]; 87 | colorBytes[0] = (unsigned char)mat_props_buffer[0]; 88 | colorBytes[1] = (unsigned char)mat_props_buffer[1]; 89 | colorBytes[2] = (unsigned char)mat_props_buffer[2]; 90 | 91 | // compute num vertices 92 | unsigned int num_verts = verts_buflen / (3 * sizeof(double)); 93 | unsigned int num_tris = tris_buflen / (3 * sizeof(unsigned int)); 94 | unsigned int num_norms = norms_buflen / (3 * sizeof(double)); 95 | if (debug) { 96 | std::cout << "Num vertices " << num_verts << std::endl; 97 | std::cout << "Num tris " << num_tris << std::endl; 98 | std::cout << "Num norms " << num_norms << std::endl; 99 | std::cout << "Color " << (int)colorBytes[0] << " " << (int)colorBytes[1] << " " << (int)colorBytes[2] << std::endl; 100 | } 101 | 102 | // create an RGBA-mode context 103 | ctx = OSMesaCreateContextExt( OSMESA_RGBA, 16, 0, 0, NULL ); 104 | if (!ctx) { 105 | printf("OSMesaCreateContext failed!\n"); 106 | } 107 | 108 | // allocate the image buffer 109 | buffer = malloc( im_width * im_height * 4 * sizeof(GLubyte) ); 110 | if (!buffer) { 111 | printf("Alloc image buffer failed!\n"); 112 | } 113 | 114 | // bind the buffer to the context and make it current 115 | if (!OSMesaMakeCurrent( ctx, buffer, GL_UNSIGNED_BYTE, im_width, im_height )) { 116 | printf("OSMesaMakeCurrent failed!\n"); 117 | } 118 | OSMesaPixelStore(OSMESA_Y_UP, 0); 119 | 120 | // setup material properties 121 | if (enable_lighting) { 122 | GLfloat mat_ambient[4]; 123 | GLfloat mat_diffuse[4]; 124 | GLfloat mat_specular[4]; 125 | GLfloat mat_shininess[1]; 126 | mat_ambient[0] = (GLfloat)mat_props_buffer[mat_ambient_off + 0]; 127 | mat_ambient[1] = (GLfloat)mat_props_buffer[mat_ambient_off + 1]; 128 | mat_ambient[2] = (GLfloat)mat_props_buffer[mat_ambient_off + 2]; 129 | mat_ambient[3] = (GLfloat)mat_props_buffer[mat_ambient_off + 3]; 130 | 131 | mat_diffuse[0] = (GLfloat)mat_props_buffer[mat_diffuse_off + 0]; 132 | mat_diffuse[1] = (GLfloat)mat_props_buffer[mat_diffuse_off + 1]; 133 | mat_diffuse[2] = (GLfloat)mat_props_buffer[mat_diffuse_off + 2]; 134 | mat_diffuse[3] = (GLfloat)mat_props_buffer[mat_diffuse_off + 3]; 135 | 136 | mat_specular[0] = (GLfloat)mat_props_buffer[mat_specular_off + 0]; 137 | mat_specular[1] = (GLfloat)mat_props_buffer[mat_specular_off + 1]; 138 | mat_specular[2] = (GLfloat)mat_props_buffer[mat_specular_off + 2]; 139 | mat_specular[3] = (GLfloat)mat_props_buffer[mat_specular_off + 3]; 140 | 141 | mat_shininess[0] = (GLfloat)mat_props_buffer[mat_shininess_off + 0]; 142 | 143 | glClearColor(0.0, 0.0, 0.0, 0.0); 144 | glShadeModel(GL_SMOOTH); 145 | glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT, mat_ambient); 146 | glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, mat_diffuse); 147 | glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, mat_specular); 148 | glMaterialfv(GL_FRONT_AND_BACK, GL_SHININESS, mat_shininess); 149 | 150 | // setup lighting properties 151 | GLfloat 
light_ambient[4]; 152 | GLfloat light_diffuse[4]; 153 | GLfloat light_specular[4]; 154 | GLfloat light_position[4]; 155 | GLfloat light_direction[3]; 156 | GLfloat light_spot_cutoff[1]; 157 | 158 | light_ambient[0] = (GLfloat)light_props_buffer[light_ambient_off + 0]; 159 | light_ambient[1] = (GLfloat)light_props_buffer[light_ambient_off + 1]; 160 | light_ambient[2] = (GLfloat)light_props_buffer[light_ambient_off + 2]; 161 | light_ambient[3] = (GLfloat)light_props_buffer[light_ambient_off + 3]; 162 | 163 | light_diffuse[0] = (GLfloat)light_props_buffer[light_diffuse_off + 0]; 164 | light_diffuse[1] = (GLfloat)light_props_buffer[light_diffuse_off + 1]; 165 | light_diffuse[2] = (GLfloat)light_props_buffer[light_diffuse_off + 2]; 166 | light_diffuse[3] = (GLfloat)light_props_buffer[light_diffuse_off + 3]; 167 | 168 | light_specular[0] = (GLfloat)light_props_buffer[light_specular_off + 0]; 169 | light_specular[1] = (GLfloat)light_props_buffer[light_specular_off + 1]; 170 | light_specular[2] = (GLfloat)light_props_buffer[light_specular_off + 2]; 171 | light_specular[3] = (GLfloat)light_props_buffer[light_specular_off + 3]; 172 | 173 | light_position[0] = (GLfloat)light_props_buffer[light_position_off + 0]; 174 | light_position[1] = (GLfloat)light_props_buffer[light_position_off + 1]; 175 | light_position[2] = (GLfloat)light_props_buffer[light_position_off + 2]; 176 | light_position[3] = 1.0; // always set w to 1 177 | 178 | light_direction[0] = (GLfloat)light_props_buffer[light_direction_off + 0]; 179 | light_direction[1] = (GLfloat)light_props_buffer[light_direction_off + 1]; 180 | light_direction[2] = (GLfloat)light_props_buffer[light_direction_off + 2]; 181 | 182 | light_spot_cutoff[0] = (GLfloat)light_props_buffer[light_spot_cutoff_off + 0]; 183 | 184 | glLightfv(GL_LIGHT0, GL_AMBIENT, light_ambient); 185 | glLightfv(GL_LIGHT0, GL_DIFFUSE, light_diffuse); 186 | glLightfv(GL_LIGHT0, GL_SPECULAR, light_specular); 187 | glLightfv(GL_LIGHT0, GL_POSITION, light_position); 188 | glLightfv(GL_LIGHT0, GL_SPOT_DIRECTION, light_direction); 189 | glLightfv(GL_LIGHT0, GL_SPOT_CUTOFF, light_spot_cutoff); 190 | 191 | if (debug) { 192 | std::cout << "Light pos " << light_position[0] << " " << light_position[1] << " " << light_position[2] << " " << light_position[3] << std::endl; 193 | std::cout << "Light dir " << light_direction[0] << " " << light_direction[1] << " " << light_direction[2] << std::endl; 194 | } 195 | 196 | // enable lighting 197 | glLightModelf(GL_LIGHT_MODEL_TWO_SIDE, GL_TRUE); 198 | glEnable(GL_LIGHTING); 199 | glEnable(GL_LIGHT0); 200 | } 201 | 202 | // set color 203 | glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE); 204 | glEnable(GL_COLOR_MATERIAL); 205 | 206 | // setup rendering 207 | glEnable(GL_DEPTH_TEST); 208 | glDisable(GL_CULL_FACE); 209 | glPolygonMode(GL_FRONT_AND_BACK, GL_FILL); 210 | 211 | for (unsigned int k = 0; k < num_projections; k++) { 212 | // load next projection matrix 213 | boost::python::object proj_matrix_obj(proj_matrices[k]); 214 | long int proj_buflen; 215 | void const *proj_raw_buffer; 216 | bool proj_readbuf_success = !PyObject_AsReadBuffer(proj_matrix_obj.ptr(), 217 | &proj_raw_buffer, 218 | &proj_buflen); 219 | const double* projection = reinterpret_cast(proj_raw_buffer); 220 | if (debug) { 221 | std::cout << "Proj Matrix " << k << std::endl; 222 | std::cout << projection[0] << " " << projection[1] << " " << projection[2] << " " << projection[3] << std::endl; 223 | std::cout << projection[4] << " " << projection[5] << " " << projection[6] << " 
" << projection[7] << std::endl; 224 | std::cout << projection[8] << " " << projection[9] << " " << projection[10] << " " << projection[11] << std::endl; 225 | } 226 | 227 | // create projection 228 | double inv_width_scale = 1.0 / (im_width * scale); 229 | double inv_height_scale = 1.0 / (im_height * scale); 230 | double inv_width_scale_1 = inv_width_scale - 1.0; 231 | double inv_height_scale_1_s = -(inv_height_scale - 1.0); 232 | double inv_width_scale_2 = inv_width_scale * 2.0; 233 | double inv_height_scale_2_s = -inv_height_scale * 2.0; 234 | double far_a_near = far + near; 235 | double far_s_near = far - near; 236 | double far_d_near = far_a_near / far_s_near; 237 | final_matrix[ 0] = projection[0+2*4] * inv_width_scale_1 + projection[0+0*4] * inv_width_scale_2; 238 | final_matrix[ 4] = projection[1+2*4] * inv_width_scale_1 + projection[1+0*4] * inv_width_scale_2; 239 | final_matrix[ 8] = projection[2+2*4] * inv_width_scale_1 + projection[2+0*4] * inv_width_scale_2; 240 | final_matrix[ 12] = projection[3+2*4] * inv_width_scale_1 + projection[3+0*4] * inv_width_scale_2; 241 | 242 | final_matrix[ 1] = projection[0+2*4] * inv_height_scale_1_s + projection[0+1*4] * inv_height_scale_2_s; 243 | final_matrix[ 5] = projection[1+2*4] * inv_height_scale_1_s + projection[1+1*4] * inv_height_scale_2_s; 244 | final_matrix[ 9] = projection[2+2*4] * inv_height_scale_1_s + projection[2+1*4] * inv_height_scale_2_s; 245 | final_matrix[13] = projection[3+2*4] * inv_height_scale_1_s + projection[3+1*4] * inv_height_scale_2_s; 246 | 247 | final_matrix[ 2] = projection[0+2*4] * far_d_near; 248 | final_matrix[ 6] = projection[1+2*4] * far_d_near; 249 | final_matrix[10] = projection[2+2*4] * far_d_near; 250 | final_matrix[14] = projection[3+2*4] * far_d_near - (2*far*near)/far_s_near; 251 | 252 | final_matrix[ 3] = projection[0+2*4]; 253 | final_matrix[ 7] = projection[1+2*4]; 254 | final_matrix[11] = projection[2+2*4]; 255 | final_matrix[15] = projection[3+2*4]; 256 | 257 | // load projection and modelview matrices 258 | glMatrixMode(GL_PROJECTION); 259 | glLoadMatrixd(final_matrix); 260 | glMatrixMode(GL_MODELVIEW); 261 | glLoadIdentity(); 262 | 263 | // render mesh 264 | glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); 265 | glViewport(0, 0, im_width, im_height); 266 | for (unsigned int i = 0; i < num_tris; ++i) { 267 | glColor3ubv(colorBytes); 268 | glBegin(GL_POLYGON); 269 | 270 | unsigned int a = tris_buffer[3*i + 0]; 271 | unsigned int b = tris_buffer[3*i + 1]; 272 | unsigned int c = tris_buffer[3*i + 2]; 273 | 274 | glNormal3dv(&norms_buffer[3 * a]); 275 | glVertex3dv(&verts_buffer[3 * a]); 276 | glNormal3dv(&norms_buffer[3 * b]); 277 | glVertex3dv(&verts_buffer[3 * b]); 278 | glNormal3dv(&norms_buffer[3 * c]); 279 | glVertex3dv(&verts_buffer[3 * c]); 280 | glEnd(); 281 | } 282 | 283 | glFinish(); 284 | 285 | // pull color buffer and flip y axis 286 | int i, j; 287 | GLint out_width, out_height, bytes_per_depth, color_type; 288 | GLboolean succeeded; 289 | unsigned char* p_color_buffer; 290 | succeeded = OSMesaGetColorBuffer(ctx, &out_width, &out_height, &color_type, (void**)&p_color_buffer); 291 | if (color_result == NULL) 292 | color_result = new unsigned char[3 * out_width * out_height]; 293 | for (i = 0; i < out_width; i++) { 294 | for (j = 0; j < out_height; j++) { 295 | int di = i + j * out_width; // index in color buffer 296 | int ri = i + j * out_width; // index in rendered image 297 | color_result[3*ri+0] = p_color_buffer[4*di+0]; 298 | color_result[3*ri+1] = p_color_buffer[4*di+1]; 299 | 
color_result[3*ri+2] = p_color_buffer[4*di+2]; 300 | } 301 | } 302 | 303 | // pull depth buffer and flip y axis 304 | unsigned short* p_depth_buffer; 305 | succeeded = OSMesaGetDepthBuffer(ctx, &out_width, &out_height, &bytes_per_depth, (void**)&p_depth_buffer); 306 | if (depth_result == NULL) 307 | depth_result = new float[out_width * out_height]; 308 | for(i = 0; i < out_width; i++){ 309 | for(j = 0; j < out_height; j++){ 310 | int di = i + j * out_width; // index in depth buffer 311 | int ri = i + (out_height-1-j)*out_width; // index in rendered image 312 | if (p_depth_buffer[di] == USHRT_MAX) { 313 | depth_result[ri] = 0.0f; 314 | } 315 | else { 316 | depth_result[ri] = near / (1.0f - ((float)p_depth_buffer[di] / USHRT_MAX)); 317 | } 318 | } 319 | } 320 | 321 | // append ndarray color image to list 322 | boost::python::tuple color_shape = boost::python::make_tuple(im_height, im_width, 3); 323 | boost::numpy::dtype color_dt = boost::numpy::dtype::get_builtin(); 324 | boost::numpy::ndarray color_arr = boost::numpy::from_data(color_result, color_dt, color_shape, 325 | boost::python::make_tuple(color_shape[1]*color_shape[2]*sizeof(unsigned char), 326 | color_shape[2]*sizeof(unsigned char), 327 | sizeof(unsigned char)), 328 | boost::python::object()); 329 | color_ims.append(color_arr.copy()); 330 | 331 | // append ndarray depth image to list 332 | boost::python::tuple depth_shape = boost::python::make_tuple(im_height, im_width); 333 | boost::numpy::dtype depth_dt = boost::numpy::dtype::get_builtin(); 334 | boost::numpy::ndarray depth_arr = boost::numpy::from_data(depth_result, depth_dt, depth_shape, 335 | boost::python::make_tuple(depth_shape[1]*sizeof(float), 336 | sizeof(float)), 337 | boost::python::object()); 338 | depth_ims.append(depth_arr.copy()); 339 | } 340 | 341 | // free the image buffer 342 | free( buffer ); 343 | 344 | // destroy the context 345 | OSMesaDestroyContext( ctx ); 346 | 347 | //return depth_ims; 348 | boost::python::tuple ret_tuple = boost::python::make_tuple(color_ims, depth_ims); 349 | 350 | if (color_result != NULL) 351 | delete [] color_result; 352 | if (depth_result != NULL) 353 | delete [] depth_result; 354 | 355 | return ret_tuple; 356 | } 357 | 358 | // Test function for multiplying an array by a scalar 359 | boost::python::list mul_array(boost::python::numeric::array data, int x) 360 | { 361 | // Access a built-in type (an array) 362 | boost::python::numeric::array a = data; 363 | long int bufLen; 364 | void const *buffer; 365 | bool isReadBuffer = !PyObject_AsReadBuffer(a.ptr(), &buffer, &bufLen); 366 | std::cout << "BUFLEN " << bufLen << std::endl; 367 | const double* test = reinterpret_cast(buffer); 368 | int s = bufLen / sizeof(double); 369 | double* mult = new double[s]; 370 | for (int i = 0; i < s; i++) { 371 | mult[i] = x * test[i]; 372 | } 373 | 374 | const boost::python::tuple& shape = boost::python::extract(a.attr("shape")); 375 | std::cout << "Shape " << boost::python::extract(shape[0]) << " " << boost::python::extract(shape[1]) << std::endl; 376 | boost::numpy::dtype dt = boost::numpy::dtype::get_builtin(); 377 | boost::numpy::ndarray result = boost::numpy::from_data(mult, dt, shape, 378 | boost::python::make_tuple(shape[0]*sizeof(double), 379 | sizeof(double)), 380 | boost::python::object()); 381 | 382 | boost::python::list l; 383 | l.append(result); 384 | return l; 385 | } 386 | 387 | // Expose classes and methods to Python 388 | BOOST_PYTHON_MODULE(meshrender) { 389 | Py_Initialize(); 390 | boost::numpy::initialize(); 391 | 
boost::python::numeric::array::set_module_and_type("numpy", "ndarray"); 392 | 393 | def("mul_array", &mul_array); 394 | def("render_mesh", &render_mesh); 395 | } 396 | -------------------------------------------------------------------------------- /meshpy/obj_file.py: -------------------------------------------------------------------------------- 1 | """ 2 | File for loading and saving meshes from .OBJ files 3 | Author: Jeff Mahler 4 | """ 5 | import os 6 | import mesh 7 | 8 | class ObjFile(object): 9 | """ 10 | A Wavefront .obj file reader and writer. 11 | 12 | Attributes 13 | ---------- 14 | filepath : :obj:`str` 15 | The full path to the .obj file associated with this reader/writer. 16 | """ 17 | 18 | def __init__(self, filepath): 19 | """Construct and initialize a .obj file reader and writer. 20 | 21 | Parameters 22 | ---------- 23 | filepath : :obj:`str` 24 | The full path to the desired .obj file 25 | 26 | Raises 27 | ------ 28 | ValueError 29 | If the file extension is not .obj. 30 | """ 31 | self.filepath_ = filepath 32 | file_root, file_ext = os.path.splitext(self.filepath_) 33 | if file_ext != '.obj': 34 | raise ValueError('Extension %s invalid for OBJs' %(file_ext)) 35 | 36 | @property 37 | def filepath(self): 38 | """Returns the full path to the .obj file associated with this reader/writer. 39 | 40 | Returns 41 | ------- 42 | :obj:`str` 43 | The full path to the .obj file associated with this reader/writer. 44 | """ 45 | return self.filepath_ 46 | 47 | def read(self): 48 | """Reads in the .obj file and returns a Mesh3D representation of that mesh. 49 | 50 | Returns 51 | ------- 52 | :obj:`Mesh3D` 53 | A Mesh3D created from the data in the .obj file. 54 | """ 55 | numVerts = 0 56 | verts = [] 57 | norms = None 58 | faces = [] 59 | tex_coords = [] 60 | face_norms = [] 61 | f = open(self.filepath_, 'r') 62 | 63 | for line in f: 64 | # Break up the line by whitespace 65 | vals = line.split() 66 | if len(vals) > 0: 67 | # Look for obj tags (see http://en.wikipedia.org/wiki/Wavefront_.obj_file) 68 | if vals[0] == 'v': 69 | # Add vertex 70 | v = map(float, vals[1:4]) 71 | verts.append(v) 72 | if vals[0] == 'vn': 73 | # Add normal 74 | if norms is None: 75 | norms = [] 76 | n = map(float, vals[1:4]) 77 | norms.append(n) 78 | if vals[0] == 'f': 79 | # Add faces (includes vertex indices, texture coordinates, and normals) 80 | vi = [] 81 | vti = [] 82 | nti = [] 83 | if vals[1].find('/') == -1: 84 | vi = map(int, vals[1:]) 85 | vi = [i - 1 for i in vi] 86 | else: 87 | for j in range(1, len(vals)): 88 | # Break up like by / to read vert inds, tex coords, and normal inds 89 | val = vals[j] 90 | tokens = val.split('/') 91 | for i in range(len(tokens)): 92 | if i == 0: 93 | vi.append(int(tokens[i]) - 1) # adjust for python 0 - indexing 94 | elif i == 1: 95 | if tokens[i] != '': 96 | vti.append(int(tokens[i])) 97 | elif i == 2: 98 | nti.append(int(tokens[i])) 99 | faces.append(vi) 100 | # Below two lists are currently not in use 101 | tex_coords.append(vti) 102 | face_norms.append(nti) 103 | f.close() 104 | 105 | return mesh.Mesh3D(verts, faces, norms) 106 | 107 | def write(self, mesh): 108 | """Writes a Mesh3D object out to a .obj file format 109 | 110 | Parameters 111 | ---------- 112 | mesh : :obj:`Mesh3D` 113 | The Mesh3D object to write to the .obj file. 114 | 115 | Note 116 | ---- 117 | Does not support material files or texture coordinates. 
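        Examples
        --------
        A small write/read round trip (the paths here are hypothetical):

        >>> m = ObjFile('data/tetrahedron.obj').read()
        >>> ObjFile('/tmp/tetrahedron_copy.obj').write(m)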
118 | """ 119 | f = open(self.filepath_, 'w') 120 | vertices = mesh.vertices 121 | faces = mesh.triangles 122 | normals = mesh.normals 123 | 124 | # write human-readable header 125 | f.write('###########################################################\n') 126 | f.write('# OBJ file generated by UC Berkeley Automation Sciences Lab\n') 127 | f.write('#\n') 128 | f.write('# Num Vertices: %d\n' %(vertices.shape[0])) 129 | f.write('# Num Triangles: %d\n' %(faces.shape[0])) 130 | f.write('#\n') 131 | f.write('###########################################################\n') 132 | f.write('\n') 133 | 134 | for v in vertices: 135 | f.write('v %f %f %f\n' %(v[0], v[1], v[2])) 136 | 137 | # write the normals list 138 | if normals is not None and normals.shape[0] > 0: 139 | for n in normals: 140 | f.write('vn %f %f %f\n' %(n[0], n[1], n[2])) 141 | 142 | # write the normals list 143 | for t in faces: 144 | f.write('f %d %d %d\n' %(t[0]+1, t[1]+1, t[2]+1)) # convert back to 1-indexing 145 | 146 | f.close() 147 | -------------------------------------------------------------------------------- /meshpy/off_file.py: -------------------------------------------------------------------------------- 1 | """ 2 | File for loading and saving meshes from .OFF files 3 | Author: Jeff Mahler 4 | """ 5 | import os 6 | import mesh 7 | 8 | class OffFile: 9 | """ 10 | A .off file reader and writer. 11 | 12 | Attributes 13 | ---------- 14 | filepath : :obj:`str` 15 | The full path to the .off file associated with this reader/writer. 16 | """ 17 | def __init__(self, filepath): 18 | ''' 19 | Set the path to the file to open 20 | ''' 21 | self.filepath_ = filepath 22 | file_root, file_ext = os.path.splitext(self.filepath_) 23 | if file_ext.lower() != '.off': 24 | raise Exception('Cannot load file extension %s. Please supply a .off file' %(file_ext)) 25 | 26 | @property 27 | def filepath(self): 28 | """Returns the full path to the .off file associated with this reader/writer. 29 | 30 | Returns 31 | ------- 32 | :obj:`str` 33 | The full path to the .ff file associated with this reader/writer. 34 | """ 35 | return self.filepath_ 36 | 37 | def read(self): 38 | """Reads in the .off file and returns a Mesh3D representation of that mesh. 39 | 40 | Returns 41 | ------- 42 | :obj:`Mesh3D` 43 | A Mesh3D created from the data in the .off file. 44 | """ 45 | verts = [] 46 | faces = [] 47 | f = open(self.filepath_, 'r') 48 | 49 | # parse header (NOTE: we do not support reading edges) 50 | header = f.readline() 51 | tokens = header.split() 52 | if len(tokens) == 1: 53 | header = f.readline() 54 | tokens = header.split() 55 | else: 56 | tokens = tokens[1:] 57 | num_vertices = int(tokens[0]) 58 | num_faces = int(tokens[1]) 59 | 60 | # read vertices 61 | for i in range(num_vertices): 62 | line = f.readline() 63 | tokens = line.split() 64 | vertex = [float(tokens[0]), float(tokens[1]), float(tokens[2])] 65 | verts.append(vertex) 66 | 67 | # read faces 68 | for i in range(num_faces): 69 | line = f.readline() 70 | tokens = line.split() 71 | if int(tokens[0]) != 3: 72 | raise ValueError('Only triangle meshes supported, but OFF file has %d-faces' %(int(tokens[0]))) 73 | face = [int(tokens[1]), int(tokens[2]), int(tokens[3])] 74 | faces.append(face) 75 | 76 | 77 | return mesh.Mesh3D(verts, faces) 78 | 79 | def write(self, mesh): 80 | """Writes a Mesh3D object out to a .off file format 81 | 82 | Parameters 83 | ---------- 84 | mesh : :obj:`Mesh3D` 85 | The Mesh3D object to write to the .obj file. 
86 | 87 | Note 88 | ---- 89 | Does not support material files or texture coordinates. 90 | """ 91 | raise NotImplementedError() 92 | 93 | -------------------------------------------------------------------------------- /meshpy/random_variables.py: -------------------------------------------------------------------------------- 1 | """ 2 | Random variables for sampling camera poses 3 | Author: Jeff Mahler 4 | """ 5 | import copy 6 | import logging 7 | 8 | import numpy as np 9 | import scipy.stats as ss 10 | 11 | from autolab_core import Point, RigidTransform, RandomVariable 12 | from autolab_core.utils import sph2cart, cart2sph 13 | from perception import CameraIntrinsics, BinaryImage, ColorImage, DepthImage, ObjectRender, RenderMode 14 | 15 | from mesh_renderer import VirtualCamera, SceneObject 16 | 17 | class CameraSample(object): 18 | """ Struct to encapsulate the results of sampling a camera and its pose. """ 19 | def __init__(self, object_to_camera_pose, camera_intr, 20 | radius, elev, az, roll, tx=0, ty=0, focal=0, 21 | cx=0, cy=0): 22 | self.object_to_camera_pose = object_to_camera_pose 23 | self.camera_intr = camera_intr 24 | self.radius = radius 25 | self.elev = elev 26 | self.az = az 27 | self.roll = roll 28 | self.tx = tx 29 | self.ty = ty 30 | self.focal = focal 31 | self.cx = cx 32 | self.cy = cy 33 | 34 | @property 35 | def T_camera_world(self): 36 | return self.object_to_camera_pose.inverse().as_frames(self.camera_intr.frame, 'world') 37 | 38 | class RenderSample(object): 39 | """ Struct to encapsulate the results of sampling rendered images from a camera. """ 40 | def __init__(self, renders, camera): 41 | self.renders = renders 42 | self.camera = camera 43 | 44 | class UniformViewsphereRandomVariable(RandomVariable): 45 | """ 46 | Uniform distribution over a bounded region of a viewing sphere. 47 | """ 48 | def __init__(self, min_radius, max_radius, 49 | min_elev, max_elev, 50 | min_az=0, max_az=2*np.pi, 51 | min_roll=0, max_roll=2*np.pi, 52 | num_prealloc_samples=1): 53 | """Initialize a ViewsphereDiscretizer. 54 | 55 | Parameters 56 | ---------- 57 | min_radius : float 58 | Minimum radius for viewing sphere. 59 | max_radius : float 60 | Maximum radius for viewing sphere. 61 | min_elev : float 62 | Minimum elevation (angle from z-axis) for camera position. 63 | max_elev : float 64 | Maximum elevation for camera position. 65 | min_az : float 66 | Minimum azimuth (angle from x-axis) for camera position. 67 | max_az : float 68 | Maximum azimuth for camera position. 69 | min_roll : float 70 | Minimum roll (rotation of camera about axis generated by azimuth and 71 | elevation) for camera. 72 | max_roll : float 73 | Maximum roll for camera. 74 | num_prealloc_samples : int 75 | Number of preallocated samples. 
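
        Examples
        --------
        A minimal usage sketch, assuming all angles are given in radians:

        >>> import numpy as np
        >>> rv = UniformViewsphereRandomVariable(0.5, 0.7,
        ...                                      np.deg2rad(30), np.deg2rad(60))
        >>> T_obj_camera = rv.sample()      # a single object-to-camera RigidTransform
        >>> poses = rv.sample(size=10)      # a list of RigidTransforms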
76 |         """
77 |         # read params (all angles are radians)
78 |         self.min_radius = min_radius
79 |         self.max_radius = max_radius
80 |         self.min_az = min_az
81 |         self.max_az = max_az
82 |         self.min_elev = min_elev
83 |         self.max_elev = max_elev
84 |         self.min_roll = min_roll
85 |         self.max_roll = max_roll
86 |         self.num_prealloc_samples = num_prealloc_samples
87 | 
88 |         # setup random variables
89 |         self.rad_rv = ss.uniform(loc=self.min_radius, scale=self.max_radius-self.min_radius)
90 |         self.elev_rv = ss.uniform(loc=self.min_elev, scale=self.max_elev-self.min_elev)
91 |         self.az_rv = ss.uniform(loc=self.min_az, scale=self.max_az-self.min_az)
92 |         self.roll_rv = ss.uniform(loc=self.min_roll, scale=self.max_roll-self.min_roll)
93 | 
94 |         RandomVariable.__init__(self, self.num_prealloc_samples)
95 | 
96 |     def object_to_camera_pose(self, radius, elev, az, roll):
97 |         """ Convert spherical coords to an object-camera pose. """
98 |         # generate camera center from spherical coords
99 |         camera_center_obj = np.array([sph2cart(radius, az, elev)]).squeeze()
100 |         camera_z_obj = -camera_center_obj / np.linalg.norm(camera_center_obj)
101 | 
102 |         # find the canonical camera x and y axes
103 |         camera_x_par_obj = np.array([camera_z_obj[1], -camera_z_obj[0], 0])
104 |         if np.linalg.norm(camera_x_par_obj) == 0:
105 |             camera_x_par_obj = np.array([1, 0, 0])
106 |         camera_x_par_obj = camera_x_par_obj / np.linalg.norm(camera_x_par_obj)
107 |         camera_y_par_obj = np.cross(camera_z_obj, camera_x_par_obj)
108 |         camera_y_par_obj = camera_y_par_obj / np.linalg.norm(camera_y_par_obj)
109 |         if camera_y_par_obj[2] > 0:
110 |             camera_x_par_obj = -camera_x_par_obj
111 |             camera_y_par_obj = np.cross(camera_z_obj, camera_x_par_obj)
112 |             camera_y_par_obj = camera_y_par_obj / np.linalg.norm(camera_y_par_obj)
113 | 
114 |         # rotate by the roll
115 |         R_obj_camera_par = np.c_[camera_x_par_obj, camera_y_par_obj, camera_z_obj]
116 |         R_camera_par_camera = np.array([[np.cos(roll), -np.sin(roll), 0],
117 |                                         [np.sin(roll), np.cos(roll), 0],
118 |                                         [0, 0, 1]])
119 |         R_obj_camera = R_obj_camera_par.dot(R_camera_par_camera)
120 |         t_obj_camera = camera_center_obj
121 | 
122 |         # create final transform
123 |         T_obj_camera = RigidTransform(R_obj_camera, t_obj_camera,
124 |                                       from_frame='camera', to_frame='obj')  # 'camera' frame name is an assumed default
125 |         return T_obj_camera.inverse()
126 | 
127 |     def sample(self, size=1):
128 |         """ Sample random variables from the model.
129 | 
130 |         Parameters
131 |         ----------
132 |         size : int
133 |             number of samples to take
134 | 
135 |         Returns
136 |         -------
137 |         :obj:`list` of :obj:`RigidTransform`
138 |             sampled object to camera poses
139 |         """
140 |         samples = []
141 |         for i in range(size):
142 |             # sample params
143 |             radius = self.rad_rv.rvs(size=1)[0]
144 |             elev = self.elev_rv.rvs(size=1)[0]
145 |             az = self.az_rv.rvs(size=1)[0]
146 |             roll = self.roll_rv.rvs(size=1)[0]
147 | 
148 |             # convert to camera pose
149 |             samples.append(self.object_to_camera_pose(radius, elev, az, roll))
150 | 
151 |         # not a list if only 1 sample
152 |         if size == 1:
153 |             return samples[0]
154 |         return samples
155 | 
156 | class UniformPlanarWorksurfaceRandomVariable(RandomVariable):
157 |     """
158 |     Uniform distribution over camera poses and intrinsics for a bounded region of a viewing sphere and planar worksurface.
159 |     """
160 |     def __init__(self, frame, config, num_prealloc_samples=1):
161 |         """Initialize a UniformPlanarWorksurfaceRandomVariable.
162 | 163 | Parameters 164 | ---------- 165 | frame: :obj:`str` 166 | string name of the camera frame 167 | config : :obj:`autolab_core.YamlConfig` 168 | configuration containing parameters of random variable 169 | num_prealloc_samples : int 170 | Number of preallocated samples. 171 | 172 | Notes 173 | ----- 174 | Required parameters of config are specified in Other Parameters 175 | 176 | Other Parameters 177 | ---------- 178 | min_f : float 179 | Minimum focal length of camera 180 | max_f : float 181 | Maximum focal length of camera 182 | min_cx : float 183 | Minimum camera optical center in x 184 | max_cx : float 185 | Maximum camera optical center in x 186 | min_cy : float 187 | Minimum camera optical center in y 188 | max_cy : float 189 | Maximum camera optical center in y 190 | im_height : int 191 | Height of camera image 192 | im_width : int 193 | Width of camera image 194 | min_radius : float 195 | Minimum radius for viewing sphere. 196 | max_radius : float 197 | Maximum radius for viewing sphere. 198 | min_elev : float 199 | Minimum elevation (angle from z-axis), in degrees, for camera position. 200 | max_elev : float 201 | Maximum elevation for camera position, in degrees. 202 | min_az : float 203 | Minimum azimuth (angle from x-axis), in degrees, for camera position. 204 | max_az : float 205 | Maximum azimuth, in degrees, for camera position. 206 | min_roll : float 207 | Minimum roll (rotation of camera about axis generated by azimuth and 208 | elevation), in degrees, for camera. 209 | max_roll : float 210 | Maximum roll, in degrees, for camera. 211 | min_x : float 212 | Minimum x translation of object on table 213 | max_x : float 214 | Maximum x translation of object on table 215 | min_y : float 216 | Minimum y translation of object on table 217 | max_y : float 218 | Maximum y translation of object on table 219 | """ 220 | # read params 221 | self.frame = frame 222 | self.config = config 223 | self.num_prealloc_samples = num_prealloc_samples 224 | 225 | self._parse_config(config) 226 | 227 | # setup random variables 228 | 229 | # camera 230 | self.focal_rv = ss.uniform(loc=self.min_f, scale=self.max_f-self.min_f) 231 | self.cx_rv = ss.uniform(loc=self.min_cx, scale=self.max_cx-self.min_cx) 232 | self.cy_rv = ss.uniform(loc=self.min_cy, scale=self.max_cy-self.min_cy) 233 | 234 | # viewsphere 235 | self.rad_rv = ss.uniform(loc=self.min_radius, scale=self.max_radius-self.min_radius) 236 | self.elev_rv = ss.uniform(loc=self.min_elev, scale=self.max_elev-self.min_elev) 237 | self.az_rv = ss.uniform(loc=self.min_az, scale=self.max_az-self.min_az) 238 | self.roll_rv = ss.uniform(loc=self.min_roll, scale=self.max_roll-self.min_roll) 239 | 240 | # table translation 241 | self.tx_rv = ss.uniform(loc=self.min_x, scale=self.max_x-self.min_x) 242 | self.ty_rv = ss.uniform(loc=self.min_y, scale=self.max_y-self.min_y) 243 | 244 | RandomVariable.__init__(self, self.num_prealloc_samples) 245 | 246 | def _parse_config(self, config): 247 | """ Reads parameters from the config into class members """ 248 | # camera params 249 | self.min_f = config['min_f'] 250 | self.max_f = config['max_f'] 251 | self.min_cx = config['min_cx'] 252 | self.max_cx = config['max_cx'] 253 | self.min_cy = config['min_cy'] 254 | self.max_cy = config['max_cy'] 255 | self.im_height = config['im_height'] 256 | self.im_width = config['im_width'] 257 | 258 | # viewsphere params 259 | self.min_radius = config['min_radius'] 260 | self.max_radius = config['max_radius'] 261 | self.min_az = np.deg2rad(config['min_az']) 262 | 
self.max_az = np.deg2rad(config['max_az']) 263 | self.min_elev = np.deg2rad(config['min_elev']) 264 | self.max_elev = np.deg2rad(config['max_elev']) 265 | self.min_roll = np.deg2rad(config['min_roll']) 266 | self.max_roll = np.deg2rad(config['max_roll']) 267 | 268 | # params of translation in plane 269 | self.min_x = config['min_x'] 270 | self.max_x = config['max_x'] 271 | self.min_y = config['min_y'] 272 | self.max_y = config['max_y'] 273 | 274 | def object_to_camera_pose(self, radius, elev, az, roll, x, y): 275 | """ Convert spherical coords to an object-camera pose. """ 276 | # generate camera center from spherical coords 277 | delta_t = np.array([x, y, 0]) 278 | camera_center_obj = np.array([sph2cart(radius, az, elev)]).squeeze() + delta_t 279 | camera_z_obj = -np.array([sph2cart(radius, az, elev)]).squeeze() 280 | camera_z_obj = camera_z_obj / np.linalg.norm(camera_z_obj) 281 | 282 | # find the canonical camera x and y axes 283 | camera_x_par_obj = np.array([camera_z_obj[1], -camera_z_obj[0], 0]) 284 | if np.linalg.norm(camera_x_par_obj) == 0: 285 | camera_x_par_obj = np.array([1, 0, 0]) 286 | camera_x_par_obj = camera_x_par_obj / np.linalg.norm(camera_x_par_obj) 287 | camera_y_par_obj = np.cross(camera_z_obj, camera_x_par_obj) 288 | camera_y_par_obj = camera_y_par_obj / np.linalg.norm(camera_y_par_obj) 289 | if camera_y_par_obj[2] > 0: 290 | camera_x_par_obj = -camera_x_par_obj 291 | camera_y_par_obj = np.cross(camera_z_obj, camera_x_par_obj) 292 | camera_y_par_obj = camera_y_par_obj / np.linalg.norm(camera_y_par_obj) 293 | 294 | # rotate by the roll 295 | R_obj_camera_par = np.c_[camera_x_par_obj, camera_y_par_obj, camera_z_obj] 296 | R_camera_par_camera = np.array([[np.cos(roll), -np.sin(roll), 0], 297 | [np.sin(roll), np.cos(roll), 0], 298 | [0, 0, 1]]) 299 | R_obj_camera = R_obj_camera_par.dot(R_camera_par_camera) 300 | t_obj_camera = camera_center_obj 301 | 302 | # create final transform 303 | T_obj_camera = RigidTransform(R_obj_camera, t_obj_camera, 304 | from_frame=self.frame, 305 | to_frame='obj') 306 | 307 | return T_obj_camera.inverse() 308 | 309 | def camera_intrinsics(self, T_camera_obj, f, cx, cy): 310 | """ Generate shifted camera intrinsics to simulate cropping """ 311 | # form intrinsics 312 | camera_intr = CameraIntrinsics(self.frame, fx=f, fy=f, 313 | cx=cx, cy=cy, skew=0.0, 314 | height=self.im_height, width=self.im_width) 315 | 316 | # compute new camera center by projecting object 0,0,0 into the camera 317 | center_obj_obj = Point(np.zeros(3), frame='obj') 318 | center_obj_camera = T_camera_obj * center_obj_obj 319 | u_center_obj = camera_intr.project(center_obj_camera) 320 | camera_shifted_intr = copy.deepcopy(camera_intr) 321 | camera_shifted_intr.cx = 2 * camera_intr.cx - float(u_center_obj.x) 322 | camera_shifted_intr.cy = 2 * camera_intr.cy - float(u_center_obj.y) 323 | return camera_shifted_intr 324 | 325 | def sample(self, size=1): 326 | """ Sample random variables from the model. 
327 | 328 | Parameters 329 | ---------- 330 | size : int 331 | number of sample to take 332 | 333 | Returns 334 | ------- 335 | :obj:`list` of :obj:`RigidTransform` 336 | sampled object to camera poses 337 | """ 338 | samples = [] 339 | for i in range(size): 340 | # sample camera params 341 | focal = self.focal_rv.rvs(size=1)[0] 342 | cx = self.cx_rv.rvs(size=1)[0] 343 | cy = self.cy_rv.rvs(size=1)[0] 344 | 345 | # sample viewsphere params 346 | radius = self.rad_rv.rvs(size=1)[0] 347 | elev = self.elev_rv.rvs(size=1)[0] 348 | az = self.az_rv.rvs(size=1)[0] 349 | roll = self.roll_rv.rvs(size=1)[0] 350 | 351 | # sample plane translation 352 | tx = self.tx_rv.rvs(size=1)[0] 353 | ty = self.ty_rv.rvs(size=1)[0] 354 | 355 | logging.debug('Sampled') 356 | 357 | logging.debug('focal: %.3f' %(focal)) 358 | logging.debug('cx: %.3f' %(cx)) 359 | logging.debug('cy: %.3f' %(cy)) 360 | 361 | logging.debug('radius: %.3f' %(radius)) 362 | logging.debug('elev: %.3f' %(elev)) 363 | logging.debug('az: %.3f' %(az)) 364 | logging.debug('roll: %.3f' %(roll)) 365 | 366 | logging.debug('tx: %.3f' %(tx)) 367 | logging.debug('ty: %.3f' %(ty)) 368 | 369 | # convert to pose and intrinsics 370 | object_to_camera_pose = self.object_to_camera_pose(radius, elev, az, roll, 371 | tx, ty) 372 | camera_shifted_intr = self.camera_intrinsics(object_to_camera_pose, 373 | focal, cx, cy) 374 | camera_sample = CameraSample(object_to_camera_pose, 375 | camera_shifted_intr, 376 | radius, elev, az, roll, tx=tx, ty=ty, 377 | focal=focal, cx=cx, cy=cy) 378 | 379 | # convert to camera pose 380 | samples.append(camera_sample) 381 | 382 | # not a list if only 1 sample 383 | if size == 1: 384 | return samples[0] 385 | return samples 386 | 387 | class UniformPlanarWorksurfaceImageRandomVariable(RandomVariable): 388 | """ Random variable for sampling images from a camera """ 389 | def __init__(self, mesh, render_modes, frame, config, stable_pose=None, scene_objs=None, num_prealloc_samples=0): 390 | """Initialize a ViewsphereDiscretizer. 391 | 392 | Parameters 393 | ---------- 394 | mesh : :obj:`Mesh3D` 395 | mesh of the object to render 396 | render_modes : :obj:`list` of :obj:`perception.RenderMode` 397 | render modes to use 398 | frame: :obj:`str` 399 | string name of the camera frame 400 | config : :obj:`autolab_core.YamlConfig` 401 | configuration containing parameters of random variable 402 | stable_pose : :obj:`StablePose` 403 | stable pose for the mesh to rest in 404 | scene_objs : :obj:`dict` mapping :obj:`str` to :obj:`SceneObject` 405 | objects to render statically in the scene 406 | num_prealloc_samples : int 407 | Number of preallocated samples. 408 | 409 | Notes 410 | ----- 411 | Required parameters of config are specified in Other Parameters 412 | 413 | Other Parameters 414 | ---------- 415 | min_f : float 416 | Minimum focal length of camera 417 | max_f : float 418 | Maximum focal length of camera 419 | min_cx : float 420 | Minimum camera optical center in x 421 | max_cx : float 422 | Maximum camera optical center in x 423 | min_cy : float 424 | Minimum camera optical center in y 425 | max_cy : float 426 | Maximum camera optical center in y 427 | im_height : int 428 | Height of camera image 429 | im_width : int 430 | Width of camera image 431 | min_radius : float 432 | Minimum radius for viewing sphere. 433 | max_radius : float 434 | Maximum radius for viewing sphere. 435 | min_elev : float 436 | Minimum elevation (angle from z-axis) for camera position. 437 | max_elev : float 438 | Maximum elevation for camera position. 
439 |         min_az : float
440 |             Minimum azimuth (angle from x-axis) for camera position.
441 |         max_az : float
442 |             Maximum azimuth for camera position.
443 |         min_roll : float
444 |             Minimum roll (rotation of camera about axis generated by azimuth and
445 |             elevation) for camera.
446 |         max_roll : float
447 |             Maximum roll for camera.
448 |         min_x : float
449 |             Minimum x translation of object on table
450 |         max_x : float
451 |             Maximum x translation of object on table
452 |         min_y : float
453 |             Minimum y translation of object on table
454 |         max_y : float
455 |             Maximum y translation of object on table
456 |         """
457 |         # read params
458 |         self.mesh = mesh
459 |         self.render_modes = render_modes
460 |         self.frame = frame
461 |         self.config = config
462 |         self.stable_pose = stable_pose
463 |         self.scene_objs = scene_objs
464 |         self.num_prealloc_samples = num_prealloc_samples
465 | 
466 |         # init random variables
467 |         self.ws_rv = UniformPlanarWorksurfaceRandomVariable(self.frame, self.config, num_prealloc_samples=self.num_prealloc_samples)
468 | 
469 |         RandomVariable.__init__(self, self.num_prealloc_samples)
470 | 
471 |     def sample(self, size=1):
472 |         """ Sample random variables from the model.
473 | 
474 |         Parameters
475 |         ----------
476 |         size : int
477 |             number of samples to take
478 | 
479 |         Returns
480 |         -------
481 |         :obj:`list` of :obj:`RenderSample`
482 |             sampled images of the object paired with the camera samples that generated them
483 |         """
484 |         samples = []
485 |         for i in range(size):
486 |             # sample camera params
487 |             camera_sample = self.ws_rv.sample(size=1)
488 | 
489 |             # render images
490 |             camera = VirtualCamera(camera_sample.camera_intr)
491 |             for name, scene_obj in (self.scene_objs or {}).iteritems():  # tolerate scene_objs=None
492 |                 camera.add_to_scene(name, scene_obj)
493 | 
494 |             image_bundle = {}
495 |             for render_mode in self.render_modes:
496 |                 images = camera.wrapped_images(self.mesh,
497 |                                                [camera_sample.object_to_camera_pose],
498 |                                                render_mode, stable_pose=self.stable_pose)
499 |                 image_bundle[render_mode] = images[0]
500 | 
501 |             # pair the rendered images with the camera sample that produced them
502 |             samples.append(RenderSample(image_bundle, camera_sample))
503 | 
504 |         # not a list if only 1 sample
505 |         if size == 1:
506 |             return samples[0]
507 |         return samples
508 | 
509 | 
--------------------------------------------------------------------------------
/meshpy/render_modes.py:
--------------------------------------------------------------------------------
1 | """
2 | Render modes
3 | Author: Jeff Mahler
4 | """
5 | class RenderMode(object):
6 |     """Supported rendering modes.
7 |     """
8 |     SEGMASK = 'segmask'
9 |     DEPTH = 'depth'
10 |     DEPTH_SCENE = 'depth_scene'
11 |     SCALED_DEPTH = 'scaled_depth'
12 |     COLOR = 'color'
13 |     COLOR_SCENE = 'color_scene'
14 |     GRAY = 'gray'
15 |     GD = 'gd'
16 |     RGBD = 'rgbd'
17 |     RGBD_SCENE = 'rgbd_scene'
18 |     GRAYSCALE = 'gray'  # alias for GRAY
19 | 
--------------------------------------------------------------------------------
/meshpy/sdf.py:
--------------------------------------------------------------------------------
1 | """
2 | Definition of SDF Class
3 | Author: Sahaana Suri, Jeff Mahler, and Matt Matl
4 | 
5 | **Currently assumes clean input**
6 | """
7 | from abc import ABCMeta, abstractmethod
8 | import logging
9 | import numpy as np
10 | from numbers import Number
11 | 
12 | import time
13 | 
14 | from autolab_core import RigidTransform, SimilarityTransform, PointCloud, Point, NormalCloud
15 | 
16 | from sys import version_info
17 | 
18 | if version_info[0] != 3:
19 |     range = xrange
20 | 
21 | class Sdf:
22 |     """ Abstract class for signed distance fields.
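
    Conventions: distances live on a regular 2- or 3-D grid, a point is
    treated as lying on the surface when its signed distance falls below a
    resolution-dependent threshold, and indexing with non-integer
    coordinates interpolates. A hypothetical query against a concrete
    subclass instance ``my_sdf``:

    >>> on_surf, sd = my_sdf.on_surface(np.array([10, 12, 9]))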
23 | """ 24 | __metaclass__ = ABCMeta 25 | 26 | ################################################################## 27 | # General SDF Properties 28 | ################################################################## 29 | @property 30 | def dimensions(self): 31 | """SDF dimension information. 32 | 33 | Returns 34 | ------- 35 | :obj:`numpy.ndarray` of int 36 | The ndarray that contains the dimensions of the sdf. 37 | """ 38 | return self.dims_ 39 | 40 | @property 41 | def origin(self): 42 | """The location of the origin in the SDF grid. 43 | 44 | Returns 45 | ------- 46 | :obj:`numpy.ndarray` of float 47 | The 2- or 3-ndarray that contains the location of 48 | the origin of the mesh grid in real space. 49 | """ 50 | return self.origin_ 51 | 52 | @property 53 | def resolution(self): 54 | """The grid resolution (how wide each grid cell is). 55 | 56 | Returns 57 | ------- 58 | float 59 | The width of each grid cell. 60 | """ 61 | return self.resolution_ 62 | 63 | @property 64 | def center(self): 65 | """Center of grid. 66 | 67 | This basically transforms the world frame to grid center. 68 | 69 | Returns 70 | ------- 71 | :obj:`numpy.ndarray` 72 | """ 73 | return self.center_ 74 | 75 | @property 76 | def gradients(self): 77 | """Gradients of the SDF. 78 | 79 | Returns 80 | ------- 81 | :obj:`list` of :obj:`numpy.ndarray` of float 82 | A list of ndarrays of the same dimension as the SDF. The arrays 83 | are in axis order and specify the gradients for that axis 84 | at each point. 85 | """ 86 | return self.gradients_ 87 | 88 | @property 89 | def data(self): 90 | """The SDF data. 91 | 92 | Returns 93 | ------- 94 | :obj:`numpy.ndarray` of float 95 | The 2- or 3-dimensional ndarray that holds the grid of signed 96 | distances. 97 | """ 98 | return self.data_ 99 | 100 | ################################################################## 101 | # General SDF Abstract Methods 102 | ################################################################## 103 | @abstractmethod 104 | def transform(self, tf): 105 | """Returns a new SDF transformed by similarity tf. 106 | """ 107 | pass 108 | 109 | @abstractmethod 110 | def transform_pt_obj_to_grid(self, x_world, direction = False): 111 | """Transforms points from world frame to grid frame 112 | """ 113 | pass 114 | 115 | @abstractmethod 116 | def transform_pt_grid_to_obj(self, x_grid, direction = False): 117 | """Transforms points from grid frame to world frame 118 | """ 119 | pass 120 | 121 | @abstractmethod 122 | def surface_points(self): 123 | """Returns the points on the surface. 124 | 125 | Returns 126 | ------- 127 | :obj:`tuple` of :obj:`numpy.ndarray` of int, :obj:`numpy.ndarray` of float 128 | The points on the surface and the signed distances at those points. 129 | """ 130 | pass 131 | 132 | @abstractmethod 133 | def __getitem__(self, coords): 134 | """Returns the signed distance at the given coordinates. 135 | 136 | Parameters 137 | ---------- 138 | coords : :obj:`numpy.ndarray` of int 139 | A 2- or 3-dimensional ndarray that indicates the desired 140 | coordinates in the grid. 141 | 142 | Returns 143 | ------- 144 | float 145 | The signed distance at the given coords (interpolated). 146 | """ 147 | pass 148 | 149 | ################################################################## 150 | # Universal SDF Methods 151 | ################################################################## 152 | def transform_to_world(self): 153 | """Returns an sdf object with center in the world frame of reference. 
154 |         """
155 |         return self.transform(self.pose_, scale=self.scale_)
156 | 
157 |     def center_world(self):
158 |         """Center of grid (basically transforms world frame to grid center)
159 |         """
160 |         return self.transform_pt_grid_to_obj(self.center_)
161 | 
162 |     def on_surface(self, coords):
163 |         """Determines whether or not a point is on the object surface.
164 | 
165 |         Parameters
166 |         ----------
167 |         coords : :obj:`numpy.ndarray` of int
168 |             A 2- or 3-dimensional ndarray that indicates the desired
169 |             coordinates in the grid.
170 | 
171 |         Returns
172 |         -------
173 |         :obj:`tuple` of bool, float
174 |             Is the point on the object's surface, and what
175 |             is the signed distance at that point?
176 |         """
177 |         sdf_val = self[coords]
178 |         if np.abs(sdf_val) < self.surface_thresh_:
179 |             return True, sdf_val
180 |         return False, sdf_val
181 | 
182 |     def is_out_of_bounds(self, coords):
183 |         """Returns True if coords is an out of bounds access.
184 | 
185 |         Parameters
186 |         ----------
187 |         coords : :obj:`numpy.ndarray` of int
188 |             A 2- or 3-dimensional ndarray that indicates the desired
189 |             coordinates in the grid.
190 | 
191 |         Returns
192 |         -------
193 |         bool
194 |             Are the coordinates in coords out of bounds?
195 |         """
196 |         return np.array(coords < 0).any() or np.array(coords >= self.dims_).any()
197 | 
198 |     def _compute_gradients(self):
199 |         """Computes the gradients of the SDF.
200 | 
201 |         Returns
202 |         -------
203 |         :obj:`list` of :obj:`numpy.ndarray` of float
204 |             A list of ndarrays of the same dimension as the SDF. The arrays
205 |             are in axis order and specify the gradients for that axis
206 |             at each point.
207 |         """
208 |         self.gradients_ = np.gradient(self.data_)
209 | 
210 | class Sdf3D(Sdf):
211 |     # static indexing vars
212 |     num_interpolants = 8
213 |     min_coords_x = [0, 2, 3, 5]
214 |     max_coords_x = [1, 4, 6, 7]
215 |     min_coords_y = [0, 1, 3, 6]
216 |     max_coords_y = [2, 4, 5, 7]
217 |     min_coords_z = [0, 1, 2, 4]
218 |     max_coords_z = [3, 5, 6, 7]
219 | 
220 |     def __init__(self, sdf_data, origin, resolution, use_abs=True, T_sdf_world=RigidTransform(from_frame='sdf', to_frame='world')):
221 |         self.data_ = sdf_data
222 |         self.origin_ = origin
223 |         self.resolution_ = resolution
224 |         self.dims_ = self.data_.shape
225 | 
226 |         # set up surface params
227 |         self.surface_thresh_ = self.resolution_ * np.sqrt(2) / 2 # resolution is max dist from surface when surf is orthogonal to diagonal grid cells
228 |         spts, _ = self.surface_points()
229 |         self.center_ = 0.5 * (np.min(spts, axis=0) + np.max(spts, axis=0))
230 |         self.points_buf_ = np.zeros([Sdf3D.num_interpolants, 3], dtype=np.int)
231 |         self.coords_buf_ = np.zeros([3,])
232 |         self.pts_ = None
233 | 
234 |         # transform sdf basis to grid (X and Z axes are flipped!)
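        # frame chain built below: 'grid' (integer voxel indices) -> 'sdf'
        # (metric coordinates at the SDF origin) -> 'world'. T_grid_sdf_
        # scales indices by the cell resolution and translates by the origin;
        # every other transform is composed from it and T_sdf_world.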
235 |         t_world_grid = self.resolution_ * self.center_
236 |         s_world_grid = 1.0 / self.resolution_
237 |         t_grid_sdf = self.origin
238 |         self.T_grid_sdf_ = SimilarityTransform(translation=t_grid_sdf,
239 |                                                scale=self.resolution,
240 |                                                from_frame='grid',
241 |                                                to_frame='sdf')
242 |         self.T_sdf_world_ = T_sdf_world
243 |         self.T_grid_world_ = self.T_sdf_world_ * self.T_grid_sdf_
244 | 
245 |         self.T_sdf_grid_ = self.T_grid_sdf_.inverse()
246 |         self.T_world_grid_ = self.T_grid_world_.inverse()
247 |         self.T_world_sdf_ = self.T_sdf_world_.inverse()
248 | 
249 |         # optionally use only the absolute values (useful for non-closed meshes in 3D)
250 |         self.use_abs_ = use_abs
251 |         if use_abs:
252 |             self.data_ = np.abs(self.data_)
253 | 
254 |         self._compute_gradients()
255 | 
256 |     def transform(self, delta_T):
257 |         """ Creates a new SDF with a given pose with respect to world coordinates.
258 | 
259 |         Parameters
260 |         ----------
261 |         delta_T : :obj:`autolab_core.RigidTransform`
262 |             transform from cur sdf to transformed sdf coords
263 |         """
264 |         new_T_sdf_world = self.T_sdf_world_ * delta_T.inverse().as_frames('sdf', 'sdf')
265 |         return Sdf3D(self.data_, self.origin_, self.resolution_, use_abs=self.use_abs_,
266 |                      T_sdf_world=new_T_sdf_world)
267 | 
268 |     def _signed_distance(self, coords):
269 |         """Returns the signed distance at the given coordinates, interpolating
270 |         if necessary.
271 | 
272 |         Parameters
273 |         ----------
274 |         coords : :obj:`numpy.ndarray` of int
275 |             A 3-dimensional ndarray that indicates the desired
276 |             coordinates in the grid.
277 | 
278 |         Returns
279 |         -------
280 |         float
281 |             The signed distance at the given coords (interpolated).
282 | 
283 |         Raises
284 |         ------
285 |         IndexError
286 |             If the coords vector does not have three entries.
287 |         """
288 |         # validate the query dimensionality
289 |         if len(coords) != 3:
290 |             raise IndexError('Indexing must be 3 dimensional')
291 |         if self.is_out_of_bounds(coords):
292 |             logging.debug('Out of bounds access. Snapping to SDF dims')
293 | 
294 |         # snap to grid dims
295 |         self.coords_buf_[0] = max(0, min(coords[0], self.dims_[0] - 1))
296 |         self.coords_buf_[1] = max(0, min(coords[1], self.dims_[1] - 1))
297 |         self.coords_buf_[2] = max(0, min(coords[2], self.dims_[2] - 1))
298 | 
299 |         # regular indexing if integers
300 |         if np.issubdtype(type(coords[0]), np.integer) and \
301 |            np.issubdtype(type(coords[1]), np.integer) and \
302 |            np.issubdtype(type(coords[2]), np.integer):
303 |             return self.data_[int(self.coords_buf_[0]), int(self.coords_buf_[1]), int(self.coords_buf_[2])]
304 | 
305 |         # otherwise interpolate
306 |         min_coords = np.floor(self.coords_buf_)
307 |         max_coords = min_coords + 1 # assumed to be on grid
308 |         self.points_buf_[Sdf3D.min_coords_x, 0] = min_coords[0]
309 |         self.points_buf_[Sdf3D.max_coords_x, 0] = max_coords[0]
310 |         self.points_buf_[Sdf3D.min_coords_y, 1] = min_coords[1]
311 |         self.points_buf_[Sdf3D.max_coords_y, 1] = max_coords[1]
312 |         self.points_buf_[Sdf3D.min_coords_z, 2] = min_coords[2]
313 |         self.points_buf_[Sdf3D.max_coords_z, 2] = max_coords[2]
314 | 
315 |         # trilinearly interpolate over the 8 surrounding grid points
316 |         sd = 0.0
317 |         for i in range(Sdf3D.num_interpolants):
318 |             p = self.points_buf_[i,:]
319 |             if self.is_out_of_bounds(p):
320 |                 v = 0.0
321 |             else:
322 |                 v = self.data_[p[0], p[1], p[2]]
323 |             w = np.prod(-np.abs(p - self.coords_buf_) + 1)
324 |             sd = sd + w * v
325 | 
326 |         return sd
327 | 
328 |     def __getitem__(self, coords):
329 |         """Returns the signed distance at the given coordinates.
330 | 
331 |         Parameters
332 |         ----------
333 |         coords : :obj:`numpy.ndarray` of int
334 |             A 3-dimensional ndarray that indicates the desired
335 |             coordinates in the grid.
336 | 
337 |         Returns
338 |         -------
339 |         float
340 |             The signed distance at the given coords (interpolated).
341 | 
342 |         Raises
343 |         ------
344 |         IndexError
345 |             If the coords vector does not have three entries.
346 |         """
347 |         return self._signed_distance(coords)
348 | 
349 |     def gradient(self, coords):
350 |         """Returns the SDF gradient at the given coordinates, interpolating if necessary
351 | 
352 |         Parameters
353 |         ----------
354 |         coords : :obj:`numpy.ndarray` of int
355 |             A 3-dimensional ndarray that indicates the desired
356 |             coordinates in the grid.
357 | 
358 |         Returns
359 |         -------
360 |         :obj:`numpy.ndarray` of float
361 |             The gradient at the given coords (interpolated).
362 | 
363 |         Raises
364 |         ------
365 |         IndexError
366 |             If the coords vector does not have three entries.
367 |         """
368 |         if len(coords) != 3:
369 |             raise IndexError('Indexing must be 3 dimensional')
370 | 
371 |         # log warning if out of bounds access
372 |         if self.is_out_of_bounds(coords):
373 |             logging.debug('Out of bounds access. Snapping to SDF dims')
374 | 
375 |         # snap to grid dims
376 |         self.coords_buf_[0] = max(0, min(coords[0], self.dims_[0] - 1))
377 |         self.coords_buf_[1] = max(0, min(coords[1], self.dims_[1] - 1))
378 |         self.coords_buf_[2] = max(0, min(coords[2], self.dims_[2] - 1))
379 | 
380 |         # regular indexing if integers
381 |         if type(coords[0]) is int and type(coords[1]) is int and type(coords[2]) is int:
382 |             self.coords_buf_ = self.coords_buf_.astype(np.int)
383 |             return np.array([g[self.coords_buf_[0], self.coords_buf_[1], self.coords_buf_[2]] for g in self.gradients_])
384 | 
385 |         # otherwise interpolate
386 |         min_coords = np.floor(self.coords_buf_)
387 |         max_coords = min_coords + 1
388 |         self.points_buf_[Sdf3D.min_coords_x, 0] = min_coords[0]
389 |         self.points_buf_[Sdf3D.max_coords_x, 0] = max_coords[0]
390 |         self.points_buf_[Sdf3D.min_coords_y, 1] = min_coords[1]
391 |         self.points_buf_[Sdf3D.max_coords_y, 1] = max_coords[1]
392 |         self.points_buf_[Sdf3D.min_coords_z, 2] = min_coords[2]
393 |         self.points_buf_[Sdf3D.max_coords_z, 2] = max_coords[2]
394 | 
395 |         # trilinear interpolation of the gradient
396 |         g = np.zeros(3)
397 |         gp = np.zeros(3)
398 |         w_sum = 0.0
399 |         for i in range(Sdf3D.num_interpolants):
400 |             p = self.points_buf_[i,:]
401 |             if self.is_out_of_bounds(p):
402 |                 gp[0] = 0.0
403 |                 gp[1] = 0.0
404 |                 gp[2] = 0.0
405 |             else:
406 |                 gp[0] = self.gradients_[0][p[0], p[1], p[2]]
407 |                 gp[1] = self.gradients_[1][p[0], p[1], p[2]]
408 |                 gp[2] = self.gradients_[2][p[0], p[1], p[2]]
409 | 
410 |             w = np.prod(-np.abs(p - self.coords_buf_) + 1)
411 |             g = g + w * gp
412 | 
413 |         return g
414 | 
415 |     def curvature(self, coords, delta=0.001):
416 |         """
417 |         Returns an approximation to the local SDF curvature (Hessian) at the
418 |         given coordinate in grid basis.
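
        The estimate below is a symmetrized central difference of the
        interpolated gradient: for each axis i, column H[:, i] is
        approximated by (grad(x + delta*e_i) - grad(x - delta*e_i)) / (4*delta)
        and the result is then symmetrized as H + H^T, which averages the
        two finite-difference estimates of each mixed partial.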
419 | 420 | Parameters 421 | --------- 422 | coords : numpy 3-vector 423 | the grid coordinates at which to get the curvature 424 | 425 | Returns 426 | ------- 427 | curvature : 3x3 ndarray of the curvature at the surface points 428 | """ 429 | # perturb local coords 430 | coords_x_up = coords + np.array([delta, 0, 0]) 431 | coords_x_down = coords + np.array([-delta, 0, 0]) 432 | coords_y_up = coords + np.array([0, delta, 0]) 433 | coords_y_down = coords + np.array([0, -delta, 0]) 434 | coords_z_up = coords + np.array([0, 0, delta]) 435 | coords_z_down = coords + np.array([0, 0, -delta]) 436 | 437 | # get gradient 438 | grad_x_up = self.gradient(coords_x_up) 439 | grad_x_down = self.gradient(coords_x_down) 440 | grad_y_up = self.gradient(coords_y_up) 441 | grad_y_down = self.gradient(coords_y_down) 442 | grad_z_up = self.gradient(coords_z_up) 443 | grad_z_down = self.gradient(coords_z_down) 444 | 445 | # finite differences 446 | curvature_x = (grad_x_up - grad_x_down) / (4 * delta) 447 | curvature_y = (grad_y_up - grad_y_down) / (4 * delta) 448 | curvature_z = (grad_z_up - grad_z_down) / (4 * delta) 449 | curvature = np.c_[curvature_x, np.c_[curvature_y, curvature_z]] 450 | curvature = curvature + curvature.T 451 | return curvature 452 | 453 | def surface_normal(self, coords, delta=1.5): 454 | """Returns the sdf surface normal at the given coordinates by 455 | computing the tangent plane using SDF interpolation. 456 | 457 | Parameters 458 | ---------- 459 | coords : :obj:`numpy.ndarray` of int 460 | A 3-dimensional ndarray that indicates the desired 461 | coordinates in the grid. 462 | 463 | delta : float 464 | A radius for collecting surface points near the target coords 465 | for calculating the surface normal. 466 | 467 | Returns 468 | ------- 469 | :obj:`numpy.ndarray` of float 470 | The 3-dimensional ndarray that represents the surface normal. 471 | 472 | Raises 473 | ------ 474 | IndexError 475 | If the coords vector does not have three entries. 476 | """ 477 | if len(coords) != 3: 478 | raise IndexError('Indexing must be 3 dimensional') 479 | 480 | # log warning if out of bounds access 481 | if self.is_out_of_bounds(coords): 482 | logging.debug('Out of bounds access. Snapping to SDF dims') 483 | 484 | # snap to grid dims 485 | coords[0] = max(0, min(coords[0], self.dims_[0] - 1)) 486 | coords[1] = max(0, min(coords[1], self.dims_[1] - 1)) 487 | coords[2] = max(0, min(coords[2], self.dims_[2] - 1)) 488 | index_coords = np.zeros(3) 489 | 490 | # check points on surface 491 | sdf_val = self[coords] 492 | if np.abs(sdf_val) >= self.surface_thresh_: 493 | logging.warning('Cannot compute normal. 
Point must be on surface') 494 | return None 495 | 496 | # collect all surface points within the delta sphere 497 | X = [] 498 | d = np.zeros(3) 499 | dx = -delta 500 | while dx <= delta: 501 | dy = -delta 502 | while dy <= delta: 503 | dz = -delta 504 | while dz <= delta: 505 | d = np.array([dx, dy, dz]) 506 | if dx != 0 or dy != 0 or dz != 0: 507 | d = delta * d / np.linalg.norm(d) 508 | index_coords[0] = coords[0] + d[0] 509 | index_coords[1] = coords[1] + d[1] 510 | index_coords[2] = coords[2] + d[2] 511 | sdf_val = self[index_coords] 512 | if np.abs(sdf_val) < self.surface_thresh_: 513 | X.append([index_coords[0], index_coords[1], index_coords[2], sdf_val]) 514 | dz += delta 515 | dy += delta 516 | dx += delta 517 | 518 | # fit a plane to the surface points 519 | X.sort(key = lambda x: x[3]) 520 | X = np.array(X)[:,:3] 521 | A = X - np.mean(X, axis=0) 522 | try: 523 | U, S, V = np.linalg.svd(A.T) 524 | n = U[:,2] 525 | except: 526 | logging.warning('Tangent plane does not exist. Returning None.') 527 | return None 528 | return n 529 | 530 | def surface_points(self, grid_basis=True): 531 | """Returns the points on the surface. 532 | 533 | Parameters 534 | ---------- 535 | grid_basis : bool 536 | If False, the surface points are transformed to the world frame. 537 | If True (default), the surface points are left in grid coordinates. 538 | 539 | Returns 540 | ------- 541 | :obj:`tuple` of :obj:`numpy.ndarray` of int, :obj:`numpy.ndarray` of float 542 | The points on the surface and the signed distances at those points. 543 | """ 544 | surface_points = np.where(np.abs(self.data_) < self.surface_thresh_) 545 | x = surface_points[0] 546 | y = surface_points[1] 547 | z = surface_points[2] 548 | surface_points = np.c_[x, np.c_[y, z]] 549 | surface_vals = self.data_[surface_points[:,0], surface_points[:,1], surface_points[:,2]] 550 | if not grid_basis: 551 | surface_points = self.transform_pt_grid_to_obj(surface_points.T) 552 | surface_points = surface_points.T 553 | 554 | return surface_points, surface_vals 555 | 556 | def rescale(self, scale): 557 | """ Rescale an SDF by a given scale factor. 558 | 559 | Parameters 560 | ---------- 561 | scale : float 562 | the amount to scale the SDF 563 | 564 | Returns 565 | ------- 566 | :obj:`Sdf3D` 567 | new sdf with given scale 568 | """ 569 | resolution_tf = scale * self.resolution_ 570 | return Sdf3D(self.data_, self.origin_, resolution_tf, use_abs=self.use_abs_, 571 | T_sdf_world=self.T_sdf_world_) 572 | 573 | def transform_dense(self, delta_T, detailed = False): 574 | """ Transform the grid by pose T and scale with canonical reference 575 | frame at the SDF center with axis alignment. 
576 | 
577 |         Parameters
578 |         ----------
579 |         delta_T : SimilarityTransform
580 |             the transformation from the current frame of reference to the new frame of reference
581 |         detailed : bool
582 |             whether or not to use interpolation
583 | 
584 |         Returns
585 |         -------
586 |         :obj:`Sdf3D`
587 |             new sdf with grid warped by T
588 |         """
589 |         # map all surface points to their new location
590 |         start_t = time.clock()
591 | 
592 |         # form points array
593 |         if self.pts_ is None:
594 |             [x_ind, y_ind, z_ind] = np.indices(self.dims_)
595 |             self.pts_ = np.c_[x_ind.flatten().T, np.c_[y_ind.flatten().T, z_ind.flatten().T]].astype(np.float32)
596 | 
597 |         # transform points
598 |         num_pts = self.pts_.shape[0]
599 |         pts_sdf = self.T_grid_sdf_ * PointCloud(self.pts_.T, frame='grid')
600 |         pts_sdf_tf = delta_T.as_frames('sdf', 'sdf') * pts_sdf
601 |         pts_grid_tf = self.T_sdf_grid_ * pts_sdf_tf
602 |         pts_tf = pts_grid_tf.data.T
603 |         all_points_t = time.clock()
604 | 
605 |         # transform the center
606 |         origin_sdf = self.T_grid_sdf_ * Point(self.origin_, frame='grid')
607 |         origin_sdf_tf = delta_T.as_frames('sdf', 'sdf') * origin_sdf
608 |         origin_tf = self.T_sdf_grid_ * origin_sdf_tf
609 |         origin_tf = origin_tf.data
610 | 
611 |         # use same resolution (since indices will be rescaled)
612 |         resolution_tf = self.resolution_
613 |         origin_res_t = time.clock()
614 | 
615 |         # add each point to the new pose
616 |         if detailed:
617 |             sdf_data_tf = np.zeros([num_pts, 1])
618 |             for i in range(num_pts):
619 |                 sdf_data_tf[i] = self[pts_tf[i,0], pts_tf[i,1], pts_tf[i,2]]
620 |         else:
621 |             pts_tf_round = np.round(pts_tf).astype(np.int64)
622 | 
623 |             # snap to closest boundary
624 |             pts_tf_round[:,0] = np.max(np.c_[np.zeros([num_pts, 1]), pts_tf_round[:,0]], axis=1)
625 |             pts_tf_round[:,0] = np.min(np.c_[(self.dims_[0] - 1) * np.ones([num_pts, 1]), pts_tf_round[:,0]], axis=1)
626 | 
627 |             pts_tf_round[:,1] = np.max(np.c_[np.zeros([num_pts, 1]), pts_tf_round[:,1]], axis=1)
628 |             pts_tf_round[:,1] = np.min(np.c_[(self.dims_[1] - 1) * np.ones([num_pts, 1]), pts_tf_round[:,1]], axis=1)
629 | 
630 |             pts_tf_round[:,2] = np.max(np.c_[np.zeros([num_pts, 1]), pts_tf_round[:,2]], axis=1)
631 |             pts_tf_round[:,2] = np.min(np.c_[(self.dims_[2] - 1) * np.ones([num_pts, 1]), pts_tf_round[:,2]], axis=1)
632 | 
633 |             sdf_data_tf = self.data_[pts_tf_round[:,0], pts_tf_round[:,1], pts_tf_round[:,2]]
634 | 
635 |         sdf_data_tf_grid = sdf_data_tf.reshape(self.dims_)
636 |         tf_t = time.clock()
637 | 
638 |         logging.debug('Sdf3D: Time to transform coords: %f' %(all_points_t - start_t))
639 |         logging.debug('Sdf3D: Time to transform origin: %f' %(origin_res_t - all_points_t))
640 |         logging.debug('Sdf3D: Time to transfer sd: %f' %(tf_t - origin_res_t))
641 |         return Sdf3D(sdf_data_tf_grid, origin_tf, resolution_tf, use_abs=self.use_abs_, T_sdf_world=self.T_sdf_world_)
642 | 
643 |     def transform_pt_obj_to_grid(self, x_sdf, direction = False):
644 |         """ Converts a point in sdf coords to the grid basis. If direction then don't translate.
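
        A minimal usage sketch (``my_sdf`` is any Sdf3D and the points form
        a 3xN array in meters):

        >>> pts_sdf = np.array([[0.00, 0.01],
        ...                     [0.00, 0.00],
        ...                     [0.00, 0.02]])
        >>> pts_grid = my_sdf.transform_pt_obj_to_grid(pts_sdf)  # 3xN, grid units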
645 | 646 | Parameters 647 | ---------- 648 | x_sdf : numpy 3xN ndarray or numeric scalar 649 | points to transform from sdf basis in meters to grid basis 650 | 651 | Returns 652 | ------- 653 | x_grid : numpy 3xN ndarray or scalar 654 | points in grid basis 655 | """ 656 | if isinstance(x_sdf, Number): 657 | return self.T_world_grid_.scale * x_sdf 658 | if direction: 659 | points_sdf = NormalCloud(x_sdf.astype(np.float32), frame='world') 660 | else: 661 | points_sdf = PointCloud(x_sdf.astype(np.float32), frame='world') 662 | x_grid = self.T_world_grid_ * points_sdf 663 | return x_grid.data 664 | 665 | def transform_pt_grid_to_obj(self, x_grid, direction = False): 666 | """ Converts a point in grid coords to the world basis. If direction then don't translate. 667 | 668 | Parameters 669 | ---------- 670 | x_grid : numpy 3xN ndarray or numeric scalar 671 | points to transform from grid basis to sdf basis in meters 672 | 673 | Returns 674 | ------- 675 | x_sdf : numpy 3xN ndarray 676 | points in sdf basis (meters) 677 | """ 678 | if isinstance(x_grid, Number): 679 | return self.T_grid_world_.scale * x_grid 680 | if direction: 681 | points_grid = NormalCloud(x_grid.astype(np.float32), frame='grid') 682 | else: 683 | points_grid = PointCloud(x_grid.astype(np.float32), frame='grid') 684 | x_sdf = self.T_grid_world_ * points_grid 685 | return x_sdf.data 686 | 687 | @staticmethod 688 | def find_zero_crossing_linear(x1, y1, x2, y2): 689 | """ Find zero crossing using linear approximation""" 690 | # NOTE: use sparingly, approximations can be shoddy 691 | d = x2 - x1 692 | t1 = 0 693 | t2 = np.linalg.norm(d) 694 | v = d / t2 695 | 696 | m = (y2 - y1) / (t2 - t1) 697 | b = y1 698 | t_zc = -b / m 699 | x_zc = x1 + t_zc * v 700 | return x_zc 701 | 702 | @staticmethod 703 | def find_zero_crossing_quadratic(x1, y1, x2, y2, x3, y3, eps = 1.0): 704 | """ Find zero crossing using quadratic approximation along 1d line""" 705 | # compute coords along 1d line 706 | v = x2 - x1 707 | v = v / np.linalg.norm(v) 708 | if v[v!=0].shape[0] == 0: 709 | logging.error('Difference is 0. Probably a bug') 710 | 711 | t1 = 0 712 | t2 = (x2 - x1)[v!=0] / v[v!=0] 713 | t2 = t2[0] 714 | t3 = (x3 - x1)[v!=0] / v[v!=0] 715 | t3 = t3[0] 716 | 717 | # solve for quad approx 718 | x1_row = np.array([t1**2, t1, 1]) 719 | x2_row = np.array([t2**2, t2, 1]) 720 | x3_row = np.array([t3**2, t3, 1]) 721 | X = np.array([x1_row, x2_row, x3_row]) 722 | y_vec = np.array([y1, y2, y3]) 723 | try: 724 | w = np.linalg.solve(X, y_vec) 725 | except np.linalg.LinAlgError: 726 | logging.error('Singular matrix. Probably a bug') 727 | return None 728 | 729 | # get positive roots 730 | possible_t = np.roots(w) 731 | t_zc = None 732 | for i in range(possible_t.shape[0]): 733 | if possible_t[i] >= 0 and possible_t[i] <= 10 and not np.iscomplex(possible_t[i]): 734 | t_zc = possible_t[i] 735 | 736 | # if no positive roots find min 737 | if np.abs(w[0]) < 1e-10: 738 | return None 739 | 740 | if t_zc is None: 741 | t_zc = -w[1] / (2 * w[0]) 742 | 743 | if t_zc < -eps or t_zc > eps: 744 | return None 745 | 746 | x_zc = x1 + t_zc * v 747 | return x_zc 748 | -------------------------------------------------------------------------------- /meshpy/sdf_file.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Reads and writes sdfs to file 3 | Author: Jeff Mahler 4 | ''' 5 | import numpy as np 6 | import os 7 | 8 | import sdf 9 | 10 | class SdfFile: 11 | """ 12 | A Signed Distance Field .sdf file reader and writer. 
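
    The plain-text 3-D layout consumed by this reader is, line by line: the
    grid dimensions ``nx ny nz``, the grid origin ``ox oy oz``, the cell
    resolution, and then one signed distance per line with x varying
    fastest (see ``_read_3d`` below). For example, a hypothetical 2x1x1
    grid:

        2 1 1
        0.0 0.0 0.0
        0.005
        0.01
        -0.01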
13 | 14 | Attributes 15 | ---------- 16 | filepath : :obj:`str` 17 | The full path to the .sdf or .csv file associated with this reader/writer. 18 | """ 19 | def __init__(self, filepath): 20 | """Construct and initialize a .sdf file reader and writer. 21 | 22 | Parameters 23 | ---------- 24 | filepath : :obj:`str` 25 | The full path to the desired .sdf or .csv file 26 | 27 | Raises 28 | ------ 29 | ValueError 30 | If the file extension is not .sdf of .csv. 31 | """ 32 | self.filepath_ = filepath 33 | file_root, file_ext = os.path.splitext(self.filepath_) 34 | 35 | if file_ext == '.sdf': 36 | self.use_3d_ = True 37 | elif file_ext == '.csv': 38 | self.use_3d_ = False 39 | else: 40 | raise ValueError('Extension %s invalid for SDFs' %(file_ext)) 41 | 42 | @property 43 | def filepath(self): 44 | """Returns the full path to the file associated with this reader/writer. 45 | 46 | Returns 47 | ------- 48 | :obj:`str` 49 | The full path to the file associated with this reader/writer. 50 | """ 51 | return self.filepath_ 52 | 53 | def read(self): 54 | """Reads in the SDF file and returns a Sdf object. 55 | 56 | Returns 57 | ------- 58 | :obj:`Sdf` 59 | A Sdf created from the data in the file. 60 | """ 61 | if self.use_3d_: 62 | return self._read_3d() 63 | else: 64 | return self._read_2d() 65 | 66 | 67 | def _read_3d(self): 68 | """Reads in a 3D SDF file and returns a Sdf object. 69 | 70 | Returns 71 | ------- 72 | :obj:`Sdf3D` 73 | A 3DSdf created from the data in the file. 74 | """ 75 | if not os.path.exists(self.filepath_): 76 | return None 77 | 78 | my_file = open(self.filepath_, 'r') 79 | nx, ny, nz = [int(i) for i in my_file.readline().split()] #dimension of each axis should all be equal for LSH 80 | ox, oy, oz = [float(i) for i in my_file.readline().split()] #shape origin 81 | dims = np.array([nx, ny, nz]) 82 | origin = np.array([ox, oy, oz]) 83 | 84 | resolution = float(my_file.readline()) # resolution of the grid cells in original mesh coords 85 | sdf_data = np.zeros(dims) 86 | 87 | # loop through file, getting each value 88 | count = 0 89 | for k in range(nz): 90 | for j in range(ny): 91 | for i in range(nx): 92 | sdf_data[i][j][k] = float(my_file.readline()) 93 | count += 1 94 | my_file.close() 95 | return sdf.Sdf3D(sdf_data, origin, resolution) 96 | 97 | def _read_2d(self): 98 | """Reads in a 2D SDF file and returns a Sdf object. 99 | 100 | Returns 101 | ------- 102 | :obj:`Sdf2D` 103 | A 2DSdf created from the data in the file. 104 | """ 105 | if not os.path.exists(self.filepath_): 106 | return None 107 | 108 | sdf_data = np.loadtxt(self.filepath_, delimiter=',') 109 | return sdf.Sdf2D(sdf_data) 110 | 111 | def write(self, sdf): 112 | """Writes an SDF to a file. 113 | 114 | Parameters 115 | ---------- 116 | sdf : :obj:`Sdf` 117 | An Sdf object to write out. 118 | 119 | Note 120 | ---- 121 | This is not currently implemented or supported. 122 | """ 123 | pass 124 | 125 | if __name__ == '__main__': 126 | pass 127 | 128 | -------------------------------------------------------------------------------- /meshpy/stable_pose.py: -------------------------------------------------------------------------------- 1 | """ 2 | A basic struct-like Stable Pose class to make accessing pose probability and rotation matrix easier 3 | 4 | Author: Matt Matl and Nikhil Sharma 5 | """ 6 | import numpy as np 7 | 8 | from autolab_core import RigidTransform 9 | 10 | d_theta = np.deg2rad(1) 11 | 12 | class StablePose(object): 13 | """A representation of a mesh's stable pose. 
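
    A hypothetical use, given a StablePose instance ``stp``:

    >>> T_obj_table = stp.T_obj_table   # rotation-only RigidTransform
    >>> T_obj_world = stp.T_obj_world   # also accounts for the resting point x0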
14 | 15 | Attributes 16 | ---------- 17 | p : float 18 | Probability associated with this stable pose. 19 | r : :obj:`numpy.ndarray` of :obj`numpy.ndarray` of float 20 | 3x3 rotation matrix that rotates the mesh into the stable pose from 21 | standardized coordinates. 22 | x0 : :obj:`numpy.ndarray` of float 23 | 3D point in the mesh that is resting on the table. 24 | face : :obj:`numpy.ndarray` 25 | 3D vector of indices corresponding to vertices forming the resting face 26 | stp_id : :obj:`str` 27 | A string identifier for the stable pose 28 | T_obj_table : :obj:`RigidTransform` 29 | A RigidTransform representation of the pose's rotation matrix. 30 | """ 31 | def __init__(self, p, r, x0, face=None, stp_id=-1): 32 | """Create a new stable pose object. 33 | 34 | Parameters 35 | ---------- 36 | p : float 37 | Probability associated with this stable pose. 38 | r : :obj:`numpy.ndarray` of :obj`numpy.ndarray` of float 39 | 3x3 rotation matrix that rotates the mesh into the stable pose from 40 | standardized coordinates. 41 | x0 : :obj:`numpy.ndarray` of float 42 | 3D point in the mesh that is resting on the table. 43 | face : :obj:`numpy.ndarray` 44 | 3D vector of indices corresponding to vertices forming the resting face 45 | stp_id : :obj:`str` 46 | A string identifier for the stable pose 47 | """ 48 | self.p = p 49 | self.r = r 50 | self.x0 = x0 51 | self.face = face 52 | self.id = stp_id 53 | 54 | # fix stable pose bug 55 | if np.abs(np.linalg.det(self.r) + 1) < 0.01: 56 | self.r[1,:] = -self.r[1,:] 57 | 58 | def __eq__(self, other): 59 | """ Check equivalence by rotation about the z axis """ 60 | if not isinstance(other, StablePose): 61 | raise ValueError('Can only compare stable pose objects') 62 | R0 = self.r 63 | R1 = other.r 64 | dR = R1.T.dot(R0) 65 | theta = 0 66 | while theta < 2 * np.pi: 67 | Rz = RigidTransform.z_axis_rotation(theta) 68 | dR = R1.T.dot(Rz).dot(R0) 69 | if np.linalg.norm(dR - np.eye(3)) < 1e-5: 70 | return True 71 | theta += d_theta 72 | return False 73 | 74 | @property 75 | def T_obj_table(self): 76 | return RigidTransform(rotation=self.r, from_frame='obj', to_frame='table') 77 | 78 | 79 | @property 80 | def T_obj_world(self): 81 | T_world_obj = RigidTransform(rotation=self.r.T, 82 | translation=self.x0, 83 | from_frame='world', 84 | to_frame='obj') 85 | return T_world_obj.inverse() 86 | 87 | -------------------------------------------------------------------------------- /meshpy/stp_file.py: -------------------------------------------------------------------------------- 1 | """ 2 | Contains class that allows reading and writing stable pose data. 3 | Run this file with the following command: python stp_file.py MIN_PROB DIR_PATH 4 | 5 | MIN_PROB -- Minimum Probability of a given pose being realized. 6 | DIR_PATH -- Path to directory containing .obj files to be converted to .stp files (e.g. ~/obj_folder/) 7 | 8 | Authors: Nikhil Sharma and Matt Matl 9 | """ 10 | 11 | import os 12 | import numpy as np 13 | 14 | import mesh 15 | import stable_pose as sp 16 | 17 | class StablePoseFile: 18 | """ 19 | A Stable Pose .stp file reader and writer. 20 | 21 | Attributes 22 | ---------- 23 | filepath : :obj:`str` 24 | The full path to the .stp file associated with this reader/writer. 25 | """ 26 | 27 | def __init__(self, filepath): 28 | """Construct and initialize a .stp file reader and writer. 
29 | 30 | Parameters 31 | ---------- 32 | filepath : :obj:`str` 33 | The full path to the desired .stp file 34 | 35 | Raises 36 | ------ 37 | ValueError 38 | If the file extension is not .stp. 39 | """ 40 | self.filepath_ = filepath 41 | file_root, file_ext = os.path.splitext(self.filepath_) 42 | if file_ext != '.stp': 43 | raise ValueError('Extension %s invalid for STPs' %(file_ext)) 44 | 45 | @property 46 | def filepath(self): 47 | """Returns the full path to the .stp file associated with this reader/writer. 48 | 49 | Returns 50 | ------- 51 | :obj:`str` 52 | The full path to the .stp file associated with this reader/writer. 53 | """ 54 | return self.filepath_ 55 | 56 | def read(self): 57 | """Reads in the .stp file and returns a list of StablePose objects. 58 | 59 | Returns 60 | ------- 61 | :obj:`list` of :obj`StablePose` 62 | A list of StablePose objects read from the .stp file. 63 | """ 64 | stable_poses = [] 65 | f = open(self.filepath_, "r") 66 | data = [line.split() for line in f] 67 | for i in range(len(data)): 68 | if len(data[i]) > 0 and data[i][0] == "p": 69 | p = float(data[i][1]) 70 | r = [[data[i+1][1], data[i+1][2], data[i+1][3]], [data[i+2][0], data[i+2][1], 71 | data[i+2][2]], [data[i+3][0], data[i+3][1], data[i+3][2]]] 72 | r = np.array(r).astype(np.float64) 73 | x0 = np.array([data[i+4][1], data[i+4][2], data[i+4][3]]).astype(np.float64) 74 | stable_poses.append(sp.StablePose(p, r, x0)) 75 | return stable_poses 76 | 77 | def write(self, stable_poses, min_prob=0): 78 | """Writes out the stable poses for a mesh with a minimum probability filter. 79 | 80 | Parameters 81 | ---------- 82 | stable_poses: :obj:`list` of :obj:`StablePose` 83 | List of stable poses that should be written to the file. 84 | 85 | min_prob : float 86 | The minimum probability for a pose to actually be written to the 87 | file. 
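
        Examples
        --------
        Each retained pose is written as a probability record, a 3x3
        rotation matrix, and a resting point, e.g. (hypothetical values):

            p 0.250000
            r 1.000000 0.000000 0.000000
              0.000000 1.000000 0.000000
              0.000000 0.000000 1.000000
            x0 0.000000 0.000000 0.010000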
88 | """ 89 | R_list = [] 90 | for pose in stable_poses: 91 | if pose.p >= min_prob: 92 | R_list.append([pose.p, pose.r, pose.x0]) 93 | 94 | f = open(self.filepath_[:-4] + ".stp", "w") 95 | f.write("#############################################################\n") 96 | f.write("# STP file generated by UC Berkeley Automation Sciences Lab #\n") 97 | f.write("# #\n") 98 | f.write("# Num Poses: %d" %len(R_list)) 99 | for _ in range(46 - len(str(len(R_list)))): 100 | f.write(" ") 101 | f.write(" #\n") 102 | f.write("# Min Probability: %s" %str(min_prob)) 103 | for _ in range(40 - len(str(min_prob))): 104 | f.write(" ") 105 | f.write(" #\n") 106 | f.write("# #\n") 107 | f.write("#############################################################\n") 108 | f.write("\n") 109 | 110 | # adding R matrices to .stp file 111 | pose_index = 1 112 | for i in range(len(R_list)): 113 | f.write("p %f\n" %R_list[i][0]) 114 | f.write("r %f %f %f\n" %(R_list[i][1][0][0], R_list[i][1][0][1], R_list[i][1][0][2])) 115 | f.write(" %f %f %f\n" %(R_list[i][1][1][0], R_list[i][1][1][1], R_list[i][1][1][2])) 116 | f.write(" %f %f %f\n" %(R_list[i][1][2][0], R_list[i][1][2][1], R_list[i][1][2][2])) 117 | f.write("x0 %f %f %f\n" %(R_list[i][2][0], R_list[i][2][1], R_list[i][2][2])) 118 | f.write("\n\n") 119 | f.close() 120 | 121 | if __name__ == '__main__': 122 | pass 123 | -------------------------------------------------------------------------------- /meshpy/urdf_writer.py: -------------------------------------------------------------------------------- 1 | """ 2 | File for loading and saving meshes as URDF files 3 | Author: Jeff Mahler 4 | """ 5 | import IPython 6 | import logging 7 | import numpy as np 8 | import os 9 | from subprocess import Popen 10 | 11 | import xml.etree.cElementTree as et 12 | 13 | from mesh import Mesh3D 14 | from obj_file import ObjFile 15 | 16 | def split_vhacd_output(mesh_filename): 17 | """ Splits the output of vhacd into multiple .OBJ files. 
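
    V-HACD emits all convex pieces into a single .obj in which every piece
    begins with an ``o <name>`` record and the ``f`` records use global
    vertex indices, e.g. (a hypothetical two-piece file):

        o convex_0
        v ...
        f 1 2 3
        o convex_1
        v ...
        f 4 5 6

    The loop below rebases those indices so that each piece can be written
    to its own ``<root>_<name>.obj`` file.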
--------------------------------------------------------------------------------
/meshpy/urdf_writer.py:
--------------------------------------------------------------------------------
1 | """
2 | File for loading and saving meshes as URDF files
3 | Author: Jeff Mahler
4 | """
5 | import IPython
6 | import logging
7 | import numpy as np
8 | import os
9 | from subprocess import Popen
10 | 
11 | import xml.etree.cElementTree as et
12 | 
13 | from mesh import Mesh3D
14 | from obj_file import ObjFile
15 | 
16 | def split_vhacd_output(mesh_filename):
17 |     """ Splits the output of v-hacd into multiple .OBJ files.
18 | 
19 |     Parameters
20 |     ----------
21 |     mesh_filename : :obj:`str`
22 |         the filename of the mesh from v-hacd
23 | 
24 |     Returns
25 |     -------
26 |     :obj:`list` of :obj:`str`
27 |         the string filenames of the individual convex pieces
28 |     """
29 |     # read params
30 |     file_root, file_ext = os.path.splitext(mesh_filename)
31 |     f = open(mesh_filename, 'r')
32 |     lines = f.readlines()
33 |     f.close()
34 |     line_num = 0
35 |     num_lines = len(lines)
36 |     num_verts = 0
37 |     vert_offset = 0
38 |     cvx_piece_f = None
39 |     out_filenames = []
40 | 
41 |     # create a new output .OBJ file for each "o {name}" piece in the input file
42 |     while line_num < num_lines:
43 |         line = lines[line_num]
44 |         tokens = line.split()
45 | 
46 |         # new convex piece
47 |         if tokens and tokens[0] == 'o':
48 |             # write old convex piece to file
49 |             if cvx_piece_f is not None:
50 |                 cvx_piece_f.close()
51 | 
52 |             # init new convex piece
53 |             cvx_piece_name = tokens[1]
54 |             out_filename = '%s_%s%s' %(file_root, cvx_piece_name, file_ext)
55 |             logging.info('Writing %s' %(out_filename))
56 |             cvx_piece_f = open(out_filename, 'w')
57 |             vert_offset = num_verts
58 |             out_filenames.append(out_filename)
59 |         # add to vertices
60 |         elif tokens and tokens[0] == 'v':
61 |             cvx_piece_f.write(line)
62 |             num_verts += 1
63 |         # rebase face indices so they are local to the current piece
64 |         elif tokens and tokens[0] == 'f':
65 |             v0 = int(tokens[1]) - vert_offset
66 |             v1 = int(tokens[2]) - vert_offset
67 |             v2 = int(tokens[3]) - vert_offset
68 |             f_line = 'f %d %d %d\n' %(v0, v1, v2)
69 |             cvx_piece_f.write(f_line)
70 | 
71 |         line_num += 1
72 | 
73 |     # close the last piece file
74 |     if cvx_piece_f is not None:
75 |         cvx_piece_f.close()
76 |     return out_filenames
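For reference, a sketch of the splitter's behavior on a hypothetical two-piece
v-hacd output (input file contents shown as comments; the piece names depend on
the v-hacd version):

    # piece_vhacd.obj:
    #   o convex_0
    #   v 0.0 0.0 0.0
    #   v 1.0 0.0 0.0
    #   v 0.0 1.0 0.0
    #   f 1 2 3
    #   o convex_1
    #   v 0.0 0.0 1.0
    #   v 1.0 0.0 1.0
    #   v 0.0 1.0 1.0
    #   f 4 5 6          <- vertex indices are global in the combined file
    out = split_vhacd_output('piece_vhacd.obj')
    # -> ['piece_vhacd_convex_0.obj', 'piece_vhacd_convex_1.obj']
    # the second piece's face is rewritten as 'f 1 2 3' (rebased by its 3-vertex offset)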
77 | 
78 | def convex_decomposition(mesh, cache_dir='', name='mesh'):
79 |     """ Performs a convex decomposition of the mesh using V-HACD.
80 | 
81 |     Parameters
82 |     ----------
83 |     mesh : :obj:`Mesh3D`
84 |         the mesh to decompose
85 |     cache_dir : str
86 |         a directory to store the intermediate files
87 |     name : str
88 |         the name of the mesh for the cache file
89 | 
90 |     Returns
91 |     -------
92 |     :obj:`list` of :obj:`Mesh3D`
93 |         list of mesh objects comprising the convex pieces of the object, or None if vhacd failed
94 |     :obj:`list` of str
95 |         string file roots of the convex pieces
96 |     float
97 |         total volume of the convex pieces
98 |     """
99 |     # save to file
100 |     if not os.path.exists(cache_dir):
101 |         os.mkdir(cache_dir)
102 |     obj_filename = os.path.join(cache_dir, '%s.obj' %(name))
103 |     vhacd_out_filename = os.path.join(cache_dir, '%s_vhacd.obj' %(name))
104 |     log_filename = os.path.join(cache_dir, 'vhacd_log.txt')
105 |     logging.info('Writing %s' %(obj_filename))
106 |     ObjFile(obj_filename).write(mesh)
107 | 
108 |     # use v-hacd for convex decomposition
109 |     cvx_decomp_cmd = 'vhacd --input %s --output %s --log %s' %(obj_filename,
110 |                                                                vhacd_out_filename,
111 |                                                                log_filename)
112 |     vhacd_process = Popen(cvx_decomp_cmd, bufsize=-1, close_fds=True, shell=True)
113 |     vhacd_process.wait()
114 | 
115 |     # check success
116 |     if not os.path.exists(vhacd_out_filename):
117 |         logging.error('Output mesh file %s not found. V-HACD failed. Is V-HACD installed?' %(vhacd_out_filename))
118 |         return None
119 | 
120 |     # create separate convex piece files
121 |     convex_piece_files = split_vhacd_output(vhacd_out_filename)
122 | 
123 |     # read convex pieces
124 |     convex_piece_meshes = []
125 |     convex_piece_filenames = []
126 |     convex_pieces_volume = 0.0
127 | 
128 |     # read each piece once, accumulating both the mesh list and the total volume
129 |     for convex_piece_filename in convex_piece_files:
130 |         obj_file_path, obj_file_root = os.path.split(convex_piece_filename)
131 |         of = ObjFile(convex_piece_filename)
132 |         convex_piece = of.read()
133 |         convex_pieces_volume += convex_piece.total_volume()
134 |         convex_piece_meshes.append(convex_piece)
135 |         convex_piece_filenames.append(obj_file_root)
136 | 
137 |     return convex_piece_meshes, convex_piece_filenames, convex_pieces_volume
138 | 
139 | class UrdfWriter(object):
140 |     """
141 |     A .urdf file writer.
142 | 
143 |     Attributes
144 |     ----------
145 |     filepath : :obj:`str`
146 |         The full path to the URDF directory associated with this writer.
147 |     """
148 | 
149 |     def __init__(self, filepath):
150 |         """Construct and initialize a .urdf file writer.
151 | 
152 |         Parameters
153 |         ----------
154 |         filepath : :obj:`str`
155 |             The full path to the directory in which to save the URDF file
156 | 
157 |         Raises
158 |         ------
159 |         ValueError
160 |             If the full path is not a directory
161 |         """
162 |         self.filepath_ = filepath
163 |         file_root, file_ext = os.path.splitext(self.filepath_)
164 |         file_path, file_name = os.path.split(file_root)
165 |         self.name_ = file_name
166 |         if file_ext != '':
167 |             raise ValueError('URDF path must be a directory')
168 | 
169 |     @property
170 |     def filepath(self):
171 |         """Returns the full path to the URDF directory associated with this writer.
172 | 
173 |         Returns
174 |         -------
175 |         :obj:`str`
176 |             The full path to the URDF directory associated with this writer.
177 |         """
178 |         return self.filepath_
179 | 
180 |     @property
181 |     def urdf_filename(self):
182 |         """Returns the full path to the URDF file associated with this writer.
183 | 
184 |         Returns
185 |         -------
186 |         :obj:`str`
187 |             The full path to the URDF file associated with this writer.
188 |         """
189 |         return os.path.join(self.filepath_, '%s.urdf' %(self.name_))
190 | 
191 |     def write(self, mesh):
192 |         """Writes a Mesh3D object to a .urdf file.
193 |         First decomposes the mesh using V-HACD, then writes to a .URDF
194 | 
195 |         Parameters
196 |         ----------
197 |         mesh : :obj:`Mesh3D`
198 |             The Mesh3D object to write to the .urdf file.
199 | 
200 |         Note
201 |         ----
202 |         Requires v-hacd installation.
203 |         Does not support moveable joints.
204 |         """
205 |         # perform convex decomp
206 |         decomp = convex_decomposition(mesh, cache_dir=self.filepath_, name=self.name_)
207 |         if decomp is None:
208 |             raise ValueError('Convex decomposition failed. Is V-HACD installed?')
209 |         convex_piece_meshes, convex_piece_filenames, convex_pieces_volume = decomp
210 | 
211 |         # compute an effective density so the piece masses sum to the mesh mass
212 |         effective_density = mesh.total_volume() / convex_pieces_volume
213 | 
214 |         # open an XML tree
215 |         root = et.Element('robot', name='root')
216 | 
217 |         # loop through all pieces
218 |         prev_piece_name = None
219 |         for convex_piece, filename in zip(convex_piece_meshes, convex_piece_filenames):
220 |             # set the mass properties
221 |             convex_piece.center_of_mass = mesh.center_of_mass
222 |             convex_piece.density = effective_density * mesh.density
223 | 
224 |             _, file_root = os.path.split(filename)
225 |             file_root, _ = os.path.splitext(file_root)
226 |             obj_filename = 'package://%s/%s' %(self.name_, filename)
227 | 
228 |             # write to xml
229 |             piece_name = 'link_%s'%(file_root)
230 |             I = convex_piece.inertia
231 |             link = et.SubElement(root, 'link', name=piece_name)
232 | 
233 |             inertial = et.SubElement(link, 'inertial')
234 |             origin = et.SubElement(inertial, 'origin', xyz="0 0 0", rpy="0 0 0")
235 |             mass = et.SubElement(inertial, 'mass', value='%.2E'%convex_piece.mass)
236 |             inertia = et.SubElement(inertial, 'inertia', ixx='%.2E'%I[0,0], ixy='%.2E'%I[0,1], ixz='%.2E'%I[0,2],
237 |                                     iyy='%.2E'%I[1,1], iyz='%.2E'%I[1,2], izz='%.2E'%I[2,2])
238 | 
239 |             visual = et.SubElement(link, 'visual')
240 |             origin = et.SubElement(visual, 'origin', xyz="0 0 0", rpy="0 0 0")
241 |             geometry = et.SubElement(visual, 'geometry')
242 |             mesh_element = et.SubElement(geometry, 'mesh', filename=obj_filename)
243 |             material = et.SubElement(visual, 'material', name='')
244 |             color = et.SubElement(material, 'color', rgba="0.75 0.75 0.75 1")
245 | 
246 |             collision = et.SubElement(link, 'collision')
247 |             origin = et.SubElement(collision, 'origin', xyz="0 0 0", rpy="0 0 0")
248 |             geometry = et.SubElement(collision, 'geometry')
249 |             mesh_element = et.SubElement(geometry, 'mesh', filename=obj_filename)
250 | 
251 |             # chain the pieces together with fixed joints
252 |             if prev_piece_name is not None:
253 |                 joint = et.SubElement(root, 'joint', name='%s_joint'%(piece_name), type='fixed')
254 |                 origin = et.SubElement(joint, 'origin', xyz="0 0 0", rpy="0 0 0")
255 |                 parent = et.SubElement(joint, 'parent', link=prev_piece_name)
256 |                 child = et.SubElement(joint, 'child', link=piece_name)
257 | 
258 |             prev_piece_name = piece_name
259 | 
260 |         # write URDF file
261 |         tree = et.ElementTree(root)
262 |         tree.write(self.urdf_filename)
263 | 
264 |         # write config file
265 |         root = et.Element('model')
266 |         model = et.SubElement(root, 'name')
267 |         model.text = self.name_
268 |         version = et.SubElement(root, 'version')
269 |         version.text = '1.0'
270 |         sdf = et.SubElement(root, 'sdf', version='1.4')
271 |         urdf_root, urdf_ext = os.path.splitext(self.urdf_filename)
272 |         urdf_path, urdf_name = os.path.split(urdf_root)
273 |         sdf.text = urdf_name
274 | 
275 |         author = et.SubElement(root, 'author')
276 |         et.SubElement(author, 'name').text = 'AUTOLAB meshpy'
277 |         et.SubElement(author, 'email').text = 'jmahler@berkeley.edu'
278 | 
279 |         description = et.SubElement(root, 'description')
280 |         description.text = 'My awesome %s' %(self.name_)
281 | 
282 |         tree = et.ElementTree(root)
283 |         config_filename = os.path.join(self.filepath_, 'model.config')
284 |         tree.write(config_filename)
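A usage sketch for the writer above (the paths are hypothetical, and the vhacd
binary must be on the PATH for the decomposition step to succeed):

    from meshpy import ObjFile, UrdfWriter

    mesh = ObjFile('data/meshes/part.obj').read()
    mesh.density = 1000.0                  # roughly water, in kg/m^3
    writer = UrdfWriter('output/part')     # a directory, not a .urdf path
    writer.write(mesh)
    # produces output/part/part.urdf, one .obj per convex piece,
    # and a Gazebo-style model.config alongside them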
285 | 
286 |     def write_pieces(self, meshes, center_of_mass=np.zeros(3), density=1.0):
287 |         """Writes a list of Mesh3D objects to a .urdf file.
288 | 
289 |         Parameters
290 |         ----------
291 |         meshes : :obj:`list` of :obj:`Mesh3D`
292 |             The Mesh3D objects to write to the .urdf file.
293 |         center_of_mass : :obj:`numpy.ndarray`
294 |             The center of mass of the combined object. Defaults to zero.
295 |         density : float
296 |             The density of the mesh pieces
297 | 
298 |         Note
299 |         ----
300 |         Does not support moveable joints.
301 |         """
302 |         # create output directory
303 |         out_dir = self.filepath_
304 |         if not os.path.exists(out_dir):
305 |             os.mkdir(out_dir)
306 | 
307 |         # write meshes to reference with URDF files
308 |         mesh_filenames = []
309 |         for i, mesh in enumerate(meshes):
310 |             obj_file_root = '%s_%04d.obj' %(self.name_, i)
311 |             obj_filename = os.path.join(out_dir, obj_file_root)
312 |             ObjFile(obj_filename).write(mesh)
313 |             mesh_filenames.append(obj_file_root)
314 | 
315 |         # open an XML tree
316 |         root = et.Element('robot', name='root')
317 | 
318 |         # loop through all pieces
319 |         prev_piece_name = None
320 |         for mesh, filename in zip(meshes, mesh_filenames):
321 |             # set the mass properties
322 |             mesh.center_of_mass = center_of_mass
323 |             mesh.density = density
324 | 
325 |             _, file_root = os.path.split(filename)
326 |             file_root, _ = os.path.splitext(file_root)
327 |             obj_filename = 'package://%s/%s' %(self.name_, filename)
328 | 
329 |             # write to xml
330 |             piece_name = 'link_%s'%(file_root)
331 |             I = mesh.inertia
332 |             link = et.SubElement(root, 'link', name=piece_name)
333 | 
334 |             inertial = et.SubElement(link, 'inertial')
335 |             origin = et.SubElement(inertial, 'origin', xyz="0 0 0", rpy="0 0 0")
336 |             mass = et.SubElement(inertial, 'mass', value='%.2E'%mesh.mass)
337 |             inertia = et.SubElement(inertial, 'inertia', ixx='%.2E'%I[0,0], ixy='%.2E'%I[0,1], ixz='%.2E'%I[0,2],
338 |                                     iyy='%.2E'%I[1,1], iyz='%.2E'%I[1,2], izz='%.2E'%I[2,2])
339 | 
340 |             visual = et.SubElement(link, 'visual')
341 |             origin = et.SubElement(visual, 'origin', xyz="0 0 0", rpy="0 0 0")
342 |             geometry = et.SubElement(visual, 'geometry')
343 |             mesh_element = et.SubElement(geometry, 'mesh', filename=obj_filename)
344 |             material = et.SubElement(visual, 'material', name='')
345 |             color = et.SubElement(material, 'color', rgba="0.75 0.75 0.75 1")
346 | 
347 |             collision = et.SubElement(link, 'collision')
348 |             origin = et.SubElement(collision, 'origin', xyz="0 0 0", rpy="0 0 0")
349 |             geometry = et.SubElement(collision, 'geometry')
350 |             mesh_element = et.SubElement(geometry, 'mesh', filename=obj_filename)
351 | 
352 |             # chain the pieces together with fixed joints
353 |             if prev_piece_name is not None:
354 |                 joint = et.SubElement(root, 'joint', name='%s_joint'%(piece_name), type='fixed')
355 |                 origin = et.SubElement(joint, 'origin', xyz="0 0 0", rpy="0 0 0")
356 |                 parent = et.SubElement(joint, 'parent', link=prev_piece_name)
357 |                 child = et.SubElement(joint, 'child', link=piece_name)
358 | 
359 |             prev_piece_name = piece_name
360 | 
361 |         # write URDF file
362 |         tree = et.ElementTree(root)
363 |         tree.write(self.urdf_filename)
364 | 
365 |         # write config file
366 |         root = et.Element('model')
367 |         model = et.SubElement(root, 'name')
368 |         model.text = self.name_
369 |         version = et.SubElement(root, 'version')
370 |         version.text = '1.0'
371 |         sdf = et.SubElement(root, 'sdf', version='1.4')
372 |         urdf_root, urdf_ext = os.path.splitext(self.urdf_filename)
373 |         urdf_path, urdf_name = os.path.split(urdf_root)
374 |         sdf.text = urdf_name
375 | 
376 |         author = et.SubElement(root, 'author')
377 |         et.SubElement(author, 'name').text = 'AUTOLAB meshpy'
378 |         et.SubElement(author, 'email').text = 'jmahler@berkeley.edu'
379 | 
380 |         description = et.SubElement(root, 'description')
381 |         description.text = 'My awesome %s' %(self.name_)
382 | 
383 |         tree = et.ElementTree(root)
384 |         config_filename = os.path.join(out_dir, 'model.config')
385 |         tree.write(config_filename)
386 | 
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <package>
3 |   <name>meshpy</name>
4 |   <version>0.1.0</version>
5 |   <description>The meshpy package</description>
6 | 
7 |   <maintainer email="todo@todo.todo">todo</maintainer>
8 | 
9 |   <license>Apache v2.0</license>
10 | 
11 |   <buildtool_depend>catkin</buildtool_depend>
12 |   <build_depend>rospy</build_depend>
13 |   <build_depend>message_generation</build_depend>
14 |   <run_depend>rospy</run_depend>
15 |   <run_depend>message_runtime</run_depend>
16 | 
17 |   <export>
18 |   </export>
19 | </package>
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | -e .
2 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | """
2 | Setup of meshpy python codebase
3 | Author: Jeff Mahler
4 | """
5 | from setuptools import setup
6 | from setuptools.command.install import install
7 | from setuptools.command.develop import develop
8 | import os
9 | 
10 | class PostDevelopCmd(develop):
11 |     def run(self):
12 |         os.system('sh install_meshrender.sh')
13 |         develop.run(self)
14 | 
15 | class PostInstallCmd(install):
16 |     def run(self):
17 |         os.system('sh install_meshrender.sh')
18 |         install.run(self)
19 | 
20 | requirements = [
21 |     'numpy',
22 |     'scipy',
23 |     'sklearn',
24 |     'Pillow',
25 | ]
26 | 
27 | setup(name='meshpy',
28 |       version='0.1.0',
29 |       description='MeshPy project code',
30 |       author='Matt Matl',
31 |       author_email='mmatl@berkeley.edu',
32 |       package_dir = {'': '.'},
33 |       packages=['meshpy'],
34 |       #ext_modules = [meshrender],
35 |       install_requires=requirements,
36 |       test_suite='test',
37 |       cmdclass={
38 |           'install': PostInstallCmd,
39 |           'develop': PostDevelopCmd
40 |       }
41 | )
42 | 
--------------------------------------------------------------------------------
/test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/BerkeleyAutomation/meshpy/4e0fc8ca90ae8cb05dae2a103d2226e91c3e54c7/test/__init__.py
--------------------------------------------------------------------------------
/test/data/bad_tetrahedron.obj:
--------------------------------------------------------------------------------
1 | # A simple tetrahedron
2 | v 1.0 0.0 0.0
3 | v 0.0 1.0 0.0
4 | v -1.0 0.0 0.0
5 | v 0.0 0.0 1.0
6 | v 0.0 0.0 0.0
7 | v 0.0 0.1 0.0
8 | f 4 1 2
9 | f 4 2 3
10 | f 4 3 1
11 | f 1 3 2
12 | f 10 3 1
13 | f 2 8 1
--------------------------------------------------------------------------------
/test/data/tetrahedron.obj:
--------------------------------------------------------------------------------
1 | # A simple tetrahedron
2 | v 1.0 0.0 0.0
3 | v 0.0 1.0 0.0
4 | v -1.0 0.0 0.0
5 | v 0.0 0.0 1.0
6 | f 4 1 2
7 | f 4 2 3
8 | f 4 3 1
9 | f 1 3 2
--------------------------------------------------------------------------------
/test/mesh_test.py:
--------------------------------------------------------------------------------
1 | from unittest import TestCase
2 | import numpy as np
3 | from meshpy import Mesh3D
4 | 
5 | class TestMesh(TestCase):
6 | 
7 |     def test_init(self):
8 |         verts = [[1,0,0],[0,1,0],[-1,0,0],[0,0,1]]
9 |         tris = [[3,0,1],[3,1,2],[3,2,0],[0,2,1]]
10 |         d = 1.2
11 |         m = Mesh3D(verts, tris, density=d)
12 |         self.assertTrue(isinstance(m, Mesh3D))
13 |         self.assertEqual(m.vertices.shape, (4,3))
14 |         self.assertEqual(m.vertices.tolist(), verts)
15 |         self.assertEqual(m.triangles.shape, (4,3))
16 |         self.assertEqual(m.triangles.tolist(), tris)
17 |         self.assertEqual(m.density, d)
18 |         self.assertEqual(m.bb_center.tolist(), [0.0, 0.5, 0.5])
19 |         self.assertEqual(m.centroid.tolist(), [0.0, 0.25, 0.25])
20 | 
21 |     def test_read(self):
22 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
23 |         self.assertTrue(isinstance(m, Mesh3D))
24 |         self.assertTrue(m.vertices.shape == (4,3))
25 |         self.assertTrue(m.triangles.shape == (4,3))
26 | 
27 |     def test_min_coords(self):
28 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
29 |         self.assertTrue(m.min_coords().tolist() == [-1.0, 0.0, 0.0])
30 | 
31 |     def test_max_coords(self):
32 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
33 |         self.assertTrue(m.max_coords().tolist() == [1.0, 1.0, 1.0])
34 | 
35 |     def test_bounding_box(self):
36 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
37 |         minc, maxc = m.bounding_box()
38 |         self.assertTrue(minc.tolist() == [-1.0, 0.0, 0.0])
39 |         self.assertTrue(maxc.tolist() == [1.0, 1.0, 1.0])
40 | 
41 |     def test_bounding_box_mesh(self):
42 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
43 |         bbm = m.bounding_box_mesh()
44 |         self.assertTrue(isinstance(bbm, Mesh3D))
45 |         self.assertEqual(bbm.vertices.shape, (8,3))
46 |         self.assertEqual(bbm.triangles.shape, (12,3))
47 |         self.assertEqual(bbm.bb_center.tolist(), [0.0, 0.5, 0.5])
48 |         self.assertEqual(bbm.centroid.tolist(), [0.0, 0.5, 0.5])
49 | 
50 |     def test_principal_dims(self):
51 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
52 |         pd = m.principal_dims()
53 |         self.assertEqual(pd.tolist(), [2.0, 1.0, 1.0])
54 | 
55 |     def test_support(self):
56 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
57 |         s = m.support(np.array([1,0,0]))
58 |         self.assertEqual(s.shape, (3,))
59 | 
60 |     def test_tri_centers(self):
61 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
62 |         centers = m.tri_centers()
63 |         self.assertEqual(centers.shape, (4,3))
64 |         self.assertTrue([0.0, 1.0/3.0, 0.0] in centers.tolist())
65 | 
66 |     def test_tri_normals(self):
67 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
68 |         n = m.tri_normals(True)
69 |         self.assertEqual(n.shape, (4,3))
70 |         self.assertTrue([0.0, -1.0, 0.0] in n.tolist())
71 |         self.assertTrue([0.0, 0.0, -1.0] in n.tolist())
72 | 
73 |     def test_total_volume(self):
74 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
75 |         v = m.total_volume()
76 |         self.assertEqual(v, 1.0/3.0)
77 | 
78 |     def test_covariance(self):
79 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
80 |         cv = m.covariance()
81 |         actual_cov = np.array([[1.0/30.0, 0.0, 0.0],
82 |                                [0.0, 1.0/30.0, 1.0/60.0],
83 |                                [0.0, 1.0/60.0, 1.0/30.0]])
84 |         self.assertEqual(np.round(cv, 5).tolist(), np.round(actual_cov, 5).tolist())
85 | 
86 |     def test_remove_bad_tris(self):
87 |         m = Mesh3D.load('test/data/bad_tetrahedron.obj', 'test/cache')
88 |         self.assertEqual(m.triangles.shape[0], 6)
89 |         m.remove_bad_tris()
90 |         self.assertEqual(m.triangles.shape[0], 4)
91 | 
92 |     def test_remove_unreferenced_vertices(self):
93 |         m = Mesh3D.load('test/data/bad_tetrahedron.obj', 'test/cache')
94 |         self.assertEqual(m.vertices.shape[0], 6)
95 |         m.remove_bad_tris()
96 |         m.remove_unreferenced_vertices()
97 |         self.assertEqual(m.vertices.shape[0], 4)
98 | 
99 |     def test_center_vertices_avg(self):
100 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
101 |         m.center_vertices_avg()
102 |         self.assertEqual(m.centroid.tolist(), [0,0,0])
103 |         self.assertTrue([0,-0.25,0.75] in m.vertices.tolist())
104 | 
105 |     def test_center_vertices_bb(self):
106 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
107 |         m.center_vertices_bb()
108 |         self.assertEqual(m.bb_center.tolist(), [0,0,0])
109 |         self.assertTrue([0,-0.5,0.5] in m.vertices.tolist())
110 | 
111 |     def test_normalize_vertices(self):
112 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
113 |         m.normalize_vertices()
114 |         new_verts = [[-0.3536, 0, -1],
115 |                      [0.3536, 0.7071, 0],
116 |                      [-0.3536, 0, 1],
117 |                      [0.3536, -0.7071, 0]]
118 |         self.assertEqual(np.round(m.bb_center, 5).tolist(), [0,0,0])
119 |         self.assertEqual(np.round(m.vertices, 4).tolist(), new_verts)
120 | 
121 |     def test_copy(self):
122 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
123 |         x = m.copy()
124 |         self.assertEqual(m.vertices.tolist(), x.vertices.tolist())
125 |         self.assertEqual(m.triangles.tolist(), x.triangles.tolist())
126 | 
127 |     def test_subdivide(self):
128 |         pass
129 |         #m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
130 |         #x = m.subdivide()
131 |         #self.assertEqual(m.vertices.shape[0], 10)
132 |         #self.assertEqual(m.triangles.shape[0], 16)
133 | 
134 |     def test_transform(self):
135 |         pass
136 | 
137 |     def test_get_T_surface_obj(self):
138 |         pass
139 | 
140 |     def test_rescale_dimension(self):
141 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
142 |         m2 = m.copy()
143 |         m2.rescale_dimension(0.5, Mesh3D.ScalingTypeMin)
144 |         self.assertEqual(m2.min_coords().tolist(), [-0.5, 0.0, 0.0])
145 |         self.assertEqual(m2.max_coords().tolist(), [0.5, 0.5, 0.5])
146 |         m3 = m.copy()
147 |         m3.rescale_dimension(0.5, Mesh3D.ScalingTypeMin)
148 |         self.assertEqual(m3.min_coords().tolist(), [-0.5, 0.0, 0.0])
149 |         self.assertEqual(m3.max_coords().tolist(), [0.5, 0.5, 0.5])
150 | 
151 |     def test_rescale(self):
152 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
153 |         m.rescale(0.5)
154 |         self.assertEqual(m.min_coords().tolist(), [-0.5, 0.0, 0.0])
155 |         self.assertEqual(m.max_coords().tolist(), [0.5, 0.5, 0.5])
156 | 
157 |     def test_convex_hull(self):
158 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
159 |         cvh = m.convex_hull()
160 |         self.assertEqual(cvh.min_coords().tolist(), [-1, 0, 0])
161 |         self.assertEqual(cvh.max_coords().tolist(), [1, 1, 1])
162 | 
163 |     def test_stable_poses(self):
164 |         m = Mesh3D.load('test/data/tetrahedron.obj', 'test/cache')
165 |         m.center_of_mass = m.centroid
166 |         stps = m.stable_poses()
167 |         self.assertEqual(len(stps), 4)
168 | 
169 |     def test_visualize(self):
170 |         pass
171 | 
172 | if __name__ == '__main__':
173 |     import unittest
174 |     unittest.main()
175 | 
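The suite above is wired into setup.py via test_suite='test', so it can be run
from the repository root (the tests load their fixtures by relative path):

    python setup.py test
    # or a single case:
    python -m unittest test.mesh_test.TestMesh.test_init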
--------------------------------------------------------------------------------
/test/mesh_visualizer.py:
--------------------------------------------------------------------------------
1 | import mayavi.mlab as mv
2 | 
3 | class MeshVisualizer(object):
4 |     """A class for visualizing meshes.
5 |     """
6 | 
7 |     def __init__(self, mesh):
8 |         """Initialize a MeshVisualizer.
9 | 
10 |         Parameters
11 |         ----------
12 |         mesh : :obj:`Mesh3D`
13 |             The mesh to apply visualizations to.
14 |         """
15 | 
16 |         self.mesh_ = mesh
17 | 
18 |     def visualize(self, color=(0.5, 0.5, 0.5), style='surface', opacity=1.0):
19 |         """Plots visualization of mesh using MayaVI.
20 | 
21 |         Parameters
22 |         ----------
23 |         color : :obj:`tuple` of float
24 |             3-tuple of floats in [0,1] to give the mesh's color
25 | 
26 |         style : :obj:`str`
27 |             Either 'surface', which produces an opaque surface, or
28 |             'wireframe', which produces a wireframe.
29 | 
30 |         opacity : float
31 |             A value in [0,1] indicating the opacity of the mesh.
32 |             Zero is transparent, one is opaque.
33 | 
34 |         Returns
35 |         -------
36 |         :obj:`mayavi.modules.surface.Surface`
37 |             The displayed surface.
38 |         """
39 |         surface = mv.triangular_mesh(self.mesh_.vertices_[:,0],
40 |                                      self.mesh_.vertices_[:,1],
41 |                                      self.mesh_.vertices_[:,2],
42 |                                      self.mesh_.triangles_, representation=style,
43 |                                      color=color, opacity=opacity)
44 |         return surface
45 | 
--------------------------------------------------------------------------------
/tools/convert_image_to_obj.py:
--------------------------------------------------------------------------------
1 | """
2 | Script to convert a binary image into an extruded 3D mesh in .OBJ format for use in meshpy.
3 | Author: Jeff Mahler
4 | """
5 | import argparse
6 | import logging
7 | import matplotlib.pyplot as plt
8 | import numpy as np
9 | import os
10 | import sys
11 | 
12 | import autolab_core.utils as utils
13 | from perception import BinaryImage
14 | from meshpy import ImageToMeshConverter, ObjFile
15 | from visualization import Visualizer2D as vis2d
16 | from visualization import Visualizer3D as vis
17 | 
18 | if __name__ == '__main__':
19 |     # set up logger
20 |     logging.getLogger().setLevel(logging.INFO)
21 | 
22 |     # parse args
23 |     parser = argparse.ArgumentParser(description='Convert an image into an extruded 3D mesh model')
24 |     parser.add_argument('input_image', type=str, help='path to image to convert')
25 |     parser.add_argument('--extrusion', type=float, default=1000, help='amount to extrude')
26 |     parser.add_argument('--scale_factor', type=float, default=1.0, help='scale factor to apply to the mesh')
27 |     parser.add_argument('--output_filename', type=str, default=None, help='output obj filename')
28 | 
29 |     args = parser.parse_args()
30 |     image_filename = args.input_image
31 |     extrusion = args.extrusion
32 |     scale_factor = args.scale_factor
33 |     output_filename = args.output_filename
34 | 
35 |     # read the image and compute its signed distance field
36 |     binary_im = BinaryImage.open(image_filename)
37 |     sdf = binary_im.to_sdf()
38 |     #plt.figure()
39 |     #plt.imshow(sdf)
40 |     #plt.show()
41 | 
42 |     # convert to a mesh and display it
43 |     mesh = ImageToMeshConverter.binary_image_to_mesh(binary_im, extrusion=extrusion, scale_factor=scale_factor)
44 |     vis.figure()
45 |     vis.mesh(mesh)
46 |     vis.show()
47 | 
48 |     # optionally save the image, mesh, and sdf
49 |     if output_filename is not None:
50 |         file_root, file_ext = os.path.splitext(output_filename)
51 |         binary_im.save(file_root+'.jpg')
52 |         ObjFile(file_root+'.obj').write(mesh)
53 |         np.savetxt(file_root+'.csv', sdf, delimiter=',',
54 |                    header='%d %d'%(sdf.shape[0], sdf.shape[1]))
55 | 
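An example invocation of the script above (the image and output paths are
hypothetical):

    python tools/convert_image_to_obj.py data/images/part_outline.png \
        --extrusion 1000 --scale_factor 0.001 --output_filename data/meshes/part.obj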
--------------------------------------------------------------------------------
/tools/convert_to_obj.py:
--------------------------------------------------------------------------------
1 | """
2 | Script to convert a directory of 3D models to .OBJ wavefront format for use in meshpy using meshlabserver.
3 | Author: Jeff Mahler
4 | """
5 | import argparse
6 | import logging
7 | import os
8 | import sys
9 | 
10 | import autolab_core.utils as utils
11 | 
12 | SUPPORTED_EXTENSIONS = ['.wrl', '.obj', '.off', '.ply', '.stl', '.3ds']
13 | 
14 | if __name__ == '__main__':
15 |     # set up logger
16 |     logging.getLogger().setLevel(logging.INFO)
17 | 
18 |     # parse args
19 |     parser = argparse.ArgumentParser(description='Convert a directory of 3D models into .OBJ format using meshlab')
20 |     parser.add_argument('input_dir', type=str, help='directory containing 3D model files to convert')
21 |     parser.add_argument('--output_dir', type=str, default=None, help='directory to save .OBJ files to')
22 |     args = parser.parse_args()
23 |     data_dir = args.input_dir
24 |     output_dir = args.output_dir
25 |     if output_dir is None:
26 |         output_dir = data_dir
27 | 
28 |     # get model filenames
29 |     model_filenames = []
30 |     for ext in SUPPORTED_EXTENSIONS:
31 |         model_filenames.extend(utils.filenames(data_dir, tag=ext))
32 |     model_file_roots = []
33 |     for model_filename in model_filenames:
34 |         root, _ = os.path.splitext(model_filename)
35 |         model_file_roots.append(root)
36 | 
37 |     # create obj filenames
38 |     obj_filenames = [f + '.obj' for f in model_file_roots]
39 |     obj_filenames = [f.replace(data_dir, output_dir) for f in obj_filenames]
40 |     num_files = len(obj_filenames)
41 | 
42 |     # convert using meshlab server
43 |     i = 0
44 |     for obj_filename, model_filename in zip(obj_filenames, model_filenames):
45 |         logging.info('Converting %s (%d of %d)' %(model_filename, i+1, num_files))
46 | 
47 |         # call meshlabserver
48 |         meshlabserver_cmd = 'meshlabserver -i %s -o %s' %(model_filename, obj_filename)
49 |         os.system(meshlabserver_cmd)
50 |         i += 1
51 | 
--------------------------------------------------------------------------------
/tools/mesh_to_urdf.py:
--------------------------------------------------------------------------------
1 | """
2 | Script to generate a urdf for a mesh with a convex decomposition to preserve the geometry
3 | Author: Jeff
4 | """
5 | import argparse
6 | import glob
7 | import IPython
8 | import logging
9 | import numpy as np
10 | import os
11 | from subprocess import Popen
12 | import sys
13 | 
14 | import xml.etree.cElementTree as et
15 | 
16 | from autolab_core import YamlConfig
17 | from meshpy import Mesh3D, ObjFile, UrdfWriter
18 | 
19 | OBJ_EXT = '.obj'
20 | 
21 | if __name__ == '__main__':
22 |     logging.getLogger().setLevel(logging.INFO)
23 | 
24 |     # read args
25 |     parser = argparse.ArgumentParser(description='Convert a mesh to a URDF')
26 |     parser.add_argument('mesh_filename', type=str, help='OBJ filename of the mesh to convert')
27 |     parser.add_argument('output_dir', type=str, help='directory to store output urdf in')
28 |     parser.add_argument('--config', type=str, default='cfg/tools/convex_decomposition.yaml',
29 |                         help='config file for urdf conversion')
30 |     args = parser.parse_args()
31 | 
32 |     # open config
33 |     config_filename = args.config
34 |     config = YamlConfig(config_filename)
35 | 
36 |     # check valid mesh filename
37 |     mesh_filename = args.mesh_filename
38 |     mesh_root, mesh_ext = os.path.splitext(mesh_filename)
39 |     if mesh_ext.lower() != OBJ_EXT:
40 |         logging.error('Extension %s not supported' %(mesh_ext))
41 |         exit(1)
42 | 
43 |     # open mesh
44 |     of = ObjFile(mesh_filename)
45 |     mesh = of.read()
46 |     mesh.density = config['object_density']
47 | 
48 |     # create output dir for urdf
49 |     output_dir = args.output_dir
50 |     writer = UrdfWriter(output_dir)
51 |     writer.write(mesh)
52 | 
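An example invocation of the URDF conversion script (the paths are hypothetical;
the config file must define object_density, and v-hacd must be installed):

    python tools/mesh_to_urdf.py data/meshes/part.obj urdf/part \
        --config cfg/tools/convex_decomposition.yaml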
--------------------------------------------------------------------------------
/tools/test_stable_pose.py:
--------------------------------------------------------------------------------
1 | """
2 | Regression test for stable poses. Qualitative only.
3 | Author: Jeff Mahler
4 | """
5 | import IPython
6 | import numpy as np
7 | import os
8 | import random
9 | import sys
10 | 
11 | from autolab_core import Point, RigidTransform
12 | from meshpy import ObjFile, Mesh3D
13 | from visualization import Visualizer3D as vis
14 | 
15 | if __name__ == '__main__':
16 |     mesh_name = sys.argv[1]
17 | 
18 |     #np.random.seed(111)
19 |     #random.seed(111)
20 | 
21 |     # read mesh
22 |     mesh = ObjFile(mesh_name).read()
23 | 
24 |     # override geometry with cached lego data for a known test case
25 |     mesh.vertices_ = np.load('../dex-net/data/meshes/lego_vertices.npy')
26 |     mesh.center_of_mass = np.load('../dex-net/data/meshes/lego_com.npy')
27 | 
28 |     #T_obj_table = RigidTransform(rotation=[0.92275663, 0.13768089, 0.35600924, -0.05311874],
29 |     #                             from_frame='obj', to_frame='table')
30 |     T_obj_table = RigidTransform(rotation=[-0.1335021, 0.87671711, 0.41438141, 0.20452958],
31 |                                  from_frame='obj', to_frame='table')
32 | 
33 |     stable_pose = mesh.resting_pose(T_obj_table)
34 |     #print stable_pose.r
35 | 
36 |     # visualize the resting pose next to the reference pose
37 |     table_dim = 0.3
38 |     T_obj_table_plot = mesh.get_T_surface_obj(T_obj_table)
39 |     T_obj_table_plot.translation[0] += 0.1
40 |     vis.figure()
41 |     vis.mesh(mesh, T_obj_table_plot,
42 |              color=(1,0,0), style='wireframe')
43 |     vis.points(Point(mesh.center_of_mass, 'obj'), T_obj_table_plot,
44 |                color=(1,0,1), scale=0.01)
45 |     vis.pose(T_obj_table_plot, alpha=0.1)
46 |     vis.mesh_stable_pose(mesh, stable_pose, dim=table_dim,
47 |                          color=(0,1,0), style='surface')
48 |     vis.pose(stable_pose.T_obj_table, alpha=0.1)
49 |     vis.show()
50 |     exit(0)
51 | 
52 |     # compute stable poses (unreachable; remove the exit(0) above to run)
53 |     vis.figure()
54 |     vis.mesh(mesh, color=(1,1,0), style='surface')
55 |     vis.mesh(mesh.convex_hull(), color=(1,0,0))
56 | 
57 |     stable_poses = mesh.stable_poses()
58 | 
59 |     vis.show()
--------------------------------------------------------------------------------
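An example invocation of the qualitative test above (the mesh path is
hypothetical; note the script also expects the cached lego .npy files at the
hard-coded ../dex-net paths shown in the source):

    python tools/test_stable_pose.py data/meshes/lego.obj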