├── .gitignore ├── .travis.yml ├── CMakeLists.txt ├── LICENSE ├── MANIFEST.in ├── README.md ├── docs ├── Makefile ├── gh_deploy.sh └── source │ ├── api │ ├── camera.rst │ ├── lighting.rst │ ├── material.rst │ ├── random_variables.rst │ ├── scene.rst │ ├── sceneobject.rst │ └── viewer.rst │ ├── conf.py │ ├── examples │ └── example.rst │ ├── index.rst │ └── install │ └── install.rst ├── examples ├── models │ ├── bar_clamp.obj │ ├── pawn.obj │ └── pawn_large.obj └── test_viewer.py ├── meshrender ├── __init__.py ├── camera.py ├── constants.py ├── light.py ├── material.py ├── random_variables.py ├── render.py ├── scene.py ├── scene_object.py ├── shaders.py ├── trackball.py ├── version.py └── viewer.py ├── package.xml ├── setup.py └── tests └── test_temporary.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Images 10 | *.jpg 11 | *.png 12 | 13 | # Distribution / packaging 14 | .Python 15 | build/ 16 | develop-eggs/ 17 | dist/ 18 | downloads/ 19 | eggs/ 20 | .eggs/ 21 | lib/ 22 | lib64/ 23 | parts/ 24 | sdist/ 25 | var/ 26 | wheels/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | MANIFEST 31 | 32 | # PyInstaller 33 | # Usually these files are written by a python script from a template 34 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
35 | *.manifest 36 | *.spec 37 | 38 | # Installer logs 39 | pip-log.txt 40 | pip-delete-this-directory.txt 41 | 42 | # Unit test / coverage reports 43 | htmlcov/ 44 | .tox/ 45 | .coverage 46 | .coverage.* 47 | .cache 48 | nosetests.xml 49 | coverage.xml 50 | *.cover 51 | .hypothesis/ 52 | 53 | # Translations 54 | *.mo 55 | *.pot 56 | 57 | # Django stuff: 58 | *.log 59 | .static_storage/ 60 | .media/ 61 | local_settings.py 62 | 63 | # Flask stuff: 64 | instance/ 65 | .webassets-cache 66 | 67 | # Scrapy stuff: 68 | .scrapy 69 | 70 | # Sphinx documentation 71 | docs/_build/ 72 | 73 | # PyBuilder 74 | target/ 75 | 76 | # Jupyter Notebook 77 | .ipynb_checkpoints 78 | 79 | # pyenv 80 | .python-version 81 | 82 | # celery beat schedule file 83 | celerybeat-schedule 84 | 85 | # SageMath parsed files 86 | *.sage.py 87 | 88 | # Environments 89 | .env 90 | .venv 91 | env/ 92 | venv/ 93 | ENV/ 94 | env.bak/ 95 | venv.bak/ 96 | 97 | # Spyder project settings 98 | .spyderproject 99 | .spyproject 100 | 101 | # Rope project settings 102 | .ropeproject 103 | 104 | # mkdocs documentation 105 | /site 106 | 107 | # mypy 108 | .mypy_cache/ 109 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | sudo: required 3 | dist: trusty 4 | python: 5 | - '2.7' 6 | - '3.5' 7 | - '3.6' 8 | before_install: 9 | - pip install --upgrade pip setuptools wheel 10 | - pip install --only-binary=numpy,scipy numpy scipy 11 | - pip install matplotlib==2.2.3 12 | - pip install PyOpenGL 13 | install: 14 | - pip install . 15 | - pip install nose2 16 | script: 17 | - cd tests 18 | - nose2 19 | - cd .. 
20 | deploy: 21 | provider: pypi 22 | skip_existing: true 23 | user: mmatl 24 | on: 25 | tags: true 26 | branch: master 27 | password: 28 | secure: VMn+8D74ayInKRyw1tYP7g1Ad/iG8co8hV0dAH1H3sCp90GVm0smvCH9hTXJsxKvMdHAKxGRlhbf7biZ8k2mP7pEOqan4J8rnAXSqkPlfBnt5UNvGwkLC+2cBYOHrM3bfyv9UtQvI4opyUkT4wSYhCM4C2+0r17wV8dotzGCfnoQF5EbmurBKa4fvX6gRxQQqDt9BBCJ8ScODMB6xo9taZJFscjbvXT791ue5OegPJodf5euttpYzeKY+bnTuxV1H2JqVgNZ2IgbplwoZvCmos5Mra9+QEFIY/M6hAuhudhSeP/as++mnqA9daIyipuePWaE9u+aENc80Q1xnpglfeJm39PcFTyG2SbeBOjKLiNpX1p0SWFgU8CxW5oL+vNfIIdh2lps3dl/jOf6gFupC0PSdEbIehHjY+tHt+cRY1VgqHhRfoFyACerihz0lxiA+Buev/6cfmKC9C8fwMM7KOlZBAknMSwvosdosL1uLb+EBG9rrmATgtoB3PkzEtRMEbs7aFQR+aCa9DQpfnRJDt14AScNqQAQl3vTnLSIDzvkL/gJNZm5ix3jgBLnvpxA7lLJRng10n7vTbvt8DELhSbSHXFj4Pw/aV8R/VjoVSIVIQO4ClLMpACuA75M4CTNAsWzklhXuX7c4H2ZliNJ5X+4WxskzBPI6xXiiXPTTvk= 29 | distributions: sdist bdist_wheel 30 | notifications: 31 | email: false 32 | -------------------------------------------------------------------------------- /CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | project(meshrender) 3 | 4 | ## Find catkin macros and libraries 5 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 6 | ## is used, also find other catkin packages 7 | find_package(catkin REQUIRED) 8 | 9 | ## System dependencies are found with CMake's conventions 10 | # find_package(Boost REQUIRED COMPONENTS system) 11 | 12 | 13 | ## Uncomment this if the package has a setup.py. 
This macro ensures 14 | ## modules and global scripts declared therein get installed 15 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html 16 | catkin_python_setup() 17 | 18 | ################################################ 19 | ## Declare ROS messages, services and actions ## 20 | ################################################ 21 | 22 | ## To declare and build messages, services or actions from within this 23 | ## package, follow these steps: 24 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in 25 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). 26 | ## * In the file package.xml: 27 | ## * add a build_depend tag for "message_generation" 28 | ## * add a build_depend and a run_depend tag for each package in MSG_DEP_SET 29 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in 30 | ## but can be declared for certainty nonetheless: 31 | ## * add a run_depend tag for "message_runtime" 32 | ## * In this file (CMakeLists.txt): 33 | ## * add "message_generation" and every package in MSG_DEP_SET to 34 | ## find_package(catkin REQUIRED COMPONENTS ...) 35 | ## * add "message_runtime" and every package in MSG_DEP_SET to 36 | ## catkin_package(CATKIN_DEPENDS ...) 37 | ## * uncomment the add_*_files sections below as needed 38 | ## and list every .msg/.srv/.action file to be processed 39 | ## * uncomment the generate_messages entry below 40 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 
41 | 42 | ## Generate messages in the 'msg' folder 43 | # add_message_files( 44 | # FILES 45 | # Message1.msg 46 | # Message2.msg 47 | # ) 48 | 49 | ## Generate services in the 'srv' folder 50 | #add_service_files( 51 | # FILES 52 | # RigidTransformPublisher.srv 53 | # RigidTransformListener.srv 54 | #) 55 | 56 | ## Generate actions in the 'action' folder 57 | # add_action_files( 58 | # FILES 59 | # Action1.action 60 | # Action2.action 61 | # ) 62 | 63 | ## Generate added messages and services with any dependencies listed here 64 | #generate_messages( 65 | # DEPENDENCIES 66 | # std_msgs # Or other packages containing msgs 67 | #) 68 | 69 | ################################################ 70 | ## Declare ROS dynamic reconfigure parameters ## 71 | ################################################ 72 | 73 | ## To declare and build dynamic reconfigure parameters within this 74 | ## package, follow these steps: 75 | ## * In the file package.xml: 76 | ## * add a build_depend and a run_depend tag for "dynamic_reconfigure" 77 | ## * In this file (CMakeLists.txt): 78 | ## * add "dynamic_reconfigure" to 79 | ## find_package(catkin REQUIRED COMPONENTS ...) 
80 | ## * uncomment the "generate_dynamic_reconfigure_options" section below 81 | ## and list every .cfg file to be processed 82 | 83 | ## Generate dynamic reconfigure parameters in the 'cfg' folder 84 | # generate_dynamic_reconfigure_options( 85 | # cfg/DynReconf1.cfg 86 | # cfg/DynReconf2.cfg 87 | # ) 88 | 89 | ################################### 90 | ## catkin specific configuration ## 91 | ################################### 92 | ## The catkin_package macro generates cmake config files for your package 93 | ## Declare things to be passed to dependent projects 94 | ## INCLUDE_DIRS: uncomment this if you package contains header files 95 | ## LIBRARIES: libraries you create in this project that dependent projects also need 96 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 97 | ## DEPENDS: system dependencies of this project that dependent projects also need 98 | catkin_package( 99 | # INCLUDE_DIRS include 100 | # LIBRARIES yumipy 101 | # CATKIN_DEPENDS rospy 102 | # DEPENDS system_lib 103 | ) 104 | 105 | ########### 106 | ## Build ## 107 | ########### 108 | 109 | ## Specify additional locations of header files 110 | ## Your package locations should be listed before other locations 111 | # include_directories(include) 112 | include_directories( 113 | ${catkin_INCLUDE_DIRS} 114 | ) 115 | 116 | ## Declare a C++ library 117 | # add_library(yumipy 118 | # src/${PROJECT_NAME}/yumipy.cpp 119 | # ) 120 | 121 | ## Add cmake target dependencies of the library 122 | ## as an example, code may need to be generated before libraries 123 | ## either from message generation or dynamic reconfigure 124 | # add_dependencies(yumipy ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 125 | 126 | ## Declare a C++ executable 127 | # add_executable(yumipy_node src/yumipy_node.cpp) 128 | 129 | ## Add cmake target dependencies of the executable 130 | ## same as for the library above 131 | # add_dependencies(yumipy_node ${${PROJECT_NAME}_EXPORTED_TARGETS} 
${catkin_EXPORTED_TARGETS}) 132 | 133 | ## Specify libraries to link a library or executable target against 134 | # target_link_libraries(yumipy_node 135 | # ${catkin_LIBRARIES} 136 | # ) 137 | 138 | ############# 139 | ## Install ## 140 | ############# 141 | 142 | # all install targets should use catkin DESTINATION variables 143 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 144 | 145 | ## Mark executable scripts (Python etc.) for installation 146 | ## in contrast to setup.py, you can choose the destination 147 | # install(PROGRAMS 148 | # scripts/my_python_script 149 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 150 | # ) 151 | 152 | ## Mark executables and/or libraries for installation 153 | # install(TARGETS yumipy yumipy_node 154 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 155 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 156 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 157 | # ) 158 | 159 | ## Mark cpp header files for installation 160 | # install(DIRECTORY include/${PROJECT_NAME}/ 161 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 162 | # FILES_MATCHING PATTERN "*.h" 163 | # PATTERN ".svn" EXCLUDE 164 | # ) 165 | 166 | ## Mark other files for installation (e.g. launch and bag files, etc.) 
167 | # install(FILES 168 | # # myfile1 169 | # # myfile2 170 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 171 | # ) 172 | 173 | ############# 174 | ## Testing ## 175 | ############# 176 | 177 | ## Add gtest based cpp test target and link libraries 178 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_yumipy.cpp) 179 | # if(TARGET ${PROJECT_NAME}-test) 180 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) 181 | # endif() 182 | 183 | ## Add folders to be run by python nosetests 184 | # catkin_add_nosetests(test) 185 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 
61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2017 Berkeley AUTOLAB & University of California, Berkeley 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | # Include the license 2 | include LICENSE 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # meshrender 2 | ### NOTE: SUPPORT FOR THIS REPOSITORY IS BEING ENDED SOON. BUT FEAR NOT, THERE IS A (MUCH BETTER) REPLACEMENT! 3 | I've written a much better GLTF 2.0 compliant renderer that can do shadows, textures, and physically-based rendering. Check it out here: https://github.com/mmatl/pyrender 4 | 5 | ### A simple, OpenGL 3+ based Python scene renderer 6 | 7 | [![Build Status](https://travis-ci.org/BerkeleyAutomation/meshrender.svg?branch=master)](https://travis-ci.org/BerkeleyAutomation/meshrender) 8 | 9 | 10 | This package, which is built on top of PyOpenGL, is designed to make it easy 11 | to render images of 3D scenes in pure Python. 
12 | It supports a scene abstraction and allows users to specify material properties, 13 | camera intrinsics, and lighting. 14 | 15 | Extensive API documentation is provided [here](https://BerkeleyAutomation.github.io/meshrender), 16 | but an example of using the library to render color and depth images is shown below. 17 | 18 | ```python 19 | import numpy as np 20 | import trimesh 21 | from autolab_core import RigidTransform 22 | from perception import CameraIntrinsics, RenderMode 23 | 24 | from meshrender import Scene, MaterialProperties, AmbientLight, PointLight, SceneObject, VirtualCamera 25 | 26 | # Start with an empty scene 27 | scene = Scene() 28 | 29 | #==================================== 30 | # Add objects to the scene 31 | #==================================== 32 | 33 | # Begin by loading meshes 34 | cube_mesh = trimesh.load_mesh('cube.obj') 35 | sphere_mesh = trimesh.load_mesh('sphere.obj') 36 | 37 | # Set up each object's pose in the world 38 | cube_pose = RigidTransform( 39 | rotation=np.eye(3), 40 | translation=np.array([0.0, 0.0, 0.0]), 41 | from_frame='obj', 42 | to_frame='world' 43 | ) 44 | sphere_pose = RigidTransform( 45 | rotation=np.eye(3), 46 | translation=np.array([1.0, 1.0, 0.0]), 47 | from_frame='obj', 48 | to_frame='world' 49 | ) 50 | 51 | # Set up each object's material properties 52 | cube_material = MaterialProperties( 53 | color = np.array([0.1, 0.1, 0.5]), 54 | k_a = 0.3, 55 | k_d = 1.0, 56 | k_s = 1.0, 57 | alpha = 10.0, 58 | smooth=False 59 | ) 60 | sphere_material = MaterialProperties( 61 | color = np.array([0.1, 0.1, 0.5]), 62 | k_a = 0.3, 63 | k_d = 1.0, 64 | k_s = 1.0, 65 | alpha = 10.0, 66 | smooth=True 67 | ) 68 | 69 | # Create SceneObjects for each object 70 | cube_obj = SceneObject(cube_mesh, cube_pose, cube_material) 71 | sphere_obj = SceneObject(sphere_mesh, sphere_pose, sphere_material) 72 | 73 | # Add the SceneObjects to the scene 74 | scene.add_object('cube', cube_obj) 75 | scene.add_object('sphere', sphere_obj) 76 | 
77 | #==================================== 78 | # Add lighting to the scene 79 | #==================================== 80 | 81 | # Create an ambient light 82 | ambient = AmbientLight( 83 | color=np.array([1.0, 1.0, 1.0]), 84 | strength=1.0 85 | ) 86 | 87 | # Create a point light 88 | point = PointLight( 89 | location=np.array([1.0, 2.0, 3.0]), 90 | color=np.array([1.0, 1.0, 1.0]), 91 | strength=10.0 92 | ) 93 | 94 | # Add the lights to the scene 95 | scene.ambient_light = ambient # only one ambient light per scene 96 | scene.add_light('point_light_one', point) 97 | 98 | #==================================== 99 | # Add a camera to the scene 100 | #==================================== 101 | 102 | # Set up camera intrinsics 103 | ci = CameraIntrinsics( 104 | frame = 'camera', 105 | fx = 525.0, 106 | fy = 525.0, 107 | cx = 319.5, 108 | cy = 239.5, 109 | skew=0.0, 110 | height=480, 111 | width=640 112 | ) 113 | 114 | # Set up the camera pose (z axis faces away from scene, x to right, y up) 115 | cp = RigidTransform( 116 | rotation = np.array([ 117 | [0.0, 0.0, -1.0], 118 | [0.0, 1.0, 0.0], 119 | [1.0, 0.0, 0.0] 120 | ]), 121 | translation = np.array([-0.3, 0.0, 0.0]), 122 | from_frame='camera', 123 | to_frame='world' 124 | ) 125 | 126 | # Create a VirtualCamera 127 | camera = VirtualCamera(ci, cp) 128 | 129 | # Add the camera to the scene 130 | scene.camera = camera 131 | 132 | #==================================== 133 | # Render images 134 | #==================================== 135 | 136 | # Render raw numpy arrays containing color and depth 137 | color_image_raw, depth_image_raw = scene.render(render_color=True) 138 | 139 | # Alternatively, just render a depth image 140 | depth_image_raw = scene.render(render_color=False) 141 | 142 | # Alternatively, collect wrapped images 143 | wrapped_color, wrapped_depth, wrapped_segmask = scene.wrapped_render( 144 | [RenderMode.COLOR, RenderMode.DEPTH, RenderMode.SEGMASK] 145 | ) 146 | ``` 147 | 
-------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = build 9 | GH_PAGES_SOURCES = docs meshrender 10 | 11 | # User-friendly check for sphinx-build 12 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 13 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 14 | endif 15 | 16 | # Internal variables. 17 | PAPEROPT_a4 = -D latex_paper_size=a4 18 | PAPEROPT_letter = -D latex_paper_size=letter 19 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 20 | # the i18n builder cannot share the environment and doctrees with the others 21 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 22 | 23 | .PHONY: help 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " applehelp to make an Apple Help Book" 34 | @echo " devhelp to make HTML files and a Devhelp project" 35 | @echo " epub to make an epub" 36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 37 | @echo " latexpdf to make LaTeX files and run them 
through pdflatex" 38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 39 | @echo " text to make text files" 40 | @echo " man to make manual pages" 41 | @echo " texinfo to make Texinfo files" 42 | @echo " info to make Texinfo files and run them through makeinfo" 43 | @echo " gettext to make PO message catalogs" 44 | @echo " changes to make an overview of all changed/added/deprecated items" 45 | @echo " xml to make Docutils-native XML files" 46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 47 | @echo " linkcheck to check all external links for integrity" 48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 49 | @echo " coverage to run coverage check of the documentation (if enabled)" 50 | 51 | .PHONY: clean 52 | clean: 53 | rm -rf $(BUILDDIR)/* 54 | 55 | .PHONY: gh-pages 56 | gh-pages: 57 | git checkout gh-pages && \ 58 | cd .. && \ 59 | git rm -rf . && git clean -fxd && \ 60 | git checkout master $(GH_PAGES_SOURCES) && \ 61 | git reset HEAD && \ 62 | cd docs && \ 63 | make html && \ 64 | cd .. && \ 65 | mv -fv docs/build/html/* ./ && \ 66 | touch .nojekyll && \ 67 | rm -rf $(GH_PAGES_SOURCES) && \ 68 | git add -A && \ 69 | git commit -m "Generated gh-pages for `git log master -1 --pretty=short --abbrev-commit`" && \ 70 | git push origin --delete gh-pages && \ 71 | git push origin gh-pages ; \ 72 | git checkout master 73 | 74 | .PHONY: html 75 | html: 76 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 77 | @echo 78 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 79 | 80 | .PHONY: dirhtml 81 | dirhtml: 82 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 83 | @echo 84 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 85 | 86 | .PHONY: singlehtml 87 | singlehtml: 88 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 89 | @echo 90 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 
91 | 92 | .PHONY: pickle 93 | pickle: 94 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 95 | @echo 96 | @echo "Build finished; now you can process the pickle files." 97 | 98 | .PHONY: json 99 | json: 100 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 101 | @echo 102 | @echo "Build finished; now you can process the JSON files." 103 | 104 | .PHONY: htmlhelp 105 | htmlhelp: 106 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 107 | @echo 108 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 109 | ".hhp project file in $(BUILDDIR)/htmlhelp." 110 | 111 | .PHONY: qthelp 112 | qthelp: 113 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 114 | @echo 115 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 116 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 117 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/meshrender.qhcp" 118 | @echo "To view the help file:" 119 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/meshrender.qhc" 120 | 121 | .PHONY: applehelp 122 | applehelp: 123 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 124 | @echo 125 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 126 | @echo "N.B. You won't be able to view it unless you put it in" \ 127 | "~/Library/Documentation/Help or install it in your application" \ 128 | "bundle." 129 | 130 | .PHONY: devhelp 131 | devhelp: 132 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 133 | @echo 134 | @echo "Build finished." 135 | @echo "To view the help file:" 136 | @echo "# mkdir -p $$HOME/.local/share/devhelp/meshrender" 137 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/meshrender" 138 | @echo "# devhelp" 139 | 140 | .PHONY: epub 141 | epub: 142 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 143 | @echo 144 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
145 | 146 | .PHONY: latex 147 | latex: 148 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 149 | @echo 150 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 151 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 152 | "(use \`make latexpdf' here to do that automatically)." 153 | 154 | .PHONY: latexpdf 155 | latexpdf: 156 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 157 | @echo "Running LaTeX files through pdflatex..." 158 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 159 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 160 | 161 | .PHONY: latexpdfja 162 | latexpdfja: 163 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 164 | @echo "Running LaTeX files through platex and dvipdfmx..." 165 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 166 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 167 | 168 | .PHONY: text 169 | text: 170 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 171 | @echo 172 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 173 | 174 | .PHONY: man 175 | man: 176 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 177 | @echo 178 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 179 | 180 | .PHONY: texinfo 181 | texinfo: 182 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 183 | @echo 184 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 185 | @echo "Run \`make' in that directory to run these through makeinfo" \ 186 | "(use \`make info' here to do that automatically)." 187 | 188 | .PHONY: info 189 | info: 190 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 191 | @echo "Running Texinfo files through makeinfo..." 192 | make -C $(BUILDDIR)/texinfo info 193 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 
194 | 195 | .PHONY: gettext 196 | gettext: 197 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 198 | @echo 199 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 200 | 201 | .PHONY: changes 202 | changes: 203 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 204 | @echo 205 | @echo "The overview file is in $(BUILDDIR)/changes." 206 | 207 | .PHONY: linkcheck 208 | linkcheck: 209 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 210 | @echo 211 | @echo "Link check complete; look for any errors in the above output " \ 212 | "or in $(BUILDDIR)/linkcheck/output.txt." 213 | 214 | .PHONY: doctest 215 | doctest: 216 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 217 | @echo "Testing of doctests in the sources finished, look at the " \ 218 | "results in $(BUILDDIR)/doctest/output.txt." 219 | 220 | .PHONY: coverage 221 | coverage: 222 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 223 | @echo "Testing of coverage in the sources finished, look at the " \ 224 | "results in $(BUILDDIR)/coverage/python.txt." 225 | 226 | .PHONY: xml 227 | xml: 228 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 229 | @echo 230 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 231 | 232 | .PHONY: pseudoxml 233 | pseudoxml: 234 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 235 | @echo 236 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
237 | -------------------------------------------------------------------------------- /docs/gh_deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | make gh-pages 3 | cd ../docs 4 | -------------------------------------------------------------------------------- /docs/source/api/camera.rst: -------------------------------------------------------------------------------- 1 | Camera 2 | ====== 3 | The `VirtualCamera` object provides a wrapper for the intrinsics and pose of 4 | a camera in a scene. 5 | The camera's intrinsics are represented by a `CameraIntrinsics` object from 6 | the Berkeley AUTOLab's `perception` package, and the camera's pose is represented 7 | by a `RigidTransform` object from the `autolab_core` package. 8 | 9 | The camera's frame of reference is given by an x-axis pointing to the right, 10 | a y-axis pointing up, and a z-axis pointing away from the scene (i.e. into the eye of the camera) 11 | along the optical axis. 12 | 13 | VirtualCamera 14 | ~~~~~~~~~~~~~ 15 | .. autoclass:: meshrender.VirtualCamera 16 | 17 | -------------------------------------------------------------------------------- /docs/source/api/lighting.rst: -------------------------------------------------------------------------------- 1 | Lighting 2 | ======== 3 | There are several types of lighting available in `meshrender`, including 4 | ambient, directional, and point lights. 5 | All lighting classes are subclasses of `Light`. 6 | 7 | 8 | AmbientLight 9 | ~~~~~~~~~~~~ 10 | .. autoclass:: meshrender.AmbientLight 11 | 12 | DirectionalLight 13 | ~~~~~~~~~~~~~~~~ 14 | .. autoclass:: meshrender.DirectionalLight 15 | 16 | PointLight 17 | ~~~~~~~~~~ 18 | ..
autoclass:: meshrender.PointLight 19 | -------------------------------------------------------------------------------- /docs/source/api/material.rst: -------------------------------------------------------------------------------- 1 | Material 2 | ======== 3 | Each object has a set of material properties which define its color, 4 | shininess, smoothness, and response to ambient, diffuse, and specular lighting. 5 | Currently, per-vertex coloration and texture mapping are not supported, although 6 | we plan to add support for them soon. 7 | 8 | MaterialProperties 9 | ~~~~~~~~~~~~~~~~~~ 10 | .. autoclass:: meshrender.MaterialProperties 11 | -------------------------------------------------------------------------------- /docs/source/api/random_variables.rst: -------------------------------------------------------------------------------- 1 | Random Variables 2 | ================ 3 | A set of random variables for sampling camera positions and rendering the scene from those positions. 4 | 5 | CameraSample 6 | ~~~~~~~~~~~~ 7 | .. autoclass:: meshrender.CameraSample 8 | 9 | RenderSample 10 | ~~~~~~~~~~~~ 11 | .. autoclass:: meshrender.RenderSample 12 | 13 | UniformViewsphereRandomVariable 14 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 15 | .. autoclass:: meshrender.UniformViewsphereRandomVariable 16 | 17 | UniformPlanarWorksurfaceRandomVariable 18 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 19 | .. autoclass:: meshrender.UniformPlanarWorksurfaceRandomVariable 20 | 21 | UniformPlanarWorksurfaceImageRandomVariable 22 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 23 | .. autoclass:: meshrender.UniformPlanarWorksurfaceImageRandomVariable 24 | 25 | -------------------------------------------------------------------------------- /docs/source/api/scene.rst: -------------------------------------------------------------------------------- 1 | Scene 2 | ===== 3 | The `Scene` object provides the central abstraction of the `meshrender` package. 
4 | It manages objects, lights, and the camera, and it wraps the key functions that 5 | render images. 6 | 7 | Scene 8 | ~~~~~ 9 | .. autoclass:: meshrender.Scene 10 | -------------------------------------------------------------------------------- /docs/source/api/sceneobject.rst: -------------------------------------------------------------------------------- 1 | Scene Objects 2 | ============= 3 | Objects in a scene are represented by a triangular mesh (a `Trimesh` from the `trimesh` package), 4 | a 3D pose relative to the world (a `RigidTransform` from `autolab_core`), and a set of 5 | material properties (`MaterialProperties`). 6 | 7 | SceneObject 8 | ~~~~~~~~~~~ 9 | .. autoclass:: meshrender.SceneObject 10 | 11 | InstancedSceneObject 12 | ~~~~~~~~~~~~~~~~~~~~ 13 | .. autoclass:: meshrender.InstancedSceneObject 14 | -------------------------------------------------------------------------------- /docs/source/api/viewer.rst: -------------------------------------------------------------------------------- 1 | Viewer 2 | ====== 3 | This package comes with a 3D viewer which allows one to navigate around a scene 4 | using a camera controlled by a virtual trackball. 5 | 6 | SceneViewer 7 | ~~~~~~~~~~~ 8 | .. autoclass:: meshrender.SceneViewer 9 | 10 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # core documentation build configuration file, created by 4 | # sphinx-quickstart on Sun Oct 16 14:33:48 2016. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default.
14 | 15 | import sys 16 | import os 17 | import sphinx_rtd_theme 18 | from meshrender import __version__ 19 | 20 | # If extensions (or modules to document with autodoc) are in another directory, 21 | # add these directories to sys.path here. If the directory is relative to the 22 | # documentation root, use os.path.abspath to make it absolute, like shown here. 23 | sys.path.insert(0, os.path.abspath('../../')) 24 | 25 | # -- General configuration ------------------------------------------------ 26 | 27 | # If your documentation needs a minimal Sphinx version, state it here. 28 | #needs_sphinx = '1.0' 29 | 30 | # Add any Sphinx extension module names here, as strings. They can be 31 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 32 | # ones. 33 | extensions = [ 34 | 'sphinx.ext.autodoc', 'sphinxcontrib.napoleon' 35 | ] 36 | autoclass_content = 'class' 37 | autodoc_member_order = 'bysource' 38 | autodoc_default_flags = ['members', 'show-inheritance'] 39 | napoleon_include_special_with_doc = True 40 | napoleon_include_init_with_doc = True 41 | 42 | # Add any paths that contain templates here, relative to this directory. 43 | templates_path = ['_templates'] 44 | 45 | # The suffix(es) of source filenames. 46 | # You can specify multiple suffix as a list of string: 47 | # source_suffix = ['.rst', '.md'] 48 | source_suffix = '.rst' 49 | 50 | # The encoding of source files. 51 | #source_encoding = 'utf-8-sig' 52 | 53 | # The master toctree document. 54 | master_doc = 'index' 55 | 56 | # General information about the project. 57 | project = u'meshrender' 58 | copyright = u'2017, Matthew Matl' 59 | author = u'Matthew Matl' 60 | 61 | # The version info for the project you're documenting, acts as replacement for 62 | # |version| and |release|, also used in various other places throughout the 63 | # built documents. 64 | # 65 | # The short X.Y version. 66 | version = __version__ 67 | # The full version, including alpha/beta/rc tags. 
68 | release = __version__ 69 | 70 | # The language for content autogenerated by Sphinx. Refer to documentation 71 | # for a list of supported languages. 72 | # 73 | # This is also used if you do content translation via gettext catalogs. 74 | # Usually you set "language" from the command line for these cases. 75 | language = None 76 | 77 | # There are two options for replacing |today|: either, you set today to some 78 | # non-false value, then it is used: 79 | #today = '' 80 | # Else, today_fmt is used as the format for a strftime call. 81 | #today_fmt = '%B %d, %Y' 82 | 83 | # List of patterns, relative to source directory, that match files and 84 | # directories to ignore when looking for source files. 85 | exclude_patterns = [] 86 | 87 | # The reST default role (used for this markup: `text`) to use for all 88 | # documents. 89 | #default_role = None 90 | 91 | # If true, '()' will be appended to :func: etc. cross-reference text. 92 | #add_function_parentheses = True 93 | 94 | # If true, the current module name will be prepended to all description 95 | # unit titles (such as .. function::). 96 | #add_module_names = True 97 | 98 | # If true, sectionauthor and moduleauthor directives will be shown in the 99 | # output. They are ignored by default. 100 | #show_authors = False 101 | 102 | # The name of the Pygments (syntax highlighting) style to use. 103 | pygments_style = 'sphinx' 104 | 105 | # A list of ignored prefixes for module index sorting. 106 | #modindex_common_prefix = [] 107 | 108 | # If true, keep warnings as "system message" paragraphs in the built documents. 109 | #keep_warnings = False 110 | 111 | # If true, `todo` and `todoList` produce output, else they produce nothing. 112 | todo_include_todos = False 113 | 114 | 115 | # -- Options for HTML output ---------------------------------------------- 116 | 117 | # The theme to use for HTML and HTML Help pages. See the documentation for 118 | # a list of builtin themes. 
119 | html_theme = 'sphinx_rtd_theme' 120 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 121 | 122 | # Theme options are theme-specific and customize the look and feel of a theme 123 | # further. For a list of options available for each theme, see the 124 | # documentation. 125 | #html_theme_options = {} 126 | 127 | # Add any paths that contain custom themes here, relative to this directory. 128 | #html_theme_path = [] 129 | 130 | # The name for this set of Sphinx documents. If None, it defaults to 131 | # " v documentation". 132 | #html_title = None 133 | 134 | # A shorter title for the navigation bar. Default is the same as html_title. 135 | #html_short_title = None 136 | 137 | # The name of an image file (relative to this directory) to place at the top 138 | # of the sidebar. 139 | #html_logo = None 140 | 141 | # The name of an image file (relative to this directory) to use as a favicon of 142 | # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 143 | # pixels large. 144 | #html_favicon = None 145 | 146 | # Add any paths that contain custom static files (such as style sheets) here, 147 | # relative to this directory. They are copied after the builtin static files, 148 | # so a file named "default.css" will overwrite the builtin "default.css". 149 | html_static_path = ['_static'] 150 | 151 | # Add any extra paths that contain custom files (such as robots.txt or 152 | # .htaccess) here, relative to this directory. These files are copied 153 | # directly to the root of the documentation. 154 | #html_extra_path = [] 155 | 156 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 157 | # using the given strftime format. 158 | #html_last_updated_fmt = '%b %d, %Y' 159 | 160 | # If true, SmartyPants will be used to convert quotes and dashes to 161 | # typographically correct entities. 162 | #html_use_smartypants = True 163 | 164 | # Custom sidebar templates, maps document names to template names. 
165 | #html_sidebars = {} 166 | 167 | # Additional templates that should be rendered to pages, maps page names to 168 | # template names. 169 | #html_additional_pages = {} 170 | 171 | # If false, no module index is generated. 172 | #html_domain_indices = True 173 | 174 | # If false, no index is generated. 175 | #html_use_index = True 176 | 177 | # If true, the index is split into individual pages for each letter. 178 | #html_split_index = False 179 | 180 | # If true, links to the reST sources are added to the pages. 181 | #html_show_sourcelink = True 182 | 183 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 184 | #html_show_sphinx = True 185 | 186 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 187 | #html_show_copyright = True 188 | 189 | # If true, an OpenSearch description file will be output, and all pages will 190 | # contain a tag referring to it. The value of this option must be the 191 | # base URL from which the finished HTML is served. 192 | #html_use_opensearch = '' 193 | 194 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 195 | #html_file_suffix = None 196 | 197 | # Language to be used for generating the HTML full-text search index. 198 | # Sphinx supports the following languages: 199 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 200 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' 201 | #html_search_language = 'en' 202 | 203 | # A dictionary with options for the search language support, empty by default. 204 | # Now only 'ja' uses this config value 205 | #html_search_options = {'type': 'default'} 206 | 207 | # The name of a javascript file (relative to the configuration directory) that 208 | # implements a search results scorer. If empty, the default will be used. 209 | #html_search_scorer = 'scorer.js' 210 | 211 | # Output file base name for HTML help builder. 
212 | htmlhelp_basename = 'coredoc' 213 | 214 | # -- Options for LaTeX output --------------------------------------------- 215 | 216 | latex_elements = { 217 | # The paper size ('letterpaper' or 'a4paper'). 218 | #'papersize': 'letterpaper', 219 | 220 | # The font size ('10pt', '11pt' or '12pt'). 221 | #'pointsize': '10pt', 222 | 223 | # Additional stuff for the LaTeX preamble. 224 | #'preamble': '', 225 | 226 | # Latex figure (float) alignment 227 | #'figure_align': 'htbp', 228 | } 229 | 230 | # Grouping the document tree into LaTeX files. List of tuples 231 | # (source start file, target name, title, 232 | # author, documentclass [howto, manual, or own class]). 233 | latex_documents = [ 234 | (master_doc, 'meshrender.tex', u'meshrender Documentation', 235 | u'Matthew Matl', 'manual'), 236 | ] 237 | 238 | # The name of an image file (relative to this directory) to place at the top of 239 | # the title page. 240 | #latex_logo = None 241 | 242 | # For "manual" documents, if this is true, then toplevel headings are parts, 243 | # not chapters. 244 | #latex_use_parts = False 245 | 246 | # If true, show page references after internal links. 247 | #latex_show_pagerefs = False 248 | 249 | # If true, show URL addresses after external links. 250 | #latex_show_urls = False 251 | 252 | # Documents to append as an appendix to all manuals. 253 | #latex_appendices = [] 254 | 255 | # If false, no module index is generated. 256 | #latex_domain_indices = True 257 | 258 | 259 | # -- Options for manual page output --------------------------------------- 260 | 261 | # One entry per manual page. List of tuples 262 | # (source start file, name, description, authors, manual section). 263 | man_pages = [ 264 | (master_doc, 'meshrender', u'meshrender Documentation', 265 | [author], 1) 266 | ] 267 | 268 | # If true, show URL addresses after external links. 
269 | #man_show_urls = False 270 | 271 | 272 | # -- Options for Texinfo output ------------------------------------------- 273 | 274 | # Grouping the document tree into Texinfo files. List of tuples 275 | # (source start file, target name, title, author, 276 | # dir menu entry, description, category) 277 | texinfo_documents = [ 278 | (master_doc, 'meshrender', u'meshrender Documentation', 279 | author, 'meshrender', 'One line description of project.', 280 | 'Miscellaneous'), 281 | ] 282 | 283 | # Documents to append as an appendix to all manuals. 284 | #texinfo_appendices = [] 285 | 286 | # If false, no module index is generated. 287 | #texinfo_domain_indices = True 288 | 289 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 290 | #texinfo_show_urls = 'footnote' 291 | 292 | # If true, do not generate a @detailmenu in the "Top" node's menu. 293 | #texinfo_no_detailmenu = False 294 | -------------------------------------------------------------------------------- /docs/source/examples/example.rst: -------------------------------------------------------------------------------- 1 | Example Usage 2 | ============= 3 | 4 | Everything in the `meshrender` module revolves around the `Scene` object, 5 | which manages the lights, objects, and camera in the world. 6 | 7 | Simple Example 8 | -------------- 9 | 10 | In this example, we will render a pair of triangular meshes, illuminated by a point light source. 11 | 12 | ..
code-block:: python 13 | 14 | import numpy as np 15 | import trimesh 16 | from autolab_core import RigidTransform 17 | from perception import CameraIntrinsics, RenderMode 18 | 19 | from meshrender import Scene, MaterialProperties, AmbientLight, PointLight, SceneObject, VirtualCamera 20 | 21 | # Start with an empty scene 22 | scene = Scene() 23 | 24 | #==================================== 25 | # Add objects to the scene 26 | #==================================== 27 | 28 | # Begin by loading meshes 29 | cube_mesh = trimesh.load_mesh('cube.obj') 30 | sphere_mesh = trimesh.load_mesh('sphere.obj') 31 | 32 | # Set up each object's pose in the world 33 | cube_pose = RigidTransform( 34 | rotation=np.eye(3), 35 | translation=np.array([0.0, 0.0, 0.0]), 36 | from_frame='obj', 37 | to_frame='world' 38 | ) 39 | sphere_pose = RigidTransform( 40 | rotation=np.eye(3), 41 | translation=np.array([1.0, 1.0, 0.0]), 42 | from_frame='obj', 43 | to_frame='world' 44 | ) 45 | 46 | # Set up each object's material properties 47 | cube_material = MaterialProperties( 48 | color = np.array([0.1, 0.1, 0.5]), 49 | k_a = 0.3, 50 | k_d = 1.0, 51 | k_s = 1.0, 52 | alpha = 10.0, 53 | smooth=False 54 | ) 55 | sphere_material = MaterialProperties( 56 | color = np.array([0.1, 0.1, 0.5]), 57 | k_a = 0.3, 58 | k_d = 1.0, 59 | k_s = 1.0, 60 | alpha = 10.0, 61 | smooth=True 62 | ) 63 | 64 | # Create SceneObjects for each object 65 | cube_obj = SceneObject(cube_mesh, cube_pose, cube_material) 66 | sphere_obj = SceneObject(sphere_mesh, sphere_pose, sphere_material) 67 | 68 | # Add the SceneObjects to the scene 69 | scene.add_object('cube', cube_obj) 70 | scene.add_object('sphere', sphere_obj) 71 | 72 | #==================================== 73 | # Add lighting to the scene 74 | #==================================== 75 | 76 | # Create an ambient light 77 | ambient = AmbientLight( 78 | color=np.array([1.0, 1.0, 1.0]), 79 | strength=1.0 80 | ) 81 | 82 | # Create a point light 83 | point = PointLight( 84 | 
location=np.array([1.0, 2.0, 3.0]), 85 | color=np.array([1.0, 1.0, 1.0]), 86 | strength=10.0 87 | ) 88 | 89 | # Add the lights to the scene 90 | scene.ambient_light = ambient # only one ambient light per scene 91 | scene.add_light('point_light_one', point) 92 | 93 | #==================================== 94 | # Add a camera to the scene 95 | #==================================== 96 | 97 | # Set up camera intrinsics 98 | ci = CameraIntrinsics( 99 | frame = 'camera', 100 | fx = 525.0, 101 | fy = 525.0, 102 | cx = 319.5, 103 | cy = 239.5, 104 | skew=0.0, 105 | height=480, 106 | width=640 107 | ) 108 | 109 | # Set up the camera pose (z axis faces away from scene, x to right, y up) 110 | cp = RigidTransform( 111 | rotation = np.array([ 112 | [0.0, 0.0, -1.0], 113 | [0.0, 1.0, 0.0], 114 | [1.0, 0.0, 0.0] 115 | ]), 116 | translation = np.array([-0.3, 0.0, 0.0]), 117 | from_frame='camera', 118 | to_frame='world' 119 | ) 120 | 121 | # Create a VirtualCamera 122 | camera = VirtualCamera(ci, cp) 123 | 124 | # Add the camera to the scene 125 | scene.camera = camera 126 | 127 | #==================================== 128 | # Render images 129 | #==================================== 130 | 131 | # Render raw numpy arrays containing color and depth 132 | color_image_raw, depth_image_raw = scene.render(render_color=True) 133 | 134 | # Alternatively, just render a depth image 135 | depth_image_raw = scene.render(render_color=False) 136 | 137 | # Alternatively, collect wrapped images 138 | wrapped_color, wrapped_depth, wrapped_segmask = scene.wrapped_render( 139 | [RenderMode.COLOR, RenderMode.DEPTH, RenderMode.SEGMASK] 140 | ) 141 | 142 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. core documentation master file, created by 2 | sphinx-quickstart on Sun Oct 16 14:33:48 2016. 
3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Meshrender Documentation 7 | ======================== 8 | Welcome to the documentation for meshrender! 9 | This module, which is built on top of PyOpenGL, is designed to make it easy 10 | to render images of 3D scenes in pure Python. 11 | It supports a scene abstraction and allows users to specify material properties, 12 | camera intrinsics, and lighting. 13 | 14 | .. toctree:: 15 | :maxdepth: 2 16 | :caption: Installation Guide 17 | 18 | install/install.rst 19 | 20 | .. toctree:: 21 | :maxdepth: 2 22 | :caption: API Documentation 23 | :glob: 24 | 25 | api/* 26 | 27 | .. toctree:: 28 | :maxdepth: 2 29 | :caption: Examples 30 | 31 | examples/example.rst 32 | 33 | Indices and tables 34 | ================== 35 | 36 | * :ref:`genindex` 37 | * :ref:`modindex` 38 | * :ref:`search` 39 | 40 | -------------------------------------------------------------------------------- /docs/source/install/install.rst: -------------------------------------------------------------------------------- 1 | Python Installation 2 | ~~~~~~~~~~~~~~~~~~~ 3 | 4 | This package is installable via `pip` :: 5 | 6 | $ pip install meshrender 7 | 8 | Documentation 9 | ~~~~~~~~~~~~~ 10 | 11 | Building 12 | """""""" 13 | Building `meshrender`'s documentation requires a few extra dependencies -- 14 | specifically, `sphinx`_ and a few plugins. 15 | 16 | .. _sphinx: http://www.sphinx-doc.org/en/1.4.8/ 17 | 18 | To install the dependencies required, simply change directories into the `meshrender` source and run :: 19 | 20 | $ pip install .[docs] 21 | 22 | Then, go to the `docs` directory and run ``make`` with the appropriate target. 23 | For example, :: 24 | 25 | $ cd docs/ 26 | $ make html 27 | 28 | will generate a set of web pages. Any documentation files 29 | generated in this manner can be found in `docs/build`.
30 | 31 | Deploying 32 | """"""""" 33 | To deploy documentation to the Github Pages site for the repository, 34 | simply push any changes to the documentation source to master 35 | and then run :: 36 | 37 | $ . gh_deploy.sh 38 | 39 | from the `docs` folder. This script will automatically checkout the 40 | ``gh-pages`` branch, build the documentation from source, and push it 41 | to Github. 42 | 43 | -------------------------------------------------------------------------------- /examples/test_viewer.py: -------------------------------------------------------------------------------- 1 | import pyglet 2 | pyglet.options['shadow_window'] = False 3 | import numpy as np 4 | import trimesh 5 | from autolab_core import RigidTransform 6 | from perception import CameraIntrinsics, RenderMode, ColorImage, DepthImage 7 | 8 | import os 9 | #os.environ['MESHRENDER_EGL_OFFSCREEN'] = 't' 10 | from meshrender import Scene, MaterialProperties, AmbientLight, PointLight, SceneObject, VirtualCamera, DirectionalLight, SceneViewer, UniformPlanarWorksurfaceImageRandomVariable, InstancedSceneObject 11 | 12 | # Start with an empty scene 13 | scene = Scene() 14 | 15 | #==================================== 16 | # Add objects to the scene 17 | #==================================== 18 | 19 | # Begin by loading meshes 20 | pawn_mesh = trimesh.load_mesh('./models/pawn.obj') 21 | #pawn_mesh = trimesh.load_mesh('./models/pawn_large.obj') 22 | bar_mesh = trimesh.load_mesh('./models/bar_clamp.obj') 23 | 24 | # Set up each object's pose in the world 25 | pawn_pose = RigidTransform( 26 | rotation=np.eye(3), 27 | translation=np.array([0.0, 0.0, 0.0]), 28 | from_frame='obj', 29 | to_frame='world' 30 | ) 31 | bar_pose = RigidTransform( 32 | rotation=np.eye(3), 33 | translation=np.array([0.1, 0.07, 0.00]), 34 | from_frame='obj', 35 | to_frame='world' 36 | ) 37 | 38 | # Set up each object's material properties 39 | pawn_material = MaterialProperties( 40 | color = 5.0*np.array([0.1, 0.1, 0.1]), 41 | k_a = 
0.3, 42 | k_d = 0.5, 43 | k_s = 0.2, 44 | alpha = 10.0, 45 | smooth=False, 46 | wireframe=False 47 | ) 48 | #bar_material = MaterialProperties( 49 | # color = 7.0*np.array([0.1, 0.1, 0.1]), 50 | # k_a = 0.5, 51 | # k_d = 0.3, 52 | # k_s = 0.1, 53 | # alpha = 10.0, 54 | # smooth=False 55 | #) 56 | bar_material = pawn_material 57 | 58 | # Create SceneObjects for each object 59 | pawn_obj = SceneObject(pawn_mesh, pawn_pose, pawn_material) 60 | bar_obj = SceneObject(bar_mesh, bar_pose, bar_material) 61 | pawn_inst_obj = InstancedSceneObject(pawn_mesh, [pawn_pose, bar_pose], colors=np.array([[0,0,1],[0,1,0]]), material=pawn_material) 62 | 63 | # Add the SceneObjects to the scene 64 | scene.add_object('pawn', pawn_inst_obj) 65 | scene.add_object('bar', bar_obj) 66 | 67 | #==================================== 68 | # Add lighting to the scene 69 | #==================================== 70 | 71 | # Create an ambient light 72 | ambient = AmbientLight( 73 | color=np.array([1.0, 1.0, 1.0]), 74 | strength=1.0 75 | ) 76 | 77 | # Create a point light 78 | 79 | points = [] 80 | #for i in range(6): 81 | # points.append( 82 | # PointLight( 83 | # location=np.array([-3.0, 3.0-i, 3.0]), 84 | # color=np.array([1.0, 1.0, 1.0]), 85 | # strength=4.0 86 | # ) 87 | # ) 88 | # 89 | #for i, point in enumerate(points): 90 | # scene.add_light('point_{}'.format(i), point) 91 | 92 | # Add the lights to the scene 93 | scene.ambient_light = ambient # only one ambient light per scene 94 | 95 | dl = DirectionalLight( 96 | direction=np.array([0.0, 0.0, -1.0]), 97 | color=np.array([1.0, 1.0, 1.0]), 98 | strength=2.0 99 | ) 100 | scene.add_light('direc', dl) 101 | 102 | #==================================== 103 | # Add a camera to the scene 104 | #==================================== 105 | 106 | # Set up camera intrinsics 107 | ci = CameraIntrinsics( 108 | frame = 'camera', 109 | fx = 525.0, 110 | fy = 525.0, 111 | cx = 320.0, 112 | cy = 240.0, 113 | skew=0.0, 114 | height=480, 115 | width=640 116 | ) 
117 | 118 | # Set up the camera pose (z axis faces away from scene, x to right, y up) 119 | cp = RigidTransform( 120 | rotation = np.array([ 121 | [0.0, 0.0, 1.0], 122 | [0.0, -1.0, 0.0], 123 | [1.0, 0.0, 0.0] 124 | ]), 125 | translation = np.array([-0.3, 0.0, 0.0]), 126 | from_frame='camera', 127 | to_frame='world' 128 | ) 129 | 130 | # Create a VirtualCamera 131 | camera = VirtualCamera(ci, cp) 132 | 133 | # Add the camera to the scene 134 | scene.camera = camera 135 | 136 | #==================================== 137 | # Render images 138 | #==================================== 139 | 140 | # Render raw numpy arrays containing color and depth 141 | color_image_raw, depth_image_raw = scene.render(render_color=True) 142 | 143 | # Alternatively, just render a depth image 144 | depth_image_raw = scene.render(render_color=False) 145 | 146 | # Alternatively, collect wrapped images 147 | wrapped_color, wrapped_depth, wrapped_segmask = scene.wrapped_render( 148 | [RenderMode.COLOR, RenderMode.DEPTH, RenderMode.SEGMASK] 149 | ) 150 | 151 | wrapped_color.save('output/color.jpg') 152 | wrapped_depth.save('output/depth.jpg') 153 | 154 | # Test random variables 155 | cfg = { 156 | 'focal_length': { 157 | 'min' : 520, 158 | 'max' : 530, 159 | }, 160 | 'delta_optical_center': { 161 | 'min' : 0.0, 162 | 'max' : 0.0, 163 | }, 164 | 'radius': { 165 | 'min' : 0.5, 166 | 'max' : 0.7, 167 | }, 168 | 'azimuth': { 169 | 'min' : 0.0, 170 | 'max' : 360.0, 171 | }, 172 | 'elevation': { 173 | 'min' : 0.10, 174 | 'max' : 10.0, 175 | }, 176 | 'roll': { 177 | 'min' : -0.2, 178 | 'max' : 0.2, 179 | }, 180 | 'x': { 181 | 'min' : -0.01, 182 | 'max' : 0.01, 183 | }, 184 | 'y': { 185 | 'min' : -0.01, 186 | 'max' : 0.01, 187 | }, 188 | 'im_width': 600, 189 | 'im_height': 600 190 | } 191 | 192 | urv = UniformPlanarWorksurfaceImageRandomVariable('pawn', scene, [RenderMode.COLOR], 'camera', cfg) 193 | renders = urv.sample(10, front_and_back=True) 194 | 195 | for i, render in enumerate(renders): 196 | 
color = render.renders[RenderMode.COLOR] 197 | color.save('output/random_{}.jpg'.format(i)) 198 | 199 | v = SceneViewer(scene, raymond_lighting=True) 200 | -------------------------------------------------------------------------------- /meshrender/__init__.py: -------------------------------------------------------------------------------- 1 | from .version import __version__ 2 | from .camera import VirtualCamera 3 | from .material import MaterialProperties 4 | from .scene_object import SceneObject, InstancedSceneObject 5 | from .light import Light, AmbientLight, DirectionalLight, PointLight 6 | from .scene import Scene 7 | from .random_variables import CameraSample, RenderSample, \ 8 | ViewsphereDiscretizer, \ 9 | UniformPlanarWorksurfaceRandomVariable, \ 10 | UniformPlanarWorksurfaceImageRandomVariable 11 | from .viewer import SceneViewer 12 | -------------------------------------------------------------------------------- /meshrender/camera.py: -------------------------------------------------------------------------------- 1 | """A virtual camera in a 3D scene. 2 | """ 3 | import numpy as np 4 | 5 | from perception import CameraIntrinsics 6 | from autolab_core import RigidTransform 7 | 8 | from .constants import Z_NEAR, Z_FAR 9 | 10 | class VirtualCamera(object): 11 | """A virtual camera, including its intrinsics and its pose. 12 | """ 13 | 14 | def __init__(self, intrinsics, T_camera_world=RigidTransform(from_frame='camera', to_frame='world'), 15 | z_near=Z_NEAR, z_far=Z_FAR): 16 | """Initialize a virtual camera with the given intrinsics and initial pose in the world. 17 | 18 | Parameters 19 | ---------- 20 | intrinsics : percetion.CameraIntrinsics 21 | The intrinsic properties of the camera, from the Berkeley AUTOLab's perception module. 22 | T_camera_world : autolab_core.RigidTransform 23 | A transform from camera to world coordinates that indicates 24 | the camera's pose. 
The camera frame's x axis points right, 25 | its y axis points down, and its z axis points towards 26 | the scene (i.e. standard OpenCV coordinates). 27 | z_near : float 28 | The near-plane clipping distance. 29 | z_far : float 30 | The far-plane clipping distance. 31 | """ 32 | if not isinstance(intrinsics, CameraIntrinsics): 33 | raise ValueError('intrinsics must be an object of type CameraIntrinsics') 34 | 35 | self._intrinsics = intrinsics 36 | self.T_camera_world = T_camera_world 37 | self._z_near = z_near 38 | self._z_far = z_far 39 | 40 | @property 41 | def intrinsics(self): 42 | """perception.CameraIntrinsics: The camera's intrinsic parameters. 43 | """ 44 | return self._intrinsics 45 | 46 | @property 47 | def T_camera_world(self): 48 | """autolab_core.RigidTransform: The camera's pose relative to the world frame. 49 | """ 50 | return self._T_camera_world 51 | 52 | @T_camera_world.setter 53 | def T_camera_world(self, T): 54 | if not isinstance(T, RigidTransform): 55 | raise ValueError('transform must be an object of type RigidTransform') 56 | if not T.from_frame == self._intrinsics.frame or not T.to_frame == 'world': 57 | raise ValueError('transform must be from {} -> world, got {} -> {}'.format(self._intrinsics.frame, T.from_frame, T.to_frame)) 58 | self._T_camera_world = T 59 | 60 | @property 61 | def z_near(self): 62 | """float: The near clipping distance. 63 | """ 64 | return self._z_near 65 | 66 | @z_near.setter 67 | def z_near(self, z_near): 68 | self._z_near = z_near 69 | 70 | @property 71 | def z_far(self): 72 | """float: The far clipping distance. 73 | """ 74 | return self._z_far 75 | 76 | @z_far.setter 77 | def z_far(self, z_far): 78 | self._z_far = z_far 79 | 80 | @property 81 | def V(self): 82 | """(4,4) float: A homogenous rigid transform matrix mapping world coordinates 83 | to camera coordinates. Equivalent to the OpenGL View matrix. 
84 | 85 | Note that the OpenGL camera coordinate system has x to the right, y up, and z away 86 | from the scene towards the eye! 87 | """ 88 | T_camera_world_GL = self.T_camera_world.matrix.copy() 89 | T_camera_world_GL[:3,2] = -T_camera_world_GL[:3,2] # Reverse Z axis 90 | T_camera_world_GL[:3,1] = -T_camera_world_GL[:3,1] # Reverse Y axis; 91 | T_world_camera_GL = np.linalg.inv(T_camera_world_GL) 92 | return T_world_camera_GL 93 | 94 | @property 95 | def P(self): 96 | """(4,4) float: A homogenous projective matrix for the camera, equivalent 97 | to the OpenGL Projection matrix. 98 | """ 99 | P = np.zeros((4,4)) 100 | P[0][0] = 2.0 * self.intrinsics.fx / self.intrinsics.width 101 | P[1][1] = 2.0 * self.intrinsics.fy / self.intrinsics.height 102 | P[0][2] = 1.0 - 2.0 * self.intrinsics.cx / self.intrinsics.width 103 | P[1][2] = 2.0 * self.intrinsics.cy / self.intrinsics.height - 1.0 104 | P[2][2] = -(self._z_far + self._z_near) / (self._z_far - self._z_near) 105 | P[3][2] = -1.0 106 | P[2][3] = -(2.0 * self._z_far * self._z_near) / (self._z_far - self._z_near) 107 | return P 108 | 109 | 110 | def resize(self, new_width, new_height): 111 | """Reset the camera intrinsics for a new width and height viewing window. 112 | 113 | Parameters 114 | ---------- 115 | new_width : int 116 | The new window width, in pixels. 117 | new_height : int 118 | The new window height, in pixels. 
119 | """ 120 | x_scale = float(new_width) / self.intrinsics.width 121 | y_scale = float(new_height) / self.intrinsics.height 122 | center_x = float(self.intrinsics.width-1)/2 123 | center_y = float(self.intrinsics.height-1)/2 124 | orig_cx_diff = self.intrinsics.cx - center_x 125 | orig_cy_diff = self.intrinsics.cy - center_y 126 | scaled_center_x = float(new_width-1) / 2 127 | scaled_center_y = float(new_height-1) / 2 128 | cx = scaled_center_x + x_scale * orig_cx_diff 129 | cy = scaled_center_y + y_scale * orig_cy_diff 130 | fx = self.intrinsics.fx * x_scale 131 | fy = self.intrinsics.fy * x_scale 132 | scaled_intrinsics = CameraIntrinsics(frame=self.intrinsics.frame, 133 | fx=fx, fy=fy, skew=self.intrinsics.skew, 134 | cx=cx, cy=cy, height=new_height, width=new_width) 135 | self._intrinsics = scaled_intrinsics 136 | 137 | -------------------------------------------------------------------------------- /meshrender/constants.py: -------------------------------------------------------------------------------- 1 | Z_NEAR = 0.05 # Near clipping plane, in meters 2 | Z_FAR = 100.0 # Far clipping plane, in meters 3 | MAX_N_LIGHTS = 10 # Maximum number of lights allowed 4 | OPEN_GL_MAJOR = 4 # Target OpenGL Major Version 5 | OPEN_GL_MINOR = 1 # Target OpenGL Minor Version 6 | -------------------------------------------------------------------------------- /meshrender/light.py: -------------------------------------------------------------------------------- 1 | """A set of all allowed lights in a Scene. 2 | """ 3 | import numpy as np 4 | 5 | class Light(object): 6 | """Base class for all light objects. 7 | """ 8 | 9 | def __init__(self, color, strength): 10 | """Initialize a light with the given color and strength. 11 | 12 | Parameters 13 | ---------- 14 | color : (3,) float 15 | The RGB color of the light in (0,1). 16 | strength : float 17 | The strength of the light. 
18 | """ 19 | self._color = color 20 | self._strength = strength 21 | 22 | @property 23 | def color(self): 24 | """(3,) float: The RGB color of the light in (0,1). 25 | """ 26 | return self._color 27 | 28 | @property 29 | def strength(self): 30 | """float: The strength of the light. 31 | """ 32 | return self._strength 33 | 34 | class AmbientLight(Light): 35 | """An ambient light, which flatly shades all objects in the world. 36 | """ 37 | 38 | def __init__(self, color, strength): 39 | """Initialize an ambient light with the given color and strength. 40 | 41 | Parameters 42 | ---------- 43 | color : (3,) float 44 | The RGB color of the light in (0,1). 45 | strength : float 46 | The strength of the light. 47 | """ 48 | super(AmbientLight, self).__init__(color, strength) 49 | 50 | 51 | class DirectionalLight(Light): 52 | """A far-away light with a given direction. 53 | """ 54 | 55 | def __init__(self, direction, color, strength): 56 | """Initialize a directional light with the given direction, color, and strength. 57 | 58 | Parameters 59 | ---------- 60 | direction : (3,) float 61 | A unit vector indicating the direction of the light. 62 | color : (3,) float 63 | The RGB color of the light in (0,1). 64 | strength : float 65 | The strength of the light. 66 | """ 67 | self._direction = direction 68 | if np.linalg.norm(direction) > 0: 69 | self._direction = direction / np.linalg.norm(direction) 70 | super(DirectionalLight, self).__init__(color, strength) 71 | 72 | @property 73 | def direction(self): 74 | """(3,) float: A unit vector indicating the direction of the light. 75 | """ 76 | return self._direction 77 | 78 | @direction.setter 79 | def direction(self, d): 80 | self._direction = d 81 | 82 | class PointLight(Light): 83 | """A nearby point light source that shines in all directions. 84 | """ 85 | 86 | def __init__(self, location, color, strength): 87 | """Initialize a point light with the given location, color, and strength. 
88 | 89 | Parameters 90 | ---------- 91 | location : (3,) float 92 | The 3D location of the point light. 93 | color : (3,) float 94 | The RGB color of the light in (0,1). 95 | strength : float 96 | The strength of the light. 97 | """ 98 | self._location = location 99 | super(PointLight, self).__init__(color, strength) 100 | 101 | @property 102 | def location(self): 103 | """(3,) float: The 3D location of the point light. 104 | """ 105 | return self._location 106 | 107 | @location.setter 108 | def location(self, l): 109 | self._location = l 110 | -------------------------------------------------------------------------------- /meshrender/material.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | class MaterialProperties(object): 4 | """A set of material properties describing how an object will look. 5 | """ 6 | 7 | def __init__(self, color=np.array([0.5, 0.5, 0.5]), 8 | k_a=1.0, k_d=1.0, k_s = 1.0, alpha=1.0, 9 | smooth=False, wireframe=False): 10 | """Initialize a set of material properties. 11 | 12 | Parameters 13 | ---------- 14 | color : (3,) float 15 | The RGB color of the object in (0,1). 16 | k_a : float 17 | A multiplier for ambient lighting. 18 | k_d : float 19 | A multiplier for diffuse lighting. 20 | k_s : float 21 | A multiplier for specular lighting. 22 | alpha : float 23 | A multiplier for shininess (higher values indicate 24 | more reflectivity and smaller highlights). 25 | smooth : bool 26 | If True, normals will be interpolated to smooth the mesh. 27 | wireframe : bool 28 | If True, the mesh will be rendered as a wireframe. 29 | """ 30 | self._color = color 31 | self._k_a = k_a 32 | self._k_d = k_d 33 | self._k_s = k_s 34 | self._alpha = alpha 35 | self._smooth = smooth 36 | self._wireframe = wireframe 37 | 38 | @property 39 | def color(self): 40 | """(3,) float: The RGB color of the object in (0,1). 
41 | """ 42 | return self._color 43 | 44 | @property 45 | def k_a(self): 46 | """float: A multiplier for ambient lighting. 47 | """ 48 | return self._k_a 49 | 50 | @property 51 | def k_d(self): 52 | """float: A multiplier for diffuse lighting. 53 | """ 54 | return self._k_d 55 | 56 | @property 57 | def k_s(self): 58 | """float: A multiplier for specular lighting. 59 | """ 60 | return self._k_s 61 | 62 | @property 63 | def alpha(self): 64 | """float: A multiplier for shininess. 65 | """ 66 | return self._alpha 67 | 68 | @property 69 | def smooth(self): 70 | """bool: If True, indicates a smooth rather than piecewise planar surface. 71 | """ 72 | return self._smooth 73 | 74 | @property 75 | def wireframe(self): 76 | """bool: If True, the mesh will be rendered as a wireframe. 77 | """ 78 | return self._wireframe 79 | 80 | def copy(self): 81 | return MaterialProperties( 82 | self.color.copy(), 83 | self.k_a, 84 | self.k_d, 85 | self.k_s, 86 | self.alpha, 87 | self.smooth, 88 | self.wireframe 89 | ) 90 | -------------------------------------------------------------------------------- /meshrender/random_variables.py: -------------------------------------------------------------------------------- 1 | """ 2 | Random variables for sampling camera poses. 3 | Author: Jeff Mahler 4 | """ 5 | import copy 6 | import logging 7 | 8 | import numpy as np 9 | import scipy.stats as ss 10 | 11 | from autolab_core import Point, RigidTransform, RandomVariable, transformations 12 | from autolab_core.utils import sph2cart, cart2sph 13 | from perception import CameraIntrinsics, RenderMode 14 | 15 | from .camera import VirtualCamera 16 | 17 | class CameraSample(object): 18 | """Struct to encapsulate the results of sampling a camera and its pose. 19 | 20 | Attributes 21 | ---------- 22 | object_to_camera_pose : autolab_core.RigidTransform 23 | A transfrom from the object frame to the camera frame. 24 | camera_intr : perception.CameraIntrinsics 25 | The camera's intrinsics. 
26 | radius : float 27 | The distance from the center of the object's frame to the camera's eye. 28 | elev : float 29 | The angle of elevation to the camera from the object frame. 30 | az : float 31 | The angle of rotation of the camera's eye about the object's z axis, starting 32 | from the x axis. 33 | roll : float 34 | The roll angle of the camera about its viewing axis. 35 | tx : float 36 | The x-axis translation of the object. 37 | ty : float 38 | The y-axis translation of the object. 39 | focal : float 40 | The focal length of the camera. 41 | cx : float 42 | The x-axis optical center of the camera. 43 | cy : float 44 | The y-axis optical center of the camera. 45 | """ 46 | def __init__(self, camera_to_world_pose, camera_intr, 47 | radius, elev, az, roll, tx=0, ty=0, focal=0, 48 | cx=0, cy=0): 49 | self.camera_to_world_pose = camera_to_world_pose 50 | self.camera_intr = camera_intr 51 | self.radius = radius 52 | self.elev = elev 53 | self.az = az 54 | self.roll = roll 55 | self.tx = tx 56 | self.ty = ty 57 | self.focal = focal 58 | self.cx = cx 59 | self.cy = cy 60 | 61 | @property 62 | def T_camera_world(self): 63 | return self.camera_to_world_pose 64 | 65 | class RenderSample(object): 66 | """Struct to encapsulate the results of sampling rendered images from a camera. 67 | 68 | Attributes 69 | ---------- 70 | renders : dict 71 | A dictionary mapping perception.RenderMode types to perception.Image classes. 72 | camera : CameraSample 73 | The camera sample that produced this render sample. 74 | """ 75 | def __init__(self, renders, camera): 76 | self.renders = renders 77 | self.camera = camera 78 | 79 | class ViewsphereDiscretizer(object): 80 | 81 | @staticmethod 82 | def get_camera_poses(config, frame='world'): 83 | """Get a list of camera-to-frame poses broken up uniformly about a viewsphere. 84 | 85 | Parameters 86 | ---------- 87 | config : autolab_core.YamlConfig 88 | A configuration containing parameters of the random variable. 
89 | frame : str 90 | The name of the target world frame. 91 | 92 | Notes 93 | ----- 94 | Required parameters of config are specified in Other Parameters. 95 | 96 | Other Parameters 97 | ---------------- 98 | radius: Distance from camera to world origin. 99 | min : float 100 | max : float 101 | n : int 102 | azimuth: Azimuth (angle from x-axis) of camera in degrees. 103 | min : float 104 | max : float 105 | n : int 106 | elevation: Elevation (angle from z-axis) of camera in degrees. 107 | min : float 108 | max : float 109 | n : int 110 | roll: Roll (angle about view direction) of camera in degrees. 111 | min : float 112 | max : float 113 | n : int 114 | 115 | Returns 116 | ------- 117 | list of autolab_core.RigidTransform 118 | A list of camera-to-frame transforms. 119 | """ 120 | min_radius = config['radius']['min'] 121 | max_radius = config['radius']['max'] 122 | num_radius = config['radius']['n'] 123 | radii = np.linspace(min_radius, max_radius, num_radius) 124 | 125 | min_azimuth = config['azimuth']['min'] 126 | max_azimuth = config['azimuth']['max'] 127 | num_azimuth = config['azimuth']['n'] 128 | azimuths = np.linspace(min_azimuth, max_azimuth, num_azimuth) 129 | 130 | min_elev = config['elev']['min'] 131 | max_elev = config['elev']['max'] 132 | num_elev = config['elev']['n'] 133 | elevs = np.linspace(min_elev, max_elev, num_elev) 134 | 135 | min_roll = config['roll']['min'] 136 | max_roll = config['roll']['max'] 137 | num_roll = config['roll']['n'] 138 | rolls = np.linspace(min_roll, max_roll, num_roll) 139 | 140 | camera_to_frame_tfs = [] 141 | for r in radii: 142 | for a in azimuths: 143 | for e in elevs: 144 | for roll in rolls: 145 | cam_center = np.array([sph2cart(r, a, e)]).squeeze() 146 | cz = -cam_center / np.linalg.norm(cam_center) 147 | cx = np.array([cz[1], -cz[0], 0]) 148 | if np.linalg.norm(cx) == 0: 149 | cx = np.array([1.0, 0.0, 0.0]) 150 | cx = cx / np.linalg.norm(cx) 151 | cy = np.cross(cz, cx) 152 | cy = cy / np.linalg.norm(cy) 153 | if 
cy[2] > 0: 154 | cx = -cx 155 | cy = np.cross(cz, cx) 156 | cy = cy / np.linalg.norm(cy) 157 | R_cam_frame = np.array([cx, cy, cz]).T 158 | R_roll = RigidTransform.z_axis_rotation(roll) 159 | R_cam_frame = R_cam_frame.dot(R_roll) 160 | 161 | T_camera_frame = RigidTransform(R_cam_frame, cam_center, 162 | from_frame='camera', to_frame=frame) 163 | camera_to_frame_tfs.append(T_camera_frame) 164 | return camera_to_frame_tfs 165 | 166 | class UniformPlanarWorksurfaceRandomVariable(RandomVariable): 167 | """Uniform distribution over camera poses and intrinsics about a viewsphere over a planar worksurface. 168 | The camera is positioned pointing towards (0,0,0). 169 | """ 170 | 171 | def __init__(self, frame, config, num_prealloc_samples=1): 172 | """Initialize a UniformPlanarWorksurfaceRandomVariable. 173 | 174 | Parameters 175 | ---------- 176 | frame : str 177 | string name of the camera frame 178 | config : autolab_core.YamlConfig 179 | configuration containing parameters of random variable 180 | num_prealloc_samples : int 181 | Number of preallocated samples. 182 | 183 | Notes 184 | ----- 185 | Required parameters of config are specified in Other Parameters 186 | 187 | Other Parameters 188 | ---------- 189 | focal_length : Focal length of the camera 190 | min : float 191 | max : float 192 | delta_optical_center: Change in optical center from neutral. 193 | min : float 194 | max : float 195 | radius: Distance from camera to world origin. 196 | min : float 197 | max : float 198 | azimuth: Azimuth (angle from x-axis) of camera in degrees. 199 | min : float 200 | max : float 201 | elevation: Elevation (angle from z-axis) of camera in degrees. 202 | min : float 203 | max : float 204 | roll: Roll (angle about view direction) of camera in degrees. 205 | min : float 206 | max : float 207 | x: Translation of world center in x axis. 208 | min : float 209 | max : float 210 | y: Translation of world center in y axis. 
211 | min : float 212 | max : float 213 | im_height : float Height of image in pixels. 214 | im_width : float Width of image in pixels. 215 | """ 216 | # read params 217 | self.frame = frame 218 | self.config = config 219 | self.num_prealloc_samples = num_prealloc_samples 220 | 221 | self._parse_config(config) 222 | 223 | # setup random variables 224 | 225 | # camera 226 | self.focal_rv = ss.uniform(loc=self.min_f, scale=self.max_f-self.min_f) 227 | self.cx_rv = ss.uniform(loc=self.min_cx, scale=self.max_cx-self.min_cx) 228 | self.cy_rv = ss.uniform(loc=self.min_cy, scale=self.max_cy-self.min_cy) 229 | 230 | # viewsphere 231 | self.rad_rv = ss.uniform(loc=self.min_radius, scale=self.max_radius-self.min_radius) 232 | self.elev_rv = ss.uniform(loc=self.min_elev, scale=self.max_elev-self.min_elev) 233 | self.az_rv = ss.uniform(loc=self.min_az, scale=self.max_az-self.min_az) 234 | self.roll_rv = ss.uniform(loc=self.min_roll, scale=self.max_roll-self.min_roll) 235 | 236 | # table translation 237 | self.tx_rv = ss.uniform(loc=self.min_x, scale=self.max_x-self.min_x) 238 | self.ty_rv = ss.uniform(loc=self.min_y, scale=self.max_y-self.min_y) 239 | 240 | RandomVariable.__init__(self, self.num_prealloc_samples) 241 | 242 | def _parse_config(self, config): 243 | """Reads parameters from the config into class members. 
244 | """ 245 | # camera params 246 | self.min_f = config['focal_length']['min'] 247 | self.max_f = config['focal_length']['max'] 248 | self.min_delta_c = config['delta_optical_center']['min'] 249 | self.max_delta_c = config['delta_optical_center']['max'] 250 | self.im_height = config['im_height'] 251 | self.im_width = config['im_width'] 252 | 253 | self.mean_cx = float(self.im_width - 1) / 2 254 | self.mean_cy = float(self.im_height - 1) / 2 255 | self.min_cx = self.mean_cx + self.min_delta_c 256 | self.max_cx = self.mean_cx + self.max_delta_c 257 | self.min_cy = self.mean_cy + self.min_delta_c 258 | self.max_cy = self.mean_cy + self.max_delta_c 259 | 260 | # viewsphere params 261 | self.min_radius = config['radius']['min'] 262 | self.max_radius = config['radius']['max'] 263 | self.min_az = np.deg2rad(config['azimuth']['min']) 264 | self.max_az = np.deg2rad(config['azimuth']['max']) 265 | self.min_elev = np.deg2rad(config['elevation']['min']) 266 | self.max_elev = np.deg2rad(config['elevation']['max']) 267 | self.min_roll = np.deg2rad(config['roll']['min']) 268 | self.max_roll = np.deg2rad(config['roll']['max']) 269 | 270 | # params of translation in plane 271 | self.min_x = config['x']['min'] 272 | self.max_x = config['x']['max'] 273 | self.min_y = config['y']['min'] 274 | self.max_y = config['y']['max'] 275 | 276 | def camera_to_world_pose(self, radius, elev, az, roll, x, y): 277 | """Convert spherical coords to a camera pose in the world. 
278 | """ 279 | # generate camera center from spherical coords 280 | delta_t = np.array([x, y, 0]) 281 | camera_z = np.array([sph2cart(radius, az, elev)]).squeeze() 282 | camera_center = camera_z + delta_t 283 | camera_z = -camera_z / np.linalg.norm(camera_z) 284 | 285 | # find the canonical camera x and y axes 286 | camera_x = np.array([camera_z[1], -camera_z[0], 0]) 287 | x_norm = np.linalg.norm(camera_x) 288 | if x_norm == 0: 289 | camera_x = np.array([1, 0, 0]) 290 | else: 291 | camera_x = camera_x / x_norm 292 | camera_y = np.cross(camera_z, camera_x) 293 | camera_y = camera_y / np.linalg.norm(camera_y) 294 | 295 | # Reverse the x direction if needed so that y points down 296 | if camera_y[2] > 0: 297 | camera_x = -camera_x 298 | camera_y = np.cross(camera_z, camera_x) 299 | camera_y = camera_y / np.linalg.norm(camera_y) 300 | 301 | # rotate by the roll 302 | R = np.vstack((camera_x, camera_y, camera_z)).T 303 | roll_rot_mat = transformations.rotation_matrix(roll, camera_z, np.zeros(3))[:3,:3] 304 | R = roll_rot_mat.dot(R) 305 | T_camera_world = RigidTransform(R, camera_center, from_frame=self.frame, to_frame='world') 306 | 307 | return T_camera_world 308 | 309 | def camera_intrinsics(self, T_camera_world, f, cx, cy): 310 | """Generate shifted camera intrinsics to simulate cropping. 311 | """ 312 | # form intrinsics 313 | camera_intr = CameraIntrinsics(self.frame, fx=f, fy=f, 314 | cx=cx, cy=cy, skew=0.0, 315 | height=self.im_height, width=self.im_width) 316 | 317 | return camera_intr 318 | 319 | def sample(self, size=1): 320 | """Sample random variables from the model. 
321 | 322 | Parameters 323 | ---------- 324 | size : int 325 | number of sample to take 326 | 327 | Returns 328 | ------- 329 | :obj:`list` of :obj:`CameraSample` 330 | sampled camera intrinsics and poses 331 | """ 332 | samples = [] 333 | for i in range(size): 334 | # sample camera params 335 | focal = self.focal_rv.rvs(size=1)[0] 336 | cx = self.cx_rv.rvs(size=1)[0] 337 | cy = self.cy_rv.rvs(size=1)[0] 338 | 339 | # sample viewsphere params 340 | radius = self.rad_rv.rvs(size=1)[0] 341 | elev = self.elev_rv.rvs(size=1)[0] 342 | az = self.az_rv.rvs(size=1)[0] 343 | roll = self.roll_rv.rvs(size=1)[0] 344 | 345 | # sample plane translation 346 | tx = self.tx_rv.rvs(size=1)[0] 347 | ty = self.ty_rv.rvs(size=1)[0] 348 | 349 | logging.debug('Sampled') 350 | 351 | logging.debug('focal: %.3f' %(focal)) 352 | logging.debug('cx: %.3f' %(cx)) 353 | logging.debug('cy: %.3f' %(cy)) 354 | 355 | logging.debug('radius: %.3f' %(radius)) 356 | logging.debug('elev: %.3f' %(elev)) 357 | logging.debug('az: %.3f' %(az)) 358 | logging.debug('roll: %.3f' %(roll)) 359 | 360 | logging.debug('tx: %.3f' %(tx)) 361 | logging.debug('ty: %.3f' %(ty)) 362 | 363 | # convert to pose and intrinsics 364 | T_camera_world = self.camera_to_world_pose(radius, elev, az, roll, tx, ty) 365 | camera_shifted_intr = self.camera_intrinsics(T_camera_world, 366 | focal, cx, cy) 367 | camera_sample = CameraSample(T_camera_world, 368 | camera_shifted_intr, 369 | radius, elev, az, roll, tx=tx, ty=ty, 370 | focal=focal, cx=cx, cy=cy) 371 | 372 | # convert to camera pose 373 | samples.append(camera_sample) 374 | 375 | # not a list if only 1 sample 376 | if size == 1: 377 | return samples[0] 378 | return samples 379 | 380 | class UniformPlanarWorksurfaceImageRandomVariable(RandomVariable): 381 | """Random variable for sampling images from a camera positioned about an object on a table. 
382 | """ 383 | 384 | def __init__(self, object_name, scene, render_modes, frame, config, num_prealloc_samples=0): 385 | """Initialize a UniformPlanarWorksurfaceImageRandomVariable. 386 | 387 | Parameters 388 | ---------- 389 | object_name : str 390 | The name of the object to render views about 391 | scene : Scene 392 | The scene to be rendered which contains the target object. 393 | render_modes : list of perception.RenderMode 394 | A list of RenderModes that indicate the wrapped images to return. 395 | frame : str 396 | The name of the camera's frame of reference. 397 | config : autolab_core.YamlConfig 398 | A configuration containing parameters of the random variable. 399 | num_prealloc_samples : int 400 | Number of preallocated samples. 401 | 402 | Notes 403 | ----- 404 | Required parameters of config are specified in Other Parameters. 405 | 406 | Other Parameters 407 | ---------------- 408 | focal_length : Focal length of the camera 409 | min : float 410 | max : float 411 | delta_optical_center: Change in optical center from neutral. 412 | min : float 413 | max : float 414 | radius: Distance from camera to world origin. 415 | min : float 416 | max : float 417 | azimuth: Azimuth (angle from x-axis) of camera in degrees. 418 | min : float 419 | max : float 420 | elevation: Elevation (angle from z-axis) of camera in degrees. 421 | min : float 422 | max : float 423 | roll: Roll (angle about view direction) of camera in degrees. 424 | min : float 425 | max : float 426 | x: Translation of world center in x axis. 427 | min : float 428 | max : float 429 | y: Translation of world center in y axis. 430 | min : float 431 | max : float 432 | im_height : float Height of image in pixels. 433 | im_width : float Width of image in pixels. 
434 | """ 435 | # read params 436 | self.object_name = object_name 437 | self.scene = scene 438 | self.render_modes = render_modes 439 | self.frame = frame 440 | self.config = config 441 | self.num_prealloc_samples = num_prealloc_samples 442 | 443 | # init random variables 444 | self.ws_rv = UniformPlanarWorksurfaceRandomVariable(self.frame, self.config, num_prealloc_samples=self.num_prealloc_samples) 445 | 446 | RandomVariable.__init__(self, self.num_prealloc_samples) 447 | 448 | def sample(self, size=1, front_and_back=False): 449 | """ Sample random variables from the model. 450 | 451 | Parameters 452 | ---------- 453 | size : int 454 | Number of samples to take 455 | front_and_back : bool 456 | If True, all normals are treated as facing the camera 457 | 458 | Returns 459 | ------- 460 | list of RenderSample 461 | A list of samples of renders taken with random camera poses about the scene. 462 | If size was 1, returns a single sample rather than a list. 463 | """ 464 | # Save scene's original camera 465 | orig_camera = self.scene.camera 466 | 467 | obj_xy = np.array(self.scene.objects[self.object_name].T_obj_world.translation) 468 | obj_xy[2] = 0.0 469 | 470 | samples = [] 471 | for i in range(size): 472 | # sample camera params 473 | camera_sample = self.ws_rv.sample(size=1) 474 | 475 | # Compute the camera-to-world transform from the object-to-camera transform 476 | T_camera_world = camera_sample.camera_to_world_pose 477 | T_camera_world.translation += obj_xy 478 | 479 | # Set the scene's camera 480 | camera = VirtualCamera(camera_sample.camera_intr, T_camera_world) 481 | self.scene.camera = camera 482 | 483 | # Render the scene and grab the appropriate wrapped images 484 | images = self.scene.wrapped_render(self.render_modes, front_and_back=front_and_back) 485 | 486 | # If a segmask was requested, re-render the scene after disabling all other objects. 
487 | seg_image = None 488 | if RenderMode.SEGMASK in self.render_modes: 489 | # Disable every object that isn't the target 490 | for obj in self.scene.objects.keys(): 491 | if obj != self.object_name: 492 | self.scene.objects[obj].enabled = False 493 | 494 | # Compute the Seg Image 495 | seg_image = self.scene.wrapped_render([RenderMode.SEGMASK], front_and_back=front_and_back)[0] 496 | 497 | # Re-enable every object 498 | for obj in self.scene.objects.keys(): 499 | self.scene.objects[obj].enabled = True 500 | 501 | renders = { m : i for m, i in zip(self.render_modes, images) } 502 | if seg_image: 503 | renders[RenderMode.SEGMASK] = seg_image 504 | 505 | samples.append(RenderSample(renders, camera_sample)) 506 | 507 | self.scene.camera = orig_camera 508 | 509 | # not a list if only 1 sample 510 | if size == 1: 511 | return samples[0] 512 | return samples 513 | -------------------------------------------------------------------------------- /meshrender/render.py: -------------------------------------------------------------------------------- 1 | import ctypes 2 | import numpy as np 3 | import weakref 4 | import os 5 | 6 | _USE_EGL_OFFSCREEN = False 7 | if 'MESHRENDER_EGL_OFFSCREEN' in os.environ: 8 | os.environ['PYOPENGL_PLATFORM'] = 'egl' 9 | _USE_EGL_OFFSCREEN = True 10 | 11 | try: 12 | import OpenGL 13 | from OpenGL.GL import * 14 | from OpenGL.GL import shaders 15 | from OpenGL.arrays import * 16 | except Exception: 17 | import logging 18 | logging.warning('Cannot import OpenGL -- rendering will be broken!') 19 | 20 | from .constants import MAX_N_LIGHTS 21 | from .light import AmbientLight, PointLight, DirectionalLight 22 | from .shaders import vertex_shader, fragment_shader, depth_vertex_shader, depth_fragment_shader 23 | from .scene_object import InstancedSceneObject 24 | 25 | # Create static c_void_p objects to avoid leaking memory 26 | C_VOID_PS = [] 27 | for i in range(5): 28 | C_VOID_PS.append(ctypes.c_void_p(4*4*i)) 29 | 30 | class 
OpenGLRenderer(object): 31 | """An OpenGL 3.0+ renderer, based on PyOpenGL. 32 | """ 33 | 34 | def __init__(self, scene): 35 | """Initialize a renderer for a given scene. 36 | 37 | Parameters 38 | ---------- 39 | scene : Scene 40 | A scene description. 41 | """ 42 | self.scene = scene 43 | self._width = self.scene.camera.intrinsics.width 44 | self._height = self.scene.camera.intrinsics.height 45 | self._vaids = None 46 | self._colorbuf, self._depthbuf = None, None 47 | self._framebuf = None 48 | 49 | # Initialize the OpenGL context 50 | self._init_gl_context() 51 | 52 | # Bind the frame buffer for offscreen rendering 53 | self._bind_frame_buffer() 54 | 55 | # Use the depth test functionality of OpenGL. Don't clip -- many normals may be backwards. 56 | glEnable(GL_DEPTH_TEST) 57 | glDepthMask(GL_TRUE) 58 | glDepthFunc(GL_LESS) 59 | glDepthRange(0.0, 1.0) 60 | 61 | # Load the meshes into VAO's 62 | self._buffers = None 63 | self._vaids = self._load_meshes() 64 | 65 | # Load the shaders 66 | # Fix for pyopengl -- bind a framebuffer 67 | glBindVertexArray(self._vaids[0]) 68 | self._full_shader = self._load_shaders(vertex_shader, fragment_shader) 69 | self._depth_shader = self._load_shaders(depth_vertex_shader, depth_fragment_shader) 70 | glBindVertexArray(0) 71 | 72 | 73 | def _init_gl_context(self): 74 | if _USE_EGL_OFFSCREEN: 75 | self._init_egl() 76 | else: 77 | self._init_pyglet() 78 | 79 | 80 | def _make_gl_context_current(self): 81 | if not _USE_EGL_OFFSCREEN: 82 | if self._window: 83 | self._window.switch_to() 84 | 85 | 86 | def _init_pyglet(self): 87 | import pyglet 88 | pyglet.options['shadow_window'] = False 89 | 90 | self._window = None 91 | conf = pyglet.gl.Config( 92 | depth_size=24, 93 | double_buffer=True, 94 | major_version=3, 95 | minor_version=2 96 | ) 97 | try: 98 | self._window = pyglet.window.Window(config=conf, visible=False, 99 | resizable=False, width=1, height=1) 100 | except Exception as e: 101 | raise ValueError('Failed to initialize Pyglet 
window with an OpenGL >= 3+ context. ' \ 102 | 'If you\'re logged in via SSH, ensure that you\'re running your script ' \ 103 | 'with vglrun (i.e. VirtualGL). Otherwise, the internal error message was: ' \ 104 | '"{}"'.format(e.message)) 105 | 106 | def _init_egl(self): 107 | from OpenGL.EGL import EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, EGL_BLUE_SIZE, \ 108 | EGL_RED_SIZE, EGL_GREEN_SIZE, EGL_DEPTH_SIZE, \ 109 | EGL_COLOR_BUFFER_TYPE, EGL_RGB_BUFFER, EGL_HEIGHT, \ 110 | EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT, EGL_CONFORMANT, \ 111 | EGL_OPENGL_BIT, EGL_CONFIG_CAVEAT, EGL_NONE, \ 112 | EGL_DEFAULT_DISPLAY, EGL_NO_CONTEXT, EGL_WIDTH, \ 113 | EGL_OPENGL_API, \ 114 | eglGetDisplay, eglInitialize, eglChooseConfig, \ 115 | eglBindAPI, eglCreatePbufferSurface, \ 116 | eglCreateContext, eglMakeCurrent, EGLConfig 117 | 118 | self._egl_display = None 119 | self._egl_surface = None 120 | self._egl_context = None 121 | 122 | config_attributes = arrays.GLintArray.asArray([ 123 | EGL_SURFACE_TYPE, EGL_PBUFFER_BIT, 124 | EGL_BLUE_SIZE, 8, 125 | EGL_RED_SIZE, 8, 126 | EGL_GREEN_SIZE, 8, 127 | EGL_DEPTH_SIZE, 24, 128 | EGL_COLOR_BUFFER_TYPE, EGL_RGB_BUFFER, 129 | EGL_RENDERABLE_TYPE, EGL_OPENGL_BIT, 130 | EGL_CONFORMANT, EGL_OPENGL_BIT, 131 | EGL_NONE 132 | ]) 133 | major, minor = ctypes.c_long(), ctypes.c_long() 134 | num_configs = ctypes.c_long() 135 | configs = (EGLConfig*1)() 136 | 137 | # Cache DISPLAY if necessary and get an off-screen EGL display 138 | orig_dpy = None 139 | if 'DISPLAY' in os.environ: 140 | orig_dpy = os.environ['DISPLAY'] 141 | del os.environ['DISPLAY'] 142 | self._egl_display = eglGetDisplay(EGL_DEFAULT_DISPLAY) 143 | if orig_dpy is not None: 144 | os.environ['DISPLAY'] = orig_dpy 145 | 146 | # Initialize EGL 147 | eglInitialize(self._egl_display, major, minor) 148 | eglChooseConfig(self._egl_display, config_attributes, configs, 1, num_configs) 149 | 150 | # Bind EGL to the OpenGL API 151 | eglBindAPI(EGL_OPENGL_API) 152 | 153 | # Create an EGL pbuffer 154 | 
self._egl_surface = eglCreatePbufferSurface(self._egl_display, configs[0], 155 | [EGL_WIDTH, self._width, EGL_HEIGHT, self._height, EGL_NONE]) 156 | 157 | # Create an EGL context 158 | self._egl_context = eglCreateContext(self._egl_display, configs[0], EGL_NO_CONTEXT, None) 159 | 160 | # Make the EGL context current 161 | eglMakeCurrent(self._egl_display, self._egl_surface, self._egl_surface, self._egl_context) 162 | 163 | @property 164 | def scene(self): 165 | return self._scene() 166 | 167 | @scene.setter 168 | def scene(self, s): 169 | self._scene = weakref.ref(s) 170 | 171 | def render(self, render_color=True, front_and_back=False): 172 | """Render raw images of the scene. 173 | 174 | Parameters 175 | ---------- 176 | render_color : bool 177 | If True, both a color and a depth image are returned. 178 | If False, only a depth image is returned. 179 | 180 | front_and_back : bool 181 | If True, all normals are treated as facing the camera. 182 | 183 | Returns 184 | ------- 185 | tuple of (h, w, 3) uint8, (h, w) float32 186 | A raw RGB color image with pixel values in [0, 255] and a depth image 187 | with true depths expressed as floats. If render_color was False, 188 | only the depth image is returned. 189 | 190 | Note 191 | ----- 192 | This function can be called repeatedly, regardless of changes to the scene 193 | (i.e. moving SceneObjects, adding and removing lights, moving the camera). 194 | However, adding or removing objects causes a new OpenGL context to be created, 195 | so put all the objects in the scene before calling it. 196 | 197 | Note 198 | ---- 199 | Values listed as 0.0 in the depth image are actually at infinity 200 | (i.e. no object present at that pixel). 
201 | """ 202 | self._make_gl_context_current() 203 | 204 | # Reload the frame buffers if the width or height of the camera changed 205 | width = self.scene.camera.intrinsics.width 206 | height = self.scene.camera.intrinsics.height 207 | if width != self._width or height != self._height: 208 | self._width = width 209 | self._height = height 210 | self._bind_frame_buffer() 211 | 212 | if render_color: 213 | return self._color_and_depth(front_and_back) 214 | else: 215 | return self._depth() 216 | 217 | def close(self): 218 | """Destroy the OpenGL context attached to this renderer. 219 | 220 | Warning 221 | ------- 222 | Once this has been called, the OpenGLRenderer object should be discarded. 223 | """ 224 | # Delete shaders 225 | if self._full_shader: 226 | glDeleteProgram(self._full_shader) 227 | self._full_shader = None 228 | if self._depth_shader: 229 | glDeleteProgram(self._depth_shader) 230 | self._depth_shader = None 231 | 232 | # Delete all mesh geometry 233 | if self._buffers: 234 | glDeleteBuffers(len(self._buffers), self._buffers) 235 | self._buffers = None 236 | 237 | # Delete framebuffers and renderbuffers 238 | if self._colorbuf and self._depthbuf: 239 | glDeleteRenderbuffers(2, [self._colorbuf, self._depthbuf]) 240 | self._colorbuf = None 241 | self._depthbuf = None 242 | 243 | if self._framebuf: 244 | glDeleteFramebuffers(1, [self._framebuf]) 245 | self._framebuf = None 246 | 247 | OpenGL.contextdata.cleanupContext() 248 | if _USE_EGL_OFFSCREEN: 249 | from OpenGL.EGL import eglDestroySurface, eglDestroyContext, eglTerminate 250 | if self._egl_display is not None: 251 | if self._egl_context is not None: 252 | eglDestroyContext(self._egl_display, self._egl_context) 253 | self._egl_context = None 254 | if self._egl_surface: 255 | eglDestroySurface(self._egl_display, self._egl_surface) 256 | self._egl_surface = None 257 | eglTerminate(self._egl_display) 258 | self._egl_display = None 259 | else: 260 | if self._window is not None: 261 | try: 262 | 
self._window.context.destroy() 263 | self._window.close() 264 | except: 265 | pass 266 | self._window = None 267 | 268 | def _bind_frame_buffer(self): 269 | """Bind the frame buffer for offscreen rendering. 270 | """ 271 | # Release the color and depth buffers if they exist: 272 | if self._framebuf is not None: 273 | glDeleteRenderbuffers(2, [self._colorbuf, self._depthbuf]) 274 | glDeleteFramebuffers(1, [self._framebuf]) 275 | 276 | # Initialize the Framebuffer into which we will perform off-screen rendering 277 | self._colorbuf, self._depthbuf = glGenRenderbuffers(2) 278 | glBindRenderbuffer(GL_RENDERBUFFER, self._colorbuf) 279 | glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA, self._width, self._height) 280 | glBindRenderbuffer(GL_RENDERBUFFER, self._depthbuf) 281 | glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, self._width, self._height) 282 | 283 | self._framebuf = glGenFramebuffers(1) 284 | glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._framebuf) 285 | glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, self._colorbuf) 286 | glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, self._depthbuf) 287 | 288 | def _load_shaders(self, vertex_shader, fragment_shader): 289 | """Load and compile shaders from strings. 290 | """ 291 | shader = shaders.compileProgram( 292 | shaders.compileShader(vertex_shader, GL_VERTEX_SHADER), 293 | shaders.compileShader(fragment_shader, GL_FRAGMENT_SHADER) 294 | ) 295 | 296 | return shader 297 | 298 | def _load_meshes(self): 299 | """Load the scene's meshes into vertex buffers. 
300 | """ 301 | VA_ids = glGenVertexArrays(len(self.scene.objects)) 302 | self._buffers = [] 303 | 304 | if len(self.scene.objects) == 1: 305 | VA_ids = [VA_ids] 306 | 307 | null = C_VOID_PS[0] 308 | for VA_id, obj in zip(VA_ids, self.scene.objects.values()): 309 | mesh = obj.mesh 310 | material = obj.material 311 | 312 | glBindVertexArray(VA_id) 313 | 314 | if material.smooth: 315 | # If smooth is True, we use indexed element arrays and set only one normal per vertex. 316 | 317 | # Set up the vertex VBO 318 | vertexbuffer = glGenBuffers(1) 319 | glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer) 320 | glEnableVertexAttribArray(0) 321 | glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, null) 322 | glBufferData(GL_ARRAY_BUFFER, 323 | 4*3*len(mesh.vertices), 324 | np.array(mesh.vertices.flatten(), dtype=np.float32), 325 | GL_STATIC_DRAW) 326 | 327 | # Set up the normal VBO 328 | normalbuffer = glGenBuffers(1) 329 | glBindBuffer(GL_ARRAY_BUFFER, normalbuffer) 330 | glEnableVertexAttribArray(1) 331 | glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, null) 332 | glBufferData(GL_ARRAY_BUFFER, 333 | 4*3*len(mesh.vertex_normals), 334 | np.array(mesh.vertex_normals.flatten(), dtype=np.float32), 335 | GL_STATIC_DRAW) 336 | 337 | # Set up the element index buffer 338 | elementbuffer = glGenBuffers(1) 339 | glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer) 340 | glBufferData(GL_ELEMENT_ARRAY_BUFFER, 341 | 4*3*len(mesh.faces), 342 | np.array(mesh.faces.flatten(), dtype=np.int32), 343 | GL_STATIC_DRAW) 344 | self._buffers.extend([vertexbuffer, elementbuffer, normalbuffer]) 345 | else: 346 | # If smooth is False, we treat each triangle independently 347 | # and set vertex normals to corresponding face normals. 
348 | 349 | # Set up the vertices 350 | vertexbuffer = glGenBuffers(1) 351 | glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer) 352 | glEnableVertexAttribArray(0) 353 | glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, null) 354 | glBufferData(GL_ARRAY_BUFFER, 355 | 4*3*3*len(mesh.triangles), 356 | np.array(mesh.triangles.flatten(), dtype=np.float32), 357 | GL_STATIC_DRAW) 358 | 359 | # Set up the normals 360 | normalbuffer = glGenBuffers(1) 361 | glBindBuffer(GL_ARRAY_BUFFER, normalbuffer) 362 | glEnableVertexAttribArray(1) 363 | glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, null) 364 | normals = np.repeat(mesh.face_normals, 3, axis=0).astype(np.float32) 365 | normals = normals.flatten() 366 | glBufferData(GL_ARRAY_BUFFER, 367 | 4*len(normals), 368 | normals, 369 | GL_STATIC_DRAW) 370 | 371 | self._buffers.extend([vertexbuffer, normalbuffer]) 372 | 373 | glVertexAttribDivisor(0, 0) 374 | glVertexAttribDivisor(1, 0) 375 | 376 | # Set up model matrix buffer 377 | modelbuf = glGenBuffers(1) 378 | self._buffers.extend([modelbuf]) 379 | glBindBuffer(GL_ARRAY_BUFFER, modelbuf) 380 | for i in range(4): 381 | glEnableVertexAttribArray(2 + i) 382 | glVertexAttribPointer(2 + i, 4, GL_FLOAT, GL_FALSE, 4*16, C_VOID_PS[i]) 383 | glVertexAttribDivisor(2 + i, 1) 384 | 385 | if isinstance(obj, InstancedSceneObject): 386 | glBufferData(GL_ARRAY_BUFFER, 4*16*len(obj.poses), None, GL_STATIC_DRAW) 387 | data = obj.raw_pose_data.flatten().astype(np.float32) 388 | glBufferSubData(GL_ARRAY_BUFFER, 0, 4*16*len(obj.poses), data) 389 | else: 390 | glBufferData(GL_ARRAY_BUFFER, 4*16, None, GL_STATIC_DRAW) 391 | glBufferSubData(GL_ARRAY_BUFFER, 0, 4*16, np.eye(4).flatten().astype(np.float32)) 392 | 393 | # Set up color buffer 394 | colorbuf = glGenBuffers(1) 395 | self._buffers.extend([colorbuf]) 396 | glBindBuffer(GL_ARRAY_BUFFER, colorbuf) 397 | glEnableVertexAttribArray(6) 398 | glVertexAttribPointer(6, 3, GL_FLOAT, GL_FALSE, 0, C_VOID_PS[0]) 399 | glVertexAttribDivisor(6, 1) 400 | 401 | 
if isinstance(obj, InstancedSceneObject): 402 | glBufferData(GL_ARRAY_BUFFER, 4*3*len(obj.colors), None, GL_STATIC_DRAW) 403 | data = obj.colors.flatten().astype(np.float32) 404 | glBufferSubData(GL_ARRAY_BUFFER, 0, 4*3*len(obj.colors), data) 405 | else: 406 | glBufferData(GL_ARRAY_BUFFER, 4*3, None, GL_STATIC_DRAW) 407 | glBufferSubData(GL_ARRAY_BUFFER, 0, 4*3, obj.material.color.astype(np.float32)) 408 | 409 | # Unbind all buffers 410 | glBindVertexArray(0) 411 | glBindBuffer(GL_ARRAY_BUFFER, 0) 412 | glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0) 413 | 414 | return VA_ids 415 | 416 | def _depth(self): 417 | """Render a depth image of the scene. 418 | """ 419 | camera = self.scene.camera 420 | width = camera.intrinsics.width 421 | height = camera.intrinsics.height 422 | 423 | glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._framebuf) 424 | glViewport(0, 0, width, height) 425 | 426 | glClearColor(0.0, 0.0, 0.0, 1.0) 427 | glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) 428 | 429 | glUseProgram(self._depth_shader) 430 | 431 | # Get Uniform Locations from Shader 432 | v_id = glGetUniformLocation(self._depth_shader, 'V') 433 | p_id = glGetUniformLocation(self._depth_shader, 'P') 434 | m_id = glGetUniformLocation(self._depth_shader, 'M') 435 | 436 | glUniformMatrix4fv(v_id, 1, GL_TRUE, camera.V) 437 | glUniformMatrix4fv(p_id, 1, GL_TRUE, camera.P) 438 | 439 | for vaid, obj in zip(self._vaids, self.scene.objects.values()): 440 | if not obj.enabled: 441 | continue 442 | material = obj.material 443 | mesh = obj.mesh 444 | 445 | glUniformMatrix4fv(m_id, 1, GL_TRUE, obj.T_obj_world.matrix) 446 | 447 | glBindVertexArray(vaid) 448 | 449 | n_instances = 1 450 | if isinstance(obj, InstancedSceneObject): 451 | n_instances = obj.n_instances 452 | 453 | if material.smooth: 454 | glDrawElementsInstanced(GL_TRIANGLES, 3*len(mesh.faces), GL_UNSIGNED_INT, C_VOID_PS[0], n_instances) 455 | else: 456 | glDrawArraysInstanced(GL_TRIANGLES, 0, 3*len(mesh.faces), n_instances) 457 | 458 | 
glBindVertexArray(0) 459 | 460 | glUseProgram(0) 461 | 462 | glFlush() 463 | 464 | # Extract the z buffer 465 | glBindFramebuffer(GL_READ_FRAMEBUFFER, self._framebuf) 466 | depth_buf = (GLfloat * (width * height))(0) 467 | glReadPixels(0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT, depth_buf) 468 | 469 | # Re-format it into a numpy array 470 | depth_im = np.frombuffer(depth_buf, dtype=np.float32).reshape((height, width)) 471 | depth_im = np.flip(depth_im, axis=0) 472 | inf_inds = (depth_im == 1.0) 473 | depth_im = 2.0 * depth_im - 1.0 474 | z_near, z_far = camera.z_near, camera.z_far 475 | depth_im = 2.0 * z_near * z_far / (z_far + z_near - depth_im * (z_far - z_near)) 476 | depth_im[inf_inds] = 0.0 477 | 478 | return depth_im 479 | 480 | def _color_and_depth(self, front_and_back): 481 | """Render a color image and a depth image of the scene. 482 | """ 483 | scene = self.scene 484 | camera = scene.camera 485 | width = camera.intrinsics.width 486 | height = camera.intrinsics.height 487 | 488 | glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._framebuf) 489 | glViewport(0, 0, width, height) 490 | 491 | glClearColor(.93, .93, 1, 1) 492 | glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) 493 | 494 | glUseProgram(self._full_shader) 495 | 496 | # Get Uniform Locations from Shader 497 | p_id = glGetUniformLocation(self._full_shader, 'P') 498 | v_id = glGetUniformLocation(self._full_shader, 'V') 499 | m_id = glGetUniformLocation(self._full_shader, 'M') 500 | matprop_id = glGetUniformLocation(self._full_shader, 'material_properties') 501 | ambient_id = glGetUniformLocation(self._full_shader, 'ambient_light_info') 502 | directional_id = glGetUniformLocation(self._full_shader, "directional_light_info") 503 | n_directional_id = glGetUniformLocation(self._full_shader, "n_directional_lights") 504 | point_id = glGetUniformLocation(self._full_shader, "point_light_info") 505 | n_point_id = glGetUniformLocation(self._full_shader, "n_point_lights") 506 | front_and_back_id = 
glGetUniformLocation(self._full_shader, "front_and_back") 507 | 508 | # Bind bad normals id 509 | glUniform1i(front_and_back_id, int(front_and_back)) 510 | 511 | # Bind view matrix 512 | glUniformMatrix4fv(v_id, 1, GL_TRUE, scene.camera.V) 513 | glUniformMatrix4fv(p_id, 1, GL_TRUE, scene.camera.P) 514 | 515 | # Bind ambient lighting 516 | glUniform4fv(ambient_id, 1, np.hstack((scene.ambient_light.color, 517 | scene.ambient_light.strength))) 518 | 519 | # Bind directional lighting 520 | glUniform1i(n_directional_id, len(scene.directional_lights)) 521 | directional_info = np.zeros((2*MAX_N_LIGHTS, 4)) 522 | for i, dlight in enumerate(scene.directional_lights): 523 | directional_info[2*i,:] = np.hstack((dlight.color, dlight.strength)) 524 | directional_info[2*i+1,:] = np.hstack((dlight.direction, 0)) 525 | glUniform4fv(directional_id, 2*MAX_N_LIGHTS, directional_info.flatten()) 526 | 527 | # Bind point lighting 528 | glUniform1i(n_point_id, len(scene.point_lights)) 529 | point_info = np.zeros((2*MAX_N_LIGHTS, 4)) 530 | for i, plight in enumerate(scene.point_lights): 531 | point_info[2*i,:] = np.hstack((plight.color, plight.strength)) 532 | point_info[2*i+1,:] = np.hstack((plight.location, 1)) 533 | glUniform4fv(point_id, 2*MAX_N_LIGHTS, point_info.flatten()) 534 | 535 | for vaid, obj in zip(self._vaids, scene.objects.values()): 536 | if not obj.enabled: 537 | continue 538 | 539 | mesh = obj.mesh 540 | material = obj.material 541 | 542 | glBindVertexArray(vaid) 543 | 544 | glUniformMatrix4fv(m_id, 1, GL_TRUE, obj.T_obj_world.matrix) 545 | glUniform4fv(matprop_id, 1, np.array([material.k_a, material.k_d, material.k_s, material.alpha])) 546 | 547 | if material.wireframe: 548 | glPolygonMode(GL_FRONT_AND_BACK, GL_LINE) 549 | else: 550 | glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) 551 | 552 | n_instances = 1 553 | if isinstance(obj, InstancedSceneObject): 554 | n_instances = obj.n_instances 555 | 556 | if material.smooth: 557 | glDrawElementsInstanced(GL_TRIANGLES, 
3*len(mesh.faces), GL_UNSIGNED_INT, C_VOID_PS[0], n_instances) 558 | else: 559 | glDrawArraysInstanced(GL_TRIANGLES, 0, 3*len(mesh.faces), n_instances) 560 | 561 | glBindVertexArray(0) 562 | 563 | glUseProgram(0) 564 | 565 | glFlush() 566 | 567 | # Extract the color and depth buffers 568 | glBindFramebuffer(GL_READ_FRAMEBUFFER, self._framebuf) 569 | color_buf = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE) 570 | depth_buf = glReadPixels(0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT) 571 | 572 | # Re-format them into numpy arrays 573 | color_im = np.frombuffer(color_buf, dtype=np.uint8).reshape((height, width, 3)) 574 | color_im = np.flip(color_im, axis=0) 575 | 576 | depth_im = np.frombuffer(depth_buf, dtype=np.float32).reshape((height, width)) 577 | depth_im = np.flip(depth_im, axis=0) 578 | inf_inds = (depth_im == 1.0) 579 | depth_im = 2.0 * depth_im - 1.0 580 | z_near, z_far = camera.z_near, camera.z_far 581 | depth_im = 2.0 * z_near * z_far / (z_far + z_near - depth_im * (z_far - z_near)) 582 | depth_im[inf_inds] = 0.0 583 | 584 | return color_im, depth_im 585 | 586 | def __del__(self): 587 | self.close() 588 | 589 | -------------------------------------------------------------------------------- /meshrender/scene.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from perception import BinaryImage, ColorImage, DepthImage, RgbdImage, GdImage, RenderMode 4 | 5 | from .camera import VirtualCamera 6 | from .scene_object import SceneObject 7 | from .material import MaterialProperties 8 | from .light import AmbientLight, PointLight, DirectionalLight 9 | from .constants import MAX_N_LIGHTS 10 | from .render import OpenGLRenderer 11 | 12 | class Scene(object): 13 | """A scene containing objects and lights for 3D OpenGL rendering. 14 | """ 15 | 16 | def __init__(self, background_color=np.array([1.0, 1.0, 1.0]), 17 | camera=None): 18 | """Initialize a Scene object. 
19 | 20 | Parameters 21 | ---------- 22 | background_color : (3,) float 23 | The background color for the scene. 24 | camera : :obj:`VirtualCamera` 25 | Camera to use for rendering 26 | """ 27 | self._objects = {} 28 | self._lights = {} 29 | self._ambient_light = AmbientLight(np.array([0.,0.,0.]), 0.0) 30 | self._background_color = background_color 31 | 32 | self._renderer = None 33 | self.camera = camera 34 | 35 | @property 36 | def background_color(self): 37 | """(3,) float: The background color for the scene. 38 | """ 39 | return self._background_color 40 | 41 | @background_color.setter 42 | def background_color(self, bgcolor): 43 | self._background_color = bgcolor 44 | 45 | @property 46 | def objects(self): 47 | """dict: Dictionary mapping object names to their corresponding SceneObject. 48 | """ 49 | return self._objects 50 | 51 | @property 52 | def lights(self): 53 | """dict: Dictionary mapping light names to their corresponding PointLight or DirectionalLight. 54 | 55 | Note that this doesn't include the ambient light, since only one of those can exist at a time. 56 | """ 57 | return self._lights 58 | 59 | @property 60 | def point_lights(self): 61 | """list of PointLight: The set of point lights active in the scene. 62 | """ 63 | return [x for x in self.lights.values() if isinstance(x, PointLight)] 64 | 65 | @property 66 | def directional_lights(self): 67 | """list of DirectionalLight: The set of directional lights active in the scene. 68 | """ 69 | return [x for x in self.lights.values() if isinstance(x, DirectionalLight)] 70 | 71 | @property 72 | def ambient_light(self): 73 | """AmbientLight: The ambient light active in the scene. 
74 | """
75 | return self._ambient_light
76 |
77 | @ambient_light.setter
78 | def ambient_light(self, light):
79 | if not isinstance(light, AmbientLight):
80 | raise ValueError('Scene only accepts ambient lights of type AmbientLight')
81 | self._ambient_light = light
82 |
83 | @property
84 | def camera(self):
85 | """VirtualCamera: The scene's camera (None if unassigned).
86 | """
87 | return self._camera
88 |
89 | @camera.setter
90 | def camera(self, camera):
91 | if camera is not None and not isinstance(camera, VirtualCamera):
92 | raise ValueError('camera must be an object of type VirtualCamera')
93 | self._camera = camera
94 |
95 | def add_object(self, name, obj):
96 | """Adds an object to the scene.
97 |
98 | Parameters
99 | ----------
100 | name : str
101 | An identifier for the object.
102 | obj : SceneObject
103 | A SceneObject representing the object, including its pose and material properties.
104 | """
105 | if not isinstance(obj, SceneObject):
106 | raise ValueError('obj must be an object of type SceneObject')
107 | self._objects[name] = obj
108 | self.close_renderer()
109 |
110 | def remove_object(self, name):
111 | """Removes an object from the scene.
112 |
113 | Parameters
114 | ----------
115 | name : str
116 | An identifier for the object to be removed.
117 |
118 | Raises
119 | ------
120 | ValueError
121 | If the given name was not assigned to an object in the scene.
122 | """
123 | if name in self._objects:
124 | del self._objects[name]
125 | else:
126 | raise ValueError('Object {} not in scene!'.format(name))
127 | self.close_renderer()
128 |
129 | def add_light(self, name, light):
130 | """Adds a named light to the scene.
131 |
132 | Parameters
133 | ----------
134 | name : str
135 | An identifier for the light.
136 | light : PointLight or DirectionalLight
137 | The light source to add.
138 | """ 139 | if isinstance(light, AmbientLight): 140 | raise ValueError('Set ambient light with set_ambient_light(), not with add_light()') 141 | if len(self._lights) == MAX_N_LIGHTS: 142 | raise ValueError('The maximum number of lights in a scene is capped at {}'.format(MAX_N_LIGHTS)) 143 | if not isinstance(light, PointLight) and not isinstance(light, DirectionalLight): 144 | raise ValueError('Scene only supports PointLight and DirectionalLight types') 145 | self._lights[name] = light 146 | 147 | def remove_light(self, name): 148 | """Removes a light from the scene. 149 | 150 | Parameters 151 | ---------- 152 | name : str 153 | An identifier for the light to be removed. 154 | 155 | Raises 156 | ------ 157 | ValueError 158 | If the given name was not assigned to a light in the scene. 159 | """ 160 | if name in self._lights: 161 | del self._lights[name] 162 | else: 163 | raise ValueError('Light {} not in scene!'.format(name)) 164 | 165 | def close_renderer(self): 166 | """Close the renderer. 167 | """ 168 | if self._renderer is not None: 169 | self._renderer.close() 170 | self._renderer = None 171 | 172 | def render(self, render_color=True, front_and_back=False): 173 | """Render raw images of the scene. 174 | 175 | Parameters 176 | ---------- 177 | render_color : bool 178 | If True, both a color and a depth image are returned. 179 | If False, only a depth image is returned. 180 | 181 | front_and_back : bool 182 | If True, all surface normals are treated as if they're facing the camera. 183 | 184 | Returns 185 | ------- 186 | tuple of (h, w, 3) uint8, (h, w) float32 187 | A raw RGB color image with pixel values in [0, 255] and a depth image 188 | with true depths expressed as floats. If render_color was False, 189 | only the depth image is returned. 190 | 191 | Raises 192 | ------ 193 | ValueError 194 | If the scene has no set camera. 195 | 196 | Note 197 | ----- 198 | This function can be called repeatedly, regardless of changes to the scene 199 | (i.e. 
moving SceneObjects, adding and removing lights, moving the camera). 200 | However, adding or removing objects causes a new OpenGL context to be created, 201 | so put all the objects in the scene before calling it. 202 | 203 | Note 204 | ---- 205 | Values listed as 0.0 in the depth image are actually at infinity 206 | (i.e. no object present at that pixel). 207 | """ 208 | if self._camera is None: 209 | raise ValueError('scene.camera must be set before calling render()') 210 | if self._renderer is None: 211 | self._renderer = OpenGLRenderer(self) 212 | return self._renderer.render(render_color, front_and_back=front_and_back) 213 | 214 | def wrapped_render(self, render_modes, front_and_back=False): 215 | """Render images of the scene and wrap them with Image wrapper classes 216 | from the Berkeley AUTOLab's perception module. 217 | 218 | Parameters 219 | ---------- 220 | render_modes : list of perception.RenderMode 221 | A list of the desired image types to return, from the perception 222 | module's RenderMode enum. 223 | 224 | front_and_back : bool 225 | If True, all surface normals are treated as if they're facing the camera. 226 | 227 | Returns 228 | ------- 229 | list of perception.Image 230 | A list containing a corresponding Image sub-class for each type listed 231 | in render_modes. 
232 | """ 233 | 234 | # Render raw images 235 | render_color = False 236 | for mode in render_modes: 237 | if mode != RenderMode.DEPTH and mode != RenderMode.SCALED_DEPTH: 238 | render_color = True 239 | break 240 | 241 | color_im, depth_im = None, None 242 | if render_color: 243 | color_im, depth_im = self.render(render_color, front_and_back=front_and_back) 244 | else: 245 | depth_im = self.render(render_color) 246 | 247 | # For each specified render mode, add an Image object of the appropriate type 248 | images = [] 249 | for render_mode in render_modes: 250 | # Then, convert them to an image wrapper class 251 | if render_mode == RenderMode.SEGMASK: 252 | images.append(BinaryImage((depth_im > 0.0).astype(np.uint8), frame=self.camera.intrinsics.frame, threshold=0)) 253 | 254 | elif render_mode == RenderMode.COLOR: 255 | images.append(ColorImage(color_im, frame=self.camera.intrinsics.frame)) 256 | 257 | elif render_mode == RenderMode.GRAYSCALE: 258 | images.append(ColorImage(color_im, frame=self.camera.intrinsics.frame).to_grayscale()) 259 | 260 | elif render_mode == RenderMode.DEPTH: 261 | images.append(DepthImage(depth_im, frame=self.camera.intrinsics.frame)) 262 | 263 | elif render_mode == RenderMode.SCALED_DEPTH: 264 | images.append(DepthImage(depth_im, frame=self.camera.intrinsics.frame).to_color()) 265 | 266 | elif render_mode == RenderMode.RGBD: 267 | c = ColorImage(color_im, frame=self.camera.intrinsics.frame) 268 | d = DepthImage(depth_im, frame=self.camera.intrinsics.frame) 269 | images.append(RgbdImage.from_color_and_depth(c, d)) 270 | 271 | elif render_mode == RenderMode.GD: 272 | g = ColorImage(color_im, frame=self.camera.intrinsics.frame).to_grayscale() 273 | d = DepthImage(depth_im, frame=self.camera.intrinsics.frame) 274 | images.append(GdImage.from_grayscale_and_depth(g, d)) 275 | else: 276 | raise ValueError('Render mode {} not supported'.format(render_mode)) 277 | 278 | return images 279 | 280 | def close(self): 281 | self.close_renderer() 282 | 
283 | def __del__(self): 284 | self.close() 285 | -------------------------------------------------------------------------------- /meshrender/scene_object.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | from trimesh import Trimesh 4 | from autolab_core import RigidTransform 5 | 6 | from .material import MaterialProperties 7 | 8 | class SceneObject(object): 9 | """A complete description of an object in a Scene. 10 | 11 | This includes its geometry (represented as a Trimesh), its pose in the world, 12 | and its material properties. 13 | """ 14 | 15 | def __init__(self, mesh, 16 | T_obj_world=None, 17 | material=None, 18 | enabled=True): 19 | """Initialize a scene object with the given mesh, pose, and material. 20 | 21 | Parameters 22 | ---------- 23 | mesh : trimesh.Trimesh 24 | A mesh representing the object's geometry. 25 | T_obj_world : autolab_core.RigidTransform 26 | A rigid transformation from the object's frame to the world frame. 27 | material : MaterialProperties 28 | A set of material properties for the object. 29 | enabled : bool 30 | If False, the object will not be rendered. 31 | """ 32 | if not isinstance(mesh, Trimesh): 33 | raise ValueError('mesh must be an object of type Trimesh') 34 | if T_obj_world is None: 35 | T_obj_world = RigidTransform(from_frame='obj', to_frame='world') 36 | if material is None: 37 | material = MaterialProperties() 38 | if material.smooth: 39 | mesh = mesh.smoothed() 40 | 41 | self._mesh = mesh 42 | self._material = material 43 | self.T_obj_world = T_obj_world 44 | self._enabled = True 45 | 46 | @property 47 | def enabled(self): 48 | """bool: If False, the object will not be rendered. 49 | """ 50 | return self._enabled 51 | 52 | @enabled.setter 53 | def enabled(self, enabled): 54 | self._enabled = enabled 55 | 56 | @property 57 | def mesh(self): 58 | """trimesh.Trimesh: A mesh representing the object's geometry. 
59 | """ 60 | return self._mesh 61 | 62 | @property 63 | def material(self): 64 | """MaterialProperties: A set of material properties for the object. 65 | """ 66 | return self._material 67 | 68 | @property 69 | def T_obj_world(self): 70 | """autolab_core.RigidTransform: A rigid transformation from the object's frame to the world frame. 71 | """ 72 | return self._T_obj_world 73 | 74 | @T_obj_world.setter 75 | def T_obj_world(self, T): 76 | if not isinstance(T, RigidTransform): 77 | raise ValueError('transform must be an object of type RigidTransform') 78 | self._T_obj_world = T 79 | 80 | class InstancedSceneObject(SceneObject): 81 | """A scene object which consists as a set of identical objects. 82 | """ 83 | def __init__(self, mesh, poses=None, raw_pose_data=None, colors=None, 84 | T_obj_world=None, 85 | material=None, 86 | enabled=True): 87 | """Initialize a scene object with the given mesh, pose, and material. 88 | 89 | Parameters 90 | ---------- 91 | mesh : trimesh.Trimesh 92 | A mesh representing the object's geometry. 93 | poses : list of autolab_core.RigidTransform 94 | A set of poses, one for each instance of the scene object, 95 | relative to the full object's origin. 96 | raw_pose_data : (4*n,4) float or None 97 | A numpy array containing raw pose data, where each row is a column of a point's 98 | homogenous transform matrix. If not present, poses must be present. 99 | colors : (n,3) float or None 100 | A set of colors for each instanced object. If None, the color specified in material 101 | properties is used for all instances. 102 | T_obj_world : autolab_core.RigidTransform 103 | A rigid transformation from the object's frame to the world frame. 104 | material : MaterialProperties 105 | A set of material properties for the object. 106 | enabled : bool 107 | If False, the object will not be rendered. 
        """

        super(InstancedSceneObject, self).__init__(mesh, T_obj_world, material, enabled)
        self._poses = poses
        self._raw_pose_data = raw_pose_data

        # If raw pose data wasn't given directly, flatten the RigidTransform list
        # into a (4*n, 4) array; each pose is stored transposed so it can be
        # uploaded directly as a per-instance mat4 vertex attribute.
        if self._raw_pose_data is None:
            if self._poses is None:
                raise ValueError('Either poses or raw_pose_data must be specified')
            self._raw_pose_data = np.zeros((4*len(self._poses), 4))
            for i, pose in enumerate(self._poses):
                self._raw_pose_data[i*4:(i+1)*4,:] = pose.matrix.T

        self._n_instances = self._raw_pose_data.shape[0] // 4

        # Default every instance to the material's base color.
        self._colors = colors
        if self._colors is None:
            self._colors = np.tile(material.color, (self._n_instances,1))

    @property
    def poses(self):
        """list of autolab_core.RigidTransform: A set of poses for each instance relative to the object's origin.
        """
        return self._poses

    @property
    def raw_pose_data(self):
        """(4*n,4) float: Raw data for pose matrices.

        Each consecutive group of four rows is one transposed 4x4 pose matrix.
        """
        return self._raw_pose_data

    @property
    def colors(self):
        """(n,3) float: The color of each instance.
        """
        return self._colors

    @property
    def n_instances(self):
        """int: The number of instances of this object.
        """
        return self._n_instances
--------------------------------------------------------------------------------
/meshrender/shaders.py:
--------------------------------------------------------------------------------
'''Shaders for pairing with the renderer.
'''

# Vertex shader for depth-only rendering: applies the full instanced
# model-view-projection transform and emits a constant white color.
depth_vertex_shader = '''#version 330 core

// Input vertex data
layout(location = 0) in vec3 vertex_position_m;
layout(location = 2) in mat4 inst_M;

// Output data
out vec4 color;

// Values that stay constant for the whole mesh.
uniform mat4 P;
uniform mat4 V;
uniform mat4 M;

void main(){
    gl_Position = P * V * M * inst_M * vec4(vertex_position_m, 1);

    color = vec4(1.0);
}
'''

# Trivial pass-through fragment shader paired with depth_vertex_shader.
depth_fragment_shader = '''#version 330 core

// Interpolated values from the vertex shaders
in vec4 color;

out vec4 frag_color;

void main(){
    frag_color = color;
}
'''

# Main vertex shader: transforms positions/normals into eye space and passes
# the per-instance color through for Phong shading in the fragment stage.
vertex_shader = '''#version 330 core

// Input vertex data
layout(location = 0) in vec3 vertex_position_m;
layout(location = 1) in vec3 vertex_normal_m;
layout(location = 2) in mat4 inst_M;
layout(location = 6) in vec3 object_color;

// Output data
out vec4 color;
out vec4 position;
out vec3 normal;

// Values that stay constant for the whole mesh.
uniform mat4 P;
uniform mat4 V;
uniform mat4 M;

void main(){
    mat4 MV = V * M * inst_M;
    mat4 MVP = P * MV;
    gl_Position = MVP * vec4(vertex_position_m, 1);

    color = vec4(object_color, 1.0);
    position = MV * vec4(vertex_position_m, 1);
    normal = normalize(MV * vec4(vertex_normal_m, 0)).xyz;
}
'''

# Main fragment shader: Phong lighting with ambient, directional, and point
# lights (up to MAX_N_LIGHTS each; light info packed two vec4s per light).
fragment_shader = '''#version 330 core

const int MAX_N_LIGHTS = 10;

// Interpolated values from the vertex shaders
in vec4 color;
in vec4 position;
in vec3 normal;

out vec4 frag_color;

uniform vec4 material_properties;
uniform vec4 ambient_light_info;
uniform int n_point_lights;
uniform int n_directional_lights;
uniform vec4 point_light_info[2*MAX_N_LIGHTS];
uniform vec4 directional_light_info[2*MAX_N_LIGHTS];
uniform mat4 V;
uniform bool front_and_back;

void main(){

    // Extract material properties
    float k_a = material_properties[0]; // Ambient reflection constant
    float k_d = material_properties[1]; // Diffuse reflection constant
    float k_s = material_properties[2]; // Specular reflection constant
    float alpha = material_properties[3]; // Shininess

    // Compute Lighting Intensities
    float ambient_strength = ambient_light_info[3];
    vec3 ambient_color = vec3(ambient_light_info);

    vec3 i_ambient = ambient_strength * ambient_color;
    vec3 i_diffuse = vec3(0.0);
    vec3 i_specular = vec3(0.0);

    vec3 n = normalize(normal);
    vec3 e = normalize(-vec3(position));

    // Directional lights
    for (int i = 0; i < n_directional_lights; i++) {
        vec3 light_color = vec3(directional_light_info[2*i]);
        float light_strength = directional_light_info[2*i][3];
        light_color = light_color * light_strength;

        vec3 l = normalize(-vec3(V * directional_light_info[2*i+1]));

        vec3 r = reflect(-l, n);
        float nldot = dot(n, l);
        float erdot = dot(e, r);
        if (front_and_back) {
            nldot = abs(nldot);
            erdot = abs(erdot);
        }
        float diffuse = clamp(nldot, 0, 1);
        float specular = clamp(erdot, 0, 1);
        if (specular > 0.0) {
            specular = pow(specular, alpha);
        }

        i_diffuse += light_color * diffuse;
        i_specular += light_color * specular;
    }

    // Point lights
    for (int i = 0; i < n_point_lights; i++) {
        vec3 light_color = vec3(point_light_info[2*i]);
        float light_strength = point_light_info[2*i][3];
        light_color = light_color * light_strength;

        vec3 l = vec3(V * point_light_info[2*i+1]) - vec3(position);
        float dist = length(l);
        l = l / dist;

        light_color *= 1.0 / (dist * dist);

        vec3 r = reflect(-l, n);
        float nldot = dot(n, l);
        float erdot = dot(e, r);
        if (front_and_back) {
            nldot = abs(nldot);
            erdot = abs(erdot);
        }
        float diffuse = clamp(nldot, 0, 1);
        float specular = clamp(erdot, 0, 1);
        if (specular > 0.0) {
            specular = pow(specular, alpha);
        }

        i_diffuse += light_color * diffuse;
        i_specular += light_color * specular;
    }

    // Compute final pixel color
    frag_color = vec4((i_ambient * vec3(color) * k_a) + // Ambient
                      (i_diffuse * vec3(color) * k_d) + // Diffuse
                      (i_specular * k_s), 1.0); // Specular (unweighted by shape color)
}
'''
--------------------------------------------------------------------------------
/meshrender/trackball.py:
--------------------------------------------------------------------------------
"""Trackball class for 3D manipulation of viewpoints.
"""
import numpy as np

from autolab_core import transformations, RigidTransform

class Trackball(object):
    """A trackball class for creating camera transformations from mouse movements.
    """
    # Interaction states selected via set_state(); they determine how a mouse
    # drag is interpreted.
    STATE_ROTATE = 0
    STATE_PAN = 1
    STATE_ROLL = 2
    STATE_ZOOM = 3

    # NOTE(review): the mutable ndarray default for `target` is shared across
    # instances; it is never mutated in place in this class, but confirm.
    def __init__(self, T_camera_world, size, scale,
                 target=np.array([0.0, 0.0, 0.0])):
        """Initialize a trackball with an initial camera-to-world pose
        and the given parameters.

        Parameters
        ----------
        T_camera_world : autolab_core.RigidTransform
            An initial camera-to-world pose for the trackball.

        size : (float, float)
            The width and height of the camera image in pixels.

        scale : float
            The diagonal of the scene's bounding box --
            used for ensuring translation motions are sufficiently
            fast for differently-sized scenes.

        target : (3,) float
            The center of the scene in world coordinates.
            The trackball will revolve around this point.
        """
        self._size = np.array(size)
        self._scale = float(scale)

        # Two copies of the pose/target are kept: _T_camera_world/_target hold
        # the state committed at the last mouse-down, while the _n_-prefixed
        # copies track the in-progress state updated during drags.
        self._T_camera_world = T_camera_world
        self._n_T_camera_world = T_camera_world

        self._target = target
        self._n_target = target

        self._state = Trackball.STATE_ROTATE

    @property
    def T_camera_world(self):
        """autolab_core.RigidTransform : The current camera-to-world pose.
        """
        return self._n_T_camera_world

    def set_state(self, state):
        """Set the state of the trackball in order to change the effect of dragging motions.

        Parameters
        ----------
        state : int
            One of Trackball.STATE_ROTATE, Trackball.STATE_PAN, Trackball.STATE_ROLL, and
            Trackball.STATE_ZOOM.
        """
        self._state = state

    def resize(self, size):
        """Resize the window.

        Parameters
        ----------
        size : (float, float)
            The new width and height of the camera image in pixels.
        """
        self._size = np.array(size)

    def down(self, point):
        """Record an initial mouse press at a given point.

        Parameters
        ----------
        point : (2,) int
            The x and y pixel coordinates of the mouse press.
        """
        self._pdown = np.array(point, dtype=np.float32)
        # Commit the in-progress pose/target so the upcoming drag is computed
        # relative to the current view.
        self._T_camera_world = self._n_T_camera_world
        self._target = self._n_target

    def drag(self, point):
        """Update the trackball during a drag.

        Parameters
        ----------
        point : (2,) int
            The current x and y pixel coordinates of the mouse during a drag.
            This will compute a movement for the trackball with the relative motion
            between this point and the one marked by down().
        """
        point = np.array(point, dtype=np.float32)
        dx, dy = point - self._pdown
        mindim = 0.3 * np.min(self._size)

        # Camera frame axes and eye position from the committed pose.
        target = self._target
        x_axis = self._T_camera_world.matrix[:3,0].flatten()
        y_axis = self._T_camera_world.matrix[:3,1].flatten()
        z_axis = self._T_camera_world.matrix[:3,2].flatten()
        eye = self._T_camera_world.matrix[:3,3].flatten()

        # Interpret drag as a rotation
        if self._state == Trackball.STATE_ROTATE:
            x_angle = dx / mindim
            x_rot_mat = transformations.rotation_matrix(x_angle, y_axis, target)
            x_rot_tf = RigidTransform(x_rot_mat[:3,:3], x_rot_mat[:3,3], from_frame='world', to_frame='world')

            y_angle = dy / mindim
            y_rot_mat = transformations.rotation_matrix(y_angle, x_axis, target)
            y_rot_tf = RigidTransform(y_rot_mat[:3,:3], y_rot_mat[:3,3], from_frame='world', to_frame='world')

            self._n_T_camera_world = y_rot_tf.dot(x_rot_tf.dot(self._T_camera_world))

        # Interpret drag as a roll about the camera axis
        elif self._state == Trackball.STATE_ROLL:
            # Angle between the press vector and the current vector, both
            # measured from the window center.
            center = self._size / 2.0
            v_init = self._pdown - center
            v_curr = point - center
            v_init = v_init / np.linalg.norm(v_init)
            v_curr = v_curr / np.linalg.norm(v_curr)

            theta = np.arctan2(v_curr[1], v_curr[0]) - np.arctan2(v_init[1], v_init[0])

            rot_mat = transformations.rotation_matrix(theta, z_axis, target)
            rot_tf = RigidTransform(rot_mat[:3,:3], rot_mat[:3,3], from_frame='world', to_frame='world')

            self._n_T_camera_world = rot_tf.dot(self._T_camera_world)

        # Interpret drag as a camera pan in view plane
        elif self._state == Trackball.STATE_PAN:
            dx = -dx / (5.0*mindim) * self._scale
            dy = dy / (5.0*mindim) * self._scale

            translation = dx * x_axis + dy * y_axis
            self._n_target = self._target + translation
            t_tf = RigidTransform(translation=translation, from_frame='world',
                                  to_frame='world')
            self._n_T_camera_world = t_tf.dot(self._T_camera_world)

        # Interpret drag as a zoom motion
        elif self._state == Trackball.STATE_ZOOM:
            radius = np.linalg.norm(eye - target)
            ratio = 0.0
            if dy < 0:
                ratio = np.exp(abs(dy)/(0.5*self._size[1])) - 1.0
            elif dy > 0:
                ratio = 1.0 - np.exp(-dy/(0.5*(self._size[1])))
            translation = np.sign(dy) * ratio * radius * z_axis
            t_tf = RigidTransform(translation=translation, from_frame='world', to_frame='world')
            self._n_T_camera_world = t_tf.dot(self._T_camera_world)

    def scroll(self, clicks):
        """Zoom using a mouse scroll wheel motion.

        Parameters
        ----------
        clicks : int
            The number of clicks. Positive numbers indicate forward wheel movement.
        """
        target = self._target
        ratio = 0.90

        # Update the in-progress pose.
        z_axis = self._n_T_camera_world.matrix[:3,2].flatten()
        eye = self._n_T_camera_world.matrix[:3,3].flatten()
        radius = np.linalg.norm(eye - target)
        translation = clicks * (1 - ratio) * radius * z_axis
        t_tf = RigidTransform(translation=translation, from_frame='world', to_frame='world')
        self._n_T_camera_world = t_tf.dot(self._n_T_camera_world)

        # Also update the committed pose so a subsequent drag starts from the
        # zoomed view.
        z_axis = self._T_camera_world.matrix[:3,2].flatten()
        eye = self._T_camera_world.matrix[:3,3].flatten()
        radius = np.linalg.norm(eye - target)
        translation = clicks * (1 - ratio) * radius * z_axis
        t_tf = RigidTransform(translation=translation, from_frame='world', to_frame='world')
        self._T_camera_world = t_tf.dot(self._T_camera_world)

    def rotate(self, azimuth, axis=None):
        """Rotate the trackball about the "Up" axis by azimuth radians.

        Parameters
        ----------
        azimuth : float
            The number of radians to rotate.
        axis : (3,) float, optional
            If given, rotate about this world-frame axis instead of the
            camera's y (up) axis.
        """
        target = self._target

        # Rotate the in-progress pose about the up axis (or the given axis)
        # through the target point.
        y_axis = self._n_T_camera_world.matrix[:3,1].flatten()
        if axis is not None:
            y_axis = axis
        x_rot_mat = transformations.rotation_matrix(-azimuth, y_axis, target)
        x_rot_tf = RigidTransform(x_rot_mat[:3,:3], x_rot_mat[:3,3], from_frame='world', to_frame='world')
        self._n_T_camera_world = x_rot_tf.dot(self._n_T_camera_world)

        # Apply the same rotation to the committed pose so subsequent drags
        # start from the rotated view.
        y_axis = self._T_camera_world.matrix[:3,1].flatten()
        if axis is not None:
            y_axis = axis
        x_rot_mat = transformations.rotation_matrix(-azimuth, y_axis, target)
        x_rot_tf = RigidTransform(x_rot_mat[:3,:3], x_rot_mat[:3,3], from_frame='world', to_frame='world')
        self._T_camera_world = x_rot_tf.dot(self._T_camera_world)
--------------------------------------------------------------------------------
/meshrender/version.py:
--------------------------------------------------------------------------------
__version__ = '0.0.10'
--------------------------------------------------------------------------------
/meshrender/viewer.py:
--------------------------------------------------------------------------------
import ctypes
import os
import weakref
# Tkinter is optional -- it is only needed for the save-file dialogs, and the
# import name differs between Python 2 and 3.
try:
    from Tkinter import Tk, tkFileDialog as filedialog
except ImportError:
    try:
        from tkinter import Tk, filedialog as filedialog
    except:
        pass


import numpy as np
import imageio
import logging

# When this environment flag is set, rendering happens offscreen via EGL and
# the interactive viewer cannot be used.
_USE_EGL_OFFSCREEN = False
if 'MESHRENDER_EGL_OFFSCREEN' in os.environ:
    _USE_EGL_OFFSCREEN = True

try:
    import pyglet
    pyglet.options['shadow_window'] = False
    import pyglet.gl as gl
    from pyglet import clock

    import OpenGL
    from OpenGL.GL import *
    from OpenGL.GL import shaders
    from OpenGL.arrays import *
except:
    logging.warning('Cannot import OpenGL -- rendering will be broken!')

from .constants import MAX_N_LIGHTS, OPEN_GL_MAJOR, OPEN_GL_MINOR
from .light import AmbientLight, PointLight, DirectionalLight
from .shaders import vertex_shader, fragment_shader
from .camera import VirtualCamera
from .scene_object import InstancedSceneObject
from .trackball import Trackball

from autolab_core import transformations, RigidTransform
from perception import CameraIntrinsics, ColorImage

# Create static c_void_p objects to avoid leaking memory
# (byte offsets for the four vec4 columns of an instanced mat4 attribute).
C_VOID_PS = []
for i in range(5):
    C_VOID_PS.append(ctypes.c_void_p(4*4*i))

class SceneViewer(pyglet.window.Window):
    """An interactive viewer for a 3D scene.

    This doesn't use the scene's camera - instead, it uses one based on a trackball.

    The basic commands for moving about the scene are given as follows:

    * To rotate the camera about the center of the scene, hold the left mouse button and drag the cursor.
    * To rotate the camera about its viewing axis, hold CTRL and then hold the left mouse button and drag the cursor.
    * To pan the camera, do one of the following:

        * Hold SHIFT, then hold the left mouse button and drag the cursor.
        * Hold the middle mouse button and drag the cursor.

    * To zoom the camera in or out, do one of the following:

        * Scroll the mouse wheel.
        * Hold the right mouse button and drag the cursor.

    Other keyboard commands are as follows:

    * z -- resets the view to the original view.
    * w -- toggles wireframe mode for each mesh in the scene.
    * a -- toggles rotational animation.
73 | * l -- toggles two-sided lighting 74 | * q -- quits the viewer 75 | * s -- saves the current image 76 | * r -- starts a recording session, pressing again stops (saves animation as .gif) 77 | """ 78 | _raymond_lights = None 79 | 80 | 81 | def __init__(self, scene, size=(640,480), raymond_lighting=True, 82 | animate=False, animate_az=0.05, animate_rate=30.0, animate_axis=None, 83 | two_sided_lighting=False, line_width = 1.0, 84 | registered_keys={}, starting_camera_pose=None, max_frames=0, 85 | save_directory=None, save_filename=None, 86 | title='Scene Viewer', target_object=None, **kwargs): 87 | """Initialize a scene viewer and open the viewer window. 88 | 89 | Parameters 90 | ---------- 91 | scene : Scene 92 | A scene to view. The scene's camera is not used. 93 | size : (int, int) 94 | The width and height of the target window in pixels. 95 | raymond_lighting : bool 96 | If True, the scene's point and directional lights are discarded in favor 97 | of a set of three directional lights that move with the camera. 98 | animate : bool 99 | If True, the camera will rotate by default about the scene. 100 | animate_az : float 101 | The number of radians to rotate per timestep. 102 | animate_rate : float 103 | The framerate for animation in fps. 104 | animate_axis : (3,) float or None 105 | If present, the animation will rotate about the given axis in world coordinates. 106 | Otherwise, the animation will rotate in azimuth. 107 | two_sided_lighting : bool 108 | If True, the shader will treat all normals as facing the camera. 109 | line_width : float 110 | Sets the line width for wireframe meshes (default is 1). 111 | registered_keys : dict 112 | Map from alphabetic key to a tuple containing 113 | (1) a callback function taking the viewer itself as its first argument and 114 | (2) an additional list of arguments for the callback. 115 | starting_camera_pose : autolab_core.RigidTransform 116 | An initial pose for the camera, if specified. 
117 | max_frames : int 118 | If greater than zero, the viewer will display for the given 119 | number of frames, save those frames, and then close. 120 | save_directory : str 121 | A directory to open the TK save file dialog in to begin with. 122 | If None, uses the current directory. 123 | save_filename : str 124 | A default filename to open the save box with. Shouldn't have an extension -- 125 | extension will be .png or .gif depending on save type. 126 | title : str 127 | A title for the scene viewer. 128 | target_object : str 129 | The name of the object in the scene to center rotations around. 130 | kwargs : other kwargs 131 | Other optional keyword arguments. 132 | """ 133 | if _USE_EGL_OFFSCREEN: 134 | raise ValueError('Cannot initialize SceneViewer when MESHRENDER_EGL_OFFSCREEN is set.') 135 | self._gl_initialized = False 136 | 137 | # Save basic information 138 | self.scene = scene 139 | self._size = np.array(size) 140 | self._camera = None # These two are initialized 141 | self._trackball = None # by reset_view() 142 | self._saved_frames = [] 143 | 144 | # Save settings 145 | self._animate_az = animate_az 146 | self._animate_rate = animate_rate 147 | self._animate_axis = animate_axis 148 | self._line_width = line_width 149 | self._registered_keys = { 150 | ord(k.lower()) : registered_keys[k] for k in registered_keys 151 | } 152 | self._starting_camera_pose = starting_camera_pose 153 | self._max_frames = max_frames 154 | self._save_directory = save_directory 155 | self._save_filename = save_filename 156 | self._raymond_lighting = raymond_lighting 157 | self._raymond_lights = SceneViewer._get_raymond_lights() 158 | self._title = title 159 | self._target_object = target_object 160 | 161 | # Set flags 162 | self._flags = { 163 | 'mouse_pressed' : False, 164 | 'flip_wireframe' : False, 165 | 'two_sided_lighting' : two_sided_lighting, 166 | 'animate' : animate, 167 | 'record' : (self._max_frames > 0), 168 | } 169 | 170 | # Set up the window 171 | 
self._reset_view() 172 | self.scene.close_renderer() 173 | try: 174 | conf = gl.Config(sample_buffers=1, samples=4, 175 | depth_size=24, double_buffer=True, 176 | major_version=OPEN_GL_MAJOR, 177 | minor_version=OPEN_GL_MINOR) 178 | super(SceneViewer, self).__init__(config=conf, resizable=True, 179 | width=self._size[0], 180 | height=self._size[1]) 181 | except Exception as e: 182 | raise ValueError('Failed to initialize Pyglet window with an OpenGL 3+ context. ' \ 183 | 'If you\'re logged in via SSH, ensure that you\'re running your script ' \ 184 | 'with vglrun (i.e. VirtualGL). Otherwise, the internal error message was: ' \ 185 | '"{}"'.format(e.message)) 186 | 187 | self.set_caption(title) 188 | 189 | # Initialize OpenGL 190 | self._init_gl() 191 | 192 | # Update the application flags 193 | self._update_flags() 194 | 195 | # Start the event loop 196 | pyglet.app.run() 197 | 198 | 199 | @property 200 | def scene(self): 201 | """Scene : the viewer's attached scene. 202 | """ 203 | return self._scene() 204 | 205 | 206 | @scene.setter 207 | def scene(self, s): 208 | self._scene = weakref.ref(s) 209 | 210 | 211 | @property 212 | def saved_frames(self): 213 | """list of perception.ColorImage : Any color images that have been saved 214 | due to recording or the max_frames argument. 215 | """ 216 | return [ColorImage(f) for f in self._saved_frames] 217 | 218 | 219 | @property 220 | def save_directory(self): 221 | """str : A directory to open the TK save file dialog in to begin with. 222 | """ 223 | return self._save_directory 224 | 225 | 226 | def on_close(self): 227 | """Exit the event loop when the window is closed. 228 | """ 229 | OpenGL.contextdata.cleanupContext() 230 | self.close() 231 | pyglet.app.exit() 232 | 233 | 234 | def on_draw(self): 235 | """Redraw the scene into the viewing window. 
        """
        if not self._gl_initialized:
            return
        self._render()


    def on_resize(self, width, height):
        """Resize the camera and trackball when the window is resized.
        """
        self._size = (width, height)
        self._camera.resize(width, height)
        self._trackball.resize(self._size)
        self.on_draw()


    def on_mouse_press(self, x, y, buttons, modifiers):
        """Record an initial mouse press.
        """
        # Left drag rotates; with CTRL it rolls, with SHIFT it pans, and with
        # both it zooms. Middle drag pans, right drag zooms.
        self._trackball.set_state(Trackball.STATE_ROTATE)
        if (buttons == pyglet.window.mouse.LEFT):
            ctrl = (modifiers & pyglet.window.key.MOD_CTRL)
            shift = (modifiers & pyglet.window.key.MOD_SHIFT)
            if (ctrl and shift):
                self._trackball.set_state(Trackball.STATE_ZOOM)
            elif ctrl:
                self._trackball.set_state(Trackball.STATE_ROLL)
            elif shift:
                self._trackball.set_state(Trackball.STATE_PAN)
        elif (buttons == pyglet.window.mouse.MIDDLE):
            self._trackball.set_state(Trackball.STATE_PAN)
        elif (buttons == pyglet.window.mouse.RIGHT):
            self._trackball.set_state(Trackball.STATE_ZOOM)

        self._trackball.down(np.array([x, y]))

        # Stop animating while using the mouse
        self._flags['mouse_pressed'] = True


    def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
        """Record a mouse drag.
        """
        self._trackball.drag(np.array([x, y]))


    def on_mouse_release(self, x, y, button, modifiers):
        """Record a mouse release.
        """
        self._flags['mouse_pressed'] = False


    def on_mouse_scroll(self, x, y, dx, dy):
        """Record a mouse scroll.
        """
        self._trackball.scroll(dy)


    def on_key_press(self, symbol, modifiers):
        """Record a key press.
        """
        # User-registered callbacks take precedence over the built-in bindings.
        if symbol in self._registered_keys:
            tup = self._registered_keys[symbol]
            callback, args = tup
            callback(self, *args)
        elif symbol == pyglet.window.key.W:
            self._flags['flip_wireframe'] = not self._flags['flip_wireframe']
        elif symbol == pyglet.window.key.Z:
            self._reset_view()
        elif symbol == pyglet.window.key.A:
            self._flags['animate'] = not self._flags['animate']
        elif symbol == pyglet.window.key.L:
            self._flags['two_sided_lighting'] = not self._flags['two_sided_lighting']
        elif symbol == pyglet.window.key.S:
            self._save_image()
        elif symbol == pyglet.window.key.Q:
            self.on_close()
        elif symbol == pyglet.window.key.R:
            # Toggle recording; stopping saves the accumulated frames as a GIF.
            if self._flags['record']:
                self._save_gif()
                self.set_caption(self._title)
            else:
                self.set_caption('{} (RECORDING)'.format(self._title))
            self._flags['record'] = not self._flags['record']
        self._update_flags()


    def _update_flags(self):
        """Update OpenGL state based on the current flags.
        """
        glLineWidth(float(self._line_width))
        # Re-schedule the frame timer only while animating or recording.
        # NOTE(review): time_event is not visible in this chunk -- presumably a
        # static callback defined elsewhere in this class; confirm.
        clock.unschedule(SceneViewer.time_event)
        if self._flags['animate'] or self._flags['record']:
            clock.schedule_interval(SceneViewer.time_event, 1.0/self._animate_rate, self)


    def _reset_view(self):
        """Reset the view to a good initial state.

        The view is initially along the positive x-axis a sufficient distance from the scene.
        """

        # Compute scene bounds and scale
        bounds = self._compute_scene_bounds()
        centroid = np.mean(bounds, axis=0)
        extents = np.diff(bounds, axis=0).reshape(-1)
        scale = (extents ** 2).sum() ** .5
        width, height = self._size

        # Set up reasonable camera intrinsics
        fov = np.pi / 6.0
        fl = height / (2.0 * np.tan(fov / 2))
        ci = CameraIntrinsics(
            frame = 'camera',
            fx = fl,
            fy = fl,
            cx = width/2.0,
            cy = height/2.0,
            skew=0.0,
            height=height,
            width=width
        )

        # Move centroid if needed
        if self._target_object and self._target_object in self.scene.objects:
            obj = self.scene.objects[self._target_object]
            if isinstance(obj, InstancedSceneObject):
                # Rows 3::4 of the transposed pose blocks hold the instance
                # translations.
                centroid = np.mean(obj.raw_pose_data[3::4,:3], axis=0)
            else:
                centroid = np.mean(obj.mesh.bounds, axis=0)
            centroid = obj.T_obj_world.matrix.dot(np.hstack((centroid, 1.0)))[:3]
            scale = (obj.mesh.extents ** 2).sum() ** .5

        # Set up the camera pose (z axis faces towards scene, x to right, y down)
        s2 = 1.0/np.sqrt(2.0)
        cp = RigidTransform(
            rotation = np.array([
                [0.0, s2,  -s2],
                [1.0, 0.0, 0.0],
                [0.0, -s2, -s2]
            ]),
            translation = np.sqrt(2.0)*np.array([scale, 0.0, scale]) + centroid,
            from_frame='camera',
            to_frame='world'
        )
        if self._starting_camera_pose is not None:
            cp = self._starting_camera_pose

        # Create a VirtualCamera
        self._camera = VirtualCamera(ci, cp, z_near=scale/100.0, z_far=scale*100.0)

        # Create a trackball
        self._trackball = Trackball(
            self._camera.T_camera_world,
            self._size, scale,
            target=centroid,
        )


    def _compute_scene_bounds(self):
        """The axis aligned bounds of the scene.

        Returns
        -------
        (2,3) float
            The bounding box with [min, max] coordinates.
        """
        lb = np.array([np.infty, np.infty, np.infty])
        ub = -1.0 * np.array([np.infty, np.infty, np.infty])
        for on in self.scene.objects:
            o = self.scene.objects[on]
            poses = [RigidTransform(from_frame=o.T_obj_world.from_frame, to_frame=o.T_obj_world.to_frame)]
            if isinstance(o, InstancedSceneObject):
                # Cheat for instanced objects -- just find the min/max translations and create poses from those
                # Compile translations
                translations = o.raw_pose_data[3::4,:3]
                min_trans = np.min(translations, axis=0)
                max_trans = np.max(translations, axis=0)
                poses = [RigidTransform(translation=min_trans),
                         RigidTransform(translation=max_trans)]
            for pose in poses:
                tf_verts = pose.matrix[:3,:3].dot(o.mesh.vertices.T).T + pose.matrix[:3,3]
                tf_verts = o.T_obj_world.matrix[:3,:3].dot(tf_verts.T).T + o.T_obj_world.matrix[:3,3]
                lb_mesh = np.min(tf_verts, axis=0)
                ub_mesh = np.max(tf_verts, axis=0)
                lb = np.minimum(lb, lb_mesh)
                ub = np.maximum(ub, ub_mesh)
        # Fall back to a unit cube when the scene is empty.
        if np.any(lb > ub):
            return np.array([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]])
        return np.array([lb, ub])


    def _save_image(self):
        """Prompt for a filename and save the current framebuffer as an image.
        """
        # Get save file location
        try:
            root = Tk()
            fn = ''
            if self._save_filename:
                fn = '{}.png'.format(self._save_filename)
            filename = filedialog.asksaveasfilename(initialfile = fn,
                                                    initialdir = (self._save_directory or os.getcwd()),
                                                    title = 'Select file save location',
                                                    filetypes = (('png files','*.png'),
                                                                 ('jpeg files', '*.jpg'),
                                                                 ('all files','*.*')))
        except:
            logging.warning('Cannot use Tkinter file dialogs over SSH')
            return

        root.destroy()
        if filename == ():
            return
        else:
            self._save_directory = os.path.dirname(filename)

        # Extract color image from frame buffer
        width, height = self._size
        glReadBuffer(GL_FRONT)
        color_buf = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE)

        # Re-format them into numpy arrays
        color_im = np.frombuffer(color_buf, dtype=np.uint8).reshape((height, width, 3))
        # OpenGL's origin is the lower-left corner, so flip vertically.
        color_im = np.flip(color_im, axis=0)

        imageio.imwrite(filename, color_im)


    def _save_gif(self):
        """Prompt for a filename and save the recorded frames as a GIF.
        """
        # Get save file location
        try:
            root = Tk()
            fn = ''
            if self._save_filename:
                fn = '{}.gif'.format(self._save_filename)
            filename = filedialog.asksaveasfilename(initialfile = fn,
                                                    initialdir = (self._save_directory or os.getcwd()),
                                                    title = 'Select file save location',
                                                    filetypes = (('gif files','*.gif'),
                                                                 ('all files','*.*')))
        except:
            logging.warning('Cannot use Tkinter file dialogs over SSH')
            self._saved_frames = []
            return

        root.destroy()
        if filename == ():
            self._saved_frames = []
            return
        else:
            self._save_directory = os.path.dirname(filename)

        imageio.mimwrite(filename, self._saved_frames, fps=30.0, palettesize=128, subrectangles=True)

        self._saved_frames = []


    def _record(self):
        """Save another frame for the GIF.
        """
        # Extract color image from frame buffer
        width, height = self._size
        glReadBuffer(GL_FRONT)
        color_buf = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE)

        # Re-format them into numpy arrays
        color_im = np.frombuffer(color_buf, dtype=np.uint8).reshape((height, width, 3))
        color_im = np.flip(color_im, axis=0)

        self._saved_frames.append(color_im)

        # Close automatically once the requested number of frames is captured.
        if self._max_frames:
            if len(self._saved_frames) == self._max_frames:
                self.on_close()


    def _animate(self):
        """Animate the scene by rotating the camera.
512 | """ 513 | self._trackball.rotate(self._animate_az, self._animate_axis) 514 | 515 | 516 | def _init_gl(self): 517 | """Initialize OpenGL by loading shaders and mesh geometry. 518 | """ 519 | bg = self.scene.background_color 520 | glClearColor(bg[0], bg[1], bg[2], 1.0) 521 | 522 | glEnable(GL_DEPTH_TEST) 523 | glEnable(GL_DEPTH_TEST) 524 | glDepthMask(GL_TRUE) 525 | glDepthFunc(GL_LESS) 526 | glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) 527 | 528 | self._vaids = self._load_meshes() 529 | glBindVertexArray(self._vaids[0]) 530 | self._shader = self._load_shaders(vertex_shader, fragment_shader) 531 | glBindVertexArray(0) 532 | 533 | self._gl_initialized = True 534 | 535 | 536 | def _load_shaders(self, vertex_shader, fragment_shader): 537 | """Load and compile shaders from strings. 538 | """ 539 | shader = shaders.compileProgram( 540 | shaders.compileShader(vertex_shader, GL_VERTEX_SHADER), 541 | shaders.compileShader(fragment_shader, GL_FRAGMENT_SHADER) 542 | ) 543 | 544 | return shader 545 | 546 | 547 | def _load_meshes(self): 548 | """Load the scene's meshes into vertex buffers. 549 | """ 550 | VA_ids = glGenVertexArrays(len(self.scene.objects)) 551 | 552 | if len(self.scene.objects) == 1: 553 | VA_ids = [VA_ids] 554 | 555 | for VA_id, obj in zip(VA_ids, self.scene.objects.values()): 556 | mesh = obj.mesh 557 | material = obj.material 558 | 559 | glBindVertexArray(VA_id) 560 | 561 | if material.smooth: 562 | # If smooth is True, we use indexed element arrays and set only one normal per vertex. 

                # Set up the vertex VBO
                vertexbuffer = glGenBuffers(1)
                glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer)
                glEnableVertexAttribArray(0)
                glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, C_VOID_PS[0])
                glBufferData(GL_ARRAY_BUFFER,
                             4*3*len(mesh.vertices),
                             np.array(mesh.vertices.flatten(), dtype=np.float32),
                             GL_STATIC_DRAW)

                # Set up the normal VBO
                normalbuffer = glGenBuffers(1)
                glBindBuffer(GL_ARRAY_BUFFER, normalbuffer)
                glEnableVertexAttribArray(1)
                glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, C_VOID_PS[0])
                glBufferData(GL_ARRAY_BUFFER,
                             4*3*len(mesh.vertex_normals),
                             np.array(mesh.vertex_normals.flatten(), dtype=np.float32),
                             GL_STATIC_DRAW)

                # Set up the element index buffer
                elementbuffer = glGenBuffers(1)
                glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer)
                glBufferData(GL_ELEMENT_ARRAY_BUFFER,
                             4*3*len(mesh.faces),
                             np.array(mesh.faces.flatten(), dtype=np.int32),
                             GL_STATIC_DRAW)

            else:
                # If smooth is False, we treat each triangle independently
                # and set vertex normals to corresponding face normals.

                # Set up the vertices
                vertexbuffer = glGenBuffers(1)
                glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer)
                glEnableVertexAttribArray(0)
                glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, C_VOID_PS[0])
                glBufferData(GL_ARRAY_BUFFER,
                             4*3*3*len(mesh.triangles),
                             np.array(mesh.triangles.flatten(), dtype=np.float32),
                             GL_STATIC_DRAW)

                # Set up the normals
                normalbuffer = glGenBuffers(1)
                glBindBuffer(GL_ARRAY_BUFFER, normalbuffer)
                glEnableVertexAttribArray(1)
                glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, C_VOID_PS[0])
                # One copy of the face normal for each of the triangle's
                # three vertices.
                normals = np.repeat(mesh.face_normals, 3, axis=0).astype(np.float32)
                normals = normals.flatten()
                glBufferData(GL_ARRAY_BUFFER,
                             4*len(normals),
                             normals,
                             GL_STATIC_DRAW)

            # Positions and normals advance per-vertex, not per-instance.
            glVertexAttribDivisor(0, 0)
            glVertexAttribDivisor(1, 0)

            # Set up model matrix buffer -- a mat4 attribute occupies four
            # consecutive vec4 slots (locations 2-5), one per column.
            modelbuf = glGenBuffers(1)
            glBindBuffer(GL_ARRAY_BUFFER, modelbuf)
            for i in range(4):
                glEnableVertexAttribArray(2 + i)
                glVertexAttribPointer(2 + i, 4, GL_FLOAT, GL_FALSE, 4*16, C_VOID_PS[i])
                glVertexAttribDivisor(2 + i, 1)

            if isinstance(obj, InstancedSceneObject):
                glBufferData(GL_ARRAY_BUFFER, 4*16*obj.n_instances, None, GL_STATIC_DRAW)
                data = obj.raw_pose_data.flatten().astype(np.float32)
                glBufferSubData(GL_ARRAY_BUFFER, 0, 4*16*obj.n_instances, data)
            else:
                # Non-instanced objects get a single identity instance matrix.
                glBufferData(GL_ARRAY_BUFFER, 4*16, None, GL_STATIC_DRAW)
                glBufferSubData(GL_ARRAY_BUFFER, 0, 4*16, np.eye(4).flatten().astype(np.float32))

            # Set up color buffer (one color per instance, attribute 6)
            colorbuf = glGenBuffers(1)
            glBindBuffer(GL_ARRAY_BUFFER, colorbuf)
            glEnableVertexAttribArray(6)
            glVertexAttribPointer(6, 3, GL_FLOAT, GL_FALSE, 0, C_VOID_PS[0])
            glVertexAttribDivisor(6, 1)

            if isinstance(obj, InstancedSceneObject):
                glBufferData(GL_ARRAY_BUFFER, 4*3*len(obj.colors), None, GL_STATIC_DRAW)
                data = obj.colors.flatten().astype(np.float32)
                glBufferSubData(GL_ARRAY_BUFFER, 0, 4*3*len(obj.colors), data)
            else:
                glBufferData(GL_ARRAY_BUFFER, 4*3, None, GL_STATIC_DRAW)
                glBufferSubData(GL_ARRAY_BUFFER, 0, 4*3, obj.material.color.astype(np.float32))


            # Unbind all buffers
            glBindVertexArray(0)
            glBindBuffer(GL_ARRAY_BUFFER, 0)
            glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)

        return VA_ids


    def _render(self):
        """Render the scene into the framebuffer and flip.
        """
        scene = self.scene
        camera = self._camera

        # Drive the render camera from the trackball's current pose.
        camera.T_camera_world = self._trackball.T_camera_world

        # Set viewport size
        context = self.context
        back_width, back_height = self._size

        # Check for retina slash high-dpi displays (hack)
        if hasattr(self.context, '_nscontext'):
            view = self.context._nscontext.view()
            bounds = view.convertRectToBacking_(view.bounds()).size
            back_width, back_height = (int(bounds.width), int(bounds.height))

        glViewport(0, 0, back_width, back_height)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

        glUseProgram(self._shader)

        # Get Uniform Locations from Shader
        v_id = glGetUniformLocation(self._shader, 'V')
        p_id = glGetUniformLocation(self._shader, 'P')
        m_id = glGetUniformLocation(self._shader, 'M')
        matprop_id = glGetUniformLocation(self._shader, 'material_properties')
        ambient_id = glGetUniformLocation(self._shader, 'ambient_light_info')
        directional_id = glGetUniformLocation(self._shader, "directional_light_info")
        n_directional_id = glGetUniformLocation(self._shader, "n_directional_lights")
        point_id = glGetUniformLocation(self._shader, "point_light_info")
        n_point_id = glGetUniformLocation(self._shader, "n_point_lights")
        front_and_back_id = glGetUniformLocation(self._shader, "front_and_back")

        # Bind bad
normals id 697 | glUniform1i(front_and_back_id, int(self._flags['two_sided_lighting'])) 698 | 699 | # Bind view matrix 700 | glUniformMatrix4fv(v_id, 1, GL_TRUE, camera.V) 701 | glUniformMatrix4fv(p_id, 1, GL_TRUE, camera.P) 702 | 703 | # Bind ambient lighting 704 | glUniform4fv(ambient_id, 1, np.hstack((scene.ambient_light.color, 705 | scene.ambient_light.strength))) 706 | 707 | # If using raymond lighting, don't use scene's directional or point lights 708 | d_lights = scene.directional_lights 709 | p_lights = scene.point_lights 710 | if self._raymond_lighting: 711 | d_lights = [] 712 | for dlight in SceneViewer._raymond_lights: 713 | direc = dlight.direction 714 | direc = camera.T_camera_world.matrix[:3,:3].dot(direc) 715 | d_lights.append(DirectionalLight( 716 | direction=direc, 717 | color=dlight.color, 718 | strength=dlight.strength 719 | )) 720 | p_lights = [] 721 | 722 | # Bind directional lighting 723 | glUniform1i(n_directional_id, len(d_lights)) 724 | directional_info = np.zeros((2*MAX_N_LIGHTS, 4)) 725 | for i, dlight in enumerate(d_lights): 726 | directional_info[2*i,:] = np.hstack((dlight.color, dlight.strength)) 727 | directional_info[2*i+1,:] = np.hstack((dlight.direction, 0)) 728 | glUniform4fv(directional_id, 2*MAX_N_LIGHTS, directional_info.flatten()) 729 | 730 | # Bind point lighting 731 | glUniform1i(n_point_id, len(p_lights)) 732 | point_info = np.zeros((2*MAX_N_LIGHTS, 4)) 733 | for i, plight in enumerate(p_lights): 734 | point_info[2*i,:] = np.hstack((plight.color, plight.strength)) 735 | point_info[2*i+1,:] = np.hstack((plight.location, 1)) 736 | glUniform4fv(point_id, 2*MAX_N_LIGHTS, point_info.flatten()) 737 | 738 | for vaid, obj in zip(self._vaids, scene.objects.values()): 739 | if not obj.enabled: 740 | continue 741 | 742 | mesh = obj.mesh 743 | material = obj.material 744 | 745 | glUniformMatrix4fv(m_id, 1, GL_TRUE, obj.T_obj_world.matrix) 746 | glUniform4fv(matprop_id, 1, np.array([material.k_a, material.k_d, material.k_s, 
material.alpha])) 747 | 748 | wf = material.wireframe != self._flags['flip_wireframe'] 749 | if wf: 750 | glPolygonMode(GL_FRONT_AND_BACK, GL_LINE) 751 | else: 752 | glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) 753 | 754 | glBindVertexArray(vaid) 755 | 756 | n_instances = 1 757 | if isinstance(obj, InstancedSceneObject): 758 | n_instances = obj.n_instances 759 | 760 | if material.smooth: 761 | glDrawElementsInstanced(GL_TRIANGLES, 3*len(mesh.faces), GL_UNSIGNED_INT, C_VOID_PS[0], n_instances) 762 | else: 763 | glDrawArraysInstanced(GL_TRIANGLES, 0, 3*len(mesh.faces), n_instances) 764 | 765 | glBindVertexArray(0) 766 | 767 | glUseProgram(0) 768 | 769 | 770 | @staticmethod 771 | def time_event(dt, self): 772 | if self._flags['record']: 773 | self._record() 774 | if self._flags['animate'] and not self._flags['mouse_pressed']: 775 | self._animate() 776 | 777 | 778 | @staticmethod 779 | def _get_raymond_lights(): 780 | """Create raymond lights for the scene. 781 | """ 782 | if SceneViewer._raymond_lights: 783 | return SceneViewer._raymond_lights 784 | 785 | raymond_lights = [] 786 | 787 | # Create raymond lights 788 | elevs = np.pi * np.array([1/6., 1/6., 1/4.]) 789 | azims = np.pi * np.array([1/6., 5/3., -1/4.]) 790 | l = 0 791 | for az, el in zip(azims, elevs): 792 | x = np.cos(el) * np.cos(az) 793 | y = -np.cos(el) * np.sin(el) 794 | z = -np.sin(el) 795 | 796 | direction = -np.array([x, y, z]) 797 | direction = direction / np.linalg.norm(direction) 798 | direc = DirectionalLight( 799 | direction=direction, 800 | color=np.array([1.0, 1.0, 1.0]), 801 | strength=1.0 802 | ) 803 | raymond_lights.append(direc) 804 | 805 | SceneViewer._raymond_lights = raymond_lights 806 | return raymond_lights 807 | 808 | -------------------------------------------------------------------------------- /package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | meshrender 4 | 0.0.10 5 | The meshrender package 6 | 7 | 8 | 9 | 10 | todo 11 | 12 | 
13 | 14 | 15 | 16 | Apache v2.0 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | catkin 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | Setup of meshrender Python codebase. 3 | Authors: Matthew Matl and Jeff Mahler 4 | """ 5 | 6 | from setuptools import setup 7 | 8 | # load __version__ 9 | exec(open('meshrender/version.py').read()) 10 | 11 | requirements = [ 12 | 'numpy', 13 | 'scipy', 14 | 'trimesh[easy]', 15 | 'PyOpenGL>=3.1.0', 16 | 'pyglet>=1.4.0b1', 17 | 'imageio', 18 | 'autolab_core', 19 | 'autolab_perception' 20 | ] 21 | 22 | setup( 23 | name = 'meshrender', 24 | version = __version__, 25 | description = 'Python utilities for rendering scenes containing 3D meshes', 26 | long_description = 'A set of Python utilities for rendering 3D scenes, based on PyOpenGL and target at OpenGL 3+.', 27 | author = 'Matthew Matl', 28 | author_email = 'mmatl@eecs.berkeley.edu', 29 | license = "Apache Software License", 30 | url = 'https://github.com/BerkeleyAutomation/meshrender', 31 | classifiers = [ 32 | 'Development Status :: 3 - Alpha', 33 | 'License :: OSI Approved :: Apache Software License', 34 | 'Operating System :: POSIX :: Linux', 35 | 'Operating System :: MacOS :: MacOS X', 36 | 'Programming Language :: Python :: 2.7', 37 | 'Programming Language :: Python :: 3.5', 38 | 'Programming Language :: Python :: 3.6', 39 | ], 40 | keywords = 'rendering opengl 3d visualization', 41 | packages = ['meshrender'], 42 | setup_requires = requirements, 43 | install_requires = requirements, 44 | extras_require = { 'docs' : [ 45 | 'sphinx', 46 | 'sphinxcontrib-napoleon', 47 | 'sphinx_rtd_theme' 48 | ], 49 | } 50 | ) 51 | -------------------------------------------------------------------------------- /tests/test_temporary.py: 
-------------------------------------------------------------------------------- 1 | """ 2 | Placeholder test cases for meshrender -- hard to test it in CI 3 | as doing so requires an X display. 4 | """ 5 | import unittest 6 | 7 | class PointsTest(unittest.TestCase): 8 | 9 | def test_temporary(self): 10 | pass 11 | 12 | if __name__ == '__main__': 13 | unittest.main() 14 | 15 | --------------------------------------------------------------------------------