├── .gitignore ├── LICENSE ├── Licenses ├── README.md ├── assets ├── readme1.gif ├── readme2.gif ├── readme3.gif └── rviz_gui.gif ├── config ├── franka_fabric_config.yaml └── franka_robot_description.yaml ├── docker └── Dockerfile ├── interactive_demo ├── mpinets_msgs │ ├── CMakeLists.txt │ ├── msg │ │ └── PlanningProblem.msg │ └── package.xml └── mpinets_ros │ ├── CMakeLists.txt │ ├── launch │ └── visualize.launch │ ├── meshes │ ├── LICENSE │ └── half_open_gripper.stl │ ├── nodes │ ├── interaction_node.py │ └── planning_node.py │ ├── package.xml │ ├── rviz │ └── config.rviz │ └── setup.py ├── jobconfig.yaml ├── mpinets ├── __init__.py ├── data_loader.py ├── data_pipeline │ ├── __init__.py │ ├── environments │ │ ├── __init__.py │ │ ├── base_environment.py │ │ ├── cubby_environment.py │ │ ├── dresser_environment.py │ │ └── tabletop_environment.py │ ├── gen_data.py │ └── process_data.py ├── geometry.py ├── loss.py ├── metrics.py ├── model.py ├── mpinets_types.py ├── run_inference.py ├── run_training.py ├── third_party │ ├── __init__.py │ └── sparc.py └── utils.py ├── pyproject.toml └── setup.cfg /.gitignore: -------------------------------------------------------------------------------- 1 | Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2022 NVIDIA CORPORATION, University of Washington. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Licenses: -------------------------------------------------------------------------------- 1 | SPARC 2 | --------------- 3 | The code used to calculate sparc (i.e. smoothness) in mpinets/metrics.py 4 | was originally licensed according to the ISC license (see below). No changes 5 | were made to this function from the original source. 6 | 7 | Copyright (c) 2015, Sivakumar Balasubramanian 8 | 9 | Permission to use, copy, modify, and/or distribute this software for any 10 | purpose with or without fee is hereby granted, provided that the above 11 | copyright notice and this permission notice appear in all copies. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 14 | WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 15 | MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 16 | ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 17 | WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 18 | ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 19 | OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 20 | 21 | franka_ros 0.10.0 22 | ---------------- 23 | 24 | The Franka gripper mesh used in interactive_demo/mpinets_ros/meshes/half_open_gripper.stl was constructed using 25 | meshes in the "franka_description" component of the franka_ros package. 26 | 27 | Site: https://github.com/frankaemika/franka_ros/ 28 | License: https://github.com/frankaemika/franka_ros/blob/0.10.0/NOTICE 29 | https://github.com/frankaemika/franka_ros/blob/0.10.0/LICENSE 30 | 31 | Copyright 2017 Franka Emika GmbH 32 | 33 | Licensed under the Apache License, Version 2.0 (the "License"); 34 | you may not use this file except in compliance with the License. 35 | You may obtain a copy of the License at 36 | 37 | http://www.apache.org/licenses/LICENSE-2.0 38 | 39 | Unless required by applicable law or agreed to in writing, software 40 | distributed under the License is distributed on an "AS IS" BASIS, 41 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 42 | See the License for the specific language governing permissions and 43 | limitations under the License. 44 | 45 | Apache License 46 | Version 2.0, January 2004 47 | http://www.apache.org/licenses/ 48 | 49 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 50 | 51 | 1. Definitions. 52 | 53 | "License" shall mean the terms and conditions for use, reproduction, 54 | and distribution as defined by Sections 1 through 9 of this document. 55 | 56 | "Licensor" shall mean the copyright owner or entity authorized by 57 | the copyright owner that is granting the License. 58 | 59 | "Legal Entity" shall mean the union of the acting entity and all 60 | other entities that control, are controlled by, or are under common 61 | control with that entity. For the purposes of this definition, 62 | "control" means (i) the power, direct or indirect, to cause the 63 | direction or management of such entity, whether by contract or 64 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 65 | outstanding shares, or (iii) beneficial ownership of such entity. 66 | 67 | "You" (or "Your") shall mean an individual or Legal Entity 68 | exercising permissions granted by this License. 69 | 70 | "Source" form shall mean the preferred form for making modifications, 71 | including but not limited to software source code, documentation 72 | source, and configuration files. 73 | 74 | "Object" form shall mean any form resulting from mechanical 75 | transformation or translation of a Source form, including but 76 | not limited to compiled object code, generated documentation, 77 | and conversions to other media types. 78 | 79 | "Work" shall mean the work of authorship, whether in Source or 80 | Object form, made available under the License, as indicated by a 81 | copyright notice that is included in or attached to the work 82 | (an example is provided in the Appendix below). 83 | 84 | "Derivative Works" shall mean any work, whether in Source or Object 85 | form, that is based on (or derived from) the Work and for which the 86 | editorial revisions, annotations, elaborations, or other modifications 87 | represent, as a whole, an original work of authorship. For the purposes 88 | of this License, Derivative Works shall not include works that remain 89 | separable from, or merely link (or bind by name) to the interfaces of, 90 | the Work and Derivative Works thereof. 91 | 92 | "Contribution" shall mean any work of authorship, including 93 | the original version of the Work and any modifications or additions 94 | to that Work or Derivative Works thereof, that is intentionally 95 | submitted to Licensor for inclusion in the Work by the copyright owner 96 | or by an individual or Legal Entity authorized to submit on behalf of 97 | the copyright owner. For the purposes of this definition, "submitted" 98 | means any form of electronic, verbal, or written communication sent 99 | to the Licensor or its representatives, including but not limited to 100 | communication on electronic mailing lists, source code control systems, 101 | and issue tracking systems that are managed by, or on behalf of, the 102 | Licensor for the purpose of discussing and improving the Work, but 103 | excluding communication that is conspicuously marked or otherwise 104 | designated in writing by the copyright owner as "Not a Contribution." 105 | 106 | "Contributor" shall mean Licensor and any individual or Legal Entity 107 | on behalf of whom a Contribution has been received by Licensor and 108 | subsequently incorporated within the Work. 109 | 110 | 2. Grant of Copyright License. Subject to the terms and conditions of 111 | this License, each Contributor hereby grants to You a perpetual, 112 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 113 | copyright license to reproduce, prepare Derivative Works of, 114 | publicly display, publicly perform, sublicense, and distribute the 115 | Work and such Derivative Works in Source or Object form. 116 | 117 | 3. Grant of Patent License. Subject to the terms and conditions of 118 | this License, each Contributor hereby grants to You a perpetual, 119 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 120 | (except as stated in this section) patent license to make, have made, 121 | use, offer to sell, sell, import, and otherwise transfer the Work, 122 | where such license applies only to those patent claims licensable 123 | by such Contributor that are necessarily infringed by their 124 | Contribution(s) alone or by combination of their Contribution(s) 125 | with the Work to which such Contribution(s) was submitted. If You 126 | institute patent litigation against any entity (including a 127 | cross-claim or counterclaim in a lawsuit) alleging that the Work 128 | or a Contribution incorporated within the Work constitutes direct 129 | or contributory patent infringement, then any patent licenses 130 | granted to You under this License for that Work shall terminate 131 | as of the date such litigation is filed. 132 | 133 | 4. Redistribution. You may reproduce and distribute copies of the 134 | Work or Derivative Works thereof in any medium, with or without 135 | modifications, and in Source or Object form, provided that You 136 | meet the following conditions: 137 | 138 | (a) You must give any other recipients of the Work or 139 | Derivative Works a copy of this License; and 140 | 141 | (b) You must cause any modified files to carry prominent notices 142 | stating that You changed the files; and 143 | 144 | (c) You must retain, in the Source form of any Derivative Works 145 | that You distribute, all copyright, patent, trademark, and 146 | attribution notices from the Source form of the Work, 147 | excluding those notices that do not pertain to any part of 148 | the Derivative Works; and 149 | 150 | (d) If the Work includes a "NOTICE" text file as part of its 151 | distribution, then any Derivative Works that You distribute must 152 | include a readable copy of the attribution notices contained 153 | within such NOTICE file, excluding those notices that do not 154 | pertain to any part of the Derivative Works, in at least one 155 | of the following places: within a NOTICE text file distributed 156 | as part of the Derivative Works; within the Source form or 157 | documentation, if provided along with the Derivative Works; or, 158 | within a display generated by the Derivative Works, if and 159 | wherever such third-party notices normally appear. The contents 160 | of the NOTICE file are for informational purposes only and 161 | do not modify the License. You may add Your own attribution 162 | notices within Derivative Works that You distribute, alongside 163 | or as an addendum to the NOTICE text from the Work, provided 164 | that such additional attribution notices cannot be construed 165 | as modifying the License. 166 | 167 | You may add Your own copyright statement to Your modifications and 168 | may provide additional or different license terms and conditions 169 | for use, reproduction, or distribution of Your modifications, or 170 | for any such Derivative Works as a whole, provided Your use, 171 | reproduction, and distribution of the Work otherwise complies with 172 | the conditions stated in this License. 173 | 174 | 5. Submission of Contributions. Unless You explicitly state otherwise, 175 | any Contribution intentionally submitted for inclusion in the Work 176 | by You to the Licensor shall be under the terms and conditions of 177 | this License, without any additional terms or conditions. 178 | Notwithstanding the above, nothing herein shall supersede or modify 179 | the terms of any separate license agreement you may have executed 180 | with Licensor regarding such Contributions. 181 | 182 | 6. Trademarks. This License does not grant permission to use the trade 183 | names, trademarks, service marks, or product names of the Licensor, 184 | except as required for reasonable and customary use in describing the 185 | origin of the Work and reproducing the content of the NOTICE file. 186 | 187 | 7. Disclaimer of Warranty. Unless required by applicable law or 188 | agreed to in writing, Licensor provides the Work (and each 189 | Contributor provides its Contributions) on an "AS IS" BASIS, 190 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 191 | implied, including, without limitation, any warranties or conditions 192 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 193 | PARTICULAR PURPOSE. You are solely responsible for determining the 194 | appropriateness of using or redistributing the Work and assume any 195 | risks associated with Your exercise of permissions under this License. 196 | 197 | 8. Limitation of Liability. In no event and under no legal theory, 198 | whether in tort (including negligence), contract, or otherwise, 199 | unless required by applicable law (such as deliberate and grossly 200 | negligent acts) or agreed to in writing, shall any Contributor be 201 | liable to You for damages, including any direct, indirect, special, 202 | incidental, or consequential damages of any character arising as a 203 | result of this License or out of the use or inability to use the 204 | Work (including but not limited to damages for loss of goodwill, 205 | work stoppage, computer failure or malfunction, or any and all 206 | other commercial damages or losses), even if such Contributor 207 | has been advised of the possibility of such damages. 208 | 209 | 9. Accepting Warranty or Additional Liability. While redistributing 210 | the Work or Derivative Works thereof, You may choose to offer, 211 | and charge a fee for, acceptance of support, warranty, indemnity, 212 | or other liability obligations and/or rights consistent with this 213 | License. However, in accepting such obligations, You may act only 214 | on Your own behalf and on Your sole responsibility, not on behalf 215 | of any other Contributor, and only if You agree to indemnify, 216 | defend, and hold each Contributor harmless for any liability 217 | incurred by, or claims asserted against, such Contributor by reason 218 | of your accepting any such warranty or additional liability. 219 | 220 | END OF TERMS AND CONDITIONS 221 | -------------------------------------------------------------------------------- /assets/readme1.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NVlabs/motion-policy-networks/42ff07bb95bd75f7c7752bf4300eb53698ae099d/assets/readme1.gif -------------------------------------------------------------------------------- /assets/readme2.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NVlabs/motion-policy-networks/42ff07bb95bd75f7c7752bf4300eb53698ae099d/assets/readme2.gif -------------------------------------------------------------------------------- /assets/readme3.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NVlabs/motion-policy-networks/42ff07bb95bd75f7c7752bf4300eb53698ae099d/assets/readme3.gif -------------------------------------------------------------------------------- /assets/rviz_gui.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NVlabs/motion-policy-networks/42ff07bb95bd75f7c7752bf4300eb53698ae099d/assets/rviz_gui.gif -------------------------------------------------------------------------------- /config/franka_fabric_config.yaml: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | api_version: 1.0 24 | 25 | fabric_params: 26 | joint_inertia: 27 | inertia: 1.0 28 | joint_limit_avoidance: 29 | barrier_gain: 1.0 30 | barrier_offset: -0.1 31 | barrier_max_acceleration: 1e12 32 | soft_relu_gain: 1200.0 33 | soft_relu_sharpness: 20.0 34 | soft_relu_offset: 1.0 35 | metric_scalar: 1.5 36 | metric_exploder_offset: -0.1 37 | max_metric: 1e12 38 | damping_barrier_gain: 0. 39 | forcing_joint_limit_avoidance: 40 | barrier_gain: 0.016 41 | barrier_offset: -0.1 42 | barrier_max_acceleration: 1e12 43 | soft_relu_gain: 100.0 44 | soft_relu_sharpness: 20.0 45 | soft_relu_offset: 0.2 46 | metric_scalar: 0.16 47 | metric_exploder_offset: -0.1 48 | max_metric: 1e12 49 | damping_barrier_gain: 10. 50 | obstacle_avoidance: 51 | barrier_gain: 10. 52 | barrier_offset: 0. 53 | barrier_max_acceleration: 1e12 54 | soft_relu_gain: 800.0 55 | soft_relu_sharpness: 20.0 56 | soft_relu_offset: 0.25 57 | metric_scalar: 1. 58 | metric_exploder_offset: -0.1 59 | max_metric: 1e12 60 | damping_barrier_gain: 0. 61 | forcing_obstacle_avoidance: 62 | barrier_gain: 0.05 63 | barrier_offset: 0. 64 | barrier_max_acceleration: 1e12 65 | soft_relu_gain: 0. 66 | soft_relu_sharpness: 20.0 67 | soft_relu_offset: 0.2 68 | metric_scalar: 0.08 69 | metric_exploder_offset: 0. 70 | max_metric: 1e12 71 | damping_barrier_gain: 10. 72 | cspace_attractor: 73 | metric_scalar: 1.5 74 | conical_sharpness: 100.0 75 | conical_gain: 15. 76 | target_attractor: 77 | min_isotropic_mass: 75. 78 | max_isotropic_mass: 75. 79 | mass_sharpness: 20.0 80 | mass_switch_offset: 1. 81 | boosting_ratio_sharpness: 20.0 82 | conical_sharpness: 20.0 83 | conical_gain: 80. 84 | forcing_target_attractor: 85 | min_isotropic_mass: 1. 86 | max_isotropic_mass: 15. 87 | mass_sharpness: 20.0 88 | mass_switch_offset: 0.2 89 | boosting_ratio_sharpness: 20.0 90 | conical_sharpness: 20.0 91 | conical_gain: 80. 92 | damping: 80. 93 | damping_radius: 0.2 94 | 95 | damping_control_params: 96 | speed_control: 97 | speed_control_enabled: True 98 | target_execution_speed: 0.75 99 | target_execution_sharpness: 100.0 100 | target_execution_offset: 0.05 101 | speed_boosting: 102 | speed_boosting_enabled: True 103 | speed_boosting_gain: 10. 104 | speed_boosting_budget: 20. 105 | speed_boosting_sharpness: 20.0 106 | damping: 107 | damping_gain: 0. 108 | proximity_damping_enabled: True 109 | proximity_damping_gain: 15. 110 | proximity_damping_sharpness: 20.0 111 | proximity_damping_distance: 0.2 112 | joint_speed_limiting: 113 | cspace_speed_limit: 1.6999 114 | horizon_in_seconds: 0.02 115 | 116 | body_cylinders: 117 | - name: base 118 | pt1: [0, 0, 0.333] 119 | pt2: [0, 0, -0.3] 120 | radius: 0.15 121 | 122 | self_collision_spheres: 123 | - name: panda_link7 124 | radius: .1 125 | - name: panda_wrist_end_pt 126 | radius: .01 127 | - name: panda_hand 128 | radius: .01 129 | - name: panda_face_left 130 | radius: .01 131 | - name: panda_face_right 132 | radius: .01 133 | - name: panda_leftfingertip 134 | radius: .01 135 | - name: panda_rightfingertip 136 | radius: .01 137 | -------------------------------------------------------------------------------- /config/franka_robot_description.yaml: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | api_version: 1.0 24 | 25 | # Defines the generalized coordinates. Each generalized coordinate is assume 26 | # to have an entry in the URDF, except when otherwise specified below under 27 | # cspace_urdf_bridge 28 | cspace: 29 | - panda_joint1 30 | - panda_joint2 31 | - panda_joint3 32 | - panda_joint4 33 | - panda_joint5 34 | - panda_joint6 35 | - panda_joint7 36 | 37 | acceleration_limits: [15.0, 7.5, 10.0, 12.5, 15.0, 20.0, 20.0] 38 | jerk_limits: [7500.0, 3750.0, 5000.0, 6250.0, 7500.0, 10000.0, 10000.0] 39 | 40 | #root_link: world 41 | root_link: base_link 42 | subtree_root_link: panda_link0 43 | 44 | default_q: [ 45 | 0.00, -1.3, 0.00, -2.87, 0.00, 2.00, 0.75 46 | ] 47 | 48 | # Most dimensions of the cspace have a direct corresponding element 49 | # in the URDF. This list of rules defines how unspecified coordinates 50 | # should be extracted. 51 | cspace_to_urdf_rules: 52 | - {name: panda_finger_joint1, rule: fixed, value: 0.025} 53 | - {name: panda_finger_joint2, rule: fixed, value: 0.025} 54 | 55 | composite_task_spaces: [] 56 | 57 | collision_spheres: 58 | - panda_link0: 59 | - "center": [0.0, 0.0, 0.05] 60 | "radius": 0.08 61 | - panda_link1: 62 | - "center": [0.0, -0.08, 0.0] 63 | "radius": 0.06 64 | - "center": [0.0, -0.03, 0.0] 65 | "radius": 0.06 66 | - "center": [0.0, 0.0, -0.12] 67 | "radius": 0.06 68 | - "center": [0.0, 0.0, -0.17] 69 | "radius": 0.06 70 | - panda_link2: 71 | - "center": [0.0, 0.0, 0.03] 72 | "radius": 0.06 73 | - "center": [0.0, 0.0, 0.08] 74 | "radius": 0.06 75 | - "center": [0.0, -0.12, 0.0] 76 | "radius": 0.06 77 | - "center": [0.0, -0.17, 0.0] 78 | "radius": 0.06 79 | - panda_link3: 80 | - "center": [0.0, 0.0, -0.06] 81 | "radius": 0.05 82 | - "center": [0.0, 0.0, -0.1] 83 | "radius": 0.06 84 | - "center": [0.08, 0.06, 0.0] 85 | "radius": 0.055 86 | - "center": [0.08, 0.02, 0.0] 87 | "radius": 0.055 88 | - panda_link4: 89 | - "center": [0.0, 0.0, 0.02] 90 | "radius": 0.055 91 | - "center": [0.0, 0.0, 0.06] 92 | "radius": 0.055 93 | - "center": [-0.08, 0.095, 0.0] 94 | "radius": 0.06 95 | - "center": [-0.08, 0.06, 0.0] 96 | "radius": 0.055 97 | - panda_link5: 98 | - "center": [0.0, 0.055, 0.0] 99 | "radius": 0.06 100 | - "center": [0.0, 0.075, 0.0] 101 | "radius": 0.06 102 | - "center": [0.0, 0.000, -0.22] 103 | "radius": 0.06 104 | - "center": [0.0, 0.05, -0.18] 105 | "radius": 0.05 106 | - "center": [0.01, 0.08, -0.14] 107 | "radius": 0.025 108 | - "center": [0.01, 0.085, -0.11] 109 | "radius": 0.025 110 | - "center": [0.01, 0.09, -0.08] 111 | "radius": 0.025 112 | - "center": [0.01, 0.095, -0.05] 113 | "radius": 0.025 114 | - "center": [-0.01, 0.08, -0.14] 115 | "radius": 0.025 116 | - "center": [-0.01, 0.085, -0.11] 117 | "radius": 0.025 118 | - "center": [-0.01, 0.09, -0.08] 119 | "radius": 0.025 120 | - "center": [-0.01, 0.095, -0.05] 121 | "radius": 0.025 122 | - panda_link6: 123 | - "center": [0.0, 0.0, 0.0] 124 | "radius": 0.06 125 | - "center": [0.08, 0.03, 0.0] 126 | "radius": 0.06 127 | - "center": [0.08, -0.01, 0.0] 128 | "radius": 0.06 129 | - panda_link7: 130 | - "center": [0.0, 0.0, 0.07] 131 | "radius": 0.05 132 | - "center": [0.02, 0.04, 0.08] 133 | "radius": 0.025 134 | - "center": [0.04, 0.02, 0.08] 135 | "radius": 0.025 136 | - "center": [0.04, 0.06, 0.085] 137 | "radius": 0.02 138 | - "center": [0.06, 0.04, 0.085] 139 | "radius": 0.02 140 | - panda_hand: 141 | - "center": [0.0, -0.075, 0.01] 142 | "radius": 0.028 143 | - "center": [0.0, -0.045, 0.01] 144 | "radius": 0.028 145 | - "center": [0.0, -0.015, 0.01] 146 | "radius": 0.028 147 | - "center": [0.0, 0.015, 0.01] 148 | "radius": 0.028 149 | - "center": [0.0, 0.045, 0.01] 150 | "radius": 0.028 151 | - "center": [0.0, 0.075, 0.01] 152 | "radius": 0.028 153 | - "center": [0.0, -0.075, 0.03] 154 | "radius": 0.026 155 | - "center": [0.0, -0.045, 0.03] 156 | "radius": 0.026 157 | - "center": [0.0, -0.015, 0.03] 158 | "radius": 0.026 159 | - "center": [0.0, 0.015, 0.03] 160 | "radius": 0.026 161 | - "center": [0.0, 0.045, 0.03] 162 | "radius": 0.026 163 | - "center": [0.0, 0.075, 0.03] 164 | "radius": 0.026 165 | - "center": [0.0, -0.075, 0.05] 166 | "radius": 0.024 167 | - "center": [0.0, -0.045, 0.05] 168 | "radius": 0.024 169 | - "center": [0.0, -0.015, 0.05] 170 | "radius": 0.024 171 | - "center": [0.0, 0.015, 0.05] 172 | "radius": 0.024 173 | - "center": [0.0, 0.045, 0.05] 174 | "radius": 0.024 175 | - "center": [0.0, 0.075, 0.05] 176 | "radius": 0.024 177 | - panda_leftfingertip: 178 | - "center": [0.0, 0.0075, 0.0] 179 | "radius": 0.0108 180 | - panda_rightfingertip: 181 | - "center": [0.0, -0.0075, 0.0] 182 | "radius": 0.0108 183 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION, University of Washington. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | 24 | FROM nvcr.io/nvidia/isaac-sim:2022.1.0 25 | 26 | WORKDIR /root 27 | 28 | # There is an issue with the Isaac Sim docker where it can't install from Apt 29 | RUN echo "deb http://archive.ubuntu.com/ubuntu/ bionic main restricted" > /etc/apt/sources.list \ 30 | && echo "deb http://archive.ubuntu.com/ubuntu/ bionic-updates main restricted" >> /etc/apt/sources.list \ 31 | && echo "deb http://archive.ubuntu.com/ubuntu/ bionic universe" >> /etc/apt/sources.list \ 32 | && echo "deb http://archive.ubuntu.com/ubuntu/ bionic-updates universe" >> /etc/apt/sources.list \ 33 | && echo "deb http://archive.ubuntu.com/ubuntu/ bionic multiverse" >> /etc/apt/sources.list \ 34 | && echo "deb http://archive.ubuntu.com/ubuntu/ bionic-updates multiverse" >> /etc/apt/sources.list \ 35 | && echo "deb http://archive.ubuntu.com/ubuntu/ bionic-backports main restricted universe multiverse" >> /etc/apt/sources.list \ 36 | && echo "deb http://security.ubuntu.com/ubuntu bionic-security main restricted" >> /etc/apt/sources.list \ 37 | && echo "deb http://security.ubuntu.com/ubuntu bionic-security universe" >> /etc/apt/sources.list \ 38 | && echo "deb http://security.ubuntu.com/ubuntu bionic-security multiverse" >> /etc/apt/sources.list \ 39 | && rm ~/.pip/pip.conf 40 | 41 | # Section 1. Data Generation Tools 42 | # Install apt dependencies necessary for OMPL 43 | RUN apt update \ 44 | && apt install -y --no-install-recommends \ 45 | g++ \ 46 | cmake \ 47 | pkg-config \ 48 | libboost-serialization-dev \ 49 | libboost-filesystem-dev \ 50 | libboost-system-dev \ 51 | libboost-program-options-dev \ 52 | libboost-test-dev \ 53 | libeigen3-dev \ 54 | libode-dev \ 55 | wget \ 56 | libyaml-cpp-dev \ 57 | python3.7 \ 58 | python3.7-dev \ 59 | libpython3.7-dev \ 60 | libboost-python-dev \ 61 | libboost-numpy-dev \ 62 | git \ 63 | curl \ 64 | ninja-build \ 65 | && apt-get clean \ 66 | && rm -rf /var/lib/apt 67 | 68 | # RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.6 1 \ 69 | # && update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.7 2 \ 70 | # && update-alternatives --set python3 /usr/bin/python3.7 \ 71 | # && curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py \ 72 | # && python3.7 get-pip.py 73 | RUN rm /usr/bin/python3 && ln -s python3.7 /usr/bin/python3 \ 74 | && curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py \ 75 | && python3.7 get-pip.py 76 | 77 | # Clone repos necessary to build OMPL and build it against the Python 3.7 78 | RUN python3 -m pip install -vU https://github.com/CastXML/pygccxml/archive/develop.zip pyplusplus numpy \ 79 | && wget -q -O- https://data.kitware.com/api/v1/file/5e8b740d2660cbefba944189/download | tar zxf - -C ${HOME} \ 80 | && wget -O - https://github.com/ompl/ompl/archive/1.5.2.tar.gz | tar zxf - \ 81 | && export CXX=g++ \ 82 | && export MAKEFLAGS="-j `nproc`" \ 83 | && export PATH=${HOME}/castxml/bin:${PATH} \ 84 | && mkdir -p ompl-1.5.2/build/Release \ 85 | && cd ompl-1.5.2/build/Release \ 86 | && cmake ../.. -DPYTHON_EXEC=/usr/bin/python3.7 \ 87 | && make update_bindings \ 88 | && make \ 89 | && make install 90 | 91 | # Set the PYTHONPATH so that Python has access to Lula (and Geometric Fabrics) and OMPL 92 | # It appears that the apt installable version of python3.7 uses a Debian-only path, so 93 | # adding the standard site-packages back into the PYTHONPATH (because it appears to be 94 | # the default installation path for OMPL) 95 | ENV PYTHONPATH=/isaac-sim/exts/omni.isaac.lula/pip_prebundle/:/usr/lib/python3.7/site-packages 96 | 97 | # Section 2. Install Learning Tools (Cuda, Python Dependencies) 98 | # Install cuda 11.3 (this should be upgraded if Pytorch is also upgraded) 99 | RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-keyring_1.0-1_all.deb \ 100 | && dpkg -i cuda-keyring_1.0-1_all.deb \ 101 | && apt-get update && apt-get install -y --no-install-recommends \ 102 | cuda-cudart-11-3=11.3.109-1 \ 103 | cuda-compat-11-3 \ 104 | cuda-libraries-11-3=11.3.1-1 \ 105 | libnpp-11-3=11.3.3.95-1 \ 106 | cuda-nvtx-11-3=11.3.109-1 \ 107 | libcusparse-11-3=11.6.0.109-1 \ 108 | libcublas-11-3=11.5.1.109-1 \ 109 | libnccl2=2.9.9-1+cuda11.3 \ 110 | cuda-cudart-dev-11-3=11.3.109-1 \ 111 | cuda-command-line-tools-11-3=11.3.1-1 \ 112 | cuda-minimal-build-11-3=11.3.1-1 \ 113 | cuda-libraries-dev-11-3=11.3.1-1 \ 114 | cuda-nvml-dev-11-3=11.3.58-1 \ 115 | cuda-nvprof-11-3=11.3.111-1 \ 116 | libnpp-dev-11-3=11.3.3.95-1 \ 117 | libcusparse-dev-11-3=11.6.0.109-1 \ 118 | libcublas-dev-11-3=11.5.1.109-1 \ 119 | libnccl-dev=2.9.9-1+cuda11.3 \ 120 | && rm -rf /var/lib/apt/lists/* 121 | 122 | ENV PATH /usr/local/nvidia/bin:/usr/local/cuda/bin:${PATH} 123 | ENV LD_LIBRARY_PATH /usr/local/nvidia/lib:/usr/local/nvidia/lib64:${LD_LIBRARY_PATH} 124 | 125 | # Keep apt from auto upgrading the cublas and nccl packages. See https://gitlab.com/nvidia/container-images/cuda/-/issues/88 126 | RUN apt-mark hold libcublas-11-3 libnccl2 libcublas-dev-11-3 libnccl-dev 127 | 128 | 129 | # Install Python dependencies for Motion Policy Networks 130 | # For some reason, the ikfast dependency do not get properly installed if 131 | # ikfast and robofin are in the same pip install statement 132 | RUN python3 -m pip install --upgrade pip setuptools \ 133 | && python3 -m pip install \ 134 | torch --extra-index-url https://download.pytorch.org/whl/cu113 \ 135 | pytorch_lightning \ 136 | h5py \ 137 | trimesh \ 138 | wandb \ 139 | ipython \ 140 | pybullet \ 141 | pyquaternion \ 142 | geometrout==0.0.3.4 \ 143 | ikfast-pybind \ 144 | tqdm \ 145 | urchin==0.0.24 \ 146 | yourdfpy \ 147 | shapely \ 148 | rtree \ 149 | triangle \ 150 | termcolor \ 151 | meshcat \ 152 | git+https://github.com/fishbotics/pointnet2_ops.git@v3.2.0 \ 153 | && python3 -m pip install git+https://github.com/fishbotics/robofin.git@v0.0.1 git+https://github.com/fishbotics/atob@v0.0.1 154 | 155 | # Section 3. Install ROS dependencies for the interactive tutorial 156 | RUN ln -fs /usr/share/zoneinfo/America/Los_Angeles /etc/localtime \ 157 | && echo "America/Los_Angeles" > /etc/timezone \ 158 | && apt update \ 159 | && apt install -y --no-install-recommends \ 160 | curl \ 161 | gnupg \ 162 | && apt-get clean \ 163 | && rm -rf /var/lib/apt \ 164 | && sh -c 'echo "deb http://packages.ros.org/ros/ubuntu bionic main" > /etc/apt/sources.list.d/ros-latest.list' \ 165 | && curl -s https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc | apt-key add - \ 166 | && apt update && apt install -y --no-install-recommends \ 167 | ros-melodic-desktop-full \ 168 | python-rosdep \ 169 | python-rosinstall \ 170 | python-rosinstall-generator \ 171 | python-wstool \ 172 | ros-melodic-catkin \ 173 | python-catkin-tools \ 174 | ros-melodic-moveit \ 175 | ros-melodic-moveit-python \ 176 | ros-melodic-franka-ros \ 177 | ros-melodic-tf2-ros \ 178 | python3-rospkg-modules \ 179 | python3-catkin-pkg-modules \ 180 | && apt-get clean \ 181 | && rm -rf /var/lib/apt \ 182 | && echo "source /opt/ros/melodic/setup.bash" >> ~/.bashrc \ 183 | && rosdep init && rosdep update \ 184 | && python3 -m pip install rospkg netifaces 185 | 186 | ENV ROS_HOSTNAME=localhost 187 | ENV ROS_MASTER_URI=http://localhost:11311 188 | 189 | # Section 4. Final setup items 190 | ENTRYPOINT [] 191 | -------------------------------------------------------------------------------- /interactive_demo/mpinets_msgs/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION, University of Washington. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | cmake_minimum_required(VERSION 3.0.2) 24 | project(mpinets_msgs) 25 | 26 | ## Compile as C++11, supported in ROS Kinetic and newer 27 | # add_compile_options(-std=c++11) 28 | 29 | ## Find catkin macros and libraries 30 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 31 | ## is used, also find other catkin packages 32 | find_package(catkin REQUIRED COMPONENTS 33 | geometry_msgs 34 | rospy 35 | message_generation 36 | std_msgs 37 | sensor_msgs 38 | ) 39 | 40 | ## System dependencies are found with CMake's conventions 41 | # find_package(Boost REQUIRED COMPONENTS system) 42 | 43 | 44 | ## Uncomment this if the package has a setup.py. This macro ensures 45 | ## modules and global scripts declared therein get installed 46 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html 47 | # catkin_python_setup() 48 | 49 | ################################################ 50 | ## Declare ROS messages, services and actions ## 51 | ################################################ 52 | 53 | ## To declare and build messages, services or actions from within this 54 | ## package, follow these steps: 55 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in 56 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). 57 | ## * In the file package.xml: 58 | ## * add a build_depend tag for "message_generation" 59 | ## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET 60 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in 61 | ## but can be declared for certainty nonetheless: 62 | ## * add a exec_depend tag for "message_runtime" 63 | ## * In this file (CMakeLists.txt): 64 | ## * add "message_generation" and every package in MSG_DEP_SET to 65 | ## find_package(catkin REQUIRED COMPONENTS ...) 66 | ## * add "message_runtime" and every package in MSG_DEP_SET to 67 | ## catkin_package(CATKIN_DEPENDS ...) 68 | ## * uncomment the add_*_files sections below as needed 69 | ## and list every .msg/.srv/.action file to be processed 70 | ## * uncomment the generate_messages entry below 71 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 72 | 73 | ## Generate messages in the 'msg' folder 74 | add_message_files( 75 | FILES 76 | PlanningProblem.msg 77 | ) 78 | 79 | ## Generate services in the 'srv' folder 80 | # add_service_files( 81 | # FILES 82 | # Service1.srv 83 | # Service2.srv 84 | # ) 85 | 86 | ## Generate actions in the 'action' folder 87 | # add_action_files( 88 | # FILES 89 | # Action1.action 90 | # Action2.action 91 | # ) 92 | 93 | ## Generate added messages and services with any dependencies listed here 94 | generate_messages( 95 | DEPENDENCIES 96 | geometry_msgs 97 | std_msgs 98 | sensor_msgs 99 | ) 100 | 101 | ################################################ 102 | ## Declare ROS dynamic reconfigure parameters ## 103 | ################################################ 104 | 105 | ## To declare and build dynamic reconfigure parameters within this 106 | ## package, follow these steps: 107 | ## * In the file package.xml: 108 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" 109 | ## * In this file (CMakeLists.txt): 110 | ## * add "dynamic_reconfigure" to 111 | ## find_package(catkin REQUIRED COMPONENTS ...) 112 | ## * uncomment the "generate_dynamic_reconfigure_options" section below 113 | ## and list every .cfg file to be processed 114 | 115 | ## Generate dynamic reconfigure parameters in the 'cfg' folder 116 | # generate_dynamic_reconfigure_options( 117 | # cfg/DynReconf1.cfg 118 | # cfg/DynReconf2.cfg 119 | # ) 120 | 121 | ################################### 122 | ## catkin specific configuration ## 123 | ################################### 124 | ## The catkin_package macro generates cmake config files for your package 125 | ## Declare things to be passed to dependent projects 126 | ## INCLUDE_DIRS: uncomment this if your package contains header files 127 | ## LIBRARIES: libraries you create in this project that dependent projects also need 128 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 129 | ## DEPENDS: system dependencies of this project that dependent projects also need 130 | catkin_package( 131 | CATKIN_DEPENDS message_runtime 132 | ) 133 | 134 | catkin_package( 135 | # INCLUDE_DIRS include 136 | # LIBRARIES mpinets_msgs 137 | # CATKIN_DEPENDS geometry_msgs std_msgs sensor_msgs 138 | # DEPENDS system_lib 139 | ) 140 | 141 | ########### 142 | ## Build ## 143 | ########### 144 | 145 | ## Specify additional locations of header files 146 | ## Your package locations should be listed before other locations 147 | include_directories( 148 | include 149 | ${catkin_INCLUDE_DIRS} 150 | ) 151 | 152 | ## Declare a C++ library 153 | # add_library(${PROJECT_NAME} 154 | # src/${PROJECT_NAME}/mpinets_msgs.cpp 155 | # ) 156 | 157 | ## Add cmake target dependencies of the library 158 | ## as an example, code may need to be generated before libraries 159 | ## either from message generation or dynamic reconfigure 160 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 161 | 162 | ## Declare a C++ executable 163 | ## With catkin_make all packages are built within a single CMake context 164 | ## The recommended prefix ensures that target names across packages don't collide 165 | # add_executable(${PROJECT_NAME}_node src/mpinets_msgs_node.cpp) 166 | 167 | ## Rename C++ executable without prefix 168 | ## The above recommended prefix causes long target names, the following renames the 169 | ## target back to the shorter version for ease of user use 170 | ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" 171 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") 172 | 173 | ## Add cmake target dependencies of the executable 174 | ## same as for the library above 175 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 176 | 177 | ## Specify libraries to link a library or executable target against 178 | # target_link_libraries(${PROJECT_NAME}_node 179 | # ${catkin_LIBRARIES} 180 | # ) 181 | 182 | ############# 183 | ## Install ## 184 | ############# 185 | 186 | # all install targets should use catkin DESTINATION variables 187 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 188 | 189 | ## Mark executable scripts (Python etc.) for installation 190 | ## in contrast to setup.py, you can choose the destination 191 | # catkin_install_python(PROGRAMS 192 | # scripts/my_python_script 193 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 194 | # ) 195 | 196 | ## Mark executables for installation 197 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html 198 | # install(TARGETS ${PROJECT_NAME}_node 199 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 200 | # ) 201 | 202 | ## Mark libraries for installation 203 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html 204 | # install(TARGETS ${PROJECT_NAME} 205 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 206 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 207 | # RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} 208 | # ) 209 | 210 | ## Mark cpp header files for installation 211 | # install(DIRECTORY include/${PROJECT_NAME}/ 212 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 213 | # FILES_MATCHING PATTERN "*.h" 214 | # PATTERN ".svn" EXCLUDE 215 | # ) 216 | 217 | ## Mark other files for installation (e.g. launch and bag files, etc.) 218 | # install(FILES 219 | # # myfile1 220 | # # myfile2 221 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 222 | # ) 223 | 224 | ############# 225 | ## Testing ## 226 | ############# 227 | 228 | ## Add gtest based cpp test target and link libraries 229 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_mpinets_msgs.cpp) 230 | # if(TARGET ${PROJECT_NAME}-test) 231 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) 232 | # endif() 233 | 234 | ## Add folders to be run by python nosetests 235 | # catkin_add_nosetests(test) 236 | -------------------------------------------------------------------------------- /interactive_demo/mpinets_msgs/msg/PlanningProblem.msg: -------------------------------------------------------------------------------- 1 | std_msgs/Header header 2 | string[] joint_names 3 | geometry_msgs/TransformStamped target 4 | sensor_msgs/JointState q0 5 | -------------------------------------------------------------------------------- /interactive_demo/mpinets_msgs/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | mpinets_msgs 4 | Someone 5 | 0.0.0 6 | The mpinets_msgs package 7 | MIT 8 | catkin 9 | geometry_msgs 10 | rospy 11 | std_msgs 12 | message_generation 13 | sensor_msgs 14 | rospy 15 | geometry_msgs 16 | std_msgs 17 | sensor_msgs 18 | geometry_msgs 19 | std_msgs 20 | sensor_msgs 21 | rospy 22 | message_runtime 23 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /interactive_demo/mpinets_ros/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | cmake_minimum_required(VERSION 3.0.2) 24 | project(mpinets_ros) 25 | 26 | ## Compile as C++11, supported in ROS Kinetic and newer 27 | # add_compile_options(-std=c++11) 28 | 29 | ## Find catkin macros and libraries 30 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 31 | ## is used, also find other catkin packages 32 | find_package(catkin REQUIRED COMPONENTS 33 | interactive_markers 34 | roscpp 35 | rospy 36 | tf2 37 | visualization_msgs 38 | ) 39 | 40 | ## System dependencies are found with CMake's conventions 41 | # find_package(Boost REQUIRED COMPONENTS system) 42 | 43 | 44 | ## Uncomment this if the package has a setup.py. This macro ensures 45 | ## modules and global scripts declared therein get installed 46 | ## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html 47 | catkin_python_setup() 48 | 49 | ################################################ 50 | ## Declare ROS messages, services and actions ## 51 | ################################################ 52 | 53 | ## To declare and build messages, services or actions from within this 54 | ## package, follow these steps: 55 | ## * Let MSG_DEP_SET be the set of packages whose message types you use in 56 | ## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...). 57 | ## * In the file package.xml: 58 | ## * add a build_depend tag for "message_generation" 59 | ## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET 60 | ## * If MSG_DEP_SET isn't empty the following dependency has been pulled in 61 | ## but can be declared for certainty nonetheless: 62 | ## * add a exec_depend tag for "message_runtime" 63 | ## * In this file (CMakeLists.txt): 64 | ## * add "message_generation" and every package in MSG_DEP_SET to 65 | ## find_package(catkin REQUIRED COMPONENTS ...) 66 | ## * add "message_runtime" and every package in MSG_DEP_SET to 67 | ## catkin_package(CATKIN_DEPENDS ...) 68 | ## * uncomment the add_*_files sections below as needed 69 | ## and list every .msg/.srv/.action file to be processed 70 | ## * uncomment the generate_messages entry below 71 | ## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...) 72 | 73 | ## Generate messages in the 'msg' folder 74 | # add_message_files( 75 | # FILES 76 | # Message1.msg 77 | # Message2.msg 78 | # ) 79 | 80 | ## Generate services in the 'srv' folder 81 | # add_service_files( 82 | # FILES 83 | # Service1.srv 84 | # Service2.srv 85 | # ) 86 | 87 | ## Generate actions in the 'action' folder 88 | # add_action_files( 89 | # FILES 90 | # Action1.action 91 | # Action2.action 92 | # ) 93 | 94 | ## Generate added messages and services with any dependencies listed here 95 | # generate_messages( 96 | # DEPENDENCIES 97 | # std_msgs # Or other packages containing msgs 98 | # ) 99 | 100 | ################################################ 101 | ## Declare ROS dynamic reconfigure parameters ## 102 | ################################################ 103 | 104 | ## To declare and build dynamic reconfigure parameters within this 105 | ## package, follow these steps: 106 | ## * In the file package.xml: 107 | ## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" 108 | ## * In this file (CMakeLists.txt): 109 | ## * add "dynamic_reconfigure" to 110 | ## find_package(catkin REQUIRED COMPONENTS ...) 111 | ## * uncomment the "generate_dynamic_reconfigure_options" section below 112 | ## and list every .cfg file to be processed 113 | 114 | ## Generate dynamic reconfigure parameters in the 'cfg' folder 115 | # generate_dynamic_reconfigure_options( 116 | # cfg/DynReconf1.cfg 117 | # cfg/DynReconf2.cfg 118 | # ) 119 | 120 | ################################### 121 | ## catkin specific configuration ## 122 | ################################### 123 | ## The catkin_package macro generates cmake config files for your package 124 | ## Declare things to be passed to dependent projects 125 | ## INCLUDE_DIRS: uncomment this if your package contains header files 126 | ## LIBRARIES: libraries you create in this project that dependent projects also need 127 | ## CATKIN_DEPENDS: catkin_packages dependent projects also need 128 | ## DEPENDS: system dependencies of this project that dependent projects also need 129 | catkin_package( 130 | # INCLUDE_DIRS include 131 | # LIBRARIES mpinets_ros 132 | # CATKIN_DEPENDS other_catkin_pkg 133 | # DEPENDS system_lib 134 | ) 135 | 136 | ########### 137 | ## Build ## 138 | ########### 139 | 140 | ## Specify additional locations of header files 141 | ## Your package locations should be listed before other locations 142 | include_directories( 143 | # include 144 | ${catkin_INCLUDE_DIRS} 145 | ) 146 | 147 | ## Declare a C++ library 148 | # add_library(${PROJECT_NAME} 149 | # src/${PROJECT_NAME}/mpinets_ros.cpp 150 | # ) 151 | 152 | ## Add cmake target dependencies of the library 153 | ## as an example, code may need to be generated before libraries 154 | ## either from message generation or dynamic reconfigure 155 | # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 156 | 157 | ## Declare a C++ executable 158 | ## With catkin_make all packages are built within a single CMake context 159 | ## The recommended prefix ensures that target names across packages don't collide 160 | # add_executable(${PROJECT_NAME}_node src/mpinets_ros_node.cpp) 161 | 162 | ## Rename C++ executable without prefix 163 | ## The above recommended prefix causes long target names, the following renames the 164 | ## target back to the shorter version for ease of user use 165 | ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" 166 | # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") 167 | 168 | ## Add cmake target dependencies of the executable 169 | ## same as for the library above 170 | # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) 171 | 172 | ## Specify libraries to link a library or executable target against 173 | # target_link_libraries(${PROJECT_NAME}_node 174 | # ${catkin_LIBRARIES} 175 | # ) 176 | 177 | ############# 178 | ## Install ## 179 | ############# 180 | 181 | # all install targets should use catkin DESTINATION variables 182 | # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html 183 | 184 | ## Mark executable scripts (Python etc.) for installation 185 | ## in contrast to setup.py, you can choose the destination 186 | # catkin_install_python(PROGRAMS 187 | # scripts/my_python_script 188 | # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 189 | # ) 190 | catkin_install_python(PROGRAMS 191 | nodes/planning_node.py 192 | DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 193 | ) 194 | 195 | catkin_install_python(PROGRAMS 196 | nodes/interaction_node.py 197 | DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 198 | ) 199 | 200 | ## Mark executables for installation 201 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html 202 | # install(TARGETS ${PROJECT_NAME}_node 203 | # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 204 | # ) 205 | 206 | ## Mark libraries for installation 207 | ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html 208 | # install(TARGETS ${PROJECT_NAME} 209 | # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 210 | # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} 211 | # RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} 212 | # ) 213 | 214 | ## Mark cpp header files for installation 215 | # install(DIRECTORY include/${PROJECT_NAME}/ 216 | # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} 217 | # FILES_MATCHING PATTERN "*.h" 218 | # PATTERN ".svn" EXCLUDE 219 | # ) 220 | 221 | ## Mark other files for installation (e.g. launch and bag files, etc.) 222 | # install(FILES 223 | # # myfile1 224 | # # myfile2 225 | # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} 226 | # ) 227 | 228 | ############# 229 | ## Testing ## 230 | ############# 231 | 232 | ## Add gtest based cpp test target and link libraries 233 | # catkin_add_gtest(${PROJECT_NAME}-test test/test_mpinets_ros.cpp) 234 | # if(TARGET ${PROJECT_NAME}-test) 235 | # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) 236 | # endif() 237 | 238 | ## Add folders to be run by python nosetests 239 | # catkin_add_nosetests(test) 240 | -------------------------------------------------------------------------------- /interactive_demo/mpinets_ros/launch/visualize.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /interactive_demo/mpinets_ros/meshes/LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2017 Franka Emika GmbH 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | 15 | Apache License 16 | Version 2.0, January 2004 17 | http://www.apache.org/licenses/ 18 | 19 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 20 | 21 | 1. Definitions. 22 | 23 | "License" shall mean the terms and conditions for use, reproduction, 24 | and distribution as defined by Sections 1 through 9 of this document. 25 | 26 | "Licensor" shall mean the copyright owner or entity authorized by 27 | the copyright owner that is granting the License. 28 | 29 | "Legal Entity" shall mean the union of the acting entity and all 30 | other entities that control, are controlled by, or are under common 31 | control with that entity. For the purposes of this definition, 32 | "control" means (i) the power, direct or indirect, to cause the 33 | direction or management of such entity, whether by contract or 34 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 35 | outstanding shares, or (iii) beneficial ownership of such entity. 36 | 37 | "You" (or "Your") shall mean an individual or Legal Entity 38 | exercising permissions granted by this License. 39 | 40 | "Source" form shall mean the preferred form for making modifications, 41 | including but not limited to software source code, documentation 42 | source, and configuration files. 43 | 44 | "Object" form shall mean any form resulting from mechanical 45 | transformation or translation of a Source form, including but 46 | not limited to compiled object code, generated documentation, 47 | and conversions to other media types. 48 | 49 | "Work" shall mean the work of authorship, whether in Source or 50 | Object form, made available under the License, as indicated by a 51 | copyright notice that is included in or attached to the work 52 | (an example is provided in the Appendix below). 53 | 54 | "Derivative Works" shall mean any work, whether in Source or Object 55 | form, that is based on (or derived from) the Work and for which the 56 | editorial revisions, annotations, elaborations, or other modifications 57 | represent, as a whole, an original work of authorship. For the purposes 58 | of this License, Derivative Works shall not include works that remain 59 | separable from, or merely link (or bind by name) to the interfaces of, 60 | the Work and Derivative Works thereof. 61 | 62 | "Contribution" shall mean any work of authorship, including 63 | the original version of the Work and any modifications or additions 64 | to that Work or Derivative Works thereof, that is intentionally 65 | submitted to Licensor for inclusion in the Work by the copyright owner 66 | or by an individual or Legal Entity authorized to submit on behalf of 67 | the copyright owner. For the purposes of this definition, "submitted" 68 | means any form of electronic, verbal, or written communication sent 69 | to the Licensor or its representatives, including but not limited to 70 | communication on electronic mailing lists, source code control systems, 71 | and issue tracking systems that are managed by, or on behalf of, the 72 | Licensor for the purpose of discussing and improving the Work, but 73 | excluding communication that is conspicuously marked or otherwise 74 | designated in writing by the copyright owner as "Not a Contribution." 75 | 76 | "Contributor" shall mean Licensor and any individual or Legal Entity 77 | on behalf of whom a Contribution has been received by Licensor and 78 | subsequently incorporated within the Work. 79 | 80 | 2. Grant of Copyright License. Subject to the terms and conditions of 81 | this License, each Contributor hereby grants to You a perpetual, 82 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 83 | copyright license to reproduce, prepare Derivative Works of, 84 | publicly display, publicly perform, sublicense, and distribute the 85 | Work and such Derivative Works in Source or Object form. 86 | 87 | 3. Grant of Patent License. Subject to the terms and conditions of 88 | this License, each Contributor hereby grants to You a perpetual, 89 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 90 | (except as stated in this section) patent license to make, have made, 91 | use, offer to sell, sell, import, and otherwise transfer the Work, 92 | where such license applies only to those patent claims licensable 93 | by such Contributor that are necessarily infringed by their 94 | Contribution(s) alone or by combination of their Contribution(s) 95 | with the Work to which such Contribution(s) was submitted. If You 96 | institute patent litigation against any entity (including a 97 | cross-claim or counterclaim in a lawsuit) alleging that the Work 98 | or a Contribution incorporated within the Work constitutes direct 99 | or contributory patent infringement, then any patent licenses 100 | granted to You under this License for that Work shall terminate 101 | as of the date such litigation is filed. 102 | 103 | 4. Redistribution. You may reproduce and distribute copies of the 104 | Work or Derivative Works thereof in any medium, with or without 105 | modifications, and in Source or Object form, provided that You 106 | meet the following conditions: 107 | 108 | (a) You must give any other recipients of the Work or 109 | Derivative Works a copy of this License; and 110 | 111 | (b) You must cause any modified files to carry prominent notices 112 | stating that You changed the files; and 113 | 114 | (c) You must retain, in the Source form of any Derivative Works 115 | that You distribute, all copyright, patent, trademark, and 116 | attribution notices from the Source form of the Work, 117 | excluding those notices that do not pertain to any part of 118 | the Derivative Works; and 119 | 120 | (d) If the Work includes a "NOTICE" text file as part of its 121 | distribution, then any Derivative Works that You distribute must 122 | include a readable copy of the attribution notices contained 123 | within such NOTICE file, excluding those notices that do not 124 | pertain to any part of the Derivative Works, in at least one 125 | of the following places: within a NOTICE text file distributed 126 | as part of the Derivative Works; within the Source form or 127 | documentation, if provided along with the Derivative Works; or, 128 | within a display generated by the Derivative Works, if and 129 | wherever such third-party notices normally appear. The contents 130 | of the NOTICE file are for informational purposes only and 131 | do not modify the License. You may add Your own attribution 132 | notices within Derivative Works that You distribute, alongside 133 | or as an addendum to the NOTICE text from the Work, provided 134 | that such additional attribution notices cannot be construed 135 | as modifying the License. 136 | 137 | You may add Your own copyright statement to Your modifications and 138 | may provide additional or different license terms and conditions 139 | for use, reproduction, or distribution of Your modifications, or 140 | for any such Derivative Works as a whole, provided Your use, 141 | reproduction, and distribution of the Work otherwise complies with 142 | the conditions stated in this License. 143 | 144 | 5. Submission of Contributions. Unless You explicitly state otherwise, 145 | any Contribution intentionally submitted for inclusion in the Work 146 | by You to the Licensor shall be under the terms and conditions of 147 | this License, without any additional terms or conditions. 148 | Notwithstanding the above, nothing herein shall supersede or modify 149 | the terms of any separate license agreement you may have executed 150 | with Licensor regarding such Contributions. 151 | 152 | 6. Trademarks. This License does not grant permission to use the trade 153 | names, trademarks, service marks, or product names of the Licensor, 154 | except as required for reasonable and customary use in describing the 155 | origin of the Work and reproducing the content of the NOTICE file. 156 | 157 | 7. Disclaimer of Warranty. Unless required by applicable law or 158 | agreed to in writing, Licensor provides the Work (and each 159 | Contributor provides its Contributions) on an "AS IS" BASIS, 160 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 161 | implied, including, without limitation, any warranties or conditions 162 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 163 | PARTICULAR PURPOSE. You are solely responsible for determining the 164 | appropriateness of using or redistributing the Work and assume any 165 | risks associated with Your exercise of permissions under this License. 166 | 167 | 8. Limitation of Liability. In no event and under no legal theory, 168 | whether in tort (including negligence), contract, or otherwise, 169 | unless required by applicable law (such as deliberate and grossly 170 | negligent acts) or agreed to in writing, shall any Contributor be 171 | liable to You for damages, including any direct, indirect, special, 172 | incidental, or consequential damages of any character arising as a 173 | result of this License or out of the use or inability to use the 174 | Work (including but not limited to damages for loss of goodwill, 175 | work stoppage, computer failure or malfunction, or any and all 176 | other commercial damages or losses), even if such Contributor 177 | has been advised of the possibility of such damages. 178 | 179 | 9. Accepting Warranty or Additional Liability. While redistributing 180 | the Work or Derivative Works thereof, You may choose to offer, 181 | and charge a fee for, acceptance of support, warranty, indemnity, 182 | or other liability obligations and/or rights consistent with this 183 | License. However, in accepting such obligations, You may act only 184 | on Your own behalf and on Your sole responsibility, not on behalf 185 | of any other Contributor, and only if You agree to indemnify, 186 | defend, and hold each Contributor harmless for any liability 187 | incurred by, or claims asserted against, such Contributor by reason 188 | of your accepting any such warranty or additional liability. 189 | 190 | END OF TERMS AND CONDITIONS 191 | 192 | Not a contribution 193 | Changes made by NVIDIA CORPORATION & AFFILIATES or University of Washington enabling 194 | visualization of the gripper mesh or otherwise documented as NVIDIA-proprietary 195 | are not a contribution and subject to the following terms and conditions: 196 | 197 | MIT License 198 | 199 | Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 200 | 201 | Permission is hereby granted, free of charge, to any person obtaining a 202 | copy of this software and associated documentation files (the "Software"), 203 | to deal in the Software without restriction, including without limitation 204 | the rights to use, copy, modify, merge, publish, distribute, sublicense, 205 | and/or sell copies of the Software, and to permit persons to whom the 206 | Software is furnished to do so, subject to the following conditions: 207 | 208 | The above copyright notice and this permission notice shall be included in 209 | all copies or substantial portions of the Software. 210 | 211 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 212 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 213 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 214 | THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 215 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 216 | FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 217 | DEALINGS IN THE SOFTWARE. 218 | -------------------------------------------------------------------------------- /interactive_demo/mpinets_ros/meshes/half_open_gripper.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NVlabs/motion-policy-networks/42ff07bb95bd75f7c7752bf4300eb53698ae099d/interactive_demo/mpinets_ros/meshes/half_open_gripper.stl -------------------------------------------------------------------------------- /interactive_demo/mpinets_ros/nodes/interaction_node.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # MIT License 4 | # 5 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 6 | # 7 | # Permission is hereby granted, free of charge, to any person obtaining a 8 | # copy of this software and associated documentation files (the "Software"), 9 | # to deal in the Software without restriction, including without limitation 10 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 | # and/or sell copies of the Software, and to permit persons to whom the 12 | # Software is furnished to do so, subject to the following conditions: 13 | # 14 | # The above copyright notice and this permission notice shall be included in 15 | # all copies or substantial portions of the Software. 16 | # 17 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 22 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | # DEALINGS IN THE SOFTWARE. 24 | 25 | import rospy 26 | from std_msgs.msg import String 27 | from geometry_msgs.msg import TransformStamped 28 | from interactive_markers.interactive_marker_server import InteractiveMarkerServer 29 | from visualization_msgs.msg import ( 30 | Marker, 31 | InteractiveMarkerFeedback, 32 | InteractiveMarker, 33 | InteractiveMarkerControl, 34 | ) 35 | from sensor_msgs.msg import PointCloud2, PointField 36 | from mpinets_msgs.msg import PlanningProblem 37 | import ctypes 38 | from sensor_msgs import point_cloud2 39 | from std_msgs.msg import Header 40 | import struct 41 | import numpy as np 42 | import time 43 | import tf2_ros 44 | import tf_conversions 45 | from copy import deepcopy 46 | 47 | # Maybe these can be removed if we have a different way of sending commands 48 | import sys 49 | from trajectory_msgs.msg import JointTrajectory 50 | from sensor_msgs.msg import JointState 51 | 52 | 53 | # The neutral configuration at which to start the node 54 | NEUTRAL_CONFIG = np.array( 55 | [ 56 | -0.01779206, 57 | -0.76012354, 58 | 0.01978261, 59 | -2.34205014, 60 | 0.02984053, 61 | 1.54119353, 62 | 0.75344866, 63 | 0.025, 64 | 0.025, 65 | ] 66 | ) 67 | 68 | # A neutral starting target (matches the end effector of the neutral start) 69 | NEUTRAL_TARGET_XYZ = [0.30649957127333377, 0.007287351995245575, 0.4866376674460814] 70 | NEUTRAL_TARGET_XYZW = [ 71 | -0.014241942613215233, 72 | 0.9996573431202568, 73 | 0.008466023255748396, 74 | -0.02026548461261383, 75 | ] 76 | 77 | # The joint names. These would ideally be read from the URDF, but they are hard-coded 78 | # here out of laziness since this is meant to be a demo 79 | JOINT_NAMES = [ 80 | "panda_joint1", 81 | "panda_joint2", 82 | "panda_joint3", 83 | "panda_joint4", 84 | "panda_joint5", 85 | "panda_joint6", 86 | "panda_joint7", 87 | "panda_finger_joint1", 88 | "panda_finger_joint2", 89 | ] 90 | 91 | 92 | class MPiNetsInterface: 93 | def __init__(self): 94 | """ 95 | Initialize the system state, the interactive components, and the subscribers/publishers 96 | """ 97 | rospy.init_node("mpinets_interface") 98 | self.server = InteractiveMarkerServer("mpinets_controls", "") 99 | self.br = tf2_ros.TransformBroadcaster() 100 | # rospy.Timer(rospy.Duration.from_sec(0.01), self.frame_callback) 101 | # Not sure why this is here, but ok 102 | 103 | self.make_execute_button_marker([1.0, -1.0, 0.1], 0.2) 104 | self.make_plan_button_marker([0.7, -1.0, 0.1], 0.2) 105 | self.make_reset_button_marker([0.4, -1.0, 0.1], 0.2) 106 | 107 | self.target_pose = None 108 | self.visualize_plan = False 109 | self.current_plan = [] 110 | self.target_xyz = NEUTRAL_TARGET_XYZ 111 | self.target_xyzw = NEUTRAL_TARGET_XYZW 112 | self.current_joint_state = NEUTRAL_CONFIG 113 | self.make_target_marker( 114 | self.target_xyz, 115 | self.target_xyzw, 116 | ) 117 | self.server.applyChanges() 118 | self.planning_problem_publisher = rospy.Publisher( 119 | "/mpinets/planning_problem", PlanningProblem, queue_size=1 120 | ) 121 | self.joint_states_publisher = rospy.Publisher( 122 | "/mpinets/joint_states", 123 | JointState, 124 | queue_size=1, 125 | ) 126 | 127 | self.planned_joint_states_publisher = rospy.Publisher( 128 | "/mpinets/planned_joint_states", 129 | JointState, 130 | queue_size=1, 131 | ) 132 | 133 | self.planning_problem_subscriber = rospy.Subscriber( 134 | "/mpinets/plan", 135 | JointTrajectory, 136 | self.planning_callback, 137 | queue_size=5, 138 | ) 139 | time.sleep(1) 140 | self.reset_franka() 141 | 142 | def reset_franka(self): 143 | """ 144 | Resets the robot to the neutral pose and resets the current plan 145 | """ 146 | msg = JointState() 147 | msg.header.stamp = rospy.Time.now() 148 | msg.header.frame_id = "panda_link0" 149 | msg.position = NEUTRAL_CONFIG 150 | msg.name = JOINT_NAMES 151 | self.visualize_plan = False 152 | self.joint_states_publisher.publish(msg) 153 | self.planned_joint_states_publisher.publish(msg) 154 | self.current_joint_state = NEUTRAL_CONFIG 155 | self.current_plan = [] 156 | 157 | @staticmethod 158 | def make_box(side_length, color): 159 | """ 160 | Makes a colored box that can be viewed in Rviz (will be used as buttons) 161 | """ 162 | marker = Marker() 163 | marker.type = Marker.CUBE 164 | 165 | marker.scale.x = side_length 166 | marker.scale.y = side_length 167 | marker.scale.z = side_length 168 | ( 169 | marker.color.r, 170 | marker.color.g, 171 | marker.color.b, 172 | marker.color.a, 173 | ) = color 174 | return marker 175 | 176 | @staticmethod 177 | def make_gripper(msg): 178 | """ 179 | Creates a floating gripper that can be viewed in Rviz (will be used as the target) 180 | """ 181 | marker = Marker() 182 | marker.type = Marker.MESH_RESOURCE 183 | marker.mesh_resource = "package://mpinets_ros/meshes/half_open_gripper.stl" 184 | 185 | marker.scale.x = 1.0 186 | marker.scale.y = 1.0 187 | marker.scale.z = 1.0 188 | marker.color.r = 1.0 189 | marker.color.g = 1.0 190 | marker.color.b = 1.0 191 | marker.color.a = 1.0 192 | 193 | return marker 194 | 195 | def make_reset_button_marker(self, xyz, side_length): 196 | """ 197 | Creates a red cube that resets the system when you click on it 198 | 199 | :param xyz List[float]: The center of the button cube 200 | :param side_length float: The side length for the cube 201 | """ 202 | int_marker = InteractiveMarker() 203 | int_marker.header.frame_id = "panda_link0" 204 | ( 205 | int_marker.pose.position.x, 206 | int_marker.pose.position.y, 207 | int_marker.pose.position.z, 208 | ) = xyz 209 | int_marker.scale = 0.5 210 | 211 | int_marker.name = "reset_button" 212 | int_marker.description = "Reset" 213 | 214 | control = InteractiveMarkerControl() 215 | 216 | control.interaction_mode = InteractiveMarkerControl.BUTTON 217 | control.name = "reset_button_control" 218 | 219 | marker = self.make_box(side_length, [204.0 / 255, 50.0 / 255, 50.0 / 255, 1.0]) 220 | control.markers.append(marker) 221 | control.always_visible = True 222 | int_marker.controls.append(control) 223 | # from IPython import embed 224 | self.server.insert(int_marker) 225 | self.server.setCallback(int_marker.name, self.reset_button_callback) 226 | 227 | def make_plan_button_marker(self, xyz, side_length): 228 | """ 229 | Create a yellow cube that calls the planner and visualizes the result when you click on it 230 | 231 | :param xyz List[float]: The center of the button cube 232 | :param side_length float: The side length for the cube 233 | """ 234 | int_marker = InteractiveMarker() 235 | int_marker.header.frame_id = "panda_link0" 236 | ( 237 | int_marker.pose.position.x, 238 | int_marker.pose.position.y, 239 | int_marker.pose.position.z, 240 | ) = xyz 241 | int_marker.scale = 0.5 242 | 243 | int_marker.name = "plan_button" 244 | int_marker.description = "Plan" 245 | 246 | control = InteractiveMarkerControl() 247 | 248 | control.interaction_mode = InteractiveMarkerControl.BUTTON 249 | control.name = "plan_button_control" 250 | 251 | marker = self.make_box(side_length, [231.0 / 255, 180.0 / 255, 22.0 / 255, 1.0]) 252 | control.markers.append(marker) 253 | control.always_visible = True 254 | int_marker.controls.append(control) 255 | # from IPython import embed 256 | self.server.insert(int_marker) 257 | self.server.setCallback(int_marker.name, self.plan_button_callback) 258 | 259 | def make_execute_button_marker(self, xyz, side_length): 260 | """ 261 | Create a green cube button that executes on the robot when you click on it 262 | 263 | :param xyz List[float]: The center of the button cube 264 | :param side_length float: The side length for the cube 265 | """ 266 | int_marker = InteractiveMarker() 267 | int_marker.header.frame_id = "panda_link0" 268 | ( 269 | int_marker.pose.position.x, 270 | int_marker.pose.position.y, 271 | int_marker.pose.position.z, 272 | ) = xyz 273 | int_marker.scale = 0.5 274 | 275 | int_marker.name = "execute_button" 276 | int_marker.description = "Execute" 277 | 278 | control = InteractiveMarkerControl() 279 | 280 | control.interaction_mode = InteractiveMarkerControl.BUTTON 281 | control.name = "execute_button_control" 282 | 283 | marker = self.make_box(side_length, [45.0 / 255, 201.0 / 255, 55.0 / 255, 1.0]) 284 | control.markers.append(marker) 285 | control.always_visible = True 286 | int_marker.controls.append(control) 287 | # from IPython import embed 288 | self.server.insert(int_marker) 289 | self.server.setCallback(int_marker.name, self.execute_button_callback) 290 | 291 | def make_gripper_control(self, msg): 292 | """ 293 | Creates the gripper marker for the target 294 | 295 | :param msg InteractiveMarker: The interactive marker for the target 296 | :rtype InteractiveMarkerControl: The gripper control handle for the target marker 297 | """ 298 | control = InteractiveMarkerControl() 299 | control.always_visible = True 300 | control.markers.append(self.make_gripper(msg)) 301 | msg.controls.append(control) 302 | return msg.controls[-1] 303 | 304 | def make_target_marker(self, xyz, xyzw): 305 | """ 306 | Create the target interactive marker 307 | 308 | :param xyz List[float]: The starting position for the target marker 309 | :param xyzw List[float]: The starting orientation for the target marker 310 | """ 311 | int_marker = InteractiveMarker() 312 | int_marker.header.frame_id = "panda_link0" 313 | ( 314 | int_marker.pose.position.x, 315 | int_marker.pose.position.y, 316 | int_marker.pose.position.z, 317 | ) = xyz 318 | ( 319 | int_marker.pose.orientation.x, 320 | int_marker.pose.orientation.y, 321 | int_marker.pose.orientation.z, 322 | int_marker.pose.orientation.w, 323 | ) = xyzw 324 | int_marker.scale = 0.4 325 | 326 | int_marker.name = "target" 327 | int_marker.description = "Trajectory Target" 328 | 329 | self.make_gripper_control(int_marker) 330 | int_marker.controls[0].interaction_mode = InteractiveMarkerControl.NONE 331 | 332 | control = InteractiveMarkerControl() 333 | xyzw = np.array([1.0, 0.0, 0.0, 1.0]) 334 | xyzw = xyzw / np.linalg.norm(xyzw) 335 | ( 336 | control.orientation.x, 337 | control.orientation.y, 338 | control.orientation.z, 339 | control.orientation.w, 340 | ) = xyzw 341 | control.name = "rotate_x" 342 | control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS 343 | int_marker.controls.append(deepcopy(control)) 344 | control.name = "move_x" 345 | control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS 346 | int_marker.controls.append(deepcopy(control)) 347 | 348 | xyzw = np.array([0.0, 1.0, 0.0, 1.0]) 349 | xyzw = xyzw / np.linalg.norm(xyzw) 350 | ( 351 | control.orientation.x, 352 | control.orientation.y, 353 | control.orientation.z, 354 | control.orientation.w, 355 | ) = xyzw 356 | control.name = "rotate_z" 357 | control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS 358 | int_marker.controls.append(deepcopy(control)) 359 | control.name = "move_z" 360 | control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS 361 | int_marker.controls.append(deepcopy(control)) 362 | 363 | xyzw = np.array([0.0, 0.0, 1.0, 1.0]) 364 | xyzw = xyzw / np.linalg.norm(xyzw) 365 | ( 366 | control.orientation.x, 367 | control.orientation.y, 368 | control.orientation.z, 369 | control.orientation.w, 370 | ) = xyzw 371 | control.name = "rotate_y" 372 | control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS 373 | int_marker.controls.append(deepcopy(control)) 374 | control.name = "move_y" 375 | control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS 376 | int_marker.controls.append(deepcopy(control)) 377 | 378 | self.server.insert(int_marker) 379 | self.server.setCallback(int_marker.name, self.target_feedback) 380 | 381 | def reset_button_callback(self, feedback): 382 | """ 383 | A callback that's called after clicking on the reset button, resets the system 384 | 385 | :param feedback InteractiveMarkerFeedback: The feedback from interacting with the 386 | reset button 387 | """ 388 | if feedback.event_type == InteractiveMarkerFeedback.BUTTON_CLICK: 389 | rospy.loginfo("Resetting robot to neutral pose") 390 | self.reset_franka() 391 | self.server.applyChanges() 392 | 393 | def planning_callback(self, msg): 394 | """ 395 | Displays the planned trajectory as a ghost 396 | 397 | :param msg JointTrajectory: The trajectory coming back from the planning node 398 | """ 399 | self.current_plan = [ 400 | list(point.positions) + NEUTRAL_CONFIG[7:].tolist() for point in msg.points 401 | ] 402 | while self.visualize_plan: 403 | for q in self.current_plan: 404 | joint_msg = JointState() 405 | joint_msg.header.stamp = rospy.Time.now() 406 | joint_msg.header.frame_id = "panda_link0" 407 | joint_msg.position = q 408 | joint_msg.name = JOINT_NAMES 409 | # Checking one more time in case there's a race condition 410 | if self.visualize_plan: 411 | self.planned_joint_states_publisher.publish(joint_msg) 412 | rospy.sleep(0.12) 413 | 414 | def plan_button_callback(self, feedback): 415 | """ 416 | This is called whenever the plan button is clicked. It publishes a planning problem 417 | that can then be solved by the planning node. In a more production-ready system, 418 | this would use a Service instead of just publishing and subscribing, but in this 419 | implementation, it uses a publisher to reduce boilerplate. 420 | 421 | :param feedback InteractiveMarkerFeedback: The feedback from interacting with the 422 | plan button 423 | """ 424 | if feedback.event_type == InteractiveMarkerFeedback.BUTTON_CLICK: 425 | msg = PlanningProblem() 426 | msg.header.stamp = rospy.Time.now() 427 | msg.joint_names = JOINT_NAMES[:7] 428 | msg.target = TransformStamped() 429 | msg.target.header.frame_id = "panda_link0" 430 | msg.target.child_frame_id = "target_frame" 431 | ( 432 | msg.target.transform.translation.x, 433 | msg.target.transform.translation.y, 434 | msg.target.transform.translation.z, 435 | ) = self.target_xyz 436 | ( 437 | msg.target.transform.rotation.x, 438 | msg.target.transform.rotation.y, 439 | msg.target.transform.rotation.z, 440 | msg.target.transform.rotation.w, 441 | ) = self.target_xyzw 442 | msg.q0 = JointState(position=self.current_joint_state[:7]) 443 | self.visualize_plan = True 444 | self.planning_problem_publisher.publish(msg) 445 | 446 | self.server.applyChanges() 447 | 448 | def execute_button_callback(self, feedback): 449 | """ 450 | This is called whenever the execute button is clicked. It will move the "real" 451 | robot according to the currently calculated plan. It will also reset the plan and 452 | current "real" joint configuration to the end of the plan. 453 | 454 | :param feedback InteractiveMarkerFeedback: The feedback from interacting with the 455 | execute button 456 | """ 457 | if feedback.event_type == InteractiveMarkerFeedback.BUTTON_CLICK: 458 | if len(self.current_plan) == 0: 459 | rospy.logwarn("There is no current plan. Plan before executing") 460 | return 461 | self.visualize_plan = False 462 | self.current_joint_state = self.current_plan[-1] 463 | for q in self.current_plan: 464 | joint_msg = JointState() 465 | joint_msg.header.stamp = rospy.Time.now() 466 | joint_msg.header.frame_id = "panda_link0" 467 | joint_msg.position = q 468 | joint_msg.name = JOINT_NAMES 469 | self.joint_states_publisher.publish(joint_msg) 470 | self.planned_joint_states_publisher.publish(joint_msg) 471 | rospy.sleep(0.12) 472 | self.current_plan = [] 473 | 474 | self.server.applyChanges() 475 | 476 | def target_feedback(self, feedback): 477 | """ 478 | This is called whenever the user interacts with the target marker. This is used to 479 | set the target pose. 480 | 481 | :param feedback InteractiveMarkerFeedback: The feedback from interacting with the 482 | execute button 483 | """ 484 | if feedback.event_type == InteractiveMarkerFeedback.POSE_UPDATE: 485 | self.target_xyz = ( 486 | feedback.pose.position.x, 487 | feedback.pose.position.y, 488 | feedback.pose.position.z, 489 | ) 490 | self.target_xyzw = ( 491 | feedback.pose.orientation.x, 492 | feedback.pose.orientation.y, 493 | feedback.pose.orientation.z, 494 | feedback.pose.orientation.w, 495 | ) 496 | 497 | 498 | if __name__ == "__main__": 499 | env = MPiNetsInterface() 500 | rospy.spin() 501 | -------------------------------------------------------------------------------- /interactive_demo/mpinets_ros/nodes/planning_node.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # MIT License 4 | # 5 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 6 | # 7 | # Permission is hereby granted, free of charge, to any person obtaining a 8 | # copy of this software and associated documentation files (the "Software"), 9 | # to deal in the Software without restriction, including without limitation 10 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 11 | # and/or sell copies of the Software, and to permit persons to whom the 12 | # Software is furnished to do so, subject to the following conditions: 13 | # 14 | # The above copyright notice and this permission notice shall be included in 15 | # all copies or substantial portions of the Software. 16 | # 17 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 22 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 | # DEALINGS IN THE SOFTWARE. 24 | 25 | import torch 26 | from mpinets.model import MotionPolicyNetwork 27 | from robofin.robots import FrankaRealRobot 28 | from robofin.pointcloud.torch import FrankaSampler 29 | import numpy as np 30 | from mpinets.utils import normalize_franka_joints, unnormalize_franka_joints 31 | from mpinets_msgs.msg import PlanningProblem 32 | from sensor_msgs.msg import PointCloud2, PointField 33 | from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint 34 | from std_msgs.msg import Header 35 | import time 36 | import trimesh.transformations as tra 37 | from functools import partial 38 | from geometrout.transform import SE3 39 | import argparse 40 | from typing import List, Tuple, Any 41 | 42 | import rospy 43 | 44 | NUM_ROBOT_POINTS = 2048 45 | NUM_OBSTACLE_POINTS = 4096 46 | NUM_TARGET_POINTS = 128 47 | MAX_ROLLOUT_LENGTH = 75 48 | 49 | 50 | class Planner: 51 | @torch.no_grad() 52 | def __init__(self, mdl_file: str): 53 | """ 54 | Initializes and loads the model from the checkpoint 55 | 56 | :param mdl_file str: The path to the model checkpoint to be loaded 57 | """ 58 | """ 59 | """ 60 | self.mdl = MotionPolicyNetwork.load_from_checkpoint(mdl_file).cuda().eval() 61 | self.fk_sampler = FrankaSampler("cuda:0") 62 | 63 | @torch.no_grad() 64 | def target_point_cloud(self, pose: SE3) -> torch.Tensor: 65 | """ 66 | Samples target points on the gripper 67 | 68 | :param pose SE3: pose of gripper in world frame 69 | :rtype torch.Tensor: A point cloud sampled from the gripper's mesh 70 | """ 71 | target_points = self.fk_sampler.sample_end_effector( 72 | torch.as_tensor(pose.matrix).float().cuda().unsqueeze(0), 73 | num_points=NUM_TARGET_POINTS, 74 | ) 75 | return target_points 76 | 77 | @torch.no_grad() 78 | def plan( 79 | self, q0: np.ndarray, target_pose: SE3, obstacle_pc: np.ndarray 80 | ) -> Tuple[bool, List[List[float]]]: 81 | """ 82 | Creates a trajectory rollout toward the target. Will give up after MAX_ROLLOUT_LENGTH 83 | prediction steps 84 | 85 | :param q0 np.ndarray: A 7D array (dim 7,) representing the starting config 86 | :param target_pose SE3: A target pose in the `right_gripper` frame 87 | :param obstacle_pc np.ndarray: All the obstacle points fed to the network. These should 88 | be constructed by filtering out outlier points and randomly 89 | downsampling to be of length NUM_OBSTACLE_POINTS 90 | :rtype List[List[float]]: A trajectory as a list of lists (each has 7D). Formatted 91 | as a list to be more friendly to the ROS publisher 92 | """ 93 | assert obstacle_pc.shape == (NUM_OBSTACLE_POINTS, 3), ( 94 | "You must downsample obstacle PC before passing to planner. " 95 | "While you're at it, filter the outliers out as well" 96 | ) 97 | obstacle_points = torch.as_tensor(obstacle_pc).cuda() 98 | target_points = self.target_point_cloud(target_pose).squeeze() 99 | assert np.all( 100 | FrankaRealRobot.JOINT_LIMITS[:, 0] <= q0 101 | ), "Configuration is outside of feasible limits" 102 | assert np.all( 103 | q0 <= FrankaRealRobot.JOINT_LIMITS[:, 1] 104 | ), "Configuration is outside of feasible limits" 105 | q = torch.as_tensor(q0).cuda().unsqueeze(0).float() 106 | robot_points = self.fk_sampler.sample(q, NUM_ROBOT_POINTS) 107 | point_cloud = torch.cat( 108 | ( 109 | torch.zeros(NUM_ROBOT_POINTS, 4), 110 | torch.ones(NUM_OBSTACLE_POINTS, 4), 111 | 2 * torch.ones(NUM_TARGET_POINTS, 4), 112 | ), 113 | dim=0, 114 | ).cuda() 115 | point_cloud[:NUM_ROBOT_POINTS, :3] = robot_points.float() 116 | point_cloud[ 117 | NUM_ROBOT_POINTS : NUM_ROBOT_POINTS + NUM_OBSTACLE_POINTS, :3 118 | ] = obstacle_points.float() 119 | point_cloud[ 120 | NUM_ROBOT_POINTS + NUM_OBSTACLE_POINTS :, :3 121 | ] = target_points.float() 122 | point_cloud = point_cloud.unsqueeze(0) 123 | 124 | trajectory = [q] 125 | q_norm = normalize_franka_joints(q) 126 | success = False 127 | for _ in range(MAX_ROLLOUT_LENGTH): 128 | step_start = time.time() 129 | q_norm = torch.clamp(q_norm + self.mdl(point_cloud, q_norm), min=-1, max=1) 130 | qt = unnormalize_franka_joints(q_norm).type_as(q) 131 | assert isinstance(qt, torch.Tensor) 132 | trajectory.append(qt) 133 | eff_pose = FrankaRealRobot.fk( 134 | qt.squeeze().detach().cpu().numpy(), eff_frame="right_gripper" 135 | ) 136 | # [TUNE] This is where the 'success' is defined. 137 | # Feel free to change this. 138 | if ( 139 | np.linalg.norm(eff_pose._xyz - target_pose._xyz) < 0.01 140 | and np.abs( 141 | np.degrees( 142 | (eff_pose.so3._quat * target_pose.so3._quat.conjugate).radians 143 | ) 144 | ) 145 | < 15 146 | ): 147 | success = True 148 | break 149 | robot_points = self.fk_sampler.sample(qt, NUM_ROBOT_POINTS) 150 | point_cloud[:, :NUM_ROBOT_POINTS, :3] = robot_points 151 | return success, [q.squeeze().cpu().numpy().tolist() for q in trajectory] 152 | 153 | 154 | class PlanningNode: 155 | def __init__(self): 156 | """ 157 | Initializes the subscribers, loads the data from file, and loads the model. 158 | """ 159 | rospy.init_node("mpinets_planning_node") 160 | time.sleep(1) 161 | 162 | self.planner = None 163 | self.base_frame = "panda_link0" 164 | self.planning_problem_subscriber = rospy.Subscriber( 165 | "/mpinets/planning_problem", 166 | PlanningProblem, 167 | self.plan_callback, 168 | queue_size=1, 169 | ) 170 | self.full_point_cloud_publisher = rospy.Publisher( 171 | "/mpinets/full_point_cloud", PointCloud2, queue_size=2 172 | ) 173 | self.plan_publisher = rospy.Publisher( 174 | "/mpinets/plan", JointTrajectory, queue_size=1 175 | ) 176 | rospy.loginfo("Loading data") 177 | self.load_point_cloud_data( 178 | rospy.get_param("/mpinets_planning_node/point_cloud_path") 179 | ) 180 | rospy.loginfo("Data loaded") 181 | rospy.loginfo("Loading model") 182 | self.planner = Planner(rospy.get_param("/mpinets_planning_node/mdl_path")) 183 | rospy.loginfo("Model loaded") 184 | rospy.loginfo("System ready") 185 | 186 | @staticmethod 187 | def clean_point_cloud( 188 | xyz: np.ndarray, rgba: np.ndarray 189 | ) -> Tuple[np.ndarray, np.ndarray]: 190 | """ 191 | Some points are outside of the feasible range and create artifacts for 192 | the network. This filters out those points and then downsamples to the right size 193 | for the network 194 | 195 | :param xyz np.ndarray: The geometry information for the point cloud (dim N x 3) 196 | :param rgba np.ndarray: The color information for the point cloud (dim N x 4) 197 | :rtype Tuple[np.ndarray, np.ndarray]: Returns a tuple of the cleaned and 198 | downsized geometry information and color 199 | information 200 | """ 201 | task_tabletop_mask = np.logical_and.reduce( 202 | ( 203 | xyz[:, 0] > 0.25, 204 | xyz[:, 0] < 1.35, 205 | xyz[:, 1] > -0.3, 206 | xyz[:, 1] < 1.6, 207 | xyz[:, 2] > -0.05, 208 | xyz[:, 2] < 0.35, 209 | ) 210 | ) 211 | 212 | mount_table_mask = np.logical_and.reduce( 213 | ( 214 | xyz[:, 0] > -0.35, 215 | xyz[:, 0] < 0.30, 216 | xyz[:, 1] > -0.5, 217 | xyz[:, 1] < 0.5, 218 | xyz[:, 2] > -0.05, 219 | xyz[:, 2] < 0.05, 220 | ) 221 | ) 222 | workspace_mask = np.logical_or(task_tabletop_mask, mount_table_mask) 223 | xyz = xyz[workspace_mask] 224 | rgba = rgba[workspace_mask] 225 | random_mask = np.random.choice( 226 | len(xyz), size=NUM_OBSTACLE_POINTS, replace=False 227 | ) 228 | return xyz[random_mask], rgba[random_mask] 229 | 230 | def load_point_cloud_data(self, path: str): 231 | """ 232 | Loads scene from a point cloud file, transforms into the 233 | 'panda_link0' frame, stores it to the class, and starts a publishing 234 | loop to show it 235 | 236 | :param path str: The path to the point cloud file 237 | """ 238 | 239 | # Load the file 240 | observation_data = np.load( 241 | path, 242 | allow_pickle=True, 243 | ).item() 244 | 245 | # Transform it into the "world frame," i.e. `panda_link0` 246 | full_pc = tra.transform_points( 247 | observation_data["pc"], observation_data["camera_pose"] 248 | ) 249 | 250 | # Remove the robot points 251 | no_robot_mask = ( 252 | observation_data["label_map"]["robot"] != observation_data["pc_label"] 253 | ) 254 | scene_pc = full_pc[no_robot_mask] 255 | 256 | # Scale the color values to be within [0-1] and add alpha channel 257 | scene_colors = observation_data["pc_color"][no_robot_mask] / 255.0 258 | scene_colors = np.concatenate( 259 | (scene_colors, np.ones((len(scene_colors), 1))), axis=1 260 | ) 261 | assert scene_colors.shape[1] == 4 262 | rospy.Timer( 263 | rospy.Duration(1.0), 264 | partial(self.publish_point_cloud_data, scene_pc, scene_colors), 265 | ) 266 | self.full_scene_pc = scene_pc 267 | self.full_scene_colors = scene_colors 268 | 269 | def publish_point_cloud_data(self, points: np.ndarray, colors: np.ndarray, _: Any): 270 | """ 271 | Publishes the point cloud so that it can be visualized in Rviz 272 | 273 | :param points np.ndarray: The 3D locations of the point cloud (dimension N x 3) 274 | :param colors np.ndarray: The color values of each point (dimension N x 4) 275 | :param _ Any: This is a parameter necessary to run this within a rospy timing 276 | loop and is unused. 277 | """ 278 | ros_dtype = PointField.FLOAT32 279 | dtype = np.float32 280 | itemsize = np.dtype(dtype).itemsize 281 | assert points.shape[1] == 3 282 | assert colors.shape[1] == 4 283 | colors[:, -1] = 0.5 284 | data = np.concatenate((points, colors), axis=1).astype(dtype) # .tobytes() 285 | data = data.tobytes() 286 | fields = [ 287 | PointField(name=n, offset=i * itemsize, datatype=ros_dtype, count=1) 288 | for i, n in enumerate("xyzrgba") 289 | ] 290 | header = Header(frame_id="panda_link0", stamp=rospy.Time.now()) 291 | msg = PointCloud2( 292 | header=header, 293 | height=1, 294 | width=points.shape[0], 295 | is_dense=False, 296 | is_bigendian=False, 297 | fields=fields, 298 | point_step=(itemsize * 7), 299 | row_step=(itemsize * 7 * points.shape[0]), 300 | data=data, 301 | ) 302 | self.full_point_cloud_publisher.publish(msg) 303 | 304 | def plan_callback(self, msg: PlanningProblem): 305 | """ 306 | Receives the planning problem from the interaction tool and calls the planner 307 | Afterward, it publishes the solution, whether or not we consider it a success 308 | 309 | :param msg PlanningProblem: A message describing the planning problem 310 | """ 311 | q0 = np.asarray(msg.q0.position) 312 | target = SE3( 313 | xyz=[ 314 | msg.target.transform.translation.x, 315 | msg.target.transform.translation.y, 316 | msg.target.transform.translation.z, 317 | ], 318 | quaternion=[ 319 | msg.target.transform.rotation.w, 320 | msg.target.transform.rotation.x, 321 | msg.target.transform.rotation.y, 322 | msg.target.transform.rotation.z, 323 | ], 324 | ) 325 | scene_pc, scene_colors = self.clean_point_cloud( 326 | self.full_scene_pc, self.full_scene_colors 327 | ) 328 | if self.planner is None: 329 | rospy.logwarn("Model is not yet loaded and planner cannot yet be called") 330 | return 331 | rospy.loginfo(f"Attempting to plan") 332 | success, plan = self.planner.plan(q0, target, scene_pc) 333 | rospy.loginfo(f"Planning succeeded: {success}") 334 | joint_trajectory = JointTrajectory() 335 | joint_trajectory.header.stamp = rospy.Time.now() 336 | joint_trajectory.header.frame_id = "panda_link0" 337 | joint_trajectory.joint_names = msg.joint_names 338 | for ii, q in enumerate(plan): 339 | point = JointTrajectoryPoint( 340 | time_from_start=rospy.Duration.from_sec(0.12 * ii) 341 | ) 342 | for qi in q: 343 | point.positions.append(qi) 344 | joint_trajectory.points.append(point) 345 | rospy.loginfo("Planning solution published") 346 | self.plan_publisher.publish(joint_trajectory) 347 | 348 | 349 | if __name__ == "__main__": 350 | PlanningNode() 351 | rospy.spin() 352 | -------------------------------------------------------------------------------- /interactive_demo/mpinets_ros/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | mpinets_ros 4 | 0.0.0 5 | The mpinets_ros package 6 | 7 | 8 | 9 | 10 | root 11 | 12 | 13 | 14 | 15 | 16 | MIT 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | catkin 52 | interactive_markers 53 | roscpp 54 | rospy 55 | tf2 56 | visualization_msgs 57 | interactive_markers 58 | roscpp 59 | rospy 60 | tf2 61 | visualization_msgs 62 | interactive_markers 63 | roscpp 64 | rospy 65 | tf2 66 | visualization_msgs 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | -------------------------------------------------------------------------------- /interactive_demo/mpinets_ros/rviz/config.rviz: -------------------------------------------------------------------------------- 1 | Panels: 2 | - Class: rviz/Displays 3 | Help Height: 78 4 | Name: Displays 5 | Property Tree Widget: 6 | Expanded: 7 | - /Global Options1 8 | - /Status1 9 | - /Grid1 10 | - /PointCloud21 11 | - /InteractiveMarkers1 12 | - /Planned Robot Model1 13 | - /RobotModel1 14 | Splitter Ratio: 0.5 15 | Tree Height: 542 16 | - Class: rviz/Selection 17 | Name: Selection 18 | - Class: rviz/Tool Properties 19 | Expanded: 20 | - /2D Pose Estimate1 21 | - /2D Nav Goal1 22 | - /Publish Point1 23 | Name: Tool Properties 24 | Splitter Ratio: 0.5886790156364441 25 | - Class: rviz/Views 26 | Expanded: 27 | - /Current View1 28 | Name: Views 29 | Splitter Ratio: 0.5 30 | - Class: rviz/Time 31 | Experimental: false 32 | Name: Time 33 | SyncMode: 0 34 | SyncSource: PointCloud2 35 | Preferences: 36 | PromptSaveOnExit: true 37 | Toolbars: 38 | toolButtonStyle: 2 39 | Visualization Manager: 40 | Class: "" 41 | Displays: 42 | - Alpha: 0.5 43 | Cell Size: 1 44 | Class: rviz/Grid 45 | Color: 160; 160; 164 46 | Enabled: true 47 | Line Style: 48 | Line Width: 0.029999999329447746 49 | Value: Lines 50 | Name: Grid 51 | Normal Cell Count: 0 52 | Offset: 53 | X: 0 54 | Y: 0 55 | Z: 0 56 | Plane: XY 57 | Plane Cell Count: 10 58 | Reference Frame: 59 | Value: true 60 | - Alpha: 1 61 | Autocompute Intensity Bounds: true 62 | Autocompute Value Bounds: 63 | Max Value: 10 64 | Min Value: -10 65 | Value: true 66 | Axis: Z 67 | Channel Name: intensity 68 | Class: rviz/PointCloud2 69 | Color: 255; 255; 255 70 | Color Transformer: RGBF32 71 | Decay Time: 0 72 | Enabled: true 73 | Invert Rainbow: false 74 | Max Color: 255; 255; 255 75 | Min Color: 0; 0; 0 76 | Name: PointCloud2 77 | Position Transformer: XYZ 78 | Queue Size: 10 79 | Selectable: true 80 | Size (Pixels): 3 81 | Size (m): 0.009999999776482582 82 | Style: Points 83 | Topic: /mpinets/full_point_cloud 84 | Unreliable: false 85 | Use Fixed Frame: true 86 | Use rainbow: true 87 | Value: true 88 | - Class: rviz/InteractiveMarkers 89 | Enable Transparency: true 90 | Enabled: true 91 | Name: InteractiveMarkers 92 | Show Axes: false 93 | Show Descriptions: true 94 | Show Visual Aids: false 95 | Update Topic: /mpinets_controls/update 96 | Value: true 97 | - Alpha: 0.5 98 | Class: rviz/RobotModel 99 | Collision Enabled: false 100 | Enabled: true 101 | Links: 102 | All Links Enabled: true 103 | Expand Joint Details: false 104 | Expand Link Details: false 105 | Expand Tree: false 106 | Link Tree Style: Links in Alphabetic Order 107 | panda_hand: 108 | Alpha: 1 109 | Show Axes: false 110 | Show Trail: false 111 | Value: true 112 | panda_hand_tcp: 113 | Alpha: 1 114 | Show Axes: false 115 | Show Trail: false 116 | panda_leftfinger: 117 | Alpha: 1 118 | Show Axes: false 119 | Show Trail: false 120 | Value: true 121 | panda_link0: 122 | Alpha: 1 123 | Show Axes: false 124 | Show Trail: false 125 | Value: true 126 | panda_link1: 127 | Alpha: 1 128 | Show Axes: false 129 | Show Trail: false 130 | Value: true 131 | panda_link2: 132 | Alpha: 1 133 | Show Axes: false 134 | Show Trail: false 135 | Value: true 136 | panda_link3: 137 | Alpha: 1 138 | Show Axes: false 139 | Show Trail: false 140 | Value: true 141 | panda_link4: 142 | Alpha: 1 143 | Show Axes: false 144 | Show Trail: false 145 | Value: true 146 | panda_link5: 147 | Alpha: 1 148 | Show Axes: false 149 | Show Trail: false 150 | Value: true 151 | panda_link6: 152 | Alpha: 1 153 | Show Axes: false 154 | Show Trail: false 155 | Value: true 156 | panda_link7: 157 | Alpha: 1 158 | Show Axes: false 159 | Show Trail: false 160 | Value: true 161 | panda_link8: 162 | Alpha: 1 163 | Show Axes: false 164 | Show Trail: false 165 | Value: true 166 | panda_rightfinger: 167 | Alpha: 1 168 | Show Axes: false 169 | Show Trail: false 170 | Value: true 171 | Name: Planned Robot Model 172 | Robot Description: robot_description 173 | TF Prefix: /planned_robot_states 174 | Update Interval: 0 175 | Value: true 176 | Visual Enabled: true 177 | - Alpha: 1 178 | Class: rviz/RobotModel 179 | Collision Enabled: false 180 | Enabled: true 181 | Links: 182 | All Links Enabled: true 183 | Expand Joint Details: false 184 | Expand Link Details: false 185 | Expand Tree: false 186 | Link Tree Style: Links in Alphabetic Order 187 | panda_hand: 188 | Alpha: 1 189 | Show Axes: false 190 | Show Trail: false 191 | Value: true 192 | panda_hand_tcp: 193 | Alpha: 1 194 | Show Axes: false 195 | Show Trail: false 196 | panda_leftfinger: 197 | Alpha: 1 198 | Show Axes: false 199 | Show Trail: false 200 | Value: true 201 | panda_link0: 202 | Alpha: 1 203 | Show Axes: false 204 | Show Trail: false 205 | Value: true 206 | panda_link1: 207 | Alpha: 1 208 | Show Axes: false 209 | Show Trail: false 210 | Value: true 211 | panda_link2: 212 | Alpha: 1 213 | Show Axes: false 214 | Show Trail: false 215 | Value: true 216 | panda_link3: 217 | Alpha: 1 218 | Show Axes: false 219 | Show Trail: false 220 | Value: true 221 | panda_link4: 222 | Alpha: 1 223 | Show Axes: false 224 | Show Trail: false 225 | Value: true 226 | panda_link5: 227 | Alpha: 1 228 | Show Axes: false 229 | Show Trail: false 230 | Value: true 231 | panda_link6: 232 | Alpha: 1 233 | Show Axes: false 234 | Show Trail: false 235 | Value: true 236 | panda_link7: 237 | Alpha: 1 238 | Show Axes: false 239 | Show Trail: false 240 | Value: true 241 | panda_link8: 242 | Alpha: 1 243 | Show Axes: false 244 | Show Trail: false 245 | Value: true 246 | panda_rightfinger: 247 | Alpha: 1 248 | Show Axes: false 249 | Show Trail: false 250 | Value: true 251 | Name: RobotModel 252 | Robot Description: robot_description 253 | TF Prefix: "" 254 | Update Interval: 0 255 | Value: true 256 | Visual Enabled: true 257 | Enabled: true 258 | Global Options: 259 | Background Color: 48; 48; 48 260 | Default Light: true 261 | Fixed Frame: panda_link0 262 | Frame Rate: 30 263 | Name: root 264 | Tools: 265 | - Class: rviz/Interact 266 | Hide Inactive Objects: true 267 | - Class: rviz/MoveCamera 268 | - Class: rviz/Select 269 | - Class: rviz/FocusCamera 270 | - Class: rviz/Measure 271 | - Class: rviz/SetInitialPose 272 | Theta std deviation: 0.2617993950843811 273 | Topic: /initialpose 274 | X std deviation: 0.5 275 | Y std deviation: 0.5 276 | - Class: rviz/SetGoal 277 | Topic: /move_base_simple/goal 278 | - Class: rviz/PublishPoint 279 | Single click: true 280 | Topic: /clicked_point 281 | Value: true 282 | Views: 283 | Current: 284 | Class: rviz/Orbit 285 | Distance: 3.4338955879211426 286 | Enable Stereo Rendering: 287 | Stereo Eye Separation: 0.05999999865889549 288 | Stereo Focal Distance: 1 289 | Swap Stereo Eyes: false 290 | Value: false 291 | Focal Point: 292 | X: 0.2798369228839874 293 | Y: -0.42762672901153564 294 | Z: 0.02097627893090248 295 | Focal Shape Fixed Size: true 296 | Focal Shape Size: 0.05000000074505806 297 | Invert Z Axis: false 298 | Name: Current View 299 | Near Clip Distance: 0.009999999776482582 300 | Pitch: 0.42539820075035095 301 | Target Frame: 302 | Value: Orbit (rviz) 303 | Yaw: 1.1903965473175049 304 | Saved: ~ 305 | Window Geometry: 306 | Displays: 307 | collapsed: false 308 | Height: 833 309 | Hide Left Dock: false 310 | Hide Right Dock: false 311 | QMainWindow State: 000000ff00000000fd000000040000000000000156000002a7fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003b000002a7000000c700fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f000002a7fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003b000002a7000000a000fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004b00000003efc0100000002fb0000000800540069006d00650100000000000004b00000024400fffffffb0000000800540069006d006501000000000000045000000000000000000000023f000002a700000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000 312 | Selection: 313 | collapsed: false 314 | Time: 315 | collapsed: false 316 | Tool Properties: 317 | collapsed: false 318 | Views: 319 | collapsed: false 320 | Width: 1200 321 | X: 1086 322 | Y: 314 323 | -------------------------------------------------------------------------------- /interactive_demo/mpinets_ros/setup.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | ## ! DO NOT MANUALLY INVOKE THIS setup.py, USE CATKIN INSTEAD 24 | from distutils.core import setup 25 | from catkin_pkg.python_setup import generate_distutils_setup 26 | 27 | # fetch values from package.xml 28 | setup_args = generate_distutils_setup( 29 | packages=["mpinets_ros"], 30 | package_dir={"": "src"}, 31 | ) 32 | setup(**setup_args) 33 | -------------------------------------------------------------------------------- /jobconfig.yaml: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | training_model_parameters: 24 | point_match_loss_weight: 1 25 | collision_loss_weight: 5 26 | data_module_parameters: 27 | data_dir: /data 28 | trajectory_key: 'hybrid_solutions' # Could also use 'global_solutions' for the global expert 29 | num_obstacle_points: 4096 30 | num_target_points: 128 31 | random_scale: 0.015 32 | shared_parameters: 33 | num_robot_points: 2048 34 | checkpoint_interval: 60 35 | validation_interval: 3000 36 | gpus: 1 # If > 1, will use DDP for training. 37 | batch_size: 10 38 | save_checkpoint_dir: /workspace/mpinets_checkpoints 39 | experiment_name: ExampleTrainingJob 40 | description: "Here is where you put in details about the training job to keep track of what was changed" 41 | -------------------------------------------------------------------------------- /mpinets/__init__.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /mpinets/data_pipeline/__init__.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /mpinets/data_pipeline/environments/__init__.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /mpinets/data_pipeline/environments/base_environment.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | from abc import ( 24 | ABC, 25 | abstractmethod, 26 | ) 27 | from typing import List, Union, List, Any 28 | from robofin.collision import FrankaSelfCollisionChecker 29 | from geometrout.primitive import Cuboid, Cylinder 30 | from dataclasses import dataclass, field 31 | from geometrout.transform import SE3 32 | import numpy as np 33 | from mpinets.mpinets_types import Obstacles 34 | 35 | 36 | def radius_sample(center: float, radius: float): 37 | """ 38 | Helper function to draw a uniform sample with a fixed radius around a center 39 | 40 | :param center float: The center of the distribution 41 | :param radius float: The radius of the distribution 42 | """ 43 | return np.random.uniform(center - radius, center + radius) 44 | 45 | 46 | @dataclass 47 | class Candidate: 48 | """ 49 | Represents a configuration and the corresponding end-effector pose 50 | (in the right_gripper frame). 51 | """ 52 | 53 | pose: SE3 54 | config: np.ndarray 55 | negative_volumes: Obstacles 56 | 57 | 58 | @dataclass 59 | class TaskOrientedCandidate(Candidate): 60 | """ 61 | Represents a configuration and the corresponding end-effector pose 62 | (in the right_gripper frame) for a task oriented pose. 63 | """ 64 | 65 | pass 66 | 67 | 68 | @dataclass 69 | class NeutralCandidate(Candidate): 70 | """ 71 | Represents a configuration and the corresponding end-effector pose 72 | (in the right_gripper frame) for a neutral pose. 73 | """ 74 | 75 | pass 76 | 77 | 78 | class Environment(ABC): 79 | def __init__(self): 80 | self.generated = False 81 | self.demo_candidates = [] 82 | pass 83 | 84 | @property 85 | @abstractmethod 86 | def obstacles(self) -> List[Union[Cuboid, Cylinder]]: 87 | """ 88 | Returns all obstacles in the scene. 89 | :rtype List[Union[Cuboid, Cylinder]]: The list of obstacles in the scene 90 | """ 91 | pass 92 | 93 | @property 94 | @abstractmethod 95 | def cuboids(self) -> List[Cuboid]: 96 | """ 97 | Returns just the cuboids in the scene 98 | :rtype List[Cuboid]: The list of cuboids in the scene 99 | """ 100 | pass 101 | 102 | @property 103 | @abstractmethod 104 | def cylinders(self) -> List[Cylinder]: 105 | """ 106 | Returns just the cylinders in the scene 107 | :rtype List[Cylinder]: The list of cylinders in the scene 108 | """ 109 | pass 110 | 111 | def gen(self, selfcc: FrankaSelfCollisionChecker, **kwargs: Any) -> bool: 112 | """ 113 | Generates an environment and a pair of start/end candidates 114 | 115 | :param selfcc FrankaSelfCollisionChecker: Checks for self collisions using spheres that 116 | mimic the internal Franka collision checker. 117 | :rtype bool: Whether the environment was successfully generated 118 | """ 119 | self.generated = self._gen(selfcc, **kwargs) 120 | if self.generated: 121 | assert len(self.demo_candidates) == 2 122 | cand1, cand2 = self.demo_candidates 123 | assert cand1 is not None and cand2 is not None 124 | return self.generated 125 | 126 | def gen_additional_candidate_sets( 127 | self, how_many: int, selfcc: FrankaSelfCollisionChecker 128 | ) -> List[List[TaskOrientedCandidate]]: 129 | """ 130 | This creates two sets of `how_many` candidates that 131 | are intended to be used as start/end respectively. Take the cartesian product of these 132 | two sets and you'll have a bunch of valid problems. 133 | 134 | :param how_many int: How many candidates to generate in each candidate set (the result 135 | is guaranteed to match this number or the function will run forever) 136 | :param selfcc FrankaSelfCollisionChecker: Checks for self collisions using spheres that 137 | mimic the internal Franka collision checker. 138 | :rtype List[List[TaskOrientedCandidate]]: A list of candidate sets, where each has `how_many` 139 | candidates on the table. 140 | """ 141 | assert ( 142 | self.generated 143 | ), "Must run generate the environment before requesting additional candidates" 144 | return self._gen_additional_candidate_sets(how_many, selfcc) 145 | 146 | def gen_neutral_candidates( 147 | self, how_many: int, selfcc: FrankaSelfCollisionChecker 148 | ) -> List[NeutralCandidate]: 149 | """ 150 | Generate a set of collision free neutral poses and corresponding configurations 151 | (represented as Candidate object) 152 | 153 | :param how_many int: How many neutral poses to generate 154 | :param selfcc FrankaSelfCollisionChecker: Checks for self collisions using spheres that 155 | mimic the internal Franka collision checker. 156 | :rtype List[NeutralCandidate]: A list of neutral poses 157 | """ 158 | assert ( 159 | self.generated 160 | ), "Must run generate the environment before requesting additional candidates" 161 | return self._gen_neutral_candidates(how_many, selfcc) 162 | 163 | @abstractmethod 164 | def _gen(self, selfcc: FrankaSelfCollisionChecker) -> bool: 165 | """ 166 | The internal implementation of the gen function. 167 | 168 | :param selfcc FrankaSelfCollisionChecker: Checks for self collisions using spheres that 169 | mimic the internal Franka collision checker. 170 | :rtype bool: Whether the environment was successfully generated 171 | """ 172 | pass 173 | 174 | @abstractmethod 175 | def _gen_additional_candidate_sets( 176 | self, how_many: int, selfcc: FrankaSelfCollisionChecker 177 | ) -> List[List[TaskOrientedCandidate]]: 178 | """ 179 | This creates two sets of `how_many` candidates that 180 | are intended to be used as start/end respectively. Take the cartesian product of these 181 | two sets and you'll have a bunch of valid problems. 182 | 183 | :param how_many int: How many candidates to generate in each candidate set (the result 184 | is guaranteed to match this number or the function will run forever) 185 | :param selfcc FrankaSelfCollisionChecker: Checks for self collisions using spheres that 186 | mimic the internal Franka collision checker. 187 | :rtype List[List[TaskOrientedCandidate]]: A list of candidate sets, where each has `how_many` 188 | candidates on the table. 189 | """ 190 | pass 191 | 192 | @abstractmethod 193 | def _gen_neutral_candidates( 194 | self, how_many: int, selfcc: FrankaSelfCollisionChecker 195 | ) -> List[NeutralCandidate]: 196 | """ 197 | Generate a set of collision free neutral poses and corresponding configurations 198 | (represented as NeutralCandidate object) 199 | 200 | :param how_many int: How many neutral poses to generate 201 | :param selfcc FrankaSelfCollisionChecker: Checks for self collisions using spheres that 202 | mimic the internal Franka collision checker. 203 | :rtype List[NeutralCandidate]: A list of neutral poses 204 | """ 205 | pass 206 | -------------------------------------------------------------------------------- /mpinets/data_pipeline/process_data.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | import numpy as np 24 | from tqdm.auto import tqdm, trange 25 | import h5py 26 | from pathlib import Path 27 | from typing import List 28 | from argparse import ArgumentParser, RawTextHelpFormatter 29 | import sys 30 | 31 | 32 | def merge_files(files: List[Path], output_file: str): 33 | """ 34 | Will merge a bunch of hdf5 dataset files into a single file. 35 | 36 | :param data_path List[Path]: A list of files to merge 37 | :param output_file str: The file to output. Should be something like `oogyboogy.hdf5` 38 | """ 39 | # Calculate total amount 40 | N = 0 41 | max_cuboids = 0 42 | max_cylinders = 0 43 | for fn in tqdm(files): 44 | with h5py.File(str(fn)) as f: 45 | N += len(f["global_solutions"]) 46 | if "cuboid_centers" in f.keys(): 47 | max_cuboids = max(max_cuboids, f["cuboid_centers"].shape[1]) 48 | if "cylinder_centers" in f.keys(): 49 | max_cylinders = max(max_cylinders, f["cylinder_centers"].shape[1]) 50 | with h5py.File(output_file, "w-") as g: 51 | with h5py.File(str(files[0])) as f: 52 | for k in f.keys(): 53 | if "cuboid" in k: 54 | g.create_dataset(k, (N, max_cuboids, f[k].shape[2])) 55 | elif "cylinder" in k: 56 | g.create_dataset(k, (N, max_cylinders, f[k].shape[2])) 57 | else: 58 | g.create_dataset(k, (N, *f[k].shape[1:])) 59 | idx = 0 60 | for fn in tqdm(files): 61 | with h5py.File(str(fn)) as f: 62 | n = len(f["cuboid_centers"]) 63 | # Do the copying in 10000 data point chunks for memory reasons 64 | for ii in trange(0, n, 10000): 65 | if ii < (n // 10000) * 10000: 66 | for k in f.keys(): 67 | if "cuboid" in k: 68 | g[k][ 69 | idx + ii : idx + ii + 10000, 70 | : f["cuboid_centers"].shape[1], 71 | ..., 72 | ] = f[k][ii : ii + 10000, ...] 73 | elif "cylinder" in k: 74 | g[k][ 75 | idx + ii : idx + ii + 10000, 76 | : f["cylinder_centers"].shape[1], 77 | ..., 78 | ] = f[k][ii : ii + 10000, ...] 79 | else: 80 | g[k][idx + ii : idx + ii + 10000, ...] = f[k][ 81 | ii : ii + 10000, ... 82 | ] 83 | else: 84 | for k in f.keys(): 85 | if "cuboid" in k: 86 | g[k][ 87 | idx + ii : idx + n, 88 | : f["cuboid_centers"].shape[1], 89 | ..., 90 | ] = f[k][ii:, ...] 91 | elif "cylinder" in k: 92 | g[k][ 93 | idx + ii : idx + n, 94 | : f["cylinder_centers"].shape[1], 95 | ..., 96 | ] = f[k][ii:, ...] 97 | else: 98 | g[k][idx + ii : idx + n, ...] = f[k][ii:, ...] 99 | idx += n 100 | 101 | 102 | def merge_data_pipeline_files(data_path: str, output_file: str): 103 | """ 104 | Will merge a bunch of final output files from the `gen_data.py` script. These files 105 | should all correspond to the same problem paradigm (i.e. with or without neutral poses) 106 | of the same scene type (i.e. dresser, cubby, merged cubby, or tabletop). For example, 107 | they should all be dresser scenes going to/from a neutral pose. 108 | 109 | :param data_path str: A directory containing all the merged files (the final outputs) 110 | of the `gen_data.py` script. Note that the script's final output 111 | is always named `all_data.hdf5`, so these files should either be 112 | renamed or kept in unique folders that live within `data_path`. 113 | This function will do a recursive search within this folder to find 114 | all `*.hdf5` files. 115 | :param output_file str: The file to output. Should be something like `blahblahblah.hdf5` 116 | """ 117 | generated_files = list(Path(data_path).rglob("*.hdf5")) 118 | merge_files(generated_files, output_file) 119 | 120 | 121 | def extract_hybrid_expert_data(input_file: str, output_file: str): 122 | """ 123 | Takes a file generated by `merge_data_pipeline_files` and removes all trajectories 124 | that do not have a hybrid expert solution. This is necessary if you want to train on 125 | only hybrid solutions because it's much faster to do this once during processing than 126 | to rely on the dataloader to do it. 127 | 128 | :param input_file str: The output of `merge_data_pipeline_files`. Should be something like 129 | `blahblahblah.hdf5` 130 | :param output_file str: The file to output with only the problems with hybrid expert 131 | demonstrations. Should be something like `somethingsomething.hdf5` 132 | """ 133 | with h5py.File(input_file) as g: 134 | hybrid_indices = np.nonzero( 135 | ~np.all(g["hybrid_solutions"] == np.zeros((50, 7)), axis=(1, 2)) 136 | )[0] 137 | N = len(hybrid_indices) 138 | print(f"Found {N} hybrid demonstrations") 139 | with h5py.File(output_file, "w-") as f: 140 | for k in g.keys(): 141 | f.create_dataset(k, (N, *g[k].shape[1:])) 142 | for ii, jj in enumerate(tqdm(hybrid_indices)): 143 | for k in g.keys(): 144 | f[k][ii, ...] = g[k][jj, ...] 145 | 146 | 147 | def downsize_and_split( 148 | input_file: str, output_dir: str, train_size: int, val_size: int, test_size: int 149 | ): 150 | """ 151 | This function is meant to be used to regularize the sizes of individual problem types 152 | (e.g. merged cubbies without neutral poses). Use this function on the output of either 153 | `merge_data_pipeline_files` or `extract_hybrid_expert_data` (depending on whether you 154 | want all the problems with global expert solutions or the subset that have hybrid expert solutions 155 | as well). This function will create three datasets, a train, val, and test dataset. 156 | If any of the sizes are set to 0, it will ignore that dataset 157 | 158 | :param input_file str: The input file, should come from one of the functions described above 159 | :param output_dir str: The output directory (this directory should exist but doesn't 160 | need any subdirectories) 161 | :param train_size int: The size of the training dataset 162 | :param val_size int: The size of the validation dataset 163 | :param test_size int: The size of the test dataset 164 | """ 165 | with h5py.File(input_file) as f: 166 | assert train_size + val_size + test_size < len(f["cuboid_centers"]) 167 | indices = np.random.choice( 168 | np.arange(len(f["cuboid_centers"])), 169 | size=train_size + test_size + val_size, 170 | replace=False, 171 | ) 172 | train_indices, val_indices, test_indices = ( 173 | np.sort(indices[:train_size]), 174 | np.sort(indices[train_size : train_size + val_size]), 175 | np.sort(indices[train_size + val_size :]), 176 | ) 177 | 178 | assert ( 179 | len(train_indices) + len(val_indices) + len(test_indices) 180 | == train_size + val_size + test_size 181 | ) 182 | 183 | path = Path(output_dir).resolve() 184 | 185 | if val_size > 0: 186 | (path / "val").mkdir(parents=True, exist_ok=True) 187 | with h5py.File(str(path / "val" / "val.hdf5"), "w-") as g: 188 | for k in f.keys(): 189 | g.create_dataset(k, (val_size, *f[k].shape[1:])) 190 | for ii, jj in enumerate(tqdm(val_indices)): 191 | for k in g.keys(): 192 | g[k][ii, ...] = f[k][jj, ...] 193 | if test_size > 0: 194 | (path / "test").mkdir(parents=True, exist_ok=True) 195 | with h5py.File(str(path / "test" / "test.hdf5"), "w-") as g: 196 | for k in f.keys(): 197 | g.create_dataset(k, (test_size, *f[k].shape[1:])) 198 | for ii, jj in enumerate(tqdm(test_indices)): 199 | for k in g.keys(): 200 | g[k][ii, ...] = f[k][jj, ...] 201 | if train_size > 0: 202 | (path / "train").mkdir(parents=True, exist_ok=True) 203 | with h5py.File(str(path / "train" / "train.hdf5"), "w-") as g: 204 | for k in f.keys(): 205 | g.create_dataset(k, (train_size, *f[k].shape[1:])) 206 | for ii, jj in enumerate(tqdm(train_indices)): 207 | for k in g.keys(): 208 | g[k][ii, ...] = f[k][jj, ...] 209 | 210 | 211 | def merge_scenes(input_dir: str, output_dir: str): 212 | """ 213 | Merges all the `train.hdf5` files, merges all the `val.hdf5` files, and merges all the 214 | `test.hdf5` files to create three files that can be used to train. Use this function 215 | to merge datasets with different scene types and problem paradigms 216 | 217 | :param input_dir str: The directory containing all the datasets. This should have the 218 | following structure (scene types are just examples) 219 | input_dir/ 220 | dresser/ 221 | neutral/ 222 | train/ 223 | train.hdf5 224 | val/ 225 | val.hdf5 226 | test/ 227 | test.hdf5 228 | task_oriented/ 229 | train/ 230 | train.hdf5 231 | val/ 232 | val.hdf5 233 | test/ 234 | test.hdf5 235 | cubby/ 236 | ... 237 | tabletop/ 238 | ... 239 | :param output_dir str: The final output directory where the data will live. This 240 | directory should exist, but can be empty. 241 | """ 242 | train_files = list(Path(input_dir).rglob("*train/train.hdf5")) 243 | if len(train_files) > 0: 244 | (Path(output_dir) / "train").mkdir(parents=True, exist_ok=True) 245 | merge_files(train_files, str(Path(output_dir) / "train" / "train.hdf5")) 246 | val_files = list(Path(input_dir).rglob("*val/val.hdf5")) 247 | if len(val_files) > 0: 248 | (Path(output_dir) / "val").mkdir(parents=True, exist_ok=True) 249 | merge_files(val_files, str(Path(output_dir) / "val" / "val.hdf5")) 250 | test_files = list(Path(input_dir).rglob("*test/test.hdf5")) 251 | if len(test_files) > 0: 252 | (Path(output_dir) / "test").mkdir(parents=True, exist_ok=True) 253 | merge_files(test_files, str(Path(output_dir) / "test" / "test.hdf5")) 254 | 255 | 256 | if __name__ == "__main__": 257 | parser = ArgumentParser() 258 | subparsers = parser.add_subparsers( 259 | help="What stage of the data pipeline to run", dest="stage" 260 | ) 261 | merge_single_parser = subparsers.add_parser( 262 | "merge-single-scene", 263 | help=( 264 | "Will merge a bunch of final output files from the `gen_data.py` script." 265 | " These files should all correspond to the same problem paradigm" 266 | " (i.e. with or without neutral poses) of the same scene type" 267 | " (i.e. dresser, cubby, merged cubby, or tabletop). For example, they" 268 | " should all be dresser scenes going to/from a neutral pose." 269 | ), 270 | ) 271 | merge_single_parser.add_argument( 272 | "data_dir", 273 | type=str, 274 | help=( 275 | "A directory containing all the merged files (the final outputs)" 276 | " of the `gen_data.py` script. Note that the script's final output" 277 | " is always named `all_data.hdf5`, so these files should either be" 278 | " renamed or kept in unique folders that live within `data_path`." 279 | " This function will do a recursive search within this folder to find" 280 | " all `*.hdf5` files." 281 | ), 282 | ) 283 | merge_single_parser.add_argument( 284 | "output_file", 285 | type=str, 286 | help="The file to output. Should be something like `blahblahblah.hdf5`", 287 | ) 288 | 289 | hybrid_parser = subparsers.add_parser( 290 | "extract-hybrid", 291 | help=( 292 | "Takes a file generated by `merge_data_pipeline_files` and removes all trajectories" 293 | " that do not have a hybrid expert solution. This is necessary if you want to train on" 294 | " only hybrid solutions because it's much faster to do this once during processing than" 295 | " to rely on the dataloader to do it." 296 | ), 297 | ) 298 | hybrid_parser.add_argument( 299 | "input_file", 300 | type=str, 301 | help="The output of the merge-single-scene phase. Should be something like `blahblahblah.hdf5`", 302 | ) 303 | hybrid_parser.add_argument( 304 | "output_file", 305 | type=str, 306 | help=( 307 | "The file to output with only the problems with hybrid expert" 308 | " demonstrations. Should be something like `somethingsomething.hdf5`" 309 | ), 310 | ) 311 | 312 | downsize_parser = subparsers.add_parser( 313 | "downsize-and-split", 314 | help=( 315 | "This function is meant to be used to regularize the sizes of individual problem types" 316 | " (e.g. merged cubbies without neutral poses). Use this function on the output of either" 317 | " `merge_data_pipeline_files` or `extract_hybrid_expert_data` (depending on whether you" 318 | " want all the problems with global expert solutions or the subset that have hybrid expert solutions" 319 | " as well). This function will create three datasets, a train, val, and test dataset." 320 | " If any of the sizes are set to 0, it will ignore that dataset" 321 | ), 322 | ) 323 | downsize_parser.add_argument( 324 | "input_file", 325 | type=str, 326 | help="The input file, should come from one of the functions described above", 327 | ) 328 | downsize_parser.add_argument( 329 | "output_dir", 330 | type=str, 331 | help=( 332 | "The output directory (this directory should exist but doesn't" 333 | " need any subdirectories)" 334 | ), 335 | ) 336 | downsize_parser.add_argument( 337 | "train_size", 338 | type=int, 339 | help="The size of the training dataset (must be less than the full dataset)", 340 | ) 341 | downsize_parser.add_argument( 342 | "val_size", 343 | type=int, 344 | help="The size of the validation dataset (must be less than the full dataset)", 345 | ) 346 | downsize_parser.add_argument( 347 | "test_size", 348 | type=int, 349 | help="The size of the test dataset (must be less than the full dataset)", 350 | ) 351 | 352 | merge_all_parser = subparsers.add_parser( 353 | "final-merge", 354 | formatter_class=RawTextHelpFormatter, 355 | help=( 356 | "Merges all the `train.hdf5` files, merges all the `val.hdf5` files, and merges all the" 357 | " `test.hdf5` files to create three files that can be used to train. Use this function" 358 | " to merge datasets with different scene types and problem paradigms" 359 | ), 360 | ) 361 | merge_all_parser.add_argument( 362 | "input_dir", 363 | type=str, 364 | help=( 365 | "The directory containing all the datasets. This should have the" 366 | " following structure (scene types are just examples)\n" 367 | "input_dir/\n" 368 | " dresser/\n" 369 | " neutral/\n" 370 | " train/\n" 371 | " train.hdf5\n" 372 | " val/\n" 373 | " val.hdf5\n" 374 | " test/\n" 375 | " test.hdf5\n" 376 | " task_oriented/\n" 377 | " train/\n" 378 | " train.hdf5\n" 379 | " val/\n" 380 | " val.hdf5\n" 381 | " test/\n" 382 | " test.hdf5\n" 383 | " cubby/\n" 384 | " ...\n" 385 | " tabletop/\n" 386 | " ...\n" 387 | ), 388 | ) 389 | merge_all_parser.add_argument( 390 | "output_dir", 391 | type=str, 392 | help=( 393 | "The final output directory where the data will live. This" 394 | " directory should exist, but can be empty." 395 | ), 396 | ) 397 | 398 | if len(sys.argv) == 1: 399 | parser.print_usage(sys.stderr) 400 | sys.exit(1) 401 | 402 | args = parser.parse_args() 403 | 404 | if args.stage == "merge-single-scene": 405 | merge_data_pipeline_files(args.data_dir, args.output_file) 406 | elif args.stage == "extract-hybrid": 407 | extract_hybrid_expert_data(args.input_file, args.output_file) 408 | elif args.stage == "downsize-and-split": 409 | downsize_and_split( 410 | args.input_file, 411 | args.output_dir, 412 | args.train_size, 413 | args.val_size, 414 | args.test_size, 415 | ) 416 | elif args.stage == "final-merge": 417 | merge_scenes(args.input_dir, args.output_dir) 418 | -------------------------------------------------------------------------------- /mpinets/loss.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | from typing import Tuple 24 | from mpinets import utils 25 | from mpinets.geometry import TorchCuboids, TorchCylinders 26 | import torch.nn.functional as F 27 | import torch 28 | from robofin.pointcloud.torch import FrankaSampler 29 | 30 | 31 | def point_match_loss(input_pc: torch.Tensor, target_pc: torch.Tensor) -> torch.Tensor: 32 | """ 33 | A combination L1 and L2 loss to penalize large and small deviations between 34 | two point clouds 35 | 36 | :param input_pc torch.Tensor: Point cloud sampled from the network's output. 37 | Has dim [B, N, 3] 38 | :param target_pc torch.Tensor: Point cloud sampled from the supervision 39 | Has dim [B, N, 3] 40 | :rtype torch.Tensor: The single loss value 41 | """ 42 | return F.mse_loss(input_pc, target_pc, reduction="mean") + F.l1_loss( 43 | input_pc, target_pc, reduction="mean" 44 | ) 45 | 46 | 47 | def collision_loss( 48 | input_pc: torch.Tensor, 49 | cuboid_centers: torch.Tensor, 50 | cuboid_dims: torch.Tensor, 51 | cuboid_quaternions: torch.Tensor, 52 | cylinder_centers: torch.Tensor, 53 | cylinder_radii: torch.Tensor, 54 | cylinder_heights: torch.Tensor, 55 | cylinder_quaternions: torch.Tensor, 56 | ) -> torch.Tensor: 57 | """ 58 | Calculates the hinge loss, calculating whether the robot (represented as a 59 | point cloud) is in collision with any obstacles in the scene. Collision 60 | here actually means within 3cm of the obstacle--this is to provide stronger 61 | gradient signal to encourage the robot to move out of the way. Also, some of the 62 | primitives can have zero volume (i.e. a dim is zero for cuboids or radius or height is zero for cylinders). 63 | If these are zero volume, they will have infinite sdf values (and therefore be ignored by the loss). 64 | 65 | :param input_pc torch.Tensor: Points sampled from the robot's surface after it 66 | is placed at the network's output prediction. Has dim [B, N, 3] 67 | :param cuboid_centers torch.Tensor: Has dim [B, M1, 3] 68 | :param cuboid_dims torch.Tensor: Has dim [B, M1, 3] 69 | :param cuboid_quaternions torch.Tensor: Has dim [B, M1, 4]. Quaternion is formatted as w, x, y, z. 70 | :param cylinder_centers torch.Tensor: Has dim [B, M2, 3] 71 | :param cylinder_radii torch.Tensor: Has dim [B, M2, 1] 72 | :param cylinder_heights torch.Tensor: Has dim [B, M2, 1] 73 | :param cylinder_quaternions torch.Tensor: Has dim [B, M2, 4]. Quaternion is formatted as w, x, y, z. 74 | :rtype torch.Tensor: Returns the loss value aggregated over the batch 75 | """ 76 | 77 | cuboids = TorchCuboids( 78 | cuboid_centers, 79 | cuboid_dims, 80 | cuboid_quaternions, 81 | ) 82 | cylinders = TorchCylinders( 83 | cylinder_centers, 84 | cylinder_radii, 85 | cylinder_heights, 86 | cylinder_quaternions, 87 | ) 88 | sdf_values = torch.minimum(cuboids.sdf(input_pc), cylinders.sdf(input_pc)) 89 | return F.hinge_embedding_loss( 90 | sdf_values, 91 | -torch.ones_like(sdf_values), 92 | margin=0.03, 93 | reduction="mean", 94 | ) 95 | 96 | 97 | class CollisionAndBCLossContainer: 98 | """ 99 | A container class to hold the various losses. This is structured as a 100 | container because that allows it to cache the robot pointcloud sampler 101 | object. By caching this, we reduce parsing time when processing the URDF 102 | and allow for a consistent random pointcloud (consistent per-GPU, that is) 103 | """ 104 | 105 | def __init__( 106 | self, 107 | ): 108 | self.fk_sampler = None 109 | self.num_points = 1024 110 | 111 | def __call__( 112 | self, 113 | input_normalized: torch.Tensor, 114 | cuboid_centers: torch.Tensor, 115 | cuboid_dims: torch.Tensor, 116 | cuboid_quaternions: torch.Tensor, 117 | cylinder_centers: torch.Tensor, 118 | cylinder_radii: torch.Tensor, 119 | cylinder_heights: torch.Tensor, 120 | cylinder_quaternions: torch.Tensor, 121 | target_normalized: torch.Tensor, 122 | ) -> Tuple[torch.Tensor, torch.Tensor]: 123 | """ 124 | This method calculates both constituent loss function after loading, 125 | and then caching, a fixed robot point cloud sampler (i.e. the task 126 | spaces sampled are always the same, as opposed to a random point cloud). 127 | The fixed point cloud is important for loss calculation so that 128 | it's possible to take mse between the two pointclouds. 129 | 130 | :param input_normalized torch.Tensor: Has dim [B, 7] and is always between -1 and 1 131 | :param cuboid_centers torch.Tensor: Has dim [B, M1, 3] 132 | :param cuboid_dims torch.Tensor: Has dim [B, M1, 3] 133 | :param cuboid_quaternions torch.Tensor: Has dim [B, M1, 4]. Quaternion is formatted as w, x, y, z. 134 | :param cylinder_centers torch.Tensor: Has dim [B, M2, 3] 135 | :param cylinder_radii torch.Tensor: Has dim [B, M2, 1] 136 | :param cylinder_heights torch.Tensor: Has dim [B, M2, 1] 137 | :param cylinder_quaternions torch.Tensor: Has dim [B, M2, 4]. Quaternion is formatted as w, x, y, z. 138 | :param target_normalized torch.Tensor: Has dim [B, 7] and is always between -1 and 1 139 | :rtype Tuple[torch.Tensor, torch.Tensor]: The two losses aggregated over the batch 140 | """ 141 | if self.fk_sampler is None: 142 | self.fk_sampler = FrankaSampler( 143 | input_normalized.device, 144 | num_fixed_points=self.num_points, 145 | use_cache=True, 146 | with_base_link=False, # Remove base link because this isn't controllable anyway 147 | ) 148 | input_pc = self.fk_sampler.sample( 149 | utils.unnormalize_franka_joints(input_normalized), 150 | ) 151 | target_pc = self.fk_sampler.sample( 152 | utils.unnormalize_franka_joints(target_normalized), 153 | ) 154 | return ( 155 | collision_loss( 156 | input_pc, 157 | cuboid_centers, 158 | cuboid_dims, 159 | cuboid_quaternions, 160 | cylinder_centers, 161 | cylinder_radii, 162 | cylinder_heights, 163 | cylinder_quaternions, 164 | ), 165 | point_match_loss(input_pc, target_pc), 166 | ) 167 | -------------------------------------------------------------------------------- /mpinets/model.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | import torch 24 | from torch import nn 25 | from robofin.pointcloud.torch import FrankaSampler, FrankaCollisionSampler 26 | import pytorch_lightning as pl 27 | from pointnet2_ops.pointnet2_modules import PointnetSAModule 28 | 29 | from mpinets import loss 30 | from mpinets.utils import unnormalize_franka_joints 31 | from mpinets.geometry import TorchCuboids, TorchCylinders 32 | from typing import List, Tuple, Sequence, Dict, Callable 33 | 34 | 35 | class MotionPolicyNetwork(pl.LightningModule): 36 | """ 37 | The architecture laid out here is the default architecture laid out in the 38 | Motion Policy Networks paper (Fishman, et. al, 2022). 39 | """ 40 | 41 | def __init__(self): 42 | """ 43 | Constructs the model 44 | """ 45 | super().__init__() 46 | self.point_cloud_encoder = MPiNetsPointNet() 47 | self.feature_encoder = nn.Sequential( 48 | nn.Linear(7, 32), 49 | nn.LeakyReLU(), 50 | nn.Linear(32, 64), 51 | nn.LeakyReLU(), 52 | nn.Linear(64, 128), 53 | nn.LeakyReLU(), 54 | nn.Linear(128, 128), 55 | nn.LeakyReLU(), 56 | nn.Linear(128, 64), 57 | ) 58 | self.decoder = nn.Sequential( 59 | nn.Linear(2048 + 64, 512), 60 | nn.LeakyReLU(), 61 | nn.Linear(512, 256), 62 | nn.LeakyReLU(), 63 | nn.Linear(256, 128), 64 | nn.LeakyReLU(), 65 | nn.Linear(128, 7), 66 | ) 67 | 68 | def configure_optimizers(self): 69 | """ 70 | A standard method in PyTorch lightning to set the optimizer 71 | """ 72 | optimizer = torch.optim.Adam(self.parameters(), lr=1e-4) 73 | return optimizer 74 | 75 | def forward(self, xyz: torch.Tensor, q: torch.Tensor) -> torch.Tensor: # type: ignore[override] 76 | """ 77 | Passes data through the network to produce an output 78 | 79 | :param xyz torch.Tensor: Tensor representing the point cloud. Should 80 | have dimensions of [B x N x 4] where B is the batch 81 | size, N is the number of points and 4 is because there 82 | are three geometric dimensions and a segmentation mask 83 | :param q torch.Tensor: The current robot configuration normalized to be between 84 | -1 and 1, according to each joint's range of motion 85 | :rtype torch.Tensor: The displacement to be applied to the current configuration to get 86 | the position at the next step (still in normalized space) 87 | """ 88 | pc_encoding = self.point_cloud_encoder(xyz) 89 | feature_encoding = self.feature_encoder(q) 90 | x = torch.cat((pc_encoding, feature_encoding), dim=1) 91 | return self.decoder(x) 92 | 93 | 94 | class TrainingMotionPolicyNetwork(MotionPolicyNetwork): 95 | """ 96 | An version of the MotionPolicyNetwork model that has additional attributes 97 | necessary during training (or using the validation step outside of the 98 | training process). This class is a valid model, but it's overkill when 99 | doing real robot inference and, for example, point cloud sampling is 100 | done by an outside process (such as downsampling point clouds from a point cloud). 101 | """ 102 | 103 | def __init__( 104 | self, 105 | num_robot_points: int, 106 | point_match_loss_weight: float, 107 | collision_loss_weight: float, 108 | ): 109 | """ 110 | Creates the network and assigns additional parameters for training 111 | 112 | 113 | :param num_robot_points int: The number of robot points used when resampling 114 | the robot points during rollouts (used in validation) 115 | :param point_match_loss_weight float: The weight assigned to the behavior 116 | cloning loss. 117 | :param collision_loss_weight float: The weight assigned to the collision loss 118 | :rtype Self: An instance of the network 119 | """ 120 | super().__init__() 121 | self.num_robot_points = num_robot_points 122 | self.point_match_loss_weight = point_match_loss_weight 123 | self.collision_loss_weight = collision_loss_weight 124 | self.fk_sampler = None 125 | self.collision_sampler = None 126 | self.loss_fun = loss.CollisionAndBCLossContainer() 127 | 128 | def rollout( 129 | self, 130 | batch: Dict[str, torch.Tensor], 131 | rollout_length: int, 132 | sampler: Callable[[torch.Tensor], torch.Tensor], 133 | unnormalize: bool = False, 134 | ) -> List[torch.Tensor]: 135 | """ 136 | Rolls out the policy an arbitrary length by calling it iteratively 137 | 138 | :param batch Dict[str, torch.Tensor]: A data batch coming from the 139 | data loader--should already be 140 | on the correct device 141 | :param rollout_length int: The number of steps to roll out (not including the start) 142 | :param sampler Callable[[torch.Tensor], torch.Tensor]: A function that takes a batch of robot 143 | configurations [B x 7] and returns a batch of 144 | point clouds samples on the surface of that robot 145 | :param unnormalize bool: Whether to return the whole trajectory unnormalized 146 | (i.e. converted back into joint space) 147 | :rtype List[torch.Tensor]: The entire trajectory batch, i.e. a list of 148 | configuration batches including the starting 149 | configurations where each element in the list 150 | corresponds to a timestep. For example, the 151 | first element of each batch in the list would 152 | be a single trajectory. 153 | """ 154 | xyz, q = ( 155 | batch["xyz"], 156 | batch["configuration"], 157 | ) 158 | # This block is to adapt for the case where we only want to roll out a 159 | # single trajectory 160 | if q.ndim == 1: 161 | xyz = xyz.unsqueeze(0) 162 | q = q.unsqueeze(0) 163 | if unnormalize: 164 | q_unnorm = unnormalize_franka_joints(q) 165 | assert isinstance(q_unnorm, torch.Tensor) 166 | trajectory = [q_unnorm] 167 | else: 168 | trajectory = [q] 169 | 170 | for i in range(rollout_length): 171 | q = torch.clamp(q + self(xyz, q), min=-1, max=1) 172 | q_unnorm = unnormalize_franka_joints(q) 173 | assert isinstance(q_unnorm, torch.Tensor) 174 | q_unnorm = q_unnorm.type_as(q) 175 | if unnormalize: 176 | trajectory.append(q_unnorm) 177 | else: 178 | trajectory.append(q) 179 | 180 | samples = sampler(q_unnorm).type_as(xyz) 181 | xyz[:, : samples.shape[1], :3] = samples 182 | 183 | return trajectory 184 | 185 | def training_step( # type: ignore[override] 186 | self, batch: Dict[str, torch.Tensor], batch_idx: int 187 | ) -> torch.Tensor: 188 | """ 189 | A function called automatically by Pytorch Lightning during training. 190 | This function handles the forward pass, the loss calculation, and what to log 191 | 192 | :param batch Dict[str, torch.Tensor]: A data batch coming from the 193 | data loader--should already be 194 | on the correct device 195 | :param batch_idx int: The index of the batch (not used by this function) 196 | :rtype torch.Tensor: The overall weighted loss (used for backprop) 197 | """ 198 | xyz, q = ( 199 | batch["xyz"], 200 | batch["configuration"], 201 | ) 202 | y_hat = torch.clamp(q + self(xyz, q), min=-1, max=1) 203 | ( 204 | cuboid_centers, 205 | cuboid_dims, 206 | cuboid_quats, 207 | cylinder_centers, 208 | cylinder_radii, 209 | cylinder_heights, 210 | cylinder_quats, 211 | supervision, 212 | ) = ( 213 | batch["cuboid_centers"], 214 | batch["cuboid_dims"], 215 | batch["cuboid_quats"], 216 | batch["cylinder_centers"], 217 | batch["cylinder_radii"], 218 | batch["cylinder_heights"], 219 | batch["cylinder_quats"], 220 | batch["supervision"], 221 | ) 222 | collision_loss, point_match_loss = self.loss_fun( 223 | y_hat, 224 | cuboid_centers, 225 | cuboid_dims, 226 | cuboid_quats, 227 | cylinder_centers, 228 | cylinder_radii, 229 | cylinder_heights, 230 | cylinder_quats, 231 | supervision, 232 | ) 233 | self.log("point_match_loss", point_match_loss) 234 | self.log("collision_loss", collision_loss) 235 | val_loss = ( 236 | self.point_match_loss_weight * point_match_loss 237 | + self.collision_loss_weight * collision_loss 238 | ) 239 | self.log("val_loss", val_loss) 240 | return val_loss 241 | 242 | def sample(self, q: torch.Tensor) -> torch.Tensor: 243 | """ 244 | Samples a point cloud from the surface of all the robot's links 245 | 246 | :param q torch.Tensor: Batched configuration in joint space 247 | :rtype torch.Tensor: Batched point cloud of size [B, self.num_robot_points, 3] 248 | """ 249 | assert self.fk_sampler is not None 250 | return self.fk_sampler.sample(q, self.num_robot_points) 251 | 252 | def validation_step( # type: ignore[override] 253 | self, batch: Dict[str, torch.Tensor], batch_idx: int 254 | ) -> torch.Tensor: 255 | """ 256 | This is a Pytorch Lightning function run automatically across devices 257 | during the validation loop 258 | 259 | :param batch Dict[str, torch.Tensor]: The batch coming from the dataloader 260 | :param batch_idx int: The index of the batch (not used by this function) 261 | :rtype torch.Tensor: The loss values which are to be collected into summary stats 262 | """ 263 | 264 | # These are defined here because they need to be set on the correct devices. 265 | # The easiest way to do this is to do it at call-time 266 | if self.fk_sampler is None: 267 | self.fk_sampler = FrankaSampler(self.device, use_cache=True) 268 | if self.collision_sampler is None: 269 | self.collision_sampler = FrankaCollisionSampler( 270 | self.device, with_base_link=False 271 | ) 272 | rollout = self.rollout(batch, 69, self.sample, unnormalize=True) 273 | 274 | assert self.fk_sampler is not None # Necessary for mypy to type properly 275 | eff = self.fk_sampler.end_effector_pose(rollout[-1]) 276 | position_error = torch.linalg.vector_norm( 277 | eff[:, :3, -1] - batch["target_position"], dim=1 278 | ) 279 | avg_target_error = torch.mean(position_error) 280 | 281 | cuboids = TorchCuboids( 282 | batch["cuboid_centers"], 283 | batch["cuboid_dims"], 284 | batch["cuboid_quats"], 285 | ) 286 | cylinders = TorchCylinders( 287 | batch["cylinder_centers"], 288 | batch["cylinder_radii"], 289 | batch["cylinder_heights"], 290 | batch["cylinder_quats"], 291 | ) 292 | 293 | B = batch["cuboid_centers"].size(0) 294 | rollout = torch.stack(rollout, dim=1) 295 | # Here is some Pytorch broadcasting voodoo to calculate whether each 296 | # rollout has a collision or not (looking to calculate the collision rate) 297 | assert rollout.shape == (B, 70, 7) 298 | rollout = rollout.reshape(-1, 7) 299 | has_collision = torch.zeros(B, dtype=torch.bool, device=self.device) 300 | collision_spheres = self.collision_sampler.compute_spheres(rollout) 301 | for radius, spheres in collision_spheres: 302 | num_spheres = spheres.shape[-2] 303 | sphere_sequence = spheres.reshape((B, -1, num_spheres, 3)) 304 | sdf_values = torch.minimum( 305 | cuboids.sdf_sequence(sphere_sequence), 306 | cylinders.sdf_sequence(sphere_sequence), 307 | ) 308 | assert sdf_values.shape == (B, 70, num_spheres) 309 | radius_collisions = torch.any( 310 | sdf_values.reshape((sdf_values.size(0), -1)) <= radius, dim=-1 311 | ) 312 | has_collision = torch.logical_or(radius_collisions, has_collision) 313 | 314 | avg_collision_rate = torch.count_nonzero(has_collision) / B 315 | return { 316 | "avg_target_error": avg_target_error, 317 | "avg_collision_rate": avg_collision_rate, 318 | } 319 | 320 | def validation_step_end( # type: ignore[override] 321 | self, batch_parts: Dict[str, torch.Tensor] 322 | ) -> Dict[str, torch.Tensor]: 323 | """ 324 | Called by Pytorch Lightning at the end of each validation step to 325 | aggregate across devices 326 | 327 | :param batch_parts Dict[str, torch.Tensor]: The parts accumulated from all devices 328 | :rtype Dict[str, torch.Tensor]: The average values across the devices 329 | """ 330 | return { 331 | "avg_target_error": torch.mean(batch_parts["avg_target_error"]), 332 | "avg_collision_rate": torch.mean(batch_parts["avg_collision_rate"]), 333 | } 334 | 335 | def validation_epoch_end( # type: ignore[override] 336 | self, validation_step_outputs: Sequence[Dict[str, torch.Tensor]] 337 | ): 338 | """ 339 | Pytorch lightning method that aggregates stats from the validation loop and logs 340 | 341 | :param validation_step_outputs Sequence[Dict[str, torch.Tensor]]: The outputs from each 342 | validation step 343 | """ 344 | avg_target_error = torch.mean( 345 | torch.stack([x["avg_target_error"] for x in validation_step_outputs]) 346 | ) 347 | self.log("avg_target_error", avg_target_error) 348 | 349 | avg_collision_rate = torch.mean( 350 | torch.stack([x["avg_collision_rate"] for x in validation_step_outputs]) 351 | ) 352 | self.log("avg_collision_rate", avg_collision_rate) 353 | 354 | 355 | class MPiNetsPointNet(pl.LightningModule): 356 | def __init__(self): 357 | super().__init__() 358 | self._build_model() 359 | 360 | def _build_model(self): 361 | """ 362 | Assembles the model design into a ModuleList 363 | """ 364 | self.SA_modules = nn.ModuleList() 365 | self.SA_modules.append( 366 | PointnetSAModule( 367 | npoint=512, 368 | radius=0.05, 369 | nsample=128, 370 | mlp=[1, 64, 64, 64], 371 | bn=False, 372 | ) 373 | ) 374 | self.SA_modules.append( 375 | PointnetSAModule( 376 | npoint=128, 377 | radius=0.3, 378 | nsample=128, 379 | mlp=[64, 128, 128, 256], 380 | bn=False, 381 | ) 382 | ) 383 | self.SA_modules.append(PointnetSAModule(mlp=[256, 512, 512, 1024], bn=False)) 384 | 385 | self.fc_layer = nn.Sequential( 386 | nn.Linear(1024, 4096), 387 | nn.GroupNorm(16, 4096), 388 | nn.LeakyReLU(inplace=True), 389 | nn.Linear(4096, 2048), 390 | nn.GroupNorm(16, 2048), 391 | nn.LeakyReLU(inplace=True), 392 | nn.Linear(2048, 2048), 393 | ) 394 | 395 | @staticmethod 396 | def _break_up_pc(pc: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: 397 | """ 398 | Breaks up the point cloud into the xyz coordinates and segmentation mask 399 | 400 | :param pc torch.Tensor: Tensor with shape [B, N, M] where M is larger than 3. 401 | The first three dimensions along the last axis will be x, y, z 402 | :rtype Tuple[torch.Tensor, torch.Tensor]: Two tensors, one with just xyz 403 | and one with the corresponding features 404 | """ 405 | xyz = pc[..., 0:3].contiguous() 406 | features = pc[..., 3:].transpose(1, 2).contiguous() 407 | return xyz, features 408 | 409 | def forward(self, point_cloud: torch.Tensor) -> torch.Tensor: # type: ignore[override] 410 | """ 411 | Forward pass of the network 412 | 413 | :param point_cloud torch.Tensor: Has dimensions (B, N, 4) 414 | B is the batch size 415 | N is the number of points 416 | 4 is x, y, z, segmentation_mask 417 | This tensor must be on the GPU (CPU tensors not supported) 418 | :rtype torch.Tensor: The output from the network 419 | """ 420 | assert point_cloud.size(2) == 4 421 | xyz, features = self._break_up_pc(point_cloud) 422 | 423 | for module in self.SA_modules: 424 | xyz, features = module(xyz, features) 425 | 426 | return self.fc_layer(features.squeeze(-1)) 427 | -------------------------------------------------------------------------------- /mpinets/mpinets_types.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | from dataclasses import dataclass, field 24 | from typing import List, Union, Optional, Dict, Sequence 25 | import numpy as np 26 | from geometrout.transform import SE3 27 | from geometrout.primitive import Cuboid, Cylinder, Sphere 28 | 29 | 30 | Obstacles = List[Union[Cuboid, Cylinder, Sphere]] 31 | Trajectory = Sequence[Union[Sequence, np.ndarray]] 32 | 33 | 34 | @dataclass 35 | class PlanningProblem: 36 | """ 37 | Defines a common interface to describe planning problems 38 | """ 39 | 40 | target: SE3 # The target in the `right_gripper` frame 41 | target_volume: Union[Cuboid, Cylinder] 42 | q0: np.ndarray # The starting configuration 43 | obstacles: Optional[Obstacles] = None # The obstacles in the scene 44 | obstacle_point_cloud: Optional[np.ndarray] = None 45 | target_negative_volumes: Obstacles = field(default_factory=lambda: []) 46 | 47 | 48 | ProblemSet = Dict[str, Dict[str, List[PlanningProblem]]] 49 | -------------------------------------------------------------------------------- /mpinets/run_training.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | from typing import Optional, Any, Dict, List 24 | from pathlib import Path 25 | import sys 26 | import os 27 | from datetime import timedelta 28 | import pytorch_lightning as pl 29 | from pytorch_lightning.loggers import WandbLogger 30 | from pytorch_lightning.strategies import DDPStrategy 31 | from pytorch_lightning.callbacks import ModelCheckpoint, Callback 32 | from termcolor import colored 33 | import argparse 34 | import yaml 35 | import uuid 36 | 37 | PROJECT_ROOT = str(Path(__file__).resolve().parent.parent) 38 | sys.path.insert(0, PROJECT_ROOT) 39 | from mpinets.data_loader import DataModule 40 | from mpinets.model import TrainingMotionPolicyNetwork 41 | 42 | 43 | def setup_trainer( 44 | gpus: int, 45 | test: bool, 46 | should_checkpoint: bool, 47 | logger: Optional[WandbLogger], 48 | checkpoint_interval: int, 49 | checkpoint_dir: str, 50 | validation_interval: float, 51 | ) -> pl.Trainer: 52 | """ 53 | Creates the Pytorch Lightning trainer object 54 | 55 | :param gpus int: The number of GPUs (if more than 1, uses DDP) 56 | :param test bool: Whether to use a test dataset 57 | :param should_checkpoint bool: Whether to save checkpoints 58 | :param logger Optional[WandbLogger]: The logger object, set to None if logging is disabled 59 | :param checkpoint_interval int: The number of minutes between checkpoints 60 | :param checkpoint_dir str: The directory in which to save checkpoints (a subdirectory will 61 | be created according to the experiment ID) 62 | :param validation_interval float: How often to run the validation step, either as a proportion 63 | of the training epoch or as a number of batches 64 | :rtype pl.Trainer: The trainer object 65 | """ 66 | args: Dict[str, Any] = {} 67 | 68 | if test: 69 | args = {**args, "limit_train_batches": 10, "limit_val_batches": 3} 70 | validation_interval = 2 # Overwritten to be an appropriate size for test 71 | if (isinstance(gpus, list) and len(gpus) > 1) or ( 72 | isinstance(gpus, int) and gpus > 1 73 | ): 74 | args = { 75 | **args, 76 | "strategy": DDPStrategy(find_unused_parameters=False), 77 | } 78 | if validation_interval is not None: 79 | args = {**args, "val_check_interval": validation_interval} 80 | callbacks: List[Callback] = [] 81 | if logger is not None: 82 | experiment_id = str(logger.experiment.id) 83 | else: 84 | experiment_id = str(uuid.uuid1()) 85 | if should_checkpoint: 86 | if checkpoint_dir is not None: 87 | dirpath = Path(checkpoint_dir).resolve() / experiment_id 88 | else: 89 | dirpath = PROJECT_ROOT / "checkpoints" / experiment_id 90 | pl.utilities.rank_zero_info(f"Saving checkpoints to {dirpath}") 91 | every_n_checkpoint = ModelCheckpoint( 92 | monitor="val_loss", 93 | save_last=True, 94 | dirpath=dirpath, 95 | train_time_interval=timedelta(minutes=checkpoint_interval), 96 | ) 97 | epoch_end_checkpoint = ModelCheckpoint( 98 | monitor="val_loss", 99 | save_last=True, 100 | dirpath=dirpath, 101 | save_on_train_epoch_end=True, 102 | ) 103 | epoch_end_checkpoint.CHECKPOINT_NAME_LAST = "epoch-{epoch}-end" 104 | callbacks.extend([every_n_checkpoint, epoch_end_checkpoint]) 105 | 106 | trainer = pl.Trainer( 107 | enable_checkpointing=should_checkpoint, 108 | callbacks=callbacks, 109 | max_epochs=1 if test else 500, 110 | gradient_clip_val=1.0, 111 | gpus=gpus, 112 | precision=16, 113 | logger=False if logger is None else logger, 114 | **args, 115 | ) 116 | return trainer 117 | 118 | 119 | def setup_logger( 120 | should_log: bool, experiment_name: str, config_values: Dict[str, Any] 121 | ) -> Optional[WandbLogger]: 122 | if not should_log: 123 | pl.utilities.rank_zero_info("Disabling all logs") 124 | return None 125 | logger = WandbLogger( 126 | name=experiment_name, 127 | project="mpinets", 128 | log_model=True, 129 | ) 130 | logger.log_hyperparams(config_values) 131 | return logger 132 | 133 | 134 | def parse_args_and_configuration(): 135 | """ 136 | Checks the command line arguments and merges them with the configuration yaml file 137 | """ 138 | parser = argparse.ArgumentParser() 139 | parser.add_argument("yaml_config", type=str) 140 | parser.add_argument( 141 | "--test", 142 | action="store_true", 143 | help="Test with only a few batches (disables logging)", 144 | ) 145 | parser.add_argument( 146 | "--no-logging", action="store_true", help="Don't log to weights and biases" 147 | ) 148 | parser.add_argument( 149 | "--no-checkpointing", action="store_true", help="Don't checkpoint" 150 | ) 151 | args = parser.parse_args() 152 | 153 | if args.test: 154 | args.no_logging = True 155 | 156 | with open(args.yaml_config) as f: 157 | configuration = yaml.safe_load(f) 158 | 159 | return { 160 | "training_node_name": os.uname().nodename, 161 | **configuration, 162 | **vars(args), 163 | } 164 | 165 | 166 | def run(): 167 | """ 168 | Runs the training procedure 169 | """ 170 | config = parse_args_and_configuration() 171 | 172 | color_name = colored(config["experiment_name"], "green") 173 | pl.utilities.rank_zero_info(f"Experiment name: {color_name}") 174 | logger = setup_logger( 175 | not config["no_logging"], 176 | config["experiment_name"], 177 | config, 178 | ) 179 | 180 | trainer = setup_trainer( 181 | config["gpus"], 182 | config["test"], 183 | should_checkpoint=not config["no_checkpointing"], 184 | logger=logger, 185 | checkpoint_interval=config["checkpoint_interval"], 186 | checkpoint_dir=config["save_checkpoint_dir"], 187 | validation_interval=config["validation_interval"], 188 | ) 189 | dm = DataModule( 190 | batch_size=config["batch_size"], 191 | **(config["shared_parameters"] or {}), 192 | **(config["data_module_parameters"] or {}), 193 | ) 194 | mdl = TrainingMotionPolicyNetwork( 195 | **(config["shared_parameters"] or {}), 196 | **(config["training_model_parameters"] or {}), 197 | ) 198 | if logger is not None: 199 | logger.watch(mdl, log="gradients", log_freq=100) 200 | trainer.fit(model=mdl, datamodule=dm) 201 | 202 | 203 | if __name__ == "__main__": 204 | run() 205 | -------------------------------------------------------------------------------- /mpinets/third_party/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/NVlabs/motion-policy-networks/42ff07bb95bd75f7c7752bf4300eb53698ae099d/mpinets/third_party/__init__.py -------------------------------------------------------------------------------- /mpinets/third_party/sparc.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2015, Sivakumar Balasubramanian 2 | # 3 | # Permission to use, copy, modify, and/or distribute this software for any 4 | # purpose with or without fee is hereby granted, provided that the above 5 | # copyright notice and this permission notice appear in all copies. 6 | # 7 | # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 | # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 | # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 10 | # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 | # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 12 | # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 13 | # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 | 15 | # Not a contribution 16 | # Changes made by NVIDIA CORPORATION & AFFILIATES or University of Washington enabling SPARC computation 17 | # or otherwise documented as NVIDIA-proprietary are not a contribution and subject 18 | # to the following terms and conditions: 19 | # 20 | # MIT License 21 | # 22 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 23 | # 24 | # Permission is hereby granted, free of charge, to any person obtaining a 25 | # copy of this software and associated documentation files (the "Software"), 26 | # to deal in the Software without restriction, including without limitation 27 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 28 | # and/or sell copies of the Software, and to permit persons to whom the 29 | # Software is furnished to do so, subject to the following conditions: 30 | # 31 | # The above copyright notice and this permission notice shall be included in 32 | # all copies or substantial portions of the Software. 33 | # 34 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 35 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 36 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 37 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 38 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 39 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 40 | # DEALINGS IN THE SOFTWARE. 41 | 42 | 43 | # Note: original code can be found on Github at 44 | # https://github.com/siva82kb/SPARC/blob/master/scripts/smoothness.py 45 | 46 | import numpy as np 47 | 48 | 49 | def sparc(movement, fs, padlevel=4, fc=10.0, amp_th=0.05): 50 | """ 51 | Calculates the smoothness of the given speed profile using the modified 52 | spectral arc length metric. 53 | Parameters 54 | ---------- 55 | movement : np.array 56 | The array containing the movement speed profile. 57 | fs : float 58 | The sampling frequency of the data. 59 | padlevel : integer, optional 60 | Indicates the amount of zero padding to be done to the movement 61 | data for estimating the spectral arc length. [default = 4] 62 | fc : float, optional 63 | The max. cut off frequency for calculating the spectral arc 64 | length metric. [default = 10.] 65 | amp_th : float, optional 66 | The amplitude threshold to used for determing the cut off 67 | frequency upto which the spectral arc length is to be estimated. 68 | [default = 0.05] 69 | Returns 70 | ------- 71 | sal : float 72 | The spectral arc length estimate of the given movement's 73 | smoothness. 74 | (f, Mf) : tuple of two np.arrays 75 | This is the frequency(f) and the magntiude spectrum(Mf) of the 76 | given movement data. This spectral is from 0. to fs/2. 77 | (f_sel, Mf_sel) : tuple of two np.arrays 78 | This is the portion of the spectrum that is selected for 79 | calculating the spectral arc length. 80 | Notes 81 | ----- 82 | This is the modfieid spectral arc length metric, which has been tested only 83 | for discrete movements. 84 | 85 | Examples 86 | -------- 87 | >>> t = np.arange(-1, 1, 0.01) 88 | >>> move = np.exp(-5*pow(t, 2)) 89 | >>> sal, _, _ = sparc(move, fs=100.) 90 | >>> '%.5f' % sal 91 | '-1.41403' 92 | """ 93 | if np.allclose(movement, 0): 94 | print("All movement was 0, returning 0") 95 | return 0, None, None 96 | # Number of zeros to be padded. 97 | nfft = int(pow(2, np.ceil(np.log2(len(movement))) + padlevel)) 98 | 99 | # Frequency 100 | f = np.arange(0, fs, fs / nfft) 101 | # Normalized magnitude spectrum 102 | Mf = abs(np.fft.fft(movement, nfft)) 103 | Mf = Mf / max(Mf) 104 | 105 | # Indices to choose only the spectrum within the given cut off frequency 106 | # Fc. 107 | # NOTE: This is a low pass filtering operation to get rid of high frequency 108 | # noise from affecting the next step (amplitude threshold based cut off for 109 | # arc length calculation). 110 | fc_inx = ((f <= fc) * 1).nonzero() 111 | f_sel = f[fc_inx] 112 | Mf_sel = Mf[fc_inx] 113 | 114 | # Choose the amplitude threshold based cut off frequency. 115 | # Index of the last point on the magnitude spectrum that is greater than 116 | # or equal to the amplitude threshold. 117 | inx = ((Mf_sel >= amp_th) * 1).nonzero()[0] 118 | fc_inx = range(inx[0], inx[-1] + 1) 119 | f_sel = f_sel[fc_inx] 120 | Mf_sel = Mf_sel[fc_inx] 121 | 122 | # Calculate arc length 123 | new_sal = -sum( 124 | np.sqrt( 125 | pow(np.diff(f_sel) / (f_sel[-1] - f_sel[0]), 2) + pow(np.diff(Mf_sel), 2) 126 | ) 127 | ) 128 | return new_sal, (f, Mf), (f_sel, Mf_sel) 129 | -------------------------------------------------------------------------------- /mpinets/utils.py: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | from typing import Union, Tuple 24 | 25 | import numpy as np 26 | import torch 27 | from robofin.robots import FrankaRobot, FrankaRealRobot 28 | 29 | 30 | def _normalize_franka_joints_numpy( 31 | batch_trajectory: np.ndarray, 32 | limits: Tuple[float, float] = (-1, 1), 33 | use_real_constraints: bool = True, 34 | ) -> np.ndarray: 35 | """ 36 | Normalizes joint angles to be within a specified range according to the Franka's 37 | joint limits. This is the numpy version 38 | 39 | :param batch_trajectory np.ndarray: A batch of trajectories. Can have dims 40 | [7] if a single configuration 41 | [B, 7] if a batch of configurations 42 | [B, T, 7] if a batched time-series of configurations 43 | :param limits Tuple[float, float]: The new limits to map to 44 | :param use_real_constraints bool: If true, use the empirically determined joint limits 45 | (this is unpublished--just found by monkeying around 46 | with the robot). 47 | If false, use the published joint limits from Franka 48 | :rtype np.ndarray: An array with the same dimensions as the input 49 | """ 50 | robot = FrankaRealRobot if use_real_constraints else FrankaRobot 51 | franka_limits = robot.JOINT_LIMITS 52 | assert ( 53 | (batch_trajectory.ndim == 1 and batch_trajectory.shape[0] == robot.DOF) 54 | or (batch_trajectory.ndim == 2 and batch_trajectory.shape[1] == robot.DOF) 55 | or (batch_trajectory.ndim == 3 and batch_trajectory.shape[2] == robot.DOF) 56 | ) 57 | normalized = (batch_trajectory - franka_limits[:, 0]) / ( 58 | franka_limits[:, 1] - franka_limits[:, 0] 59 | ) * (limits[1] - limits[0]) + limits[0] 60 | return normalized 61 | 62 | 63 | def _normalize_franka_joints_torch( 64 | batch_trajectory: torch.Tensor, 65 | limits: Tuple[float, float] = (-1, 1), 66 | use_real_constraints: bool = True, 67 | ) -> torch.Tensor: 68 | """ 69 | Normalizes joint angles to be within a specified range according to the Franka's 70 | joint limits. This is the torch version 71 | 72 | :param batch_trajectory torch.Tensor: A batch of trajectories. Can have dims 73 | [7] if a single configuration 74 | [B, 7] if a batch of configurations 75 | [B, T, 7] if a batched time-series of configurations 76 | :param limits Tuple[float, float]: The new limits to map to 77 | :param use_real_constraints bool: If true, use the empirically determined joint limits 78 | (this is unpublished--just found by monkeying around 79 | with the robot). 80 | If false, use the published joint limits from Franka 81 | :rtype torch.Tensor: A tensor with the same dimensions as the input 82 | """ 83 | assert isinstance(batch_trajectory, torch.Tensor) 84 | robot = FrankaRealRobot if use_real_constraints else FrankaRobot 85 | franka_limits = torch.as_tensor(robot.JOINT_LIMITS).type_as(batch_trajectory) 86 | assert ( 87 | (batch_trajectory.ndim == 1 and batch_trajectory.size(0) == robot.DOF) 88 | or (batch_trajectory.ndim == 2 and batch_trajectory.size(1) == robot.DOF) 89 | or (batch_trajectory.ndim == 3 and batch_trajectory.size(2) == robot.DOF) 90 | ) 91 | return (batch_trajectory - franka_limits[:, 0]) / ( 92 | franka_limits[:, 1] - franka_limits[:, 0] 93 | ) * (limits[1] - limits[0]) + limits[0] 94 | 95 | 96 | def normalize_franka_joints( 97 | batch_trajectory: Union[np.ndarray, torch.Tensor], 98 | limits: Tuple[float, float] = (-1, 1), 99 | use_real_constraints: bool = True, 100 | ) -> Union[np.ndarray, torch.Tensor]: 101 | """ 102 | Normalizes joint angles to be within a specified range according to the Franka's 103 | joint limits. This is semantic sugar that dispatches to the correct implementation. 104 | 105 | :param batch_trajectory Union[np.ndarray, torch.Tensor]: A batch of trajectories. Can have dims 106 | [7] if a single configuration 107 | [B, 7] if a batch of configurations 108 | [B, T, 7] if a batched time-series of configurations 109 | :param limits Tuple[float, float]: The new limits to map to 110 | :param use_real_constraints bool: If true, use the empirically determined joint limits 111 | (this is unpublished--just found by monkeying around 112 | with the robot). 113 | If false, use the published joint limits from Franka 114 | :rtype Union[np.ndarray, torch.Tensor]: A tensor or numpy array with the same dimensions 115 | and type as the input 116 | :raises NotImplementedError: Raises an error if another data type (e.g. a list) is passed in 117 | """ 118 | if isinstance(batch_trajectory, torch.Tensor): 119 | return _normalize_franka_joints_torch( 120 | batch_trajectory, limits=limits, use_real_constraints=True 121 | ) 122 | elif isinstance(batch_trajectory, np.ndarray): 123 | return _normalize_franka_joints_numpy( 124 | batch_trajectory, limits=limits, use_real_constraints=True 125 | ) 126 | else: 127 | raise NotImplementedError("Only torch.Tensor and np.ndarray implemented") 128 | 129 | 130 | def _unnormalize_franka_joints_numpy( 131 | batch_trajectory: np.ndarray, 132 | limits: Tuple[float, float] = (-1, 1), 133 | use_real_constraints: bool = True, 134 | ) -> np.ndarray: 135 | """ 136 | Unnormalizes joint angles from a specified range back into the Franka's joint limits. 137 | This is the numpy version and the inverse of `_normalize_franka_joints_numpy`. 138 | 139 | :param batch_trajectory np.ndarray: A batch of trajectories. Can have dims 140 | [7] if a single configuration 141 | [B, 7] if a batch of configurations 142 | [B, T, 7] if a batched time-series of configurations 143 | :param limits Tuple[float, float]: The current limits to map to the joint limits 144 | :param use_real_constraints bool: If true, use the empirically determined joint limits 145 | (this is unpublished--just found by monkeying around 146 | with the robot). 147 | If false, use the published joint limits from Franka 148 | :rtype np.ndarray: An array with the same dimensions as the input 149 | """ 150 | robot = FrankaRealRobot if use_real_constraints else FrankaRobot 151 | franka_limits = robot.JOINT_LIMITS 152 | assert ( 153 | (batch_trajectory.ndim == 1 and batch_trajectory.shape[0] == robot.DOF) 154 | or (batch_trajectory.ndim == 2 and batch_trajectory.shape[1] == robot.DOF) 155 | or (batch_trajectory.ndim == 3 and batch_trajectory.shape[2] == robot.DOF) 156 | ) 157 | assert np.all(batch_trajectory >= limits[0]) 158 | assert np.all(batch_trajectory <= limits[1]) 159 | franka_limit_range = franka_limits[:, 1] - franka_limits[:, 0] 160 | franka_lower_limit = franka_limits[:, 0] 161 | for _ in range(batch_trajectory.ndim - 1): 162 | franka_limit_range = franka_limit_range[np.newaxis, ...] 163 | franka_lower_limit = franka_lower_limit[np.newaxis, ...] 164 | unnormalized = (batch_trajectory - limits[0]) * franka_limit_range / ( 165 | limits[1] - limits[0] 166 | ) + franka_lower_limit 167 | 168 | return unnormalized 169 | 170 | 171 | def _unnormalize_franka_joints_torch( 172 | batch_trajectory: torch.Tensor, 173 | limits: Tuple[float, float] = (-1, 1), 174 | use_real_constraints: bool = True, 175 | ) -> torch.Tensor: 176 | """ 177 | Unnormalizes joint angles from a specified range back into the Franka's joint limits. 178 | This is the torch version and the inverse of `_normalize_franka_joints_torch`. 179 | 180 | :param batch_trajectory torch.Tensor: A batch of trajectories. Can have dims 181 | [7] if a single configuration 182 | [B, 7] if a batch of configurations 183 | [B, T, 7] if a batched time-series of configurations 184 | :param limits Tuple[float, float]: The current limits to map to the joint limits 185 | :param use_real_constraints bool: If true, use the empirically determined joint limits 186 | (this is unpublished--just found by monkeying around 187 | with the robot). 188 | If false, use the published joint limits from Franka 189 | :rtype torch.Tensor: A tensor with the same dimensions as the input 190 | """ 191 | assert isinstance(batch_trajectory, torch.Tensor) 192 | robot = FrankaRealRobot if use_real_constraints else FrankaRobot 193 | franka_limits = torch.as_tensor(robot.JOINT_LIMITS).type_as(batch_trajectory) 194 | dof = franka_limits.size(0) 195 | assert ( 196 | (batch_trajectory.ndim == 1 and batch_trajectory.size(0) == dof) 197 | or (batch_trajectory.ndim == 2 and batch_trajectory.size(1) == dof) 198 | or (batch_trajectory.ndim == 3 and batch_trajectory.size(2) == dof) 199 | ) 200 | assert torch.all(batch_trajectory >= limits[0]) 201 | assert torch.all(batch_trajectory <= limits[1]) 202 | franka_limit_range = franka_limits[:, 1] - franka_limits[:, 0] 203 | franka_lower_limit = franka_limits[:, 0] 204 | for _ in range(batch_trajectory.ndim - 1): 205 | franka_limit_range = franka_limit_range.unsqueeze(0) 206 | franka_lower_limit = franka_lower_limit.unsqueeze(0) 207 | return (batch_trajectory - limits[0]) * franka_limit_range / ( 208 | limits[1] - limits[0] 209 | ) + franka_lower_limit 210 | 211 | 212 | def unnormalize_franka_joints( 213 | batch_trajectory: Union[np.ndarray, torch.Tensor], 214 | limits: Tuple[float, float] = (-1, 1), 215 | use_real_constraints: bool = True, 216 | ) -> Union[np.ndarray, torch.Tensor]: 217 | """ 218 | Unnormalizes joint angles from a specified range back into the Franka's joint limits. 219 | This is semantic sugar that dispatches to the correct implementation, the inverse of 220 | `normalize_franka_joints`. 221 | 222 | :param batch_trajectory Union[np.ndarray, torch.Tensor]: A batch of trajectories. Can have dims 223 | [7] if a single configuration 224 | [B, 7] if a batch of configurations 225 | [B, T, 7] if a batched time-series of configurations 226 | :param limits Tuple[float, float]: The current limits to map to the joint limits 227 | :param use_real_constraints bool: If true, use the empirically determined joint limits 228 | (this is unpublished--just found by monkeying around 229 | with the robot). 230 | If false, use the published joint limits from Franka 231 | :rtype Union[np.ndarray, torch.Tensor]: A tensor or numpy array with the same dimensions 232 | and type as the input 233 | :raises NotImplementedError: Raises an error if another data type (e.g. a list) is passed in 234 | """ 235 | if isinstance(batch_trajectory, torch.Tensor): 236 | return _unnormalize_franka_joints_torch( 237 | batch_trajectory, limits=limits, use_real_constraints=use_real_constraints 238 | ) 239 | elif isinstance(batch_trajectory, np.ndarray): 240 | return _unnormalize_franka_joints_numpy( 241 | batch_trajectory, limits=limits, use_real_constraints=use_real_constraints 242 | ) 243 | else: 244 | raise NotImplementedError("Only torch.Tensor and np.ndarray implemented") 245 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | [build-system] 24 | requires = ["setuptools", "wheel"] 25 | build-backend = "setuptools.build_meta" 26 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | # MIT License 2 | # 3 | # Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES, University of Washington. All rights reserved. 4 | # 5 | # Permission is hereby granted, free of charge, to any person obtaining a 6 | # copy of this software and associated documentation files (the "Software"), 7 | # to deal in the Software without restriction, including without limitation 8 | # the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 | # and/or sell copies of the Software, and to permit persons to whom the 10 | # Software is furnished to do so, subject to the following conditions: 11 | # 12 | # The above copyright notice and this permission notice shall be included in 13 | # all copies or substantial portions of the Software. 14 | # 15 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 | # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 | # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 21 | # DEALINGS IN THE SOFTWARE. 22 | 23 | [metadata] 24 | name = mpinets 25 | version = 0.0.1 26 | description = The code corresponding to the paper Motion Policy Networks (Fishman, et. al 2022) 27 | license = MIT 28 | 29 | [options] 30 | packages = find: 31 | python_requires = >=3.6 32 | include_package_data = True 33 | # No install_requires is included because the dependencies are complex and included in the docker 34 | --------------------------------------------------------------------------------