├── .gitignore
├── LICENSE
├── README.md
├── assets
├── experiments-demo-img
│ ├── active_diffco_example1.png
│ ├── active_diffco_example2.png
│ ├── active_diffco_example3.png
│ ├── active_fcl_example1.png
│ ├── active_fcl_example2.png
│ ├── active_fcl_example3.png
│ ├── compare_example1.png
│ ├── correlate_example1.png
│ ├── correlate_example1_dataset.png
│ ├── dataset_example1.png
│ ├── dataset_example2.png
│ ├── trajopt_example1.png
│ ├── trajopt_example2.png
│ ├── trajopt_example3.png
│ └── trajopt_example4.png
├── object_meshes
│ └── teapot.stl
└── readme-img
│ ├── rrtconnect+optimize.gif
│ └── rrtconnect.gif
├── diffco
├── __init__.py
├── collision_checkers.py
├── collision_interfaces
│ ├── __init__.py
│ ├── curobo_interface.py
│ ├── env_interface.py
│ ├── rigid_body.py
│ ├── robot_interface_base.py
│ ├── ros_interface.py
│ ├── se3_so3_util.py
│ ├── spatial_vector_algebra.py
│ └── urdf_interface.py
├── deprecated
│ ├── DiffCo.py
│ ├── DiffCoBeta.py
│ ├── FCLChecker.py
│ ├── MultiDiffCo.py
│ └── Obstacles.py
├── envs
│ ├── __init__.py
│ ├── collision_env.py
│ ├── moveit
│ │ ├── baxter
│ │ │ ├── rviz_configs
│ │ │ │ └── baxter_visualize_path.rviz
│ │ │ └── scene_objects
│ │ │ │ ├── baxter_2objontable.scene
│ │ │ │ ├── baxter_complex.scene
│ │ │ │ ├── baxter_floating.scene
│ │ │ │ ├── baxter_medium.scene
│ │ │ │ ├── baxter_scene.scene
│ │ │ │ ├── cat.obj
│ │ │ │ ├── cat_scene.scene
│ │ │ │ └── table.obj
│ │ └── panda
│ │ │ ├── rviz_configs
│ │ │ └── pandarvizconfig.rviz
│ │ │ └── scene_objects
│ │ │ ├── bookshelves.scene
│ │ │ ├── bookshelves_small.scene
│ │ │ ├── narrow.scene
│ │ │ └── raised_shelves.scene
│ └── rtb
│ │ ├── __init__.py
│ │ └── panda_envs.py
├── kernel.py
├── kernel_perceptrons.py
├── model.py
├── optim.py
├── robot_data
│ ├── 2link_robot.urdf
│ ├── __init__.py
│ ├── allegro
│ │ ├── meshes
│ │ │ ├── base_link.STL
│ │ │ ├── base_link_left.STL
│ │ │ ├── link_0.0.STL
│ │ │ ├── link_1.0.STL
│ │ │ ├── link_12.0_left.STL
│ │ │ ├── link_12.0_right.STL
│ │ │ ├── link_13.0.STL
│ │ │ ├── link_14.0.STL
│ │ │ ├── link_15.0.STL
│ │ │ ├── link_15.0_tip.STL
│ │ │ ├── link_2.0.STL
│ │ │ ├── link_3.0.STL
│ │ │ ├── link_3.0_tip.STL
│ │ │ └── link_4.0.STL
│ │ └── urdf
│ │ │ ├── allegro_hand_description_left.urdf
│ │ │ └── allegro_hand_description_left_small_damping.urdf
│ ├── fetch_description
│ │ ├── meshes
│ │ │ ├── base_link.dae
│ │ │ ├── base_link_collision.STL
│ │ │ ├── base_link_uv.png
│ │ │ ├── bellows_link.STL
│ │ │ ├── bellows_link_collision.STL
│ │ │ ├── elbow_flex_link.dae
│ │ │ ├── elbow_flex_link_collision.STL
│ │ │ ├── elbow_flex_uv.png
│ │ │ ├── estop_link.STL
│ │ │ ├── forearm_roll_link.dae
│ │ │ ├── forearm_roll_link_collision.STL
│ │ │ ├── forearm_roll_uv.png
│ │ │ ├── gripper_link.STL
│ │ │ ├── gripper_link.dae
│ │ │ ├── gripper_uv.png
│ │ │ ├── head_pan_link.dae
│ │ │ ├── head_pan_link_collision.STL
│ │ │ ├── head_pan_uv.png
│ │ │ ├── head_tilt_link.dae
│ │ │ ├── head_tilt_link_collision.STL
│ │ │ ├── head_tilt_uv.png
│ │ │ ├── l_gripper_finger_link.STL
│ │ │ ├── l_wheel_link.STL
│ │ │ ├── l_wheel_link_collision.STL
│ │ │ ├── laser_link.STL
│ │ │ ├── r_gripper_finger_link.STL
│ │ │ ├── r_wheel_link.STL
│ │ │ ├── r_wheel_link_collision.STL
│ │ │ ├── shoulder_lift_link.dae
│ │ │ ├── shoulder_lift_link_collision.STL
│ │ │ ├── shoulder_lift_uv.png
│ │ │ ├── shoulder_pan_link.dae
│ │ │ ├── shoulder_pan_link_collision.STL
│ │ │ ├── shoulder_pan_uv.png
│ │ │ ├── torso_fixed_link.STL
│ │ │ ├── torso_fixed_link.dae
│ │ │ ├── torso_fixed_uv.png
│ │ │ ├── torso_lift_link.dae
│ │ │ ├── torso_lift_link_collision.STL
│ │ │ ├── torso_lift_uv.png
│ │ │ ├── upperarm_roll_link.dae
│ │ │ ├── upperarm_roll_link_collision.STL
│ │ │ ├── upperarm_roll_uv.png
│ │ │ ├── wrist_flex_link.dae
│ │ │ ├── wrist_flex_link_collision.STL
│ │ │ ├── wrist_flex_uv.png
│ │ │ ├── wrist_roll_link.dae
│ │ │ ├── wrist_roll_link_collision.STL
│ │ │ └── wrist_roll_uv.png
│ │ └── urdf
│ │ │ ├── FETCH_MODIFICATIONS.md
│ │ │ ├── fetch.urdf
│ │ │ ├── fetch_arm_no_gripper.urdf
│ │ │ └── fetch_arm_no_gripper_small_damping.urdf
│ ├── kinova_description
│ │ ├── meshes
│ │ │ ├── arm.SLDPRT
│ │ │ ├── arm.STL
│ │ │ ├── arm.dae
│ │ │ ├── arm_half_1.STL
│ │ │ ├── arm_half_1.dae
│ │ │ ├── arm_half_2.STL
│ │ │ ├── arm_half_2.dae
│ │ │ ├── arm_mico.STL
│ │ │ ├── arm_mico.dae
│ │ │ ├── base.STL
│ │ │ ├── base.dae
│ │ │ ├── finger_distal.STL
│ │ │ ├── finger_distal.dae
│ │ │ ├── finger_proximal.STL
│ │ │ ├── finger_proximal.dae
│ │ │ ├── forearm.STL
│ │ │ ├── forearm.dae
│ │ │ ├── forearm_mico.STL
│ │ │ ├── forearm_mico.dae
│ │ │ ├── hand_2finger.STL
│ │ │ ├── hand_2finger.dae
│ │ │ ├── hand_3finger.STL
│ │ │ ├── hand_3finger.dae
│ │ │ ├── ring_big.STL
│ │ │ ├── ring_big.dae
│ │ │ ├── ring_small.STL
│ │ │ ├── ring_small.dae
│ │ │ ├── shoulder.STL
│ │ │ ├── shoulder.dae
│ │ │ ├── wrist.STL
│ │ │ ├── wrist.dae
│ │ │ ├── wrist_spherical_1.STL
│ │ │ ├── wrist_spherical_1.dae
│ │ │ ├── wrist_spherical_2.STL
│ │ │ └── wrist_spherical_2.dae
│ │ └── urdf
│ │ │ ├── jaco.urdf
│ │ │ └── jaco_clean.urdf
│ ├── kuka_iiwa
│ │ ├── meshes
│ │ │ └── iiwa7
│ │ │ │ └── collision
│ │ │ │ ├── link_0.stl
│ │ │ │ ├── link_1.stl
│ │ │ │ ├── link_2.stl
│ │ │ │ ├── link_3.stl
│ │ │ │ ├── link_4.stl
│ │ │ │ ├── link_5.stl
│ │ │ │ ├── link_6.stl
│ │ │ │ └── link_7.stl
│ │ └── urdf
│ │ │ ├── iiwa7.urdf
│ │ │ └── iiwa7_allegro.urdf
│ ├── panda_description
│ │ ├── CHANGELOG.rst
│ │ ├── CMakeLists.txt
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── meshes
│ │ │ └── collision
│ │ │ │ ├── finger.stl
│ │ │ │ ├── hand.stl
│ │ │ │ ├── link0.stl
│ │ │ │ ├── link1.stl
│ │ │ │ ├── link2.stl
│ │ │ │ ├── link3.stl
│ │ │ │ ├── link4.stl
│ │ │ │ ├── link5.stl
│ │ │ │ ├── link6.stl
│ │ │ │ └── link7.stl
│ │ ├── package.xml
│ │ └── urdf
│ │ │ ├── meshes
│ │ │ └── collision
│ │ │ │ ├── finger.stl
│ │ │ │ ├── hand.stl
│ │ │ │ ├── link0.stl
│ │ │ │ ├── link1.stl
│ │ │ │ ├── link2.stl
│ │ │ │ ├── link3.stl
│ │ │ │ ├── link4.stl
│ │ │ │ ├── link5.stl
│ │ │ │ ├── link6.stl
│ │ │ │ └── link7.stl
│ │ │ ├── panda.srdf
│ │ │ ├── panda.urdf
│ │ │ ├── panda_no_gripper.urdf
│ │ │ ├── panda_no_gripper_simple_collision.urdf
│ │ │ └── panda_simple_collision.urdf
│ ├── rope_description
│ │ └── rope.urdf
│ └── trifinger_edu_description
│ │ ├── meshes
│ │ └── stl
│ │ │ ├── edu
│ │ │ ├── base_back.stl
│ │ │ ├── base_front.stl
│ │ │ ├── base_side_left.stl
│ │ │ ├── base_side_right.stl
│ │ │ ├── base_top.stl
│ │ │ ├── frame_wall.stl
│ │ │ ├── lower_link.stl
│ │ │ ├── middle_link.stl
│ │ │ └── upper_link.stl
│ │ │ └── trifinger_table_without_border.stl
│ │ └── trifinger_edu.urdf
├── robot_fkine.py
├── routines.py
└── utils.py
├── examples
└── tests
│ ├── test_fkine_diffco.py
│ ├── test_rope.py
│ ├── test_urdf_robot.py
│ └── test_urdf_robot_vs_shapeenv.py
├── experiments.md
├── misc
├── csv_dataset_to_pt.py
├── fcl-test-extracting-contacts.py
├── fcl-test-group.py
├── fcl-test.py
├── fix_exp.py
├── module_test.py
├── profile_reading.py
├── rbf_regression.py
├── recover_data.py
├── robopy-test.py
├── rrt_star.py
├── small_test.py
├── speed_compare_plot.ipynb
└── viewer.ipynb
├── notebook_tutorials
└── trajectory_optimization_tutorial.ipynb
├── requirements.txt
├── scripts
├── 2d_data_generation.py
├── 2d_escape.py
├── 2d_line_data_generation.py
├── 2d_trajopt.py
├── 3d_data_generation.py
├── 3d_trajopt.py
├── active.py
├── collision_landscape.py
├── compare_plot_planning.py
├── compare_sampling.py
├── escape.py
├── generate_batch_data_2d.py
├── generate_batch_data_se2.py
├── generate_test_configs.py
├── line_query_eval.py
├── manual_trajopt_se2.py
├── manual_trajopt_se3.py
├── motion_planner.py
├── record_moveit_path.py
├── se3_data_generation.py
├── simple_temporal_active.py
├── speed_compare.py
├── speed_compare_plot.py
├── temporal1d_data_generation.py
├── test_correlation.py
├── trajectory_optim.py
└── visualize_trajectory_2d.py
└── setup.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 |
162 | # VS Code
163 | .vscode/
164 |
165 | # DiffCo
166 | results/
167 | data*/*
168 | figs/
169 | visual/
170 | matlabcode/
171 | debug/
172 | docker/
173 |
174 | .devcontainer
175 | notebook_tutorials/testing_random_stuff.ipynb
176 | examples/private_tests/
177 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2020, Yuheng Zhi
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DiffCo: Auto-Differentiable Proxy Collision Checking
2 |
3 | ## Updates
4 |
5 | 🟥 *[05/30/2023]* **First notebook tutorial** available! Check out [`trajectory_optimization_tutorial.ipynb`](notebook_tutorials/trajectory_optimization_tutorial.ipynb)
6 | (A few upgrades to optimization setup as well.)
7 |
8 | This is the codebase for our paper **DiffCo: Auto-Differentiable Proxy Collision Detection with Multi-class Labels for Safety-Aware Trajectory Optimization**, Yuheng Zhi, Nikhil Das, Michael Yip. [[arxiv]](https://arxiv.org/abs/2102.07413)[[IEEE T-Ro]](https://ieeexplore.ieee.org/document/9747928)
9 |
10 | RRT-Connect | Optimized
11 | :-------------------------:|:-------------------------:
12 |  | 
13 |
14 | ## Installation
15 | It is recommended to use [Conda](https://docs.conda.io/projects/conda/en/latest/user-guide/getting-started.html) for virtual environment management.
16 | ```
17 | pip install -r requirements.txt
18 | pip install -e .
19 | ```
20 | ### Additional steps for Moveit experiments
21 | 1. Install [MoveIt](https://moveit.ros.org/install/).
22 | 2. To install Baxter configurations, refer to [this tutorial](https://github.com/RethinkRobotics/sdk-docs/wiki/MoveIt-Tutorial) but change the versions of ros according to the one you installed. Note: Ubuntu 20.04 only has ros noetic supported AFAIK, so please do modify command lines from the Baxter tutorial according to the ros version that you installed.
23 |
24 | ## Start using DiffCo:
25 | **The best entry point is the notebook tutorial under `notebook_tutorials/`.** (More to come)
26 |
27 | This is a library of a relatively easy implementation of the differentiable collision checker itself (under the directory `diffco`) and some scripts to reproduce experiments in the paper (under the directory `scripts`)
28 | 1. `speed_compare.py` includes comprehensive examples of using DiffCo to do trajectory optimization. Besides the experiment in the paper, we also included an implementation of using DiffCo with Adam to optimize trajectories under constraints.
29 | 2. `motion_planner.py` shows how we use OMPL (as initial point for optimization).
30 | 3. `[2,3]d_data_generation.py` implements how to use MoveIt and FCL to generate the initial dataset for Baxter and Planar robots, respectively. You may use any of your favourite collision detection library to do this. In the tutorial, we also tried [roboticstoolbox-python](https://github.com/petercorke/robotics-toolbox-python).
31 | 4. `active.py` contains code of the active learning experiment.
32 | 5. `test_correlation.py` contains code to evaluate the correlation between DiffCo output and FCL.
33 | 6. `escape.py` and `compare_sampling.py` contain code to do the free-space sampling speed comparison.
34 | 7. `diffco/optim.py` now contains our code for trajectory optimization. The code in `scripts/trajectory_optim.py` is going to be deprecated.
35 | 8. `diffco/routines.py` contains a few convenient subroutines that appear often enough that we decided to write them into functions.
36 |
37 | For more info on how to get started with these scripts (to, e.g., reproduce our experiments), see [`experiments.md`](experiments.md).
38 |
39 |
40 |
--------------------------------------------------------------------------------
/assets/experiments-demo-img/active_diffco_example1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/experiments-demo-img/active_diffco_example1.png
--------------------------------------------------------------------------------
/assets/experiments-demo-img/active_diffco_example2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/experiments-demo-img/active_diffco_example2.png
--------------------------------------------------------------------------------
/assets/experiments-demo-img/active_diffco_example3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/experiments-demo-img/active_diffco_example3.png
--------------------------------------------------------------------------------
/assets/experiments-demo-img/active_fcl_example1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/experiments-demo-img/active_fcl_example1.png
--------------------------------------------------------------------------------
/assets/experiments-demo-img/active_fcl_example2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/experiments-demo-img/active_fcl_example2.png
--------------------------------------------------------------------------------
/assets/experiments-demo-img/active_fcl_example3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/experiments-demo-img/active_fcl_example3.png
--------------------------------------------------------------------------------
/assets/experiments-demo-img/compare_example1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/experiments-demo-img/compare_example1.png
--------------------------------------------------------------------------------
/assets/experiments-demo-img/correlate_example1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/experiments-demo-img/correlate_example1.png
--------------------------------------------------------------------------------
/assets/experiments-demo-img/correlate_example1_dataset.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/experiments-demo-img/correlate_example1_dataset.png
--------------------------------------------------------------------------------
/assets/experiments-demo-img/dataset_example1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/experiments-demo-img/dataset_example1.png
--------------------------------------------------------------------------------
/assets/experiments-demo-img/dataset_example2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/experiments-demo-img/dataset_example2.png
--------------------------------------------------------------------------------
/assets/experiments-demo-img/trajopt_example1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/experiments-demo-img/trajopt_example1.png
--------------------------------------------------------------------------------
/assets/experiments-demo-img/trajopt_example2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/experiments-demo-img/trajopt_example2.png
--------------------------------------------------------------------------------
/assets/experiments-demo-img/trajopt_example3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/experiments-demo-img/trajopt_example3.png
--------------------------------------------------------------------------------
/assets/experiments-demo-img/trajopt_example4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/experiments-demo-img/trajopt_example4.png
--------------------------------------------------------------------------------
/assets/object_meshes/teapot.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/object_meshes/teapot.stl
--------------------------------------------------------------------------------
/assets/readme-img/rrtconnect+optimize.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/readme-img/rrtconnect+optimize.gif
--------------------------------------------------------------------------------
/assets/readme-img/rrtconnect.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/assets/readme-img/rrtconnect.gif
--------------------------------------------------------------------------------
/diffco/__init__.py:
--------------------------------------------------------------------------------
1 | from . import kernel, model, utils, optim, routines
2 | from .kernel_perceptrons import DiffCo, MultiDiffCo, DiffCoBeta
3 | from .collision_checkers import CollisionChecker, RBFDiffCo, ForwardKinematicsDiffCo, HybridForwardKinematicsDiffCo
4 | from .collision_interfaces import *
--------------------------------------------------------------------------------
/diffco/collision_interfaces/__init__.py:
--------------------------------------------------------------------------------
1 | from .env_interface import ShapeEnv, PCDEnv
2 | from .urdf_interface import RobotInterfaceBase, URDFRobot, MultiURDFRobot, robot_description_folder, \
3 | FrankaPanda, KUKAiiwa, TwoLinkRobot, TrifingerEdu
4 | from .ros_interface import ROSRobotEnv
5 | from .curobo_interface import CuRoboRobot, CuRoboCollisionWorldEnv
--------------------------------------------------------------------------------
/diffco/collision_interfaces/curobo_interface.py:
--------------------------------------------------------------------------------
1 | from typing import List, Tuple, Dict, Optional, Union
2 | from dataclasses import dataclass
3 | import os
4 | from collections import defaultdict
5 | import time
6 |
7 | import torch
8 |
9 | from .robot_interface_base import RobotInterfaceBase
10 |
11 | try:
12 | import curobo
13 | from curobo.cuda_robot_model.cuda_robot_model import CudaRobotModel as curobo_CudaRobotModel
14 | from curobo.geom.sdf.world import WorldCollision, CollisionBuffer, CollisionQueryBuffer
15 | from curobo.geom.sdf.world_mesh import WorldPrimitiveCollision, WorldMeshCollision
16 | from curobo.curobolib.geom import SelfCollisionDistance
17 | except ImportError:
18 | print("curobo not found. Please install curobo to use the cuRobo interface")
19 |
20 |
class CuRoboCollisionWorldEnv:
    '''
    Thin wrapper around a curobo collision world that exposes the
    sphere-collision query interface expected by CuRoboRobot.

    The wrapped object is kept accessible as ``curobo_collision_world``
    because callers also read attributes from it directly (e.g. its
    ``collision_types``).
    '''
    def __init__(self, curobo_collision_world: 'Union[WorldCollision, WorldMeshCollision, WorldPrimitiveCollision]'):
        # Store the underlying curobo world; all queries are delegated to it.
        self.curobo_collision_world = curobo_collision_world

    def get_sphere_collision(self, *args, **kwargs):
        '''Delegate a sphere-collision query to the wrapped curobo world.'''
        world = self.curobo_collision_world
        return world.get_sphere_collision(*args, **kwargs)
30 |
31 |
class CuRoboRobot(RobotInterfaceBase):
    '''Robot interface backed by curobo's CUDA-accelerated robot model.

    Wraps a curobo ``CudaRobotModel`` to provide batched configuration
    sampling, collision checking (self-collision, plus robot-vs-world when
    a :class:`CuRoboCollisionWorldEnv` is given), and forward kinematics.
    '''
    def __init__(
        self,
        cuda_robot_model: 'curobo_CudaRobotModel',
        name: str = "curobo_robot",
        unique_position_link_names: Optional[List[str]] = None,
        world_coll_activation_distance: float = 0.0,
    ):
        '''
        Args:
            cuda_robot_model: curobo CudaRobotModel supplying kinematics,
                joint limits, and the self-collision configuration.
            name: name of this robot interface.
            unique_position_link_names: link names whose positions are
                returned by ``forward_kinematics``; defaults to all link
                names of the curobo model.
            world_coll_activation_distance: activation distance forwarded to
                curobo's sphere-collision query against the world.
        '''
        super().__init__(name=name, device=cuda_robot_model.tensor_args.device)
        self.cuda_robot_model = cuda_robot_model
        # After the transpose this is indexed as [:, 0] = lower, [:, 1] = upper
        # (see rand_configs below); presumably shape [n_dofs, 2] — TODO confirm
        # against curobo's get_joint_limits().
        self.joint_limits = cuda_robot_model.get_joint_limits().position.transpose(0, 1)
        self._n_dofs = cuda_robot_model.get_dof()
        self._batch_size = cuda_robot_model._batch_size
        self.tensor_args = cuda_robot_model.tensor_args

        if unique_position_link_names is None:
            self.unique_position_link_names = cuda_robot_model.link_names
            print(f'cuda_robot_model.link_names: {cuda_robot_model.link_names}')
        else:
            self.unique_position_link_names = unique_position_link_names

        # Buffers and weights used by the collision queries below.
        self._collision_query_buffer = CollisionQueryBuffer()
        self.world_coll_activation_distance = self.tensor_args.to_device([world_coll_activation_distance])
        self.env_query_idx = None
        self.weight = self.tensor_args.to_device([1.0])
        self.self_collision_kin_config = self.cuda_robot_model.get_self_collision_config()
        self.self_collision_weight = self.tensor_args.to_device([1.0])
        self.return_loss = False

    def rand_configs(self, num_cfgs):
        '''Sample ``num_cfgs`` configurations uniformly within the joint limits.

        Returns a [num_cfgs x n_dofs] tensor on this interface's device.
        '''
        return torch.rand(num_cfgs, self._n_dofs, device=self._device) * (self.joint_limits[:, 1] - self.joint_limits[:, 0]) + self.joint_limits[:, 0]

    def _update_self_collision_batch_size(self, robot_spheres):
        '''(Re)allocate self-collision output buffers when the sphere batch shape changes.'''
        # from curobo/src/curobo/rollout/cost/self_collision_cost.py

        # Assuming n stays constant
        # TODO: use collision buffer here?

        # NOTE: self._batch_size starts as the model's batch size and is
        # replaced with the full shape tuple after the first call.
        if self._batch_size is None or self._batch_size != robot_spheres.shape:
            b, h, n, k = robot_spheres.shape
            self._out_distance = torch.zeros(
                (b, h), device=self.tensor_args.device, dtype=self.tensor_args.dtype
            )
            self._out_vec = torch.zeros(
                (b, h, n, k), device=self.tensor_args.device, dtype=self.tensor_args.dtype
            )
            self._batch_size = robot_spheres.shape
            self._sparse_sphere_idx = torch.zeros(
                (b, h, n), device=self.tensor_args.device, dtype=torch.uint8
            )

    def collision(
        self,
        q,
        other: Optional[CuRoboCollisionWorldEnv]=None,
    ):
        '''
        Boolean collision check for a batch of configurations.

        Assumes q = [batch_size x n_dofs]. If ``other`` is given, the result
        combines robot-vs-world and self-collision; otherwise it is
        self-collision only. True means in collision.
        '''
        robot_spheres = self.cuda_robot_model.forward(q,)[-1]
        robot_spheres = robot_spheres.unsqueeze(0) if len(robot_spheres.shape) == 3 else robot_spheres

        if other is not None:
            self._collision_query_buffer.update_buffer_shape(
                robot_spheres.shape, self.tensor_args, other.curobo_collision_world.collision_types
            )
            robot_world_collision = other.get_sphere_collision(
                robot_spheres,
                self._collision_query_buffer,
                self.weight,
                env_query_idx=self.env_query_idx,
                activation_distance=self.world_coll_activation_distance,
                return_loss=False
            ) # Positive means in collision
            robot_world_collision = robot_world_collision.max(dim=-1).values

        self._update_self_collision_batch_size(robot_spheres)

        self_collision = SelfCollisionDistance.apply(
            self._out_distance,
            self._out_vec,
            self._sparse_sphere_idx,
            robot_spheres,
            self.self_collision_kin_config.offset,
            self.weight,
            self.self_collision_kin_config.collision_matrix,
            self.self_collision_kin_config.thread_location,
            self.self_collision_kin_config.thread_max,
            self.self_collision_kin_config.checks_per_thread,
            self.self_collision_kin_config.experimental_kernel,
            self.return_loss,
        )
        if other is not None:
            return (robot_world_collision[0] > 0) | (self_collision[0] > 0)
        else:
            # BUGFIX: was `self_collision[0] < 0`, which inverted the result.
            # Positive self-collision distance means in collision, exactly as
            # tested in the combined branch above.
            return self_collision[0] > 0


    def forward_kinematics(self, q, link_names=None, return_collision=False):
        '''
        returns only positions unique position links for FKDiffCo.
        TODO: maybe in the future return rotations as well if needed.

        Leading batch dimensions of q are preserved: the result has shape
        q.shape[:-1] + (num_links, 3-ish position dims from curobo).
        '''
        link_names = self.unique_position_link_names if link_names is None else link_names
        shape_tuple = q.shape
        # Flatten any leading batch dims before querying curobo, then restore.
        q = q.view(-1, shape_tuple[-1]).contiguous()
        link_poses = self.cuda_robot_model.get_link_poses(q, link_names=link_names) # .contiguous()
        # return link_poses
        link_pos = link_poses.position.reshape(shape_tuple[:-1] + link_poses.position.shape[1:]) # .contiguous()
        return link_pos

    def compute_forward_kinematics_all_links(self, q, return_collision=False):
        '''Not supported: curobo's tensorized FK is used instead (see forward_kinematics).'''
        raise NotImplementedError(f"{self.__class__.__name__} does not implement compute_forward_kinematics_all_links, "
                                  "which is expected to return a dictionary of link names to poses. "
                                  "Instead it directly uses curobo's tensorized forward kinematics")
--------------------------------------------------------------------------------
/diffco/collision_interfaces/env_interface.py:
--------------------------------------------------------------------------------
1 | import trimesh
2 | import numpy as np
3 | import fcl
4 |
5 |
class PCDCollisionManager:
    '''Collision manager backed by a point cloud via an FCL octree.

    Currently a stub: constructing it always raises NotImplementedError.
    The statements after the raise are an unreachable draft kept for
    reference.
    '''
    # TODO: consider converting pcd to python-octomap
    def __init__(self, point_cloud):
        raise NotImplementedError('PCDCollisionManager is not implemented yet')

        # --- unreachable draft implementation below ---
        self.point_cloud = point_cloud
        self._manager = fcl.DynamicAABBTreeCollisionManager()
        self.collision_object = fcl.CollisionObject(fcl.OcTree(0.1, point_cloud))
        self._manager.registerObject(self.collision_object)

    def update_point_cloud(self, point_cloud):
        '''Replace the registered octree with one built from the new cloud.'''
        self._manager.unregisterObject(self.collision_object)
        self.point_cloud = point_cloud
        new_obj = fcl.CollisionObject(fcl.OcTree(0.1, point_cloud))
        self.collision_object = new_obj
        self._manager.registerObject(new_obj)
22 |
23 |
24 |
class PCDEnv:
    '''Environment wrapper holding a point cloud and its collision manager.'''

    def __init__(self, point_cloud):
        self.point_cloud = point_cloud
        self.collision_manager = PCDCollisionManager(point_cloud)

    def update_point_cloud(self, point_cloud):
        # Delegate to the manager, which rebuilds its octree from the new cloud.
        self.collision_manager.update_point_cloud(point_cloud)
32 |
33 |
34 |
class ShapeEnv:
    '''
    Environment represented by a dict of primitive shapes and meshes.

    The shapes are added to a trimesh.Scene and mirrored into a
    trimesh.collision.CollisionManager for collision queries.

    The format of the dict is as follows:
    shape_dict = {
        'box1': {'type': 'Box', 'params': {'extents': [1, 1, 1]}, 'transform': np.eye(4)},
        'sphere1': {'type': 'Sphere', 'params': {'radius': 1}, 'transform': np.eye(4)},
        'cylinder1': {'type': 'Cylinder', 'params': {'radius': 1, 'height': 1}, 'transform': np.eye(4)},
        'capsule1': {'type': 'Capsule', 'params': {'radius': 1, 'height': 1}, 'transform': np.eye(4)},
        'mesh1': {'type': 'Mesh', 'params': {'file_obj': 'path/to/obj'}, 'transform': np.eye(4)},
        'mesh2': {'type': 'Mesh', 'params': {'file_stl': 'path/to/stl', 'scale': 2.0}, 'transform': np.eye(4)}
    }
    '''

    # Maps the 'type' field to the corresponding trimesh primitive class.
    _PRIMITIVES = {
        'Box': trimesh.primitives.Box,
        'Sphere': trimesh.primitives.Sphere,
        'Cylinder': trimesh.primitives.Cylinder,
        'Capsule': trimesh.primitives.Capsule,
    }

    def __init__(self, shapes):
        self.name = 'ShapeEnv'
        self.scene = trimesh.Scene()
        for shape_name, shape_info in shapes.items():
            self.scene.add_geometry(
                self._create_geometry(shape_info['type'], shape_info['params']),
                node_name=shape_name,
                transform=shape_info.get('transform', np.eye(4)),
            )

        self.collision_manager = trimesh.collision.CollisionManager()
        self._add_scene_to_collision_manager(self.scene, self.collision_manager)

    @classmethod
    def _create_geometry(cls, shape_type, shape_params):
        '''
        Build a trimesh geometry from a shape type and its parameter dict.

        'Mesh' params are forwarded to trimesh.load; an optional 'scale' entry
        is applied to the loaded mesh. Fixes vs. the original: 'scale' is now
        honored by add_object() too (previously only by __init__), and the
        caller's params dict is no longer mutated (the original popped 'scale'
        from it in place).

        Raises:
            ValueError: for an unsupported shape type. Previously such shapes
                were silently skipped in __init__ and caused a KeyError later
                in add_object().
        '''
        if shape_type in cls._PRIMITIVES:
            return cls._PRIMITIVES[shape_type](**shape_params)
        if shape_type == 'Mesh':
            params = dict(shape_params)  # copy: do not mutate the caller's dict
            scale = params.pop('scale', 1)
            geom = trimesh.load(**params)
            geom.apply_scale(scale)
            return geom
        raise ValueError(f'Unsupported shape type: {shape_type}')

    def _add_scene_to_collision_manager(self, scene, collision_manager: trimesh.collision.CollisionManager):
        """
        Convert objects in trimesh.Scene to fcl CollisionObject's, keeping the names of the objects
        """
        for geometry_node_name in scene.graph.nodes_geometry:
            T, geometry = scene.graph[geometry_node_name]
            mesh = scene.geometry[geometry]
            collision_manager.add_object(name=geometry_node_name, mesh=mesh, transform=T)

    def remove_object(self, name):
        '''Remove an object from both the scene and the collision manager.'''
        self.scene.delete_geometry(name)
        self.collision_manager.remove_object(name)

    def add_object(self, name, shape_type, shape_params, transform=None):
        '''
        Add a single object to the scene and the collision manager.

        transform defaults to the identity (None sentinel replaces the old
        module-load-time np.eye(4) default; behavior is unchanged).
        '''
        if transform is None:
            transform = np.eye(4)
        self.scene.add_geometry(
            self._create_geometry(shape_type, shape_params),
            node_name=name,
            transform=transform,
        )
        T, geometry = self.scene.graph[name]
        mesh = self.scene.geometry[geometry]
        self.collision_manager.add_object(name=name, mesh=mesh, transform=T)

    def update_transform(self, name, transform):
        '''
        If the transform of an object is updated, this function should be called to update the collision manager
        '''
        self.collision_manager.set_transform(name, transform)

    def update_scene(self, name=None, transform=None):
        '''
        This should only be called before visualization. It updates the scene graph with the latest transforms
        in the collision manager. It is not necessary to call this function before collision checking.
        '''
        if name is not None:
            self.scene.graph.update(frame_to=name, matrix=transform)
        else:
            # Sync every node from the collision manager's current poses.
            for node_name in self.scene.graph.nodes_geometry:
                transform = np.eye(4)
                transform[:3, :3] = self.collision_manager._objs[node_name]['obj'].get_rotation()
                transform[:3, 3] = self.collision_manager._objs[node_name]['obj'].get_translation()
                self.scene.graph.update(frame_to=node_name, matrix=transform)
--------------------------------------------------------------------------------
/diffco/collision_interfaces/rigid_body.py:
--------------------------------------------------------------------------------
1 | """
2 | Rigid body
3 | ====================================
4 | TODO
5 | """
6 |
7 | from typing import List, Optional
8 | import json
9 |
10 | import torch
11 |
12 | from .spatial_vector_algebra import (
13 | CoordinateTransform,
14 | z_rot,
15 | y_rot,
16 | x_rot,
17 | )
18 |
class RigidBody:
    """
    Node representation of a link.
    Implements recursive forward kinematics.
    """

    _parent: Optional["RigidBody"]
    _children: List["RigidBody"]

    def __init__(self, rigid_body_params, device="cpu"):

        super().__init__()

        self._parent = None
        self._children = []

        self._device = torch.device(device)
        # Identity of this link and of the joint connecting it to its parent.
        self.link_idx = rigid_body_params["link_idx"]
        self.name = rigid_body_params["link_name"]
        self.joint_name = rigid_body_params["joint_name"]
        self.joint_type = rigid_body_params["joint_type"]

        # Fixed joint-origin offsets, exposed as zero-arg callables that
        # return (1, 3) tensors (translation and roll/pitch/yaw angles).
        self.joint_trans = lambda: rigid_body_params["joint_trans"].reshape(1, 3)
        self.joint_rot_angles = lambda: rigid_body_params["joint_rot_angles"].reshape(1, 3)

        # local joint axis (w.r.t. joint coordinate frame):
        self.joint_axis = rigid_body_params["joint_axis"]

        self.joint_limits = rigid_body_params["joint_limits"]
        # Mimic parameters (multiplier/offset) when this joint mirrors another.
        self.joint_mimic = rigid_body_params["joint_mimic"]

        # Pose of the joint frame relative to the parent, cached at q = 0.
        self.joint_pose = CoordinateTransform(device=self._device)
        self.joint_pose.set_translation(torch.reshape(self.joint_trans(), (1, 3)))

        self.update_joint_state(
            torch.zeros([1, 1], device=self._device),
        )

        # Optional per-link collision/visual geometry origins (4x4 poses),
        # converted to CoordinateTransform objects.
        self.collision_origins = rigid_body_params.get("collision_origins", None)
        if self.collision_origins:
            self.collision_origins = [CoordinateTransform(
                rot=pose[:3, :3].reshape(1, 3, 3),
                trans=pose[:3, 3].reshape(1, 3),
                device=self._device,
            ) for pose in self.collision_origins]
        self.visual_origins = rigid_body_params.get("visual_origins", None)
        if self.visual_origins:
            self.visual_origins = [CoordinateTransform(
                rot=pose[:3, :3].reshape(1, 3, 3),
                trans=pose[:3, 3].reshape(1, 3),
                device=self._device,
            ) for pose in self.visual_origins]

        # World (or root-relative) pose of this body; updated externally.
        self.pose = CoordinateTransform(device=self._device)

    # Kinematic tree construction
    def set_parent(self, link: "RigidBody"):
        self._parent = link

    def add_child(self, link: "RigidBody"):
        self._children.append(link)

    # Recursive algorithms
    def forward_kinematics(self, q_dict, return_collision=False):
        """Recursive forward kinematics
        Computes transformations from self to all descendants.

        NOTE(review): for revolute/prismatic joints the branches below read
        `q` and `batch_size`, which are only bound when self.name is in
        q_dict — callers presumably always supply a value for every actuated
        joint; confirm.

        Returns: Dict[link_name, transform_from_self_to_link]
        """
        # Compute joint pose
        if self.name in q_dict:
            q = q_dict[self.name]
            if self.joint_mimic is not None:
                # Mimic joints follow another joint's value affinely.
                q = q * self.joint_mimic.multiplier + self.joint_mimic.offset
            batch_size = q.shape[0]

        # Fixed origin rotation: R = Rz(yaw) @ Ry(pitch) @ Rx(roll).
        rot_angles_vals = self.joint_rot_angles()
        roll = rot_angles_vals[0, 0]
        pitch = rot_angles_vals[0, 1]
        yaw = rot_angles_vals[0, 2]
        fixed_rotation = (z_rot(yaw) @ y_rot(pitch)) @ x_rot(roll)
        fixed_translation = self.joint_trans().reshape(1, 3)

        if self.joint_type in ["revolute", "continuous"]:
            # Assumes the joint axis is axis-aligned (one of ±x, ±y, ±z).
            if torch.abs(self.joint_axis[0, 0]) == 1:
                rot = x_rot(torch.sign(self.joint_axis[0, 0]) * q)
            elif torch.abs(self.joint_axis[0, 1]) == 1:
                rot = y_rot(torch.sign(self.joint_axis[0, 1]) * q)
            else:
                rot = z_rot(torch.sign(self.joint_axis[0, 2]) * q)
            joint_pose = CoordinateTransform(
                rot=fixed_rotation.repeat(batch_size, 1, 1) @ rot,
                trans=fixed_translation.repeat(batch_size, 1),
                device=self._device,
            )
        elif self.joint_type == "prismatic":
            # Translation along the joint axis, rotated into the parent frame.
            trans = self.joint_axis.reshape(1, 3, 1) * q.reshape(-1, 1, 1)
            assert torch.any(self.joint_axis != 0)
            rot = fixed_rotation.repeat(batch_size, 1, 1)
            trans = fixed_translation.reshape(-1, 3, 1) + rot @ trans
            trans = trans.reshape(-1, 3)
            joint_pose = CoordinateTransform(
                rot=rot,
                trans=trans,
                device=self._device,
            )
        else:
            # Fixed/unactuated joint: use the cached zero-configuration pose.
            joint_pose = self.joint_pose

        # Compute forward kinematics of children
        if return_collision:
            pose_dict = {self.name: [self.pose.multiply_transform(origin) for origin in self.collision_origins]}
        else:
            pose_dict = {self.name: [self.pose]}
        for child in self._children:
            pose_dict.update(child.forward_kinematics(q_dict, return_collision))

        # Apply joint pose
        return {
            body_name: [joint_pose.multiply_transform(p) for p in pose_dict[body_name]]
            for body_name in pose_dict
        }

    def body_transforms(self, q_dict):
        """Compute body transforms
        Computes transformations from self to all descendants.

        NOTE(review): stub — no implementation yet; currently returns None.

        Returns: Dict[link_name, transform_from_self_to_link]
        """
        # Compute

    # Get/set
    def update_joint_state(self, q):
        # Recompute joint_pose for joint value(s) q of shape (batch, 1).
        batch_size = q.shape[0]

        # joint_ang_vel = qd @ self.joint_axis
        # self.joint_vel = SpatialMotionVec(
        #     torch.zeros_like(joint_ang_vel), joint_ang_vel
        # )

        rot_angles_vals = self.joint_rot_angles()
        roll = rot_angles_vals[0, 0]
        pitch = rot_angles_vals[0, 1]
        yaw = rot_angles_vals[0, 2]

        # Fixed origin rotation: R = Rz(yaw) @ Ry(pitch) @ Rx(roll).
        fixed_rotation = (z_rot(yaw) @ y_rot(pitch)) @ x_rot(roll)

        # when we update the joint angle, we also need to update the transformation
        self.joint_pose.set_translation(
            torch.reshape(self.joint_trans(), (1, 3)).repeat(batch_size, 1)
        )
        # Assumes the joint axis is axis-aligned (one of ±x, ±y, ±z).
        if torch.abs(self.joint_axis[0, 0]) == 1:
            rot = x_rot(torch.sign(self.joint_axis[0, 0]) * q)
        elif torch.abs(self.joint_axis[0, 1]) == 1:
            rot = y_rot(torch.sign(self.joint_axis[0, 1]) * q)
        else:
            rot = z_rot(torch.sign(self.joint_axis[0, 2]) * q)

        self.joint_pose.set_rotation(fixed_rotation.repeat(batch_size, 1, 1) @ rot)
        return

    def get_joint_limits(self):
        # Limits as stored from the construction params (see __init__).
        return self.joint_limits

    def __repr__(self) -> str:
        # Pretty print all members
        return "\n"+"\n".join([f"{k}: {v}" for k, v in {
            "name": self.name,
            "children": [child.name for child in self._children],
            "parent": self._parent.name if self._parent else "None",
            "_device": self._device,
            "joint_idx": self.link_idx,
            "joint_axis": self.joint_axis,
            "joint_limits": self.joint_limits,
            "joint_pose": self.joint_pose,
        }.items()])+"\n"
--------------------------------------------------------------------------------
/diffco/collision_interfaces/robot_interface_base.py:
--------------------------------------------------------------------------------
1 | import torch
2 |
class RobotInterfaceBase:
    """Abstract base for robot interfaces.

    Concrete subclasses wrap a specific backend (URDF, ROS/MoveIt, curobo, ...)
    and provide configuration sampling, collision checking, and forward
    kinematics on top of it.
    """

    def __init__(self, name='', device='cpu'):
        self.name = name
        self._device = torch.device(device)
        # The following are placeholders that concrete subclasses populate:
        self.base_transform = None          # base pose of the robot
        self.joint_limits = None            # per-joint limits
        self._n_dofs = None                 # number of actuated DOFs
        self._controlled_joints = None      # indices/names of actuated joints
        self._mimic_joints = None           # joints mirroring another joint
        self._bodies = None                 # kinematic-tree bodies
        self._body_name_to_idx_map = None   # body name -> index lookup

    def rand_configs(self, num_configs):
        """Sample `num_configs` random configurations. Subclass responsibility."""
        raise NotImplementedError

    def collision(self, q):
        '''
        Args:
            q: joint angles [batch_size x n_dofs]
        Returns a boolean tensor of size (batch_size,) indicating whether each configuration is in collision
        '''
        raise NotImplementedError

    def compute_forward_kinematics_all_links(self, q, return_collision=False):
        r"""

        Args:
            q: joint angles [batch_size x n_dofs]
            link_name: name of link
            return_collision: whether to return collision geometry transforms

        Returns: translation and rotation of the link frame

        """
        raise NotImplementedError
38 |
--------------------------------------------------------------------------------
/diffco/collision_interfaces/ros_interface.py:
--------------------------------------------------------------------------------
1 | # Description: This file contains the interfaces for different robots and environments
2 | # They either read robot info and provides fkine function, or read obstacle info.
3 | # Consider add parent classes for robot and environment interfaces, respectively.
4 |
5 | # Copyright (c) Facebook, Inc. and its affiliates.
6 | """
7 | Differentiable robot model class
8 | ====================================
9 | TODO
10 | """
11 |
12 | from typing import List, Tuple, Dict, Optional
13 | from dataclasses import dataclass
14 | import os
15 |
16 | try:
17 | import rospy
18 | from moveit_commander import PlanningSceneInterface, RobotCommander, MoveGroupCommander, MoveItCommanderException
19 | from moveit_msgs.msg import RobotState, DisplayRobotState, PlanningScene, RobotTrajectory, ObjectColor
20 | from geometry_msgs.msg import Quaternion, Pose, PoseStamped, Point
21 | from rosgraph.names import ns_join
22 | except ImportError:
23 | print("ROS-related imports failed. This is expected if not running in a ROS environment. "
24 | "Otherwise, try source your ROS setup.bash file or check your ROS installation.")
25 |
26 | import torch
27 |
28 | from .robot_interface_base import RobotInterfaceBase
29 |
class ROSRobotEnv(RobotInterfaceBase):
    '''
    Robot + environment interface backed by a live ROS/MoveIt session.

    Args:
        ns: ROS namespace to operate in.
        robot_topic: optional robot_description topic; defaults to
            <ns>/robot_description.
        env_dict: environment description; must contain an 'obsData' entry
            describing the obstacles to load into the planning scene.
        name, device: accepted for RobotInterfaceBase signature compatibility
            (not used by this implementation).
    '''
    def __init__(self, ns='', robot_topic=None, env_dict=None, name='', device='cpu'):
        rospy.init_node(f'node_{ns}_{__name__}')
        self.scene = PlanningSceneInterface(ns=ns)
        self.robot = RobotCommander(ns_join(ns, 'robot_description')) if robot_topic is None else RobotCommander(robot_topic)
        self.group = MoveGroupCommander("right_arm", robot_description=ns_join(ns, 'robot_description'), ns=ns)
        self.scene._scene_pub = rospy.Publisher(ns_join(ns, 'planning_scene'),
                                                PlanningScene,
                                                queue_size=0)

        # NOTE(review): StateValidity is not defined or imported in this file —
        # confirm which module is expected to provide it.
        self.sv = StateValidity(ns=ns)
        # set_environment(self.robot, self.scene)

        # masterModifier = ShelfSceneModifier()
        # Bug fix: the original referenced the undefined name `envDict`;
        # the constructor parameter is `env_dict`.
        self.sceneModifier = PlanningSceneModifier(env_dict['obsData'])
        self.sceneModifier.setup_scene(self.scene, self.robot, self.group)

        self.rs_man = RobotState()
        robot_state = self.robot.get_current_state()
        self.rs_man.joint_state.name = robot_state.joint_state.name
        self.filler_robot_state = list(robot_state.joint_state.position)
        # Per-joint ranges of the 7-DOF arm, used by normalizer/unnormalizer.
        self.joint_ranges = torch.FloatTensor(
            [3.4033, 3.194, 6.117, 3.6647, 6.117, 6.1083, 2.67])

        self.gt_checker = self

    def collision(self, q):  # , print_depth=False):
        '''
        Returns True if ANY of the given (normalized) configurations is in
        collision, False if every configuration is collision free.

        Args:
            q: normalized configuration(s), shape (n_dofs,) or (batch, n_dofs).
        '''
        # Bug fix: the original read an undefined name `states`; it is the input q.
        states = q
        if states.ndim == 1:
            states = states[None, :]
        states = self.unnormalizer(states)
        for state in states:
            self.filler_robot_state[10:17] = state  # moveit_scrambler(state)
            self.rs_man.joint_state.position = tuple(self.filler_robot_state)
            collision_free = self.sv.getStateValidity(
                self.rs_man, group_name="right_arm")  # , print_depth=print_depth)
            if not collision_free:
                return True
        return False

    def reset_pose(self, pose_dict, env_name=None):
        # Rebuild the obstacle set at the given poses.
        self.sceneModifier.delete_obstacles()
        self.sceneModifier.permute_obstacles(pose_dict)

    def normalizer(self, states):
        # NOTE(review): moveit_unscrambler is not defined in this file — confirm import.
        return moveit_unscrambler(states) / self.joint_ranges

    def unnormalizer(self, states):
        # NOTE(review): moveit_scrambler is not defined in this file — confirm import.
        return moveit_scrambler(states * self.joint_ranges)

    def compute_forward_kinematics_all_links(self, q, return_collision=False):
        # Not supported by this interface; defers to the base class (raises).
        return super().compute_forward_kinematics_all_links(q, return_collision)
82 |
83 |
class PlanningSceneModifier():
    '''Adds, moves, and removes a known set of obstacles in a MoveIt planning scene.'''

    def __init__(self, obstacles, port=0):
        # obstacles: dict of obstacle name -> properties
        # ('dim', 'z_offset', 'is_mesh', 'orientation', optionally 'mesh_file').
        self._obstacles = obstacles
        self.port = port
        self._scene = None
        self._robot = None

    def setup_scene(self, scene, robot, group):
        # Bind the live MoveIt handles this modifier operates on.
        self._scene = scene
        self._robot = robot
        self._group = group

    def permute_obstacles(self, pose_dict):
        '''Re-add each obstacle named in pose_dict at its new position.'''
        for name, position in pose_dict.items():
            obstacle = self._obstacles[name]
            pose = PoseStamped()
            pose.header.frame_id = self._robot.get_planning_frame()
            pose.pose.position.x = position[0]
            pose.pose.position.y = position[1]
            pose.pose.position.z = position[2] + obstacle['z_offset']

            # Keep the stored orientation when the pose entry does not carry one.
            if 'orientation' not in position and obstacle['orientation'] is not None:
                quat = obstacle['orientation']
                pose.pose.orientation.x = quat[0]
                pose.pose.orientation.y = quat[1]
                pose.pose.orientation.z = quat[2]
                pose.pose.orientation.w = quat[3]

            if obstacle['is_mesh']:
                # _logger.info(self._obstacles[name]['mesh_file'])
                self._scene.add_mesh(name, pose, filename=obstacle['mesh_file'], size=obstacle['dim'])
            else:
                self._scene.add_box(name, pose, size=obstacle['dim'])

        # rospy.sleep(1)
        # _logger.info(self._scene.get_known_object_names())

    def delete_obstacles(self):
        # scene.remove_world_object("table_center")
        for name in self._obstacles.keys():
            self._scene.remove_world_object(name)
--------------------------------------------------------------------------------
/diffco/deprecated/FCLChecker.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | from scipy.ndimage.measurements import label
3 | import torch
4 | from tqdm import tqdm
5 | from time import time
6 | from . import kernel
7 | from .Obstacles import Obstacle
8 | from .DiffCo import CollisionChecker
9 | from .Obstacles import FCLObstacle
10 | import fcl
11 |
12 |
class FCLChecker(CollisionChecker):
    '''
    Ground-truth collision checker that queries FCL between the robot's link
    geometries and one broadphase manager per obstacle label/category.
    '''
    def __init__(self, obstacles, robot, robot_manager=None, obs_managers=None, label_type=None, num_class=None):
        # obstacles: obstacle parameter tuples (forwarded to FCLObstacle when
        #   obs_managers is not supplied).
        # robot: robot model providing dof, limits, and update_polygons().
        # robot_manager / obs_managers: optional prebuilt FCL broadphase managers.
        # label_type: 'binary' | 'instance' | 'class' — how obstacles are grouped
        #   into managers when obs_managers is not supplied.
        # num_class: number of categories (required when label_type == 'class').
        super().__init__(obstacles)
        self.robot = robot
        self.robot_manager = robot_manager
        self.obs_managers = obs_managers
        self.label_type = label_type
        self.num_class = num_class

        if self.robot_manager is None:
            # Build the robot-side manager from link geometries at a random
            # in-limits configuration (poses are refreshed on every query).
            rand_cfg = torch.rand(robot.dof)
            rand_cfg = rand_cfg * (robot.limits[:, 1]-robot.limits[:, 0]) + robot.limits[:, 0]
            robot_links = robot.update_polygons(rand_cfg)
            robot_manager = fcl.DynamicAABBTreeCollisionManager()
            robot_manager.registerObjects(robot_links)
            robot_manager.setup()
            self.robot_manager = robot_manager

        if self.obs_managers is None:
            assert label_type == 'binary' or label_type == 'instance' or \
                (label_type == 'class' and num_class is not None), \
                (f'When obs_managers is not provided one need to provide label type: \
                label_type={label_type}, num_class={num_class}')

            fcl_obs = [FCLObstacle(*param) for param in obstacles]
            fcl_collision_obj = [fobs.cobj for fobs in fcl_obs]

            if label_type == 'binary':
                # One manager holding every obstacle: a single in/out label.
                obs_managers = [fcl.DynamicAABBTreeCollisionManager()]
                obs_managers[0].registerObjects(fcl_collision_obj)
                obs_managers[0].setup()
            elif label_type == 'instance':
                # One manager per obstacle instance.
                obs_managers = [fcl.DynamicAABBTreeCollisionManager() for _ in fcl_obs]
                for mng, cobj in zip(obs_managers, fcl_collision_obj):
                    mng.registerObjects([cobj])
            elif label_type == 'class':
                # One manager per category; obstacles grouped by category id.
                obs_managers = [fcl.DynamicAABBTreeCollisionManager() for _ in range(num_class)]
                obj_by_cls = [[] for _ in range(num_class)]
                for obj in fcl_obs:
                    obj_by_cls[obj.category].append(obj.cobj)
                for mng, obj_group in zip(obs_managers, obj_by_cls):
                    mng.registerObjects(obj_group)
            else:
                raise NotImplementedError('Unsupported label_type {}'.format(label_type))

            for mng in obs_managers:
                mng.setup()

            self.obs_managers = obs_managers

        self.num_class = len(obs_managers)

    def predict(self, X, distance=True):
        # X: configurations, (n, dof) or (dof,).
        # Returns labels (n, num_managers) of +1 (collision) / -1 (free) and,
        # when distance=True, signed distances: positive penetration depth in
        # collision, negative clearance otherwise.
        if X.ndim == 1:
            X = X[None, :]
        labels = torch.FloatTensor(len(X), len(self.obs_managers))
        dists = torch.FloatTensor(len(X), len(self.obs_managers)) if distance else None
        # Many contacts are requested when distances are needed so the deepest
        # penetration can be taken over all contact points.
        req = fcl.CollisionRequest(num_max_contacts=1000 if distance else 1, enable_contact=distance)
        for i, cfg in enumerate(X):
            # Move the robot's collision geometry to this configuration.
            self.robot.update_polygons(cfg)
            self.robot_manager.update()
            # assert len(self.robot_manager.getObjects()) == self.robot.dof
            for cat, obs_mng in enumerate(self.obs_managers):
                rdata = fcl.CollisionData(request = req)
                self.robot_manager.collide(obs_mng, rdata, fcl.defaultCollisionCallback)
                in_collision = rdata.result.is_collision
                labels[i, cat] = 1 if in_collision else -1
                if distance:
                    ddata = fcl.DistanceData()
                    self.robot_manager.distance(obs_mng, ddata, fcl.defaultDistanceCallback)
                    depths = torch.FloatTensor([c.penetration_depth for c in rdata.result.contacts])
                    dists[i, cat] = depths.abs().max() if in_collision else -ddata.result.min_distance
        if distance:
            return labels, dists
        else:
            return labels

    def score(self, X):
        # Distance-only view of predict(); used as a continuous safety score.
        return self.predict(X, distance=True)[1]
92 |
class Simple1DDynamicChecker(CollisionChecker):
    '''
    Ground-truth checker for a 1-DOF robot among Simple1DDynamicObstacle's,
    where each query point is (joint, time).
    '''
    def __init__(self, obstacles, robot):
        super().__init__(obstacles)
        self.robot = robot

    def predict(self, X, distance=True):
        # X: normalized (joint, time) queries, (n, 2) or (2,).
        if X.ndim == 1:
            X = X[None, :]
        # labels = torch.FloatTensor(len(X))
        # dists = torch.FloatTensor(len(X)) if distance else None
        X = self.robot.unnormalize(X)
        # NOTE(review): with distance=False each obs.is_collision returns a
        # bare tensor, so the zip(*res) unpacking below would not produce
        # (labels, dists) — the distance=False path looks broken; confirm.
        res = [obs.is_collision(X, distance=distance) for obs in self.obstacles]
        labels, dists = tuple(zip(*res))
        # +1 where any obstacle collides, -1 otherwise.
        # NOTE(review): with per-obstacle labels of shape (n, 1), vstack gives
        # (n * num_obstacles, 1), so sum(dim=1) does not aggregate across
        # obstacles unless there is exactly one — confirm intended shapes.
        labels = (torch.vstack(labels).sum(dim=1) > 0) * 2 - 1
        if not distance:
            return labels
        # Most-penetrating (or least-clear) distance across obstacles.
        dists = torch.max(torch.hstack(dists), dim=1).values
        # for i, (cfg, t) in enumerate(zip(X, ts)):
        #     res = [obs.is_collision(cfg[0], t) for obs in self.obstacles]
        #     in_collision = any([r[0] for r in res])
        #     labels[i] = 1 if in_collision else -1
        #     if distance:
        #         dists[i] = max([r[1] for r in res])
        return labels, dists
117 |
118 |
--------------------------------------------------------------------------------
/diffco/deprecated/Obstacles.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import fcl
3 | import numpy as np
4 |
class Obstacle:
    """A static 2D obstacle (circle or axis-aligned rectangle) with a traversal cost."""

    def __init__(self, kind, position, size, cost=np.inf):
        self.kind = kind
        if self.kind not in ('circle', 'rect'):
            raise NotImplementedError('Obstacle kind {} not supported'.format(kind))
        self.position = torch.FloatTensor(position)
        # Circles store a 1-element diameter tensor; rects a per-axis extent tensor.
        if kind == 'circle':
            self.size = torch.FloatTensor([size])
        else:
            self.size = torch.FloatTensor(size)
        self.cost = cost

    def is_collision(self, point):
        """Return a boolean tensor: True for each point inside the obstacle."""
        pts = point if point.ndim > 1 else point[np.newaxis, :]
        half = self.size / 2
        if self.kind == 'circle':
            return torch.norm(self.position - pts, dim=1) < half
        if self.kind == 'rect':
            return torch.all(torch.abs(self.position - pts) < half, dim=1)
        raise NotImplementedError('Obstacle kind {} not supported'.format(self.kind))

    def get_cost(self):
        return self.cost
26 |
class FCLObstacle:
    """Wraps a 2D shape (extruded to 3D) or a mesh as an fcl CollisionObject."""

    def __init__(self, shape, position, size=None, category=None, **kwargs):
        self.size = size
        self.position = position
        # 2D shapes are extruded along z with a large height (1000) so planar
        # queries behave like true 2D collision checks.
        if shape == 'circle':
            center3d = torch.DoubleTensor([position[0], position[1], 0])
            self.geom = fcl.Cylinder(size, 1000)
        elif shape == 'rect':
            center3d = torch.DoubleTensor([position[0], position[1], 0])
            self.geom = fcl.Box(size[0], size[1], 1000)
        elif shape == 'mesh':
            center3d = torch.DoubleTensor(position)
            self.geom = kwargs['geom']

        self.cobj = fcl.CollisionObject(self.geom, fcl.Transform(center3d))
        self.category = category
43 |
class Simple1DDynamicObstacle:
    """A moving 1-D interval obstacle whose center follows position_func(t)."""

    def __init__(self, size, position_func):
        self.size = size                    # interval width
        self.position_func = position_func  # maps time(s) -> center position(s)

    def is_collision(self, st_point, distance=True):
        ''' st_point is the query point (joints, time), p is the center of the obstacle
        distance indicates whether to return collision distance
        '''
        center = self.position_func(st_point[:, -1:])
        # Signed penetration: positive inside the interval, negative outside.
        penetration = self.size / 2 - torch.abs(st_point[:, :-1] - center)
        inside = penetration > 0
        if distance:
            return inside, penetration
        return inside
65 |
class ObstacleMotion:
    """Base class for time-parameterized obstacle trajectories."""

    def predict(self, t):
        """Return the obstacle position at time(s) t. Subclass responsibility."""
        raise NotImplementedError

    def __call__(self, *args, **kwargs):
        # Calling the motion object is shorthand for predict().
        return self.predict(*args, **kwargs)
72 |
class LinearMotion(ObstacleMotion):
    """Constant-velocity motion: position(t) = A * t + B."""

    def __init__(self, A, B):
        self.A = A  # velocity (slope)
        self.B = B  # position at t = 0

    def predict(self, t):
        return self.B + self.A * t
80 |
class SineMotion(ObstacleMotion):
    """Sinusoidal motion: position(t) = A * sin(alpha * t + beta) + bias."""

    def __init__(self, A, alpha, beta, bias):
        self.A = A          # amplitude
        self.alpha = alpha  # angular frequency
        self.beta = beta    # phase offset
        self.bias = bias    # mean position

    def predict(self, t):
        phase = self.alpha * t + self.beta
        return self.A * torch.sin(phase) + self.bias
90 |
91 | # class ComposeMotion # TODO
--------------------------------------------------------------------------------
/diffco/envs/__init__.py:
--------------------------------------------------------------------------------
1 | from .collision_env import CollisionEnv
--------------------------------------------------------------------------------
/diffco/envs/collision_env.py:
--------------------------------------------------------------------------------
class CollisionEnv:
    '''
    A template class for collision environment.
    Use as a reference for implementing your own collision environment.
    '''
    def __init__(self):
        pass

    def is_collision(self, qs):
        # Batched check: one boolean per configuration.
        return list(map(self._single_collision, qs))

    def _single_collision(self, q):
        raise NotImplementedError

    def distance(self, qs):
        # Batched distance query: one value per configuration.
        return list(map(self._single_distance, qs))

    def _single_distance(self, q):
        raise NotImplementedError

    def sample_q(self):
        raise NotImplementedError

    def plot(self, qs):
        raise NotImplementedError
--------------------------------------------------------------------------------
/diffco/envs/moveit/panda/scene_objects/bookshelves.scene:
--------------------------------------------------------------------------------
1 | bookshelves
2 | * bookshelf_bottom
3 | 1
4 | box
5 | 0.60 1.1 0.02
6 | -0.95 0 0.20
7 | 0 0 0 1
8 | 0 0 0 0
9 | * bookshelf_shelf1
10 | 1
11 | box
12 | 0.60 1.1 0.02
13 | -0.95 0 0.60
14 | 0 0 0 1
15 | 0 0 0 0
16 | * bookshelf_shelf2
17 | 1
18 | box
19 | 0.60 1.1 0.02
20 | -0.95 0 1.00
21 | 0 0 0 1
22 | 0 0 0 0
23 | * bookshelf_top
24 | 1
25 | box
26 | 0.60 1.1 0.02
27 | -0.95 0 1.40
28 | 0 0 0 1
29 | 0 0 0 0
30 | * bookshelf_left_wall
31 | 1
32 | box
33 | 0.60 0.02 1.40
34 | -0.95 0.55 0.7
35 | 0 0 0 1
36 | 0 0 0 0
37 | * bookshelf_right_wall
38 | 1
39 | box
40 | 0.60 0.02 1.40
41 | -0.95 -0.55 0.7
42 | 0 0 0 1
43 | 0 0 0 0
44 | .
45 |
--------------------------------------------------------------------------------
/diffco/envs/moveit/panda/scene_objects/bookshelves_small.scene:
--------------------------------------------------------------------------------
1 | bookshelves
2 | * bookshelf_bottom
3 | 1
4 | box
5 | 0.48 0.88 0.02
6 | -0.60 0 0.16
7 | 0 0 0 1
8 | 0 0 0 0
9 | * bookshelf_shelf1
10 | 1
11 | box
12 | 0.48 0.88 0.02
13 | -0.60 0 0.48
14 | 0 0 0 1
15 | 0 0 0 0
16 | * bookshelf_shelf2
17 | 1
18 | box
19 | 0.48 0.88 0.02
20 | -0.60 0 0.80
21 | 0 0 0 1
22 | 0 0 0 0
23 | * bookshelf_top
24 | 1
25 | box
26 | 0.48 0.88 0.02
27 | -0.60 0 1.12
28 | 0 0 0 1
29 | 0 0 0 0
30 | * bookshelf_left_wall
31 | 1
32 | box
33 | 0.48 0.02 1.12
34 | -0.60 0.44 0.56
35 | 0 0 0 1
36 | 0 0 0 0
37 | * bookshelf_right_wall
38 | 1
39 | box
40 | 0.48 0.02 1.12
41 | -0.60 -0.44 0.56
42 | 0 0 0 1
43 | 0 0 0 0
44 | .
45 |
--------------------------------------------------------------------------------
/diffco/envs/moveit/panda/scene_objects/narrow.scene:
--------------------------------------------------------------------------------
1 | (noname)+++
2 | * Box_0
3 | 1
4 | box
5 | 0.2 0.2 0.2
6 | 0 0 0.9
7 | 0 0 0 1
8 | 0 0 0 0
9 | * Box_1
10 | 1
11 | box
12 | 0.2 0.2 0.2
13 | 0.54 0 0.09
14 | 0 0 0 1
15 | 0 0 0 0
16 | * Box_2
17 | 1
18 | box
19 | 0.2 0.2 0.2
20 | 0.57 0 0.55
21 | 0 0 0 1
22 | 0 0 0 0
23 | * Box_3
24 | 1
25 | box
26 | 0.4 0.4 0.4
27 | 0.38 -0.38 0.45
28 | 0 0 0 1
29 | 0 0 0 0
30 | * Box_4
31 | 1
32 | box
33 | 0.4 0.4 0.4
34 | 0.4 0.42 0.44
35 | 0 0 0 1
36 | 0 0 0 0
37 | .
38 |
--------------------------------------------------------------------------------
/diffco/envs/moveit/panda/scene_objects/raised_shelves.scene:
--------------------------------------------------------------------------------
1 | raised_shelves
2 | * bookshelf_bottom
3 | 1
4 | box
5 | 0.60 2.40 0.02
6 | 0.30 1.20 0.20
7 | 0 0 0 1
8 | 1 1 0 1
9 | * bookshelf_shelf1
10 | 1
11 | box
12 | 0.60 2.40 0.02
13 | 0.30 1.20 0.60
14 | 0 0 0 1
15 | 1 1 0 1
16 | * bookshelf_shelf2
17 | 1
18 | box
19 | 0.60 2.40 0.02
20 | 0.30 1.20 1.00
21 | 0 0 0 1
22 | 1 1 0 1
23 | * bookshelf_top
24 | 1
25 | box
26 | 0.60 2.40 0.02
27 | 0.30 1.20 1.40
28 | 0 0 0 1
29 | 1 1 0 1
30 | * palette_bottom
31 | 1
32 | box
33 | 1.80 0.50 0.02
34 | 1.50 2.15 0.15
35 | 0 0 0 1
36 | 1 1 0 1
37 | * palette_rail_1
38 | 1
39 | box
40 | 0.15 0.50 0.15
41 | 1.00 2.15 0.075
42 | 0 0 0 1
43 | 1 1 0 1
44 | * palette_rail_2
45 | 1
46 | box
47 | 0.15 0.50 0.15
48 | 1.50 2.15 0.075
49 | 0 0 0 1
50 | 1 1 0 1
51 | * palette_rail_3
52 | 1
53 | box
54 | 0.15 0.50 0.15
55 | 2.00 2.15 0.075
56 | 0 0 0 1
57 | 1 1 0 1
58 | * bookshelf_left_wall
59 | 1
60 | box
61 | 0.60 0.02 1.30
62 | 0.30 0.01 0.65
63 | 0 0 0 1
64 | 1 1 0 1
65 | * bookshelf_right_wall
66 | 1
67 | box
68 | 0.60 0.02 1.30
69 | 0.30 2.39 0.65
70 | 0 0 0 1
71 | 1 1 0 1
72 | .
73 |
--------------------------------------------------------------------------------
/diffco/envs/rtb/__init__.py:
--------------------------------------------------------------------------------
1 | from .panda_envs import (
2 | PandaEnv,
3 | PandaSingleCuboidEnv,
4 | PandaSingleCylinderEnv,
5 | PandaThreeCylinderEnv,
6 | )
--------------------------------------------------------------------------------
/diffco/envs/rtb/panda_envs.py:
--------------------------------------------------------------------------------
1 | from .. import CollisionEnv
2 |
3 | import roboticstoolbox as rtb
4 | from roboticstoolbox.models import Panda
5 | from spatialgeometry import Cuboid, Cylinder, Sphere, Mesh
6 | from spatialmath import SE3
7 | from swift import Swift
8 |
9 | # Path: envs/rtb/panda_envs.py
10 |
class PandaEnv(CollisionEnv):
    '''
    General collision environment for Panda robot.
    Add your own objects to create a custom environment.

    Objects: dict[key: (shape type, other shape parameters[dict])]
    '''
    def __init__(self, object_info: dict=None, launch_args: dict=None):
        '''
        Parameters:
            object_info: dict mapping object key -> (shape type, shape parameters[dict]),
                or None for an obstacle-free environment.
            launch_args: dict of keyword arguments forwarded to Swift.launch, or None.
        '''
        super().__init__()
        self.robot = Panda()
        self.robot.q = self.robot.qr
        self.env = self._launch_env(launch_args)
        self._add_objects(object_info)

    def _launch_env(self, launch_args: dict):
        '''
        Launch the Swift simulator and add the robot to it.

        Parameters:
            launch_args: dict of Swift.launch keyword arguments, or None.
        '''
        if launch_args is None:
            launch_args = {}
        env = Swift()
        env.launch(**launch_args)
        env.add(self.robot)
        return env

    def _add_objects(self, object_info: dict):
        '''
        Add obstacle objects to the environment.

        Parameters:
            object_info: dict mapping object key -> (shape type, shape parameters[dict]).
                None is treated as "no obstacles" (previously this crashed on
                None.items() when the default object_info was used).
        '''
        self.objects = {}
        if object_info is None:
            return
        shape_class_map = {
            'box': Cuboid,
            'cuboid': Cuboid,
            'cylinder': Cylinder,
            'sphere': Sphere,
            'mesh': Mesh
        }
        for shape_key, (shape_type, shape_params) in object_info.items():
            if shape_type not in shape_class_map:
                raise NotImplementedError(
                    f'Unsupported shape type: {shape_type!r}; '
                    f'expected one of {sorted(shape_class_map)}')
            shape_obj = shape_class_map[shape_type](**shape_params)
            self.env.add(shape_obj)
            self.objects[shape_key] = shape_obj

    def _single_collision(self, q):
        # True if configuration q collides with any registered obstacle.
        return any(self.robot.iscollided(q, obj) for obj in self.objects.values())

    def _single_distance(self, q):
        # Closest distance from the robot at q to any obstacle; +inf when there
        # are no obstacles (previously min() raised on an empty sequence).
        return min(
            (self.robot.closest_point(q, obj)[0] for obj in self.objects.values()),
            default=float('inf'))

    def sample_q(self):
        # Sample a random joint configuration from the robot's joint limits.
        return self.robot.random_q()
74 |
class PandaSingleCylinderEnv(PandaEnv):
    '''
    Collision environment for Panda robot with a single cylinder.
    '''
    def __init__(self, launch_args: dict=None):
        # One upright cylinder directly in front of the robot.
        cylinder_params = {
            'radius': 0.05,
            'length': 0.8,
            'pose': SE3(0.5, 0, 0.4),
            'color': (1.0, 1.0, 0.0, 1.)
        }
        super().__init__({'cylinder1': ('cylinder', cylinder_params)}, launch_args)
89 |
class PandaThreeCylinderEnv(PandaEnv):
    '''
    Collision environment for Panda robot with three cylinders.
    '''
    def __init__(self, launch_args: dict=None):
        # Three identical upright cylinders; only their placement differs.
        positions = [(0.3, -0.5, 0.4), (0.5, 0, 0.4), (0.3, 0.5, 0.4)]
        object_info = {
            f'cylinder{i}': ('cylinder', {
                'radius': 0.05,
                'length': 0.8,
                'pose': SE3(*pos),
                'color': (1.0, 1.0, 0.0, 1.)
            })
            for i, pos in enumerate(positions, start=1)
        }
        super().__init__(object_info, launch_args)
116 |
class PandaSingleCuboidEnv(PandaEnv):
    '''
    Collision environment for Panda robot with a single cuboid.
    '''
    def __init__(self, launch_args: dict=None):
        # One small cube directly in front of the robot.
        cuboid_params = {
            'scale': [0.2, 0.2, 0.2],
            'pose': SE3(0.5, 0, 0.4),
            'color': (1.0, 1.0, 0.0, 1.)
        }
        super().__init__({'cuboid1': ('cuboid', cuboid_params)}, launch_args)
130 |
--------------------------------------------------------------------------------
/diffco/kernel.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import torch
3 |
class KernelFunc:
    '''Base class for kernel functions.

    Subclasses must override ``__call__``; the base implementation only
    raises to signal the missing override.
    '''

    def __init__(self):
        # No state is kept at the base level.
        pass

    def __call__(self):
        # Kernel evaluation must be supplied by a subclass.
        raise NotImplementedError('You need to define your own __call__ function.')
10 |
11 |
class RQKernel(KernelFunc):
    '''Rational-quadratic kernel: k(x, x') = (1 + gamma/p * ||x - x'||^2)^(-p).

    Parameters:
        gamma: bandwidth factor.
        p: exponent of the rational-quadratic form (default 2).
    '''
    def __init__(self, gamma: float, p: int=2):
        self.gamma = gamma
        self.p = p

    def __call__(self, xs, x_primes):
        '''Evaluate the kernel between two batches of points.

        xs: tensor of shape (n, ...) or a single point with fewer dims.
        x_primes: tensor of shape (m, ...).
        Returns an (n, m) tensor, squeezed to (m,) when n == 1.
        '''
        if xs.ndim < x_primes.ndim:
            # Prepend singleton batch dims. The previous `xs[[None] * k]`
            # relied on torch's deprecated legacy behavior of treating a list
            # containing None as an index tuple; an explicit reshape is
            # equivalent and stable across versions.
            xs = xs.reshape((1,) * (x_primes.ndim - xs.ndim) + tuple(xs.shape))
        xs = xs.reshape(xs.shape[0], -1)
        x_primes = x_primes.reshape(x_primes.shape[0], -1)
        pair_dist = torch.cdist(xs, x_primes).square()
        kvalues = 1 / (1 + self.gamma / self.p * pair_dist) ** self.p
        if kvalues.shape[0] == 1:
            # Single query point: drop the batch dimension.
            kvalues = kvalues.squeeze(0)

        return kvalues
30 |
class CauchyKernel(KernelFunc):
    '''Cauchy kernel: k(x, x') = c / (||x - x'||^2 + c).'''

    def __init__(self, c):
        self.c = c

    def __call__(self, xs, x_primes):
        if xs.ndim == 1:
            xs = xs[None, :]
        # Pairwise differences, shape [len(xs), len(x_primes), channel].
        deltas = x_primes[None, :] - xs[:, None]
        sq_dists = (deltas ** 2).sum(axis=2)
        kvalues = self.c / (sq_dists + self.c)
        if kvalues.shape[0] == 1:
            # Single query point: drop the batch dimension.
            kvalues = kvalues.squeeze(0)
        return kvalues
44 |
class MultiQuadratic(KernelFunc):
    '''Multiquadric kernel: k(x, x') = sqrt(||x - x'||^2 / epsilon^2 + 1).'''

    def __init__(self, epsilon):
        self.epsilon = epsilon

    def __call__(self, xs, x_primes):
        if xs.ndim == 1:
            xs = xs[None, :]
        # Pairwise differences, shape [len(xs), len(x_primes), channel].
        deltas = x_primes[None, :] - xs[:, None]
        sq_dists = torch.sum(deltas ** 2, axis=2)
        kvalues = torch.sqrt(sq_dists / self.epsilon ** 2 + 1)
        if kvalues.shape[0] == 1:
            # Single query point: drop the batch dimension.
            kvalues = kvalues.squeeze(0)
        return kvalues
58 |
class Polyharmonic(KernelFunc):
    '''Polyharmonic spline kernel phi_k(r) scaled by 1/epsilon.

    For even k: phi(r) = r^k * log(r), with phi(0) defined as 0;
    for odd  k: phi(r) = r^k (plain r when k == 1).
    '''
    def __init__(self, k, epsilon):
        self.epsilon = epsilon
        if k % 2 == 0:
            def _even_func(r):
                tmp = (r**k * torch.log(r))
                # r == 0 yields 0 * log(0) = nan; the spline limit there is 0.
                tmp[torch.isnan(tmp)] = 0
                return tmp
            self._func = _even_func
        else:
            def _odd_func(r):
                return r**k
            self._func = _odd_func if k > 1 else lambda r: r

    def __call__(self, xs, x_primes):
        '''Return phi(||x - x'||) / epsilon for all pairs, shape (len(xs), len(x_primes)).'''
        if xs.ndim < x_primes.ndim:
            # Bug fix: Tensor.view only accepts integer dims, so the previous
            # `xs.view(tuple([None] * ...) + xs.shape)` raised TypeError.
            # Prepend explicit singleton dimensions instead.
            xs = xs.reshape((1,) * (x_primes.ndim - xs.ndim) + tuple(xs.shape))
        r = torch.cdist(xs.reshape(len(xs), -1), x_primes.reshape(len(x_primes), -1))
        kvalues = self._func(r) / self.epsilon
        return kvalues
80 |
81 |
82 | # def mq_r(self, r):
83 | # kvalues = torch.sqrt(r**2/self.epsilon**2 + 1)
84 | # return kvalues
85 |
86 | # class mq(KernelFunc):
87 | # def __init__(self, epsilon):
88 | # self.epsilon = epsilon
89 |
90 | # def __call__(self, xs, x_primes):
91 | # if xs.ndim == 1:
92 | # xs = xs[np.newaxis, :]
93 | # xs = xs[np.newaxis, :] # change to [1, len(x), channel]
94 | # pair_diff = x_primes[:, np.newaxis] - xs # [len(x_primes), len(xs), channel]
95 | # kvalues = torch.sqrt(torch.sum(pair_diff**2, axis=2)
96 | # if kvalues.shape[1] == 1:
97 | # kvalues = kvalues.squeeze(1)
98 | # return kvalues
99 |
class WeightedKernel(KernelFunc):
    '''Rational-quadratic kernel over per-channel weighted differences.'''

    def __init__(self, gamma, w, p=2):
        self.gamma = gamma
        self.p = p
        # Weight vector broadcastable over pairwise differences, one entry per channel.
        self.w = np.array(w).reshape((1, 1, -1))

    def __call__(self, xs, x_primes):
        if xs.ndim == 1:
            xs = xs[np.newaxis, :]
        # Weighted pairwise differences, shape [len(xs), len(x_primes), channel].
        deltas = (x_primes[np.newaxis, :] - xs[:, np.newaxis]) * self.w
        sq_dists = np.sum(deltas ** 2, axis=2)
        kvalues = 1 / (1 + self.gamma / self.p * sq_dists) ** self.p
        if kvalues.shape[1] == 1:
            kvalues = kvalues.squeeze(1)
        return kvalues
115 |
class TangentKernel(KernelFunc):
    '''Hyperbolic-tangent (sigmoid) kernel: k(x, x') = tanh(a * <x, x'> + c).'''

    def __init__(self, a, c):
        self.a = a
        self.c = c

    def __call__(self, xs, x_primes):
        if xs.ndim == 1:
            xs = xs[np.newaxis, :]
        # Pairwise inner products, shape [len(xs), len(x_primes)].
        inner = np.sum(x_primes[np.newaxis, :] * xs[:, np.newaxis], 2)
        kvalues = np.tanh(self.a * inner + self.c)
        if kvalues.shape[1] == 1:
            kvalues = kvalues.squeeze(1)
        return kvalues
130 |
class FKKernel(KernelFunc):
    '''Deprecated: kernel composing forward kinematics with an RQ kernel.

    Instantiating always raises; specify the transform in
    kernel_perceptrons.DiffCo directly instead.
    '''

    def __init__(self, fkine, rq_kernel):
        raise DeprecationWarning('This class is deprecated. Specify the transform in kernel_perceptrons.DiffCo directly.')
        # Unreachable: kept for reference only, since __init__ raises above.
        self.fkine = fkine
        self.rq_kernel = rq_kernel

    def __call__(self, xs, x_primes=None, x_primes_controls=None):
        # Unreachable in practice (construction always raises); kept for reference.
        if xs.ndim == 1:
            xs = xs[np.newaxis, :]
        query_controls = self.fkine(xs).reshape(len(xs), -1)
        if x_primes_controls is None:
            x_primes_controls = self.fkine(x_primes).reshape(len(x_primes), -1)
        return self.rq_kernel(query_controls, x_primes_controls)
144 |
class TemporalFKKernel(KernelFunc):
    '''Kernel over time-extended configurations (q, t).

    Multiplies a forward-kinematics RQ kernel on the configuration part by an
    RQ kernel on the time part raised to the power alpha.
    '''

    def __init__(self, fkine, rqkernel, t_rqkernel, alpha=0.5):
        self.fkine = fkine
        self.rqkernel = rqkernel
        self.t_rqkernel = t_rqkernel
        # (power) weight on time. \in [1, +\infty]
        self.alpha = alpha

    def __call__(self, xs, x_primes):
        '''
        This assumes t is the last feature of each (extended) configuration
        '''
        if xs.ndim == 1:
            xs = xs[None, :]
        # Split off the trailing time feature from each row.
        configs, times = xs[:, :-1], xs[:, -1:]
        configs_prime, times_prime = x_primes[:, :-1], x_primes[:, -1:]
        controls = self.fkine(configs).reshape(len(configs), -1)
        controls_prime = self.fkine(configs_prime).reshape(len(configs_prime), -1)
        space_k = self.rqkernel(controls, controls_prime)
        time_k = self.t_rqkernel(times, times_prime) ** self.alpha
        return space_k * time_k
169 |
class LineKernel(KernelFunc):
    '''Kernel between line segments in configuration space.

    Each input row concatenates a segment's two endpoints (2 * DOF features);
    the result averages the point kernel evaluated at the start points and at
    the end points.
    '''

    def __init__(self, point_kernel):
        self.point_kernel = point_kernel

    def __call__(self, xs, x_primes):
        if xs.ndim == 1:
            xs = xs[np.newaxis, :]
        if x_primes.ndim == 1:
            x_primes = x_primes[np.newaxis, :]
        n_features = xs.shape[1]
        assert n_features == x_primes.shape[1]
        assert n_features % 2 == 0
        dof = n_features // 2
        start_k = self.point_kernel(xs[:, :dof], x_primes[:, :dof])
        end_k = self.point_kernel(xs[:, dof:], x_primes[:, dof:])
        return (start_k + end_k) / 2
185 |
class LineFKKernel(KernelFunc):
    '''Kernel between line segments, evaluated in the workspace via forward kinematics.

    Each input row concatenates a segment's two endpoints (2 * DOF features);
    both endpoints are mapped through FK and the flattened control points are
    compared with the RQ kernel.
    '''

    def __init__(self, fkine, rq_kernel):
        self.fkine = fkine
        self.rq_kernel = rq_kernel

    def __call__(self, xs, x_primes):
        if xs.ndim == 1:
            xs = xs[np.newaxis, :]
        if x_primes.ndim == 1:
            x_primes = x_primes[np.newaxis, :]
        n_features = xs.shape[1]
        assert n_features == x_primes.shape[1]
        assert n_features % 2 == 0
        dof = n_features // 2
        # Run FK on every endpoint, then flatten the controls per segment.
        controls = self.fkine(xs.reshape(-1, dof)).reshape(len(xs), -1)
        controls_prime = self.fkine(x_primes.reshape(-1, dof)).reshape(len(x_primes), -1)
        return self.rq_kernel(controls, controls_prime)
--------------------------------------------------------------------------------
/diffco/robot_data/2link_robot.urdf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
--------------------------------------------------------------------------------
/diffco/robot_data/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/__init__.py
--------------------------------------------------------------------------------
/diffco/robot_data/allegro/meshes/base_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/allegro/meshes/base_link.STL
--------------------------------------------------------------------------------
/diffco/robot_data/allegro/meshes/base_link_left.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/allegro/meshes/base_link_left.STL
--------------------------------------------------------------------------------
/diffco/robot_data/allegro/meshes/link_0.0.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/allegro/meshes/link_0.0.STL
--------------------------------------------------------------------------------
/diffco/robot_data/allegro/meshes/link_1.0.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/allegro/meshes/link_1.0.STL
--------------------------------------------------------------------------------
/diffco/robot_data/allegro/meshes/link_12.0_left.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/allegro/meshes/link_12.0_left.STL
--------------------------------------------------------------------------------
/diffco/robot_data/allegro/meshes/link_12.0_right.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/allegro/meshes/link_12.0_right.STL
--------------------------------------------------------------------------------
/diffco/robot_data/allegro/meshes/link_13.0.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/allegro/meshes/link_13.0.STL
--------------------------------------------------------------------------------
/diffco/robot_data/allegro/meshes/link_14.0.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/allegro/meshes/link_14.0.STL
--------------------------------------------------------------------------------
/diffco/robot_data/allegro/meshes/link_15.0.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/allegro/meshes/link_15.0.STL
--------------------------------------------------------------------------------
/diffco/robot_data/allegro/meshes/link_15.0_tip.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/allegro/meshes/link_15.0_tip.STL
--------------------------------------------------------------------------------
/diffco/robot_data/allegro/meshes/link_2.0.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/allegro/meshes/link_2.0.STL
--------------------------------------------------------------------------------
/diffco/robot_data/allegro/meshes/link_3.0.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/allegro/meshes/link_3.0.STL
--------------------------------------------------------------------------------
/diffco/robot_data/allegro/meshes/link_3.0_tip.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/allegro/meshes/link_3.0_tip.STL
--------------------------------------------------------------------------------
/diffco/robot_data/allegro/meshes/link_4.0.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/allegro/meshes/link_4.0.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/base_link_collision.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/base_link_collision.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/base_link_uv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/base_link_uv.png
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/bellows_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/bellows_link.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/bellows_link_collision.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/bellows_link_collision.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/elbow_flex_link_collision.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/elbow_flex_link_collision.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/elbow_flex_uv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/elbow_flex_uv.png
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/estop_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/estop_link.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/forearm_roll_link_collision.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/forearm_roll_link_collision.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/forearm_roll_uv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/forearm_roll_uv.png
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/gripper_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/gripper_link.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/gripper_uv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/gripper_uv.png
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/head_pan_link_collision.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/head_pan_link_collision.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/head_pan_uv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/head_pan_uv.png
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/head_tilt_link_collision.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/head_tilt_link_collision.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/head_tilt_uv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/head_tilt_uv.png
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/l_gripper_finger_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/l_gripper_finger_link.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/l_wheel_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/l_wheel_link.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/l_wheel_link_collision.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/l_wheel_link_collision.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/laser_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/laser_link.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/r_gripper_finger_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/r_gripper_finger_link.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/r_wheel_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/r_wheel_link.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/r_wheel_link_collision.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/r_wheel_link_collision.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/shoulder_lift_link_collision.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/shoulder_lift_link_collision.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/shoulder_lift_uv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/shoulder_lift_uv.png
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/shoulder_pan_link_collision.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/shoulder_pan_link_collision.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/shoulder_pan_uv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/shoulder_pan_uv.png
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/torso_fixed_link.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/torso_fixed_link.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/torso_fixed_uv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/torso_fixed_uv.png
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/torso_lift_link_collision.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/torso_lift_link_collision.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/torso_lift_uv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/torso_lift_uv.png
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/upperarm_roll_link_collision.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/upperarm_roll_link_collision.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/upperarm_roll_uv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/upperarm_roll_uv.png
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/wrist_flex_link_collision.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/wrist_flex_link_collision.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/wrist_flex_uv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/wrist_flex_uv.png
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/wrist_roll_link_collision.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/wrist_roll_link_collision.STL
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/meshes/wrist_roll_uv.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/fetch_description/meshes/wrist_roll_uv.png
--------------------------------------------------------------------------------
/diffco/robot_data/fetch_description/urdf/FETCH_MODIFICATIONS.md:
--------------------------------------------------------------------------------
1 | readme and fetch.urdf were copied from roboschool, created a reduced urdf with arm links/joints only.
2 |
3 | *Original Readme*:
4 |
5 | Original code taken from:
6 |
7 | https://github.com/fetchrobotics/fetch_ros
8 |
9 | Modifications:
10 |
11 | * Comment out `bellows_link` and `bellows_joint`. You can push off the ground using this strange thing!
12 |
13 | * Set moments of inertia of `r_gripper_finger_link` and `l_gripper_finger_link` to non-zero.
14 |
15 | * Set moments of inertia of `base_link` to zero, except for Z-axis, to disable rotation other than around Z axis.
16 |
17 | * Added a camera element; turned `head_camera_rgb_optical_frame` 180 degrees, as the camera was facing backwards.
18 |
19 | * Spheres are now collision shape for wheels (fixes jumping).
20 |
21 | * Boxes for fingers collision shape.
22 |
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/arm.SLDPRT:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/arm.SLDPRT
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/arm.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/arm.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/arm_half_1.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/arm_half_1.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/arm_half_2.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/arm_half_2.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/arm_mico.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/arm_mico.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/base.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/base.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/finger_distal.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/finger_distal.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/finger_proximal.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/finger_proximal.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/forearm.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/forearm.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/forearm_mico.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/forearm_mico.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/hand_2finger.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/hand_2finger.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/hand_3finger.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/hand_3finger.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/ring_big.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/ring_big.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/ring_small.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/ring_small.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/shoulder.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/shoulder.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/wrist.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/wrist.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/wrist_spherical_1.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/wrist_spherical_1.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kinova_description/meshes/wrist_spherical_2.STL:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kinova_description/meshes/wrist_spherical_2.STL
--------------------------------------------------------------------------------
/diffco/robot_data/kuka_iiwa/meshes/iiwa7/collision/link_0.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kuka_iiwa/meshes/iiwa7/collision/link_0.stl
--------------------------------------------------------------------------------
/diffco/robot_data/kuka_iiwa/meshes/iiwa7/collision/link_1.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kuka_iiwa/meshes/iiwa7/collision/link_1.stl
--------------------------------------------------------------------------------
/diffco/robot_data/kuka_iiwa/meshes/iiwa7/collision/link_2.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kuka_iiwa/meshes/iiwa7/collision/link_2.stl
--------------------------------------------------------------------------------
/diffco/robot_data/kuka_iiwa/meshes/iiwa7/collision/link_3.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kuka_iiwa/meshes/iiwa7/collision/link_3.stl
--------------------------------------------------------------------------------
/diffco/robot_data/kuka_iiwa/meshes/iiwa7/collision/link_4.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kuka_iiwa/meshes/iiwa7/collision/link_4.stl
--------------------------------------------------------------------------------
/diffco/robot_data/kuka_iiwa/meshes/iiwa7/collision/link_5.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kuka_iiwa/meshes/iiwa7/collision/link_5.stl
--------------------------------------------------------------------------------
/diffco/robot_data/kuka_iiwa/meshes/iiwa7/collision/link_6.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kuka_iiwa/meshes/iiwa7/collision/link_6.stl
--------------------------------------------------------------------------------
/diffco/robot_data/kuka_iiwa/meshes/iiwa7/collision/link_7.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/kuka_iiwa/meshes/iiwa7/collision/link_7.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/CHANGELOG.rst:
--------------------------------------------------------------------------------
1 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
2 | Changelog for package moveit_resources_panda_description
3 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
4 |
5 | 0.7.1 (2020-10-09)
6 | ------------------
7 |
8 | 0.7.0 (2020-08-13)
9 | ------------------
10 | * Split resources into multiple packages (`#36 <https://github.com/ros-planning/moveit_resources/issues/36>`_)
11 | * Contributors: Robert Haschke, Michael Görner
12 |
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.1.3)
2 | project(moveit_resources_panda_description)
3 | find_package(catkin REQUIRED)
4 |
5 | catkin_package()
6 |
7 | install(DIRECTORY meshes urdf DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION})
8 |
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/README.md:
--------------------------------------------------------------------------------
1 | # panda_description
2 |
3 | The URDF model and meshes contained in this package were copied from the frankaemika `franka_ros` package and adapted for use with `moveit_resources`.
4 |
5 | All imported files were released under the Apache-2.0 license.
6 |
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/meshes/collision/finger.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/meshes/collision/finger.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/meshes/collision/hand.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/meshes/collision/hand.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/meshes/collision/link0.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/meshes/collision/link0.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/meshes/collision/link1.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/meshes/collision/link1.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/meshes/collision/link2.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/meshes/collision/link2.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/meshes/collision/link3.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/meshes/collision/link3.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/meshes/collision/link4.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/meshes/collision/link4.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/meshes/collision/link5.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/meshes/collision/link5.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/meshes/collision/link6.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/meshes/collision/link6.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/meshes/collision/link7.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/meshes/collision/link7.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/package.xml:
--------------------------------------------------------------------------------
1 |
2 | moveit_resources_panda_description
3 | 0.7.1
4 | panda Resources used for MoveIt! testing
5 |
6 | Ioan Sucan
7 | Acorn Pooley
8 |
9 | Dave Coleman
10 |
11 | BSD
12 | http://moveit.ros.org
13 | https://github.com/ros-planning/moveit-resources/issues
14 | https://github.com/ros-planning/moveit-resources
15 |
16 | catkin
17 |
18 |
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/urdf/meshes/collision/finger.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/urdf/meshes/collision/finger.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/urdf/meshes/collision/hand.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/urdf/meshes/collision/hand.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/urdf/meshes/collision/link0.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/urdf/meshes/collision/link0.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/urdf/meshes/collision/link1.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/urdf/meshes/collision/link1.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/urdf/meshes/collision/link2.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/urdf/meshes/collision/link2.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/urdf/meshes/collision/link3.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/urdf/meshes/collision/link3.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/urdf/meshes/collision/link4.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/urdf/meshes/collision/link4.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/urdf/meshes/collision/link5.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/urdf/meshes/collision/link5.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/urdf/meshes/collision/link6.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/urdf/meshes/collision/link6.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/urdf/meshes/collision/link7.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/panda_description/urdf/meshes/collision/link7.stl
--------------------------------------------------------------------------------
/diffco/robot_data/panda_description/urdf/panda.srdf:
--------------------------------------------------------------------------------
1 |
2 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
--------------------------------------------------------------------------------
/diffco/robot_data/rope_description/rope.urdf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
--------------------------------------------------------------------------------
/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/base_back.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/base_back.stl
--------------------------------------------------------------------------------
/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/base_front.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/base_front.stl
--------------------------------------------------------------------------------
/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/base_side_left.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/base_side_left.stl
--------------------------------------------------------------------------------
/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/base_side_right.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/base_side_right.stl
--------------------------------------------------------------------------------
/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/base_top.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/base_top.stl
--------------------------------------------------------------------------------
/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/frame_wall.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/frame_wall.stl
--------------------------------------------------------------------------------
/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/lower_link.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/lower_link.stl
--------------------------------------------------------------------------------
/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/middle_link.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/middle_link.stl
--------------------------------------------------------------------------------
/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/upper_link.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/trifinger_edu_description/meshes/stl/edu/upper_link.stl
--------------------------------------------------------------------------------
/diffco/robot_data/trifinger_edu_description/meshes/stl/trifinger_table_without_border.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ucsdarclab/diffco/5bb84c5d6efa7c95dbf2e721188e008642e2b019/diffco/robot_data/trifinger_edu_description/meshes/stl/trifinger_table_without_border.stl
--------------------------------------------------------------------------------
/diffco/utils.py:
--------------------------------------------------------------------------------
1 | import torch
2 | import numpy as np
3 |
def rotz(phi):
    """Return a batch of 3x3 rotation matrices about the z-axis.

    Args:
        phi: 1-D tensor of angles in radians, shape (n,).

    Returns:
        Tensor of shape (n, 3, 3), one z-rotation matrix per angle.
    """
    cos_phi = torch.cos(phi)
    sin_phi = torch.sin(phi)
    out = torch.zeros((len(phi), 3, 3))
    out[:, 0, 0] = cos_phi
    out[:, 1, 1] = cos_phi
    out[:, 0, 1] = -sin_phi
    out[:, 1, 0] = sin_phi
    out[:, 2, 2] = 1
    return out
14 |
def euler2mat(phi):
    """Convert batched roll-pitch-yaw Euler angles to rotation matrices.

    Assumes the order (roll, pitch, yaw) about the fixed (x, y, z) axes,
    composed as Rz @ Ry @ Rx.

    Args:
        phi: tensor reshapeable to (n, 3); column order is (x, y, z).

    Returns:
        Tensor of shape (n, 3, 3).
    """
    phi = phi.reshape((-1, 3))
    n = len(phi)
    sin = torch.sin(phi)
    cos = torch.cos(phi)
    one = torch.ones_like(sin[:, 0])
    zero = torch.zeros_like(sin[:, 0])

    # Rotation about x (roll).
    r_x = torch.stack([
        one, zero, zero,
        zero, cos[:, 0], -sin[:, 0],
        zero, sin[:, 0], cos[:, 0]
    ], dim=1).reshape((n, 3, 3))
    # Rotation about y (pitch).
    r_y = torch.stack([
        cos[:, 1], zero, sin[:, 1],
        zero, one, zero,
        -sin[:, 1], zero, cos[:, 1],
    ], dim=1).reshape((n, 3, 3))
    # Rotation about z (yaw).
    r_z = torch.stack([
        cos[:, 2], -sin[:, 2], zero,
        sin[:, 2], cos[:, 2], zero,
        zero, zero, one,
    ], dim=1).reshape((n, 3, 3))
    return r_z @ r_y @ r_x
39 |
def rot_2d(phi):
    """Return a batch of 2x2 planar rotation matrices.

    Args:
        phi: 1-D tensor of angles in radians, shape (n,).

    Returns:
        Tensor of shape (n, 2, 2).
    """
    cos_phi = torch.cos(phi)
    sin_phi = torch.sin(phi)
    out = torch.zeros((len(phi), 2, 2))
    out[:, 0, 0] = cos_phi
    out[:, 1, 1] = cos_phi
    out[:, 0, 1] = -sin_phi
    out[:, 1, 0] = sin_phi
    return out
49 |
def wrap2pi(theta):
    """Wrap an angle (scalar, numpy array, or torch tensor) into [-pi, pi)."""
    two_pi = 2 * np.pi
    return (theta + np.pi) % two_pi - np.pi
53 |
def se2_wrap2pi(x):
    """Wrap the angular component of SE(2) configurations into [-pi, pi).

    The last dimension of *x* is laid out as (x, y, theta); the translation
    part is passed through untouched.
    """
    translation = x[..., :2]
    heading = wrap2pi(x[..., 2:3])
    return torch.cat([translation, heading], dim=-1)
56 |
# Generate a sequence of angles between q1 and q2,
# acts like np.linspace but considered the wrapping-around problem
# q1 and q2 can both be vectors of the same dimension
def anglin(q1, q2, num=50, endpoint=True):
    """Interpolate angles from q1 toward q2 along the shortest wrapped arc.

    Acts like ``np.linspace`` but each step follows the minimal angular
    displacement (wrapped to [-pi, pi)), so interpolation never takes the
    long way around the circle.

    Args:
        q1, q2: array-likes of angles (radians), same shape.
        num: number of interpolation points.
        endpoint: whether the final point reaches q2 (as in np.linspace).

    Returns:
        Float32 tensor of shape (num, *q1.shape), each row wrapped to [-pi, pi).
    """
    q1 = torch.FloatTensor(q1)
    q2 = torch.FloatTensor(q2)
    # Shortest signed displacement from q1 to q2, wrapped to [-pi, pi).
    # Convert to numpy explicitly (the original relied on np.linspace
    # implicitly converting torch tensors) and pin float32 so the result
    # dtype matches q1 instead of silently promoting to float64.
    delta = wrap2pi(q2 - q1).numpy()
    dq = torch.from_numpy(
        np.linspace(np.zeros_like(delta), delta, num,
                    endpoint=endpoint, dtype=np.float32))
    return wrap2pi(q1 + dq)
65 |
def DH2mat(q, a, d, s_alpha, c_alpha):
    """Build batched 4x4 Denavit-Hartenberg transforms.

    Args:
        q: joint angles, shape (n, dof).
        a, d: DH link length and offset, shape (dof,).
        s_alpha, c_alpha: precomputed sin/cos of the DH twist, shape (dof,).

    Returns:
        Tensor of shape (n, dof, 4, 4), one DH transform per joint per sample.
    """
    cq = q.cos()  # (n, dof)
    sq = q.sin()
    n = len(q)
    zero = torch.zeros_like(q)
    one = torch.ones_like(q)
    row0 = torch.stack([cq, -sq * c_alpha, sq * s_alpha, a * cq], dim=2)
    row1 = torch.stack([sq, cq * c_alpha, -cq * s_alpha, a * sq], dim=2)
    # The twist and offset rows do not depend on q; tile them over the batch.
    row2 = torch.stack(
        [zero, s_alpha.repeat(n, 1), c_alpha.repeat(n, 1), d.repeat(n, 1)],
        dim=2)
    row3 = torch.stack([zero, zero, zero, one], dim=2)
    return torch.stack([row0, row1, row2, row3], dim=2)
76 |
# Convert a sequence of adjacent joint angles to be numerically adjacent
# eg. [5pi/6, -pi] will be converted to [5pi/6, pi]
# This is for the convenience of plotting angular configuration space
def make_continue(q, max_gap=np.pi):
    """Unwrap a sequence of angles so consecutive entries stay close.

    Whenever two neighbouring values jump by more than *max_gap*, every
    subsequent value is shifted by a multiple of 2*pi to remove the jump.
    """
    q = torch.FloatTensor(q)
    diffs = q[1:] - q[:-1]
    jumps = torch.zeros_like(q)
    # +/-1 at every index where the sequence wrapped around.
    jumps[1:] = (diffs.abs() > max_gap) * torch.sign(diffs)
    return q - torch.cumsum(jumps, dim=0) * 2 * np.pi
86 |
def dense_path(q, max_step=2.0, max_step_num=None):
    """Insert intermediate waypoints so consecutive points are <= max_step apart.

    Args:
        q: waypoint tensor of shape (m, dof).
        max_step: maximum Euclidean distance between consecutive output points.
        max_step_num: optional cap on the total number of steps; when the
            path is long enough, the step size grows to respect this cap.

    Returns:
        Densified path tensor whose first and last rows equal q[0] and q[-1].
    """
    if max_step_num is not None:
        total_len = torch.norm(q[1:] - q[:-1], dim=-1).sum().item()
        # Enlarge the step if the requested resolution would exceed the cap.
        min_allowed = total_len / max_step_num
        if min_allowed > max_step:
            max_step = min_allowed
    pieces = []
    for seg_idx in range(len(q) - 1):
        step_vec = q[seg_idx + 1] - q[seg_idx]
        seg_len = step_vec.norm()
        n_sub = torch.ceil(seg_len / max_step).item()
        ticks = torch.arange(n_sub).reshape(-1, 1).to(q.device)
        pieces.append(q[seg_idx] + ticks * step_vec * max_step / seg_len)
    pieces.append(q[-1:])
    dense = torch.cat(pieces)
    assert torch.all(dense[0] == q[0]) and torch.all(dense[-1] == q[-1])
    return dense
102 |
103 |
104 |
if __name__ == "__main__":
    # Quick manual sanity checks for the angle utilities (not unit tests).
    # x = np.linspace(-8*np.pi, 8*np.pi, 1000)
    # y = wrap2pi(x)
    # from matplotlib import pyplot as plt
    # plt.plot(x, y)
    # plt.show()
    # Shortest signed angle from -0.9*pi to 0.8*pi, printed in degrees.
    print(wrap2pi(0.8*np.pi - (-0.9)*np.pi)/np.pi*180)
    # One wrapped interpolation step between the two angles, in degrees.
    print(anglin([-0.8*np.pi], [0.9*np.pi], 1, False)/np.pi*180)
--------------------------------------------------------------------------------
/examples/tests/test_rope.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import time
3 |
4 | import diffco as dc
5 | import numpy as np
6 | import torch
7 | import trimesh.transformations as tf
8 |
class TestForwardKinematicsDiffCo(unittest.TestCase):
    """Smoke test: fit a ForwardKinematicsDiffCo checker on a rope robot.

    Builds a small ShapeEnv with one of each primitive shape plus a mesh,
    loads the rope URDF, fits the checker on random samples, and asserts
    the true-positive rate meets a threshold.
    """

    def setUp(self):
        # Initialize any necessary objects or variables
        pass

    def tearDown(self):
        # Clean up any resources used in the test
        pass

    def test_forward_kinematics_rope(self):
        # Create an instance of the ForwardKinematicsDiffCo class
        print('-'*50, '\nTesting ForwardKinematicsDiffCo with Rope robot.')
        # Environment with one obstacle of every supported shape type.
        # NOTE(review): paths are relative to this test file's directory —
        # the test presumably must be run from examples/tests; confirm.
        shape_env = dc.ShapeEnv(
            shapes={
                'box1': {'type': 'Box', 'params': {'extents': [0.1, 0.1, 0.1]}, 'transform': tf.translation_matrix([0.5, 0.5, 0.5])},
                'sphere1': {'type': 'Sphere', 'params': {'radius': 0.1}, 'transform': tf.translation_matrix([0.5, 0, 0])},
                'cylinder1': {'type': 'Cylinder', 'params': {'radius': 0.1, 'height': 0.2}, 'transform': tf.translation_matrix([0, -0.5, 0.5])},
                'capsule1': {'type': 'Capsule', 'params': {'radius': 0.1, 'height': 0.2}, 'transform': tf.translation_matrix([0.5, 0.5, 0])},
                'mesh1': {'type': 'Mesh', 'params': {'file_obj': '../../assets/object_meshes/teapot.stl', 'scale': 1e-1}, 'transform': tf.translation_matrix([0, 0.5, 0])},
            }
        )
        # Rope robot loaded without visual meshes (collision geometry only).
        rope_urdf_robot = dc.URDFRobot(
            urdf_path='../../diffco/robot_data/rope_description/rope.urdf',
            base_transform=torch.eye(4),
            device="cpu",
            load_visual_meshes=False
        )
        fkdc = dc.ForwardKinematicsDiffCo(
            robot=rope_urdf_robot,
            environment=shape_env,
        )
        # fit() returns accuracy, true-positive rate, true-negative rate.
        acc, tpr, tnr = fkdc.fit(num_samples=10000, verbose=True)

        # Assert the expected result
        # Only TPR is enforced; accuracy/TNR checks are intentionally
        # disabled (presumably too flaky for this robot — confirm).
        expected_result = 0.9  # Define the expected result here
        # self.assertGreaterEqual(acc, expected_result)
        self.assertGreaterEqual(tpr, expected_result)
        # self.assertGreaterEqual(tnr, expected_result)

        # self.speed_test(fkdc)
50 |
if __name__ == '__main__':
    # Allow running this test module directly: python test_rope.py
    unittest.main()
--------------------------------------------------------------------------------
/examples/tests/test_urdf_robot.py:
--------------------------------------------------------------------------------
1 | from diffco.collision_interfaces.urdf_interface import TwoLinkRobot, FrankaPanda, URDFRobot, MultiURDFRobot, robot_description_folder
2 | import trimesh
3 | import fcl
4 | import numpy as np
5 | import torch
6 | import os
7 | import trimesh.transformations as tf
8 | from trimesh.collision import CollisionManager
9 | from trimesh.primitives import Box, Sphere, Cylinder, Capsule
10 |
def test_urdf(urdf_robot: URDFRobot, num_cfgs: int = 1000, show: bool = False) -> None:
    """Exercise a URDFRobot: sample configurations, run collision checks,
    then verify that three transform sources agree for every collision piece:
    the fcl collision objects, urdfpy's get_transform, and
    compute_forward_kinematics_all_links.

    Args:
        urdf_robot: single robot or a MultiURDFRobot composite.
        num_cfgs: number of random configurations to test.
        show: forwarded to collision() — presumably enables visualization;
            confirm against the collision() implementation.
    """
    print(urdf_robot)
    # print(list(urdf_robot.robot.link_map.values()))
    # print(urdf_robot.robot.collision_scene)

    cfgs = urdf_robot.rand_configs(num_cfgs)

    # Batch self/environment collision query over all sampled configs.
    collision_labels = urdf_robot.collision(cfgs, other=None, show=show)
    print(f"in collision: {collision_labels.sum()}/{num_cfgs}, collision-free {(collision_labels==0).sum()}/{num_cfgs}")
    # A MultiURDFRobot concatenates the per-robot configs; split them back
    # so each robot can be verified independently.
    if isinstance(urdf_robot, MultiURDFRobot):
        urdf_robots = urdf_robot.urdf_robots
        cfgs_split = urdf_robot.split_configs(cfgs)
    else:
        urdf_robots = [urdf_robot]
        cfgs_split = [cfgs]

    for urdf_robot, cfgs in zip(urdf_robots, cfgs_split):
        print(urdf_robot.collision_manager._allowed_internal_collisions)
        print(f"Verifying forward kinematics for {urdf_robot.name}")
        for i in range(num_cfgs):

            # Push config i into urdfpy, then compute collision-geometry FK
            # for the single config i (batch of size 1).
            urdf_robot.urdf.update_cfg(cfgs[i].numpy())
            fk = urdf_robot.compute_forward_kinematics_all_links(cfgs[i:i+1], return_collision=True)
            # set_fkine expects per-link lists of (translation, rotation);
            # [0] strips the batch dimension from each pair.
            urdf_robot.collision_manager.set_fkine({k: [(t[0], r[0]) for t, r in v] for k, v in fk.items()})
            for link_name in fk:
                # A link may own several collision pieces; compare each one.
                for piece_idx, cobj_name in enumerate(urdf_robot.collision_manager.link_collision_objects[link_name]):
                    # Assemble a homogeneous transform from the FK output.
                    fk_trans, fk_rot = fk[link_name][piece_idx]
                    fk_rot = fk_rot.numpy()
                    fk_tf = np.eye(4, dtype=fk_rot.dtype)
                    fk_tf[:3, :3] = fk_rot
                    fk_tf[:3, 3] = fk_trans.numpy()
                    # Assemble the same transform as stored in the fcl object.
                    fcl_trans = urdf_robot.collision_manager._objs[cobj_name]['obj'].getTranslation()
                    fcl_rot = urdf_robot.collision_manager._objs[cobj_name]['obj'].getRotation()
                    fcl_tf = np.eye(4, dtype=fcl_rot.dtype)
                    fcl_tf[:3, :3] = fcl_rot
                    fcl_tf[:3, 3] = fcl_trans
                    # Third, independent reference: urdfpy's own FK.
                    t_from_urdfpy = urdf_robot.urdf.get_transform(cobj_name, collision_geometry=True)
                    assert np.allclose(t_from_urdfpy, fcl_tf, rtol=1e-4, atol=1e-6), \
                        f"Link {link_name} transform mismatch between fcl and urdfpy: {t_from_urdfpy - fcl_tf}"
                    assert np.allclose(fk_tf, fcl_tf, rtol=1e-4, atol=1e-6), \
                        f"Link {link_name} transform mismatch between fcl and compute_forward_kinematics_all_links: {fk_tf - fcl_tf}"
        print(f"Forward kinematics of {urdf_robot.name} verified")
53 |
if __name__ == "__main__":
    # Dual panda arms mounted like a pair of shoulders: both bases at z=0.8,
    # rotated +/-90 degrees about y so the arms point outward.
    # panda0 = FrankaPanda(
    #     load_gripper=True,
    #     base_transform=torch.eye(4, dtype=torch.float32),
    #     device="cpu", load_visual_meshes=True)
    transform1 = tf.translation_matrix([0.1, 0.0, 0.8])
    transform1[:3, :3] = tf.euler_matrix(0, np.pi/2, 0)[:3, :3]
    panda1 = FrankaPanda(
        name="panda1",
        load_gripper=True,
        base_transform=torch.tensor(transform1, dtype=torch.float32),
        device="cpu", load_visual_meshes=True)
    transform2 = tf.translation_matrix([-0.1, 0.0, 0.8])
    transform2[:3, :3] = tf.euler_matrix(0, -np.pi/2, 0)[:3, :3]
    panda2 = FrankaPanda(
        name="panda2",
        load_gripper=True,
        base_transform=torch.tensor(transform2, dtype=torch.float32),
        device="cpu", load_visual_meshes=True)
    multi_panda = MultiURDFRobot(
        urdf_robots=[panda1, panda2],
        device="cpu"
    )
    test_urdf(multi_panda, show=True)

    # NOTE(review): this early exit makes everything below unreachable -
    # presumably left in on purpose while iterating on the dual-panda case.
    exit()

    # rope_urdf_robot = URDFRobot(
    #     urdf_path='../../diffco/robot_data/rope_description/rope.urdf',
    #     base_transform=torch.eye(4),
    #     device="cpu",
    #     load_visual_meshes=True
    # )
    # test_urdf(rope_urdf_robot, show=True)

    # exit()

    # dvrk_urdf_robot = URDFRobot(
    #     urdf_path=os.path.join(
    #         robot_description_folder,
    #         "dvrk_model/urdf/both_sca.urdf"),
    #     name="dvrk",
    #     device="cpu",
    #     load_visual_meshes=True
    # )
    # test_urdf(dvrk_urdf_robot, show=True)

    # exit()

    # Simplest case: planar 2-link arm.
    two_link_robot = TwoLinkRobot()
    test_urdf(two_link_robot, show=False)

    # Single panda with gripper and full visual meshes.
    panda_urdf_robot = FrankaPanda(
        load_gripper=True,
        base_transform=torch.eye(4),
        device="cpu", load_visual_meshes=True)
    test_urdf(panda_urdf_robot, show=False)

    # Same robot with simplified collision geometry, no gripper, no meshes.
    panda_simple_collision_urdf_robot = FrankaPanda(
        simple_collision=True,
        load_gripper=False,
        load_visual_meshes=False
    )
    test_urdf(panda_simple_collision_urdf_robot, show=False)

    # Allegro hand loaded from a raw URDF path with an offset base.
    base_transform = torch.tensor(tf.translation_matrix([0.5, 0, 0.5]), dtype=torch.float32)
    hand_urdf_robot = URDFRobot(
        urdf_path=os.path.join(
            robot_description_folder,
            "allegro/urdf/allegro_hand_description_left.urdf"),
        name="allegro",
        base_transform=base_transform,
        device="cpu",
        load_visual_meshes=True
    )
    test_urdf(hand_urdf_robot, show=False)

    # Heterogeneous multi-robot: panda + allegro hand in one checker.
    multi_urdf_robot = MultiURDFRobot(
        urdf_robots=[
            panda_urdf_robot,
            hand_urdf_robot
        ],
        device="cpu"
    )
    test_urdf(multi_urdf_robot, show=False)

    # Mobile manipulator from its packaged URDF; meshes skipped for speed.
    fetch_urdf_robot = URDFRobot(
        urdf_path=os.path.join(
            robot_description_folder,
            "fetch_description/urdf/fetch.urdf"),
        name="fetch",
        device="cpu",
        load_visual_meshes=False
    )
    # NOTE(review): whitelists the base_link/bellows_link2 pair - presumably
    # these geometries always overlap in the fetch model; confirm.
    fetch_urdf_robot.collision_manager._allowed_internal_collisions[('base_link', 'bellows_link2')] = 'always'
    test_urdf(fetch_urdf_robot, show=False)

    print("All tests passed")
--------------------------------------------------------------------------------
/examples/tests/test_urdf_robot_vs_shapeenv.py:
--------------------------------------------------------------------------------
1 | from diffco.collision_interfaces.urdf_interface import TwoLinkRobot, FrankaPanda, URDFRobot, MultiURDFRobot, robot_description_folder
2 | from diffco.collision_interfaces.env_interface import ShapeEnv
3 | import trimesh
4 | import fcl
5 | import numpy as np
6 | import torch
7 | import os
8 | import trimesh.transformations as tf
9 | from trimesh.collision import CollisionManager
10 | from trimesh.primitives import Box, Sphere, Cylinder, Capsule
11 |
def test_urdf_vs_shapeenv(urdf_robot: URDFRobot, shape_env: ShapeEnv, show=False, num_cfgs=1000):
    """Batched collision check of a robot against an environment of shapes.

    Samples ``num_cfgs`` random configurations, queries collisions against
    ``shape_env``, and prints the in-collision / collision-free tally.

    Args:
        urdf_robot: robot (or MultiURDFRobot) providing ``rand_configs`` and
            ``collision``.
        shape_env: environment of obstacle shapes to collide against.
        show: forwarded to ``collision`` for optional visualization.
        num_cfgs: number of random configurations to sample. Previously
            hard-coded to 1000; now a trailing keyword parameter (default
            unchanged) so existing callers are unaffected and the signature
            mirrors ``test_urdf`` in test_urdf_robot.py.
    """
    print(urdf_robot)
    print(shape_env)

    cfgs = urdf_robot.rand_configs(num_cfgs)

    collision_labels = urdf_robot.collision(cfgs, other=shape_env, show=show)
    print(f"in collision: {collision_labels.sum()}/{num_cfgs}, collision-free {(collision_labels==0).sum()}/{num_cfgs}")
21 |
if __name__ == "__main__":
    # A handful of scattered primitives (plus one mesh) at assorted poses.
    shape_env = ShapeEnv(
        shapes={
            'box1': {'type': 'Box', 'params': {'extents': [0.1, 0.1, 0.1]}, 'transform': tf.translation_matrix([0.5, 0.5, 0.5])},
            'sphere1': {'type': 'Sphere', 'params': {'radius': 0.1}, 'transform': tf.translation_matrix([0.5, 0, 0])},
            'cylinder1': {'type': 'Cylinder', 'params': {'radius': 0.1, 'height': 0.2}, 'transform': tf.translation_matrix([0, -0.5, 0.5])},
            'capsule1': {'type': 'Capsule', 'params': {'radius': 0.1, 'height': 0.2}, 'transform': tf.translation_matrix([0.5, 0.5, 0])},
            # NOTE(review): mesh path is relative to the working directory, so
            # the teapot is only found when running from this script's folder.
            'mesh1': {'type': 'Mesh', 'params': {'file_obj': '../../assets/object_meshes/teapot.stl', 'scale': 1e-1}, 'transform': tf.translation_matrix([0, 0.5, 0])},
        }
    )

    # two_link_robot = TwoLinkRobot()
    # test_urdf_vs_shapeenv(two_link_robot, shape_env, show=True)


    # panda_urdf_robot = FrankaPanda(
    #     load_gripper=True,
    #     base_transform=torch.eye(4),
    #     device="cpu", load_visual_meshes=True)
    # test_urdf_vs_shapeenv(panda_urdf_robot, shape_env, show=True)

    # base_transform = torch.tensor(tf.translation_matrix([-0.5, 0, 0.5]), dtype=torch.float32)
    # hand_urdf_robot = URDFRobot(
    #     urdf_path=os.path.join(
    #         robot_description_folder,
    #         "allegro/urdf/allegro_hand_description_left.urdf"),
    #     name="allegro",
    #     base_transform=base_transform,
    #     device="cpu",
    #     load_visual_meshes=True
    # )
    # test_urdf_vs_shapeenv(hand_urdf_robot, shape_env, show=False)

    # multi_urdf_robot = MultiURDFRobot(
    #     urdf_robots=[
    #         panda_urdf_robot,
    #         hand_urdf_robot
    #     ],
    #     device="cpu"
    # )
    # test_urdf_vs_shapeenv(multi_urdf_robot, shape_env, show=False)

    # Dual pandas mounted at z=0.8, rotated +/-90 degrees about y, checked
    # against the shape environment above.
    transform1 = tf.translation_matrix([0.1, 0.0, 0.8])
    transform1[:3, :3] = tf.euler_matrix(0, np.pi/2, 0)[:3, :3]
    panda1 = FrankaPanda(
        name="panda1",
        load_gripper=True,
        base_transform=torch.tensor(transform1, dtype=torch.float32),
        device="cpu", load_visual_meshes=True)
    transform2 = tf.translation_matrix([-0.1, 0.0, 0.8])
    transform2[:3, :3] = tf.euler_matrix(0, -np.pi/2, 0)[:3, :3]
    panda2 = FrankaPanda(
        name="panda2",
        load_gripper=True,
        base_transform=torch.tensor(transform2, dtype=torch.float32),
        device="cpu", load_visual_meshes=True)
    multi_panda = MultiURDFRobot(
        urdf_robots=[panda1, panda2],
        device="cpu"
    )
    test_urdf_vs_shapeenv(multi_panda, shape_env, show=True)

    print("All tests passed")
--------------------------------------------------------------------------------
/experiments.md:
--------------------------------------------------------------------------------
1 | # Instructions for Reproducing Experiments from the Paper
2 |
3 | Table of Contents:
4 | - [Instructions for Reproducing Experiments from the Paper](#instructions-for-reproducing-experiments-from-the-paper)
5 | - [Experiment A: Evaluation of DiffCo Score](#experiment-a-evaluation-of-diffco-score)
6 | - [2D environment](#2d-environment)
7 | - [3D environment](#3d-environment)
8 | - [Experiment C: Trajectory Optimization that Avoids Important Objects](#experiment-c-trajectory-optimization-that-avoids-important-objects)
9 | - [2D environment](#2d-environment-1)
10 | - [3D environment](#3d-environment-1)
11 | - [Experiment D: Trajectory Optimization with Dynamic Objects](#experiment-d-trajectory-optimization-with-dynamic-objects)
12 | - [2D environment](#2d-environment-2)
13 | - [3D environment](#3d-environment-2)
14 |
15 | ---
16 |
17 | ## Experiment A: Evaluation of DiffCo Score
18 |
19 | ### 2D environment
20 | Use the script `scripts/test_correlation.py` to generate correlation and FCL comparison figures. If you do not provide a dataset, a default one will be autogenerated for you.
21 | ```
22 | python scripts/test_correlation.py correlate
23 | ```
24 |
25 |
26 |
27 |
28 |
29 | ```
30 | python scripts/test_correlation.py compare
31 | ```
32 |
33 |
34 | To create a dataset, use the script `2d_data_generation.py` to create data for a handful of predefined environments. Use the `--help` flag to see all the possible environments.
35 | ```
python scripts/2d_data_generation.py --help
37 | ```
38 |
39 | ```
40 | python scripts/2d_data_generation.py --env 3d_halfnarrow --dof 7 --link-length 1
41 | ```
42 |
43 |
44 | ```
45 | python scripts/2d_data_generation.py --env 1rect_1circle --dof 3 --link-length 2
46 | ```
47 |
48 |
Once you have the dataset you want, pass it to the `scripts/test_correlation.py` script with the `--dataset` flag.
50 |
51 |
52 | ### 3D environment
53 | Documentation coming soon.
54 |
55 | ---
56 |
57 | ## Experiment C: Trajectory Optimization that Avoids Important Objects
58 | ### 2D environment
59 | Use the script `2d_trajopt.py` to generate trajectory optimization plans and figures. If you do not provide a dataset, a default one will be autogenerated for you.
60 | ```
61 | python scripts/2d_trajopt.py
62 | ```
63 |
64 |
65 | You can also specify custom start and target configurations.
66 | ```
67 | python scripts/2d_trajopt.py --start-cfg 1 0.5 0 --target-cfg -1 -0.5 0
68 | ```
69 |
70 |
71 | Using the `--safety-margin` flag, you can assign a bias to one of the classes.
72 | ```
73 | python scripts/2d_trajopt.py --start-cfg 1 0.5 0 --target-cfg -1 -0.5 0 --safety-margin -12 -1.2
74 | ```
75 |
76 |
77 | You can pass in a custom dataset with the `--dataset` flag.
78 | ```
79 | python scripts/2d_data_generation.py --env 2class_2 --dof 7 --label-type class --link-length 1
80 | python scripts/2d_trajopt.py -d data/landscape/2d_7dof_2obs_class_2class_2.pt --start-cfg 3.14 0 0 0 0 0 0 --target-cfg 0 0 0 0 0 0 0
81 | ```
82 |
83 |
84 | The script also generates a video animation.
85 |
86 | 
87 |
88 |
89 |
90 | ### 3D environment
91 | Documentation coming soon.
92 |
93 | ---
94 |
95 | ## Experiment D: Trajectory Optimization with Dynamic Objects
96 | ### 2D environment
97 | Use the script `active.py` to generate trajectories and figures for snapshots of environments with moving obstacles. If a dataset is not provided, one will be autogenerated. You can generate trajectories using either DiffCo or FCL.
98 |
99 | ```
100 | python scripts/active.py diffco
101 | ```
102 |
107 |
108 | ```
109 | python scripts/active.py fcl
110 | ```
111 |
116 |
117 | ### 3D environment
118 | Documentation coming soon.
119 |
120 | ---
121 |
--------------------------------------------------------------------------------
/misc/csv_dataset_to_pt.py:
--------------------------------------------------------------------------------
1 | from diffco.routines import load_dataset
2 | import torch
3 | import argparse
4 |
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Convert a csv dataset file generated by C++ code to the .pt format used in python scripts.')
    parser.add_argument('path', type=str, help='Path to the csv dataset file')
    parser.add_argument('robot_name', type=str, help='Robot name (Baxter, panda, 2d)')
    parser.add_argument('group', type=str, help='Move group name')
    parser.add_argument('--output', '-o', type=str, default='', help='Path to the output file')
    args = parser.parse_args()
    print(args)

    if args.output == '':
        # Swap only the final extension. The previous
        # `args.path.replace('.csv', '.pt')` rewrote the first '.csv' found
        # anywhere in the path, and when the input had no .csv extension it
        # left output == input, silently overwriting the source file.
        from pathlib import Path
        args.output = str(Path(args.path).with_suffix('.pt'))
    # Parse the csv into the project's dataset structure and persist it.
    dataset = load_dataset(args.path, args.robot_name, args.group)
    torch.save(dataset, args.output)
    print(f'======== Output file saved to {args.output} ========')
--------------------------------------------------------------------------------
/misc/fcl-test-extracting-contacts.py:
--------------------------------------------------------------------------------
import fcl
import numpy as np

# Three unit cylinders: the first two overlap along z, the third floats clear.
offsets = [
    None,                           # obj1: identity pose
    np.array([0.0, 0.0, 0.3]),      # obj2: shifted up, overlapping obj1
    np.array([0.0, 0.0, 3.0]),      # obj3: far above, collision-free
]
geoms = [fcl.Cylinder(1.0, 1.0) for _ in offsets]
objs = [
    fcl.CollisionObject(g) if off is None else fcl.CollisionObject(g, fcl.Transform(off))
    for g, off in zip(geoms, offsets)
]
names = ['obj1', 'obj2', 'obj3']

# Contacts report collision *geometries*, so index objects and names by the
# id() of their geometry to recover which pair collided.
geom_id_to_obj = {id(g): o for g, o in zip(geoms, objs)}
geom_id_to_name = {id(g): nm for g, nm in zip(geoms, names)}

# Broad-phase manager over all three objects.
manager = fcl.DynamicAABBTreeCollisionManager()
manager.registerObjects(objs)
manager.setup()

# Ask for contact information, up to 100 contacts.
crequest = fcl.CollisionRequest(num_max_contacts=100, enable_contact=True)
cdata = fcl.CollisionData(crequest, fcl.CollisionResult())

# Internal (all-pairs within the manager) collision query.
manager.collide(cdata, fcl.defaultCollisionCallback)

# Reduce the raw contact list to the set of colliding object-name pairs.
objs_in_collision = set()
for contact in cdata.result.contacts:
    print(contact.penetration_depth)
    pair = tuple(sorted((geom_id_to_name[id(contact.o1)],
                         geom_id_to_name[id(contact.o2)])))
    objs_in_collision.add(pair)

for coll_pair in objs_in_collision:
    print('Object {} in collision with object {}!'.format(coll_pair[0], coll_pair[1]))
--------------------------------------------------------------------------------
/misc/fcl-test-group.py:
--------------------------------------------------------------------------------
# Scratchpad exercising python-fcl: constructs one of each supported geometry,
# then runs a single signed-distance query between a rotated box and a cylinder.
import numpy as np
import fcl
from scipy.spatial.transform import Rotation

v1 = np.array([1.0, 2.0, 3.0])
v2 = np.array([2.0, 1.0, 3.0])
v3 = np.array([3.0, 2.0, 1.0])
x, y, z = 1.0, 2.0, 3.0
rad, lz = 1.0, 3.0
n = np.array([1.0, 0.0, 0.0])
d = 5.0

t = fcl.TriangleP(v1, v2, v3)  # Triangle defined by three points
b = fcl.Box(x, y, z)           # Axis-aligned box with given side lengths
s = fcl.Sphere(rad)            # Sphere with given radius
e = fcl.Ellipsoid(x, y, z)     # Axis-aligned ellipsoid with given radii
# c = fcl.Capsule(rad, lz)     # Capsule with given radius and height along z-axis
# c = fcl.Cone(rad, lz)        # Cone with given radius and cylinder height along z-axis
c = fcl.Cylinder(rad, lz)      # Cylinder with given radius and height along z-axis
h = fcl.Halfspace(n, d)        # Half-space defined by {x : < d}
p = fcl.Plane(n, d)            # Plane defined by {x : = d}

# A small tetrahedron as a triangle mesh (BVH model).
verts = np.array([[1.0, 1.0, 1.0],
                  [2.0, 1.0, 1.0],
                  [1.0, 2.0, 1.0],
                  [1.0, 1.0, 2.0]])
tris = np.array([[0, 2, 1],
                 [0, 3, 2],
                 [0, 1, 3],
                 [1, 2, 3]])

m = fcl.BVHModel()
m.beginModel(len(verts), len(tris))
m.addSubModel(verts, tris)
m.endModel()

# Box rotated about z and offset along x. The [[3,0,1,2]] reindex reorders
# scipy's xyzw quaternion into the wxyz order fcl.Transform expects.
objs1 = [fcl.CollisionObject(b, fcl.Transform(Rotation.from_euler('XYZ', [0, 0, np.arccos(2.0/3)-np.arctan(0.5)]).as_quat()[[3,0,1,2]], [1.5, 0, 0]))] #, fcl.CollisionObject(s)] np.arccos(2.0/3) Rotation.from_euler('XYZ', [0, 0, np.pi/2]).as_quat()[[3,0,1,2]], [1.5, 0, 0]
print(Rotation.from_rotvec([0, 0, np.arccos(2.0/3)]).as_quat()[[3, 0, 1, 2]])
objs2 = [fcl.CollisionObject(c)] #, fcl.CollisionObject(m)]

# Managers are set up but the live query below bypasses them and calls
# fcl.distance on the two objects directly.
manager1 = fcl.DynamicAABBTreeCollisionManager()
manager2 = fcl.DynamicAABBTreeCollisionManager()

manager1.registerObjects(objs1)
manager2.registerObjects(objs2)

manager1.setup()
manager2.setup()

# #=====================================================================
# # Managed internal (sub-n^2) collision checking
# #=====================================================================
# cdata = fcl.CollisionData()
# manager1.collide(cdata, fcl.defaultCollisionCallback)
# print('Collision within manager 1?: {}'.format(cdata.result.is_collision))

# #=====================================================================
# # Managed internal (sub-n^2) distance checking
# #=====================================================================
# ddata = fcl.DistanceData()
# manager1.distance(ddata, fcl.defaultDistanceCallback)
# print('Closest distance within manager 1?: {}'.format(ddata.result.min_distance))

#=====================================================================
# Managed one to many collision checking
#=====================================================================
# req = fcl.CollisionRequest(num_max_contacts=100, enable_contact=True)
# rdata = fcl.CollisionData(request = req)

# manager1.collide(fcl.CollisionObject(m), rdata, fcl.defaultCollisionCallback)
# print('Collision between manager 1 and Mesh?: {}'.format(rdata.result.is_collision))
# print('Contacts:')
# for c in rdata.result.contacts:
#     print('\tO1: {}, O2: {}, depth: {}'.format(c.o1, c.o2, c.penetration_depth))

#=====================================================================
# Managed many to many collision checking
#=====================================================================
# req = fcl.CollisionRequest(num_max_contacts=100, enable_contact=True)
# rdata = fcl.CollisionData(request = req)
req = fcl.DistanceRequest(enable_nearest_points=True, enable_signed_distance=True)
# ddata = fcl.DistanceData(request = req)
res = fcl.DistanceResult()
# manager1.distance(manager2, rdata, fcl.defaultDistanceCallback)
# manager1.distance(manager2, ddata, fcl.defaultDistanceCallback)
# Single pairwise signed-distance query; result lands in `res`.
fcl.distance(objs1[0], objs2[0], request=req, result=res)
# print('Collision between manager 1 and manager 2?: {}'.format(ddata.result.is_collision))
# NOTE(review): despite the message text this prints the minimum distance,
# not a collision flag.
print('Collision between manager 1 and manager 2?: {}'.format(res.min_distance))
# print('Contacts:')
# for c in ddata.result.contacts:
#     print('\tO1: {}, O2: {}, depth: {}, pos: {}, normal: {}'.format(c.o1, c.o2, c.penetration_depth, c.pos, c.normal))
# print(1-2/np.sqrt(5))
--------------------------------------------------------------------------------
/misc/fcl-test.py:
--------------------------------------------------------------------------------
# Minimal python-fcl distance demo: two large boxes, the second rotated 45
# degrees about z and offset along x, queried for signed distance and nearest
# points.
import numpy as np
import fcl
import torch
from scipy.spatial.transform import Rotation

# R = np.array([[0.0, -1.0, 0.0],
#               [1.0, 0.0, 0.0],
#               [0.0, 0.0, 1.0]])
# R = np.array([[1.0, 0.0, 0.0],
#               [0.0, 1.0, 0.0],
#               [0.0, 0.0, 1.0]])
# T = np.array([1.0, 1.865, 0])

g1 = fcl.Box(10, 20, 1000)
t1 = fcl.Transform()  # identity pose
o1 = fcl.CollisionObject(g1, t1)

# g2 = fcl.Cone(1,3)
# g2 = fcl.Cylinder(0.01, 1000)
g2 = fcl.Box(10, 20, 1000)
# [[3,0,1,2]] reorders scipy's xyzw quaternion into wxyz for fcl.Transform.
t2 = fcl.Transform(Rotation.from_rotvec([0., 0., np.pi/4]).as_quat()[[3,0,1,2]], [10., 0., 0.])
o2 = fcl.CollisionObject(g2, t2)

request = fcl.DistanceRequest(enable_nearest_points=True, enable_signed_distance=True)#, gjk_solver_type=fcl.GJKSolverType.GST_LIBCCD)
result = fcl.DistanceResult()
# request.enable_signed_distance = True
# request.distance_tolerance = 0.01
# request = fcl.CollisionRequest(enable_contact=True)
# request.enable_nearest_points = True
# result = fcl.CollisionResult()

# Populates `result`; the return value is unused below.
ret = fcl.distance(o1, o2, request, result)
# ret = fcl.collide(o1, o2, request, result)

# size = 50, 50
# yy, xx = torch.meshgrid(torch.linspace(-5, 5, size[0]), torch.linspace(-5, 5, size[1]))
# grid_points = torch.stack([xx, yy], axis=2).reshape((-1, 2))
# grid_labels = torch.zeros_like(grid_points)[:, 0]
# for i, (x, y) in enumerate(grid_points):
#     print(x, y)
#     o2.setTranslation([x, y, 0])
#     # fcl.update()
#     # result.clear()
#     result = fcl.CollisionResult()
#     ret = fcl.collide(o1, o2, request, result)
#     grid_labels[i] = result.is_collision
#     print(result.is_collision)
# print(grid_labels)
# import matplotlib.pyplot as plt
# plt.scatter(grid_points[grid_labels==False, 0], grid_points[grid_labels==False, 1], c='g')
# plt.scatter(grid_points[grid_labels==True, 0], grid_points[grid_labels==True, 1], c='r')
# plt.show()

print(result.nearest_points) #, result.contacts[0].penetration_depth)
print(result.min_distance) #, result.contacts[0].penetration_depth)
--------------------------------------------------------------------------------
/misc/fix_exp.py:
--------------------------------------------------------------------------------
# One-off repair script for experiment result JSON files: trims duplicated
# 'repair_*' entries in fclgradfree records down to the expected 10 per env.
from abc import abstractclassmethod  # NOTE(review): unused (and deprecated); safe to drop later
import json
from os.path import join, basename
import numpy as np


# !!Use after BACKUP the files being written!!
# loadexps = ['results/2d_3dof_exp1']
saveexps = ['results/2d_7dof_exp2']
methods = ['fclgradfree']

## ========== Recover missing exp result from previous result files. =============
# for saveexp, loadexp in zip(saveexps, loadexps):
#     from glob import glob
#     envs = sorted(glob(join(saveexp, '2d*.json'),))
#     for env in envs:
#         with open(join(loadexp, basename(env)), 'r') as loadfp:
#             loadrec = json.load(loadfp)
#         with open(join(saveexp, basename(env)), 'r') as savefp:
#             saverec = json.load(savefp)
#         for m in methods:
#             assert m in loadrec
#             saverec[m] = loadrec[m]
#         with open(join(saveexp, basename(env)), 'w') as savefp:
#             json.dump(saverec, savefp, indent=4)
#         print('Written: {}'.format(env))

#     break # DEBUG
# =================================================================================

## =========== Remove wrong number of repair results of FCLGRADFREE =================
# 1.verify it is all zero
for saveexp in saveexps:
    from glob import glob
    envs = sorted(glob(join(saveexp, '2d*.json'),))
    for env in envs:
        with open(join(saveexp, basename(env)), 'r') as savefp:
            saverec = json.load(savefp)
        for m in methods:
            assert m in saverec
            l = len(saverec[m]['repair_cnt_check'])
            print(env, l)
            # Records with fewer than 10 entries are left untouched.
            if l < 10: continue
            # Pick which 10 of the recorded entries are the "real" ones.
            # NOTE(review): the 12- and 20-entry cases presumably correspond to
            # specific duplicate-append bugs observed in the data - confirm
            # against the original experiment logs before reusing.
            if l == 12:
                indices = [-12, -10] + list(range(-8, 0))
            elif l == 20:
                indices = list(range(0, 20, 2))
            else:
                indices = list(range(10))
            # For fclgradfree no repair ever happened, so each kept entry must
            # be the trivial value (0 count/time, same success/cost/solution as
            # the un-repaired run); assert that, then compact into slots 0..9.
            for i, idx in enumerate(indices):
                assert saverec[m]['repair_cnt_check'][idx] == 0
                saverec[m]['repair_cnt_check'][i] = 0
                assert saverec[m]['repair_success'][idx] == saverec[m]['success'][i]
                saverec[m]['repair_success'][i] = saverec[m]['success'][i]
                assert saverec[m]['repair_time'][idx] == 0
                saverec[m]['repair_time'][i] = 0
                assert saverec[m]['repair_cost'][idx] == saverec[m]['cost'][i]
                saverec[m]['repair_cost'][i] = saverec[m]['cost'][i]
                assert np.all(np.array(saverec[m]['repair_solution'][idx]) == np.array(saverec[m]['solution'][i]))
                saverec[m]['repair_solution'][i] = saverec[m]['solution'][i]
            # Truncate every repair list to exactly the 10 compacted entries.
            saverec[m]['repair_cnt_check'] = saverec[m]['repair_cnt_check'][:10]
            assert len(saverec[m]['repair_cnt_check']) == 10
            saverec[m]['repair_success'] = saverec[m]['repair_success'][:10]
            assert len(saverec[m]['repair_success']) == 10
            saverec[m]['repair_time'] = saverec[m]['repair_time'][:10]
            assert len(saverec[m]['repair_time']) == 10
            saverec[m]['repair_cost'] = saverec[m]['repair_cost'][:10]
            assert len(saverec[m]['repair_cost']) == 10
            saverec[m]['repair_solution'] = saverec[m]['repair_solution'][:10]
            assert len(saverec[m]['repair_solution']) == 10
        # Write the cleaned record back in place (hence the backup warning above).
        with open(join(saveexp, basename(env)), 'w') as savefp:
            json.dump(saverec, savefp, indent=4)
        print('Written: {}'.format(env))

    # break # DEBUG
--------------------------------------------------------------------------------
/misc/module_test.py:
--------------------------------------------------------------------------------
# Import smoke test: checks that diffco is importable from the environment.
import sys
# sys.path.append('/home/yuheng/DiffCo/')

if __name__ == "__main__":
    # NOTE(review): DiffCo.py now lives under diffco.deprecated in this tree -
    # this script presumably predates that move; confirm the import path.
    from diffco.DiffCo import main
    main()
--------------------------------------------------------------------------------
/misc/profile_reading.py:
--------------------------------------------------------------------------------
"""Print the 10 entries with the largest cumulative time from the cProfile
dump file 'restats' in the current directory."""
import pstats
from pstats import SortKey

stats = pstats.Stats('restats')
stats.sort_stats(SortKey.CUMULATIVE).print_stats(10)
--------------------------------------------------------------------------------
/misc/rbf_regression.py:
--------------------------------------------------------------------------------
1 | import sys
2 | # sys.path.append('/home/yuheng/DiffCo/')
3 | from diffco import DiffCo, Obstacle, CollisionChecker
4 | from diffco import kernel
5 | from matplotlib import pyplot as plt
6 | import numpy as np
7 | import torch
8 | from scipy import ndimage
9 | from scipy.interpolate import Rbf
10 |
def plot_score(checker, score, n, i, **kwargs):
    """Render one subplot (panel i of n) of a collision-score landscape.

    Draws the score heatmap with its zero-level contour, support points,
    a downsampled gradient quiver field, the obstacles, and - for the first
    panel only - gradient-descent escape paths started from random
    in-collision configurations.

    NOTE(review): relies on module-level globals `xx`, `yy`, `fig` defined in
    the __main__ block, and (for i == 1) on kwargs['regressor'] being a fitted
    scipy Rbf - this function is only meant to be called from this script.
    """
    ax = plt.subplot(1, n, i)
    # Symmetric color limits so zero (the decision boundary) maps to white.
    c = ax.pcolormesh(xx, yy, score, cmap='RdBu_r', vmin=-torch.abs(score).max(), vmax=torch.abs(score).max())
    ax.scatter(checker.support_points[:, 0], checker.support_points[:, 1], marker='.', c='black')
    ax.contour(xx, yy, score, levels=0)
    ax.axis('equal')
    fig.colorbar(c, ax=ax)
    # Descent direction field (negative Sobel gradient), normalized and drawn
    # on a 10x-downsampled grid.
    sparse_score = score[::10, ::10]
    score_grad_x = -ndimage.sobel(sparse_score.numpy(), axis=1)
    score_grad_y = -ndimage.sobel(sparse_score.numpy(), axis=0)
    score_grad = np.stack([score_grad_x, score_grad_y], axis=2)
    score_grad /= np.linalg.norm(score_grad, axis=2, keepdims=True)
    score_grad_x, score_grad_y = score_grad[:, :, 0], score_grad[:, :, 1]
    ax.quiver(xx[::10, ::10], yy[::10, ::10], score_grad_x, score_grad_y, color='red', width=2e-3, headwidth=2, headlength=5)


    if i == 1:
        # From random starts that are in collision, run 10 SGD steps downhill
        # on the RBF-regressed score and plot each resulting path.
        inits = torch.rand((300, 2)) * 10
        for init in inits:
            cfg = init.clone().detach().requires_grad_(True)
            if not checker.is_collision(cfg.data.numpy()):
                continue
            opt = torch.optim.SGD([cfg], lr=1e-2)
            print(opt.param_groups)
            path = [cfg.data.numpy().copy()]
            grads = []
            k = kernel.MultiQuadratic(0.7)
            gains = torch.tensor(kwargs['regressor'].nodes, dtype=torch.float32)
            for it in range(10):
                opt.zero_grad()
                # Score = RBF weights dot kernel values at the support points.
                score = gains@k(cfg, checker.support_points)
                score.backward()
                opt.step()
                print(cfg.data)
                path.append(cfg.data.numpy().copy())
                grads.append(cfg.grad.numpy().copy())
            path = np.array(path)
            print(path)
            print(np.array(grads))
            ax.plot(path[:, 0], path[:, 1], marker='.', markerfacecolor='black')

    # Overlay the workspace obstacles as translucent shapes.
    for obs in checker.obstacles:
        if obs.kind == 'circle':
            artist = plt.Circle(obs.position, radius=obs.size/2, color=[0, 0, 0, 0.3])
        elif obs.kind == 'rect':
            artist = plt.Rectangle(obs.position-obs.size/2, obs.size[0], obs.size[1], color=[0, 0, 0, 0.3])
        else:
            raise NotImplementedError('Unknown obstacle type')
        ax.add_artist(artist)
60 |
if __name__ == "__main__":
    # Workspace obstacles as (kind, position, size) tuples.
    # obstacles = [
    #     ('circle', (6, 2), 1.5),
    #     ('rect', (2, 6), (1.5, 1.5))]
    obstacles = [
        ('circle', (6, 2), 2),
        # ('circle', (2, 7), 1),
        ('rect', (3.5, 6), (2, 1)),
        ('rect', (4, 7), 1),
        ('rect', (5, 8), (10, 1)),
        ('rect', (7.5, 6), (2, 1)),
        ('rect', (8, 7), 1),]
    obstacles = [Obstacle(*param) for param in obstacles]
    # kernel = kernel.CauchyKernel(100)
    # k = kernel.TangentKernel(0.8, 0)
    k = kernel.RQKernel(5)
    checker = DiffCo(obstacles, kernel_func=k, beta=20)
    gt_checker = CollisionChecker(obstacles)  # geometric ground truth

    np.random.seed(1917)
    torch.random.manual_seed(1917)

    # Train DiffCo on 3000 random 2D configurations labeled by the ground
    # truth checker; labels are mapped from {0,1} to {-1,+1}.
    # checker.initialize(3000)
    # checker.train(200000, method='original')
    cfgs = torch.rand((3000, 2)) * 10
    labels = torch.tensor(gt_checker.predict(cfgs) * 2 - 1)
    assert labels.max() == 1 and labels.min() == -1
    checker.train(cfgs, labels, method='original')

    # Fit a scipy RBF interpolant to DiffCo's hypothesis at the support points.
    rbfi = Rbf(checker.support_points[:, 0].numpy(), checker.support_points[:, 1].numpy(), checker.hypothesis.numpy(), epsilon=0.7)#, epsilon=10) #checker.y) #, checker.hypothesis)
    # print(checker.support_points[:, 0], checker.support_points[:, 1], checker.hypothesis)
    # print(rbfi.__dict__)
    fig, ax = plt.subplots(figsize=(10, 7))
    # Dense evaluation grid over the 10x10 workspace; `xx`, `yy` are read as
    # globals by plot_score above.
    size = [400, 400]
    yy, xx = torch.meshgrid(torch.linspace(0, 10, size[0]), torch.linspace(0, 10, size[1]))
    grid_points = torch.stack([xx, yy], axis=2).reshape((-1, 2))

    # Evaluate both the smooth RBF surrogate and the raw DiffCo score on the grid.
    score_spline = rbfi(grid_points[:, 0].numpy(), grid_points[:, 1].numpy()).reshape(size)
    score_spline = torch.tensor(score_spline)
    score_DiffCo = checker.score(grid_points).reshape(size)
    sigma = 0.001
    # Combine: keep the RBF surface's shape but force its sign to agree with
    # DiffCo's classification (shift up where DiffCo says collision, down
    # where it says free).
    # comb_score = score_spline
    # comb_score = (1-np.exp(-(score_DiffCo/sigma)**2))*(score_spline) #-score_spline.min()
    # comb_score = np.sign(score_DiffCo) * (score_spline-score_spline.min())
    comb_score = (torch.sign(score_DiffCo)+1)/2*(score_spline-score_spline.min()) + (-torch.sign(score_DiffCo)+1)/2*(score_spline-score_spline.max())

    # ax1 = plt.subplot(121)
    # c = ax1.pcolormesh(xx, yy, comb_score, cmap='RdBu_r', vmin=-np.abs(score_spline).max(), vmax=np.abs(score_spline).max())
    # ax1.scatter(checker.support_points[:, 0], checker.support_points[:, 1], marker='.', c='black')
    # ax1.contour(xx, yy, (score_spline).astype(float), levels=0)
    # ax1.axis('equal')
    # fig.colorbar(c, ax=ax1)
    # sparse_score = score_spline[::10, ::10]
    # score_grad_x = -ndimage.sobel(sparse_score, axis=1)
    # score_grad_y = -ndimage.sobel(sparse_score, axis=0)
    # score_grad = np.stack([score_grad_x, score_grad_y], axis=2)
    # score_grad /= np.linalg.norm(score_grad, axis=2, keepdims=True)
    # score_grad_x, score_grad_y = score_grad[:, :, 0], score_grad[:, :, 1]
    # ax1.quiver(xx[::10, ::10], yy[::10, ::10], score_grad_x, score_grad_y, scale=40, color='red')
    # ax1.set_title('{}'.format(rbfi.epsilon))
    plot_score(checker, comb_score, 1, 1, regressor=rbfi)

    # ax2 = plt.subplot(122)
    # c = ax2.pcolormesh(xx, yy, score_DiffCo, cmap='RdBu_r', vmin=-np.abs(score_DiffCo).max(), vmax=np.abs(score_DiffCo).max())
    # ax2.scatter(checker.support_points[:, 0], checker.support_points[:, 1], marker='.', c='black')
    # ax2.contour(xx, yy, (score_DiffCo).astype(float), levels=0)
    # ax2.axis('equal')
    # fig.colorbar(c, ax=ax2)
    # sparse_score = score_DiffCo[::10, ::10]
    # score_grad_x = -ndimage.sobel(sparse_score, axis=1)
    # score_grad_y = -ndimage.sobel(sparse_score, axis=0)
    # score_grad = np.stack([score_grad_x, score_grad_y], axis=2)
    # score_grad /= np.linalg.norm(score_grad, axis=2, keepdims=True)
    # score_grad_x, score_grad_y = score_grad[:, :, 0], score_grad[:, :, 1]
    # ax2.quiver(xx[::10, ::10], yy[::10, ::10], score_grad_x, score_grad_y, scale=40, color='red')
    # ax2.set_title('{}'.format(rbfi.epsilon))
    # plot_score(checker, score_DiffCo, 3, 2)
    # plot_score(checker, score_spline, 3, 3)
    plt.show()
--------------------------------------------------------------------------------
/misc/recover_data.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | from diffco import DiffCo
3 | import diffco
4 | import torch
5 |
6 | # import rospy
7 | import moveit_msgs
8 | # import moveit_commander
9 | # import geometry_msgs.msg
10 | # from moveit_msgs.srv import GetStateValidityRequest, GetStateValidity
11 | # from moveit_msgs.msg import RobotState
12 | # from tqdm import tqdm
13 |
class RenamingUnpickler(pickle.Unpickler):
    """Unpickler that maps legacy 'Fastronpp' module paths to 'diffco'.

    Lets pickles written before the package rename be loaded against the
    current package layout; all other module paths resolve normally.
    """
    def find_class(self, module, name):
        if 'Fastronpp' not in module:
            return super().find_class(module, name)
        print('replaced!')
        return super().find_class(module.replace('Fastronpp', 'diffco'), name)
20 |
if __name__ == "__main__":
    # Monkey-patch the module-level Unpickler so that torch.load resolves
    # legacy 'Fastronpp' module paths through RenamingUnpickler.
    pickle.Unpickler = RenamingUnpickler

    from os import walk
    from os.path import join

    # Re-save every .pt file under recovered_data/ so the rewritten module
    # paths are baked into the files.
    for root, _, files in walk('recovered_data'):
        for fn in sorted(files):
            if '.pt' not in fn:
                continue
            path = join(root, fn)
            print(root, fn)
            torch.save(torch.load(path), path)
            print(root, fn)
--------------------------------------------------------------------------------
/misc/robopy-test.py:
--------------------------------------------------------------------------------
1 | from robopy import *
2 | import robopy.base.model as model
3 |
4 |
5 | # x = UnitQuaternion.Rx(10, 'deg')
6 | # y = UnitQuaternion.Ry(120, 'deg')
7 | # x.animate(y, duration=15)
8 |
class link2(SerialLink):
    """Two-revolute-joint arm built on robopy's SerialLink scaffolding."""

    def __init__(self):
        joint_specs = [
            Revolute(d=0, a=0, alpha=pi / 2, j=0, theta=0, offset=0,
                     qlim=(-160 * pi / 180, 160 * pi / 180)),
            Revolute(d=0, a=0.4318, alpha=0, j=0, theta=0, offset=0,
                     qlim=(-45 * pi / 180, 225 * pi / 180)),
        ]
        palette = graphics.vtk_named_colors(
            ["Red", "DarkGreen", "Blue", "Cyan", "Magenta", "Yellow", "White"])
        # Bounds of the rendered scene and floor placement for the VTK view.
        scene_param = {
            "cube_axes_x_bounds": np.matrix([[-1.5, 1.5]]),
            "cube_axes_y_bounds": np.matrix([[-0.7, 1.5]]),
            "cube_axes_z_bounds": np.matrix([[-1.5, 1.5]]),
            "floor_position": np.matrix([[0, -0.7, 0]]),
        }
        super().__init__(links=joint_specs, colors=palette, name='puma_560',
                         stl_files=SerialLink._setup_file_names(7),
                         param=scene_param)
22 |
# Sweep joint 1 from 1 to -180 (deg) and joint 2 from 1 to 180 (deg)
# across 500 stances, then animate the resulting trajectory.
q = np.stack((np.linspace(1, -180, 500), np.linspace(1, 180, 500)), axis=1)
robot = link2()
robot.animate(stances=q, frame_rate=30, unit='deg')
# print(robot.fkine(q[0]))
--------------------------------------------------------------------------------
/misc/small_test.py:
--------------------------------------------------------------------------------
import numpy as np
import fcl
from scipy.spatial.transform import Rotation
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
import matplotlib.patheffects as path_effects
from matplotlib.cm import get_cmap
import seaborn as sns
sns.set()

# Sanity-check script: query the minimum distance between a "robot" box (box1)
# and two obstacle boxes (box2, box3) via FCL, verify the reported distance
# matches the returned nearest points, and visualize everything in 2D.
# Each box is (width, height) extents plus a center translation; the large
# z-extent `h` extrudes the 2D rectangles into 3D boxes for FCL.
box1, box1_t = (20., 5.), (-0.0, 0.0)
box2, box2_t = (5., 5.), (-5., 10.)
box3, box3_t = (5., 5.), (-15., 15.)
theta1, theta2 = np.pi/4, 0
theta3 = 0


h = 1000.
# Bottom-left corners in each box's local frame (before rotation/translation).
# Fix: box3's y offset previously used box3[0] (width) twice instead of
# box3[1] (height) -- harmless for the current square box, wrong in general.
box1_bottom_left, box2_bottom_left, box3_bottom_left = (
    (-box1[0]/2, -box1[1]/2), (-box2[0]/2, -box2[1]/2), (-box3[0]/2, -box3[1]/2))
g1 = fcl.Box(*box1, h)
# FCL's Transform expects quaternions in (w, x, y, z) order; scipy's as_quat()
# returns (x, y, z, w), hence the [[3, 0, 1, 2]] reindexing.
t1 = fcl.Transform(Rotation.from_rotvec([0., 0., theta1]).as_quat()[[3, 0, 1, 2]], [*box1_t, 0])
o1 = fcl.CollisionObject(g1, t1)
g2 = fcl.Box(*box2, h)
t2 = fcl.Transform(Rotation.from_rotvec([0., 0., theta2]).as_quat()[[3, 0, 1, 2]], [*box2_t, 0])
o2 = fcl.CollisionObject(g2, t2)
g3 = fcl.Box(*box3, h)
t3 = fcl.Transform(Rotation.from_rotvec([0., 0., theta3]).as_quat()[[3, 0, 1, 2]], [*box3_t, 0])
o3 = fcl.CollisionObject(g3, t3)

robot_manager = fcl.DynamicAABBTreeCollisionManager()
robot_manager.registerObjects([o1])
robot_manager.setup()
obs_manager = fcl.DynamicAABBTreeCollisionManager()
obs_manager.registerObjects([o2, o3])
# obs_manager.registerObjects([o3, o2])

obs_manager.setup()
ddata = fcl.DistanceData(request=fcl.DistanceRequest(enable_nearest_points=True, gjk_solver_type=fcl.GJKSolverType.GST_LIBCCD))
robot_manager.distance(obs_manager, ddata, fcl.defaultDistanceCallback)
# Nearest point on the robot box and on the closest obstacle (drop z).
p_o1 = ddata.result.nearest_points[0][:2]
p_o2 = ddata.result.nearest_points[1][:2]
# The reported min distance must equal the gap between the nearest points.
assert np.isclose(ddata.result.min_distance, np.linalg.norm(p_o1-p_o2)), \
    (ddata.result.min_distance, np.linalg.norm(p_o1-p_o2))

## below are just visualization
# World-frame bottom-left corners: rotate the local corner about the box
# center, then translate. Rectangle(xy=corner, angle=theta) then rotates the
# patch about that corner, which reproduces the rotated box exactly.
box1_bottom_left = (Rotation.from_rotvec([0., 0., theta1]).as_matrix() @ np.array([*box1_bottom_left, 0]))[:2]+box1_t
box2_bottom_left = (Rotation.from_rotvec([0., 0., theta2]).as_matrix() @ np.array([*box2_bottom_left, 0]))[:2]+box2_t
box3_bottom_left = (Rotation.from_rotvec([0., 0., theta3]).as_matrix() @ np.array([*box3_bottom_left, 0]))[:2]+box3_t

fig, ax = plt.subplots(1, 1)
ax.set_xlim(-30, 30)
ax.set_ylim(-30, 30)
ax.set_aspect('equal', adjustable='box')

cmaps = [get_cmap('Reds'), get_cmap('Blues')]
with sns.axes_style('ticks'):
    ax.add_patch(Rectangle(tuple(box1_bottom_left), *box1, angle=np.degrees(theta1), path_effects=[path_effects.withSimplePatchShadow()], color=cmaps[1](0.5), alpha=0.5))
    ax.add_patch(Rectangle(tuple(box2_bottom_left), *box2, angle=np.degrees(theta2), path_effects=[path_effects.withSimplePatchShadow()], color=cmaps[0](0.5), alpha=0.5))
    ax.add_patch(Rectangle(tuple(box3_bottom_left), *box3, angle=np.degrees(theta3), path_effects=[path_effects.withSimplePatchShadow()], color=cmaps[0](0.5), alpha=0.5))
    ax.plot(*p_o1, 'o', color=cmaps[1](0.5), markersize=4)
    ax.plot(*p_o2, 'o', color=cmaps[0](0.5), markersize=4)
plt.show(block=True)
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy
2 | torch
3 | pillow
4 | matplotlib
5 | scikit-learn
6 | tqdm
7 | python-fcl
8 | seaborn
9 | pyyaml
10 | trimesh
11 | yourdfpy
12 | pyglet<2
13 | websockets<14
14 | jupyter[lab]
15 | roboticstoolbox-python[nb,collision]
--------------------------------------------------------------------------------
/scripts/2d_data_generation.py:
--------------------------------------------------------------------------------
1 | import argparse
2 |
3 | import numpy as np
4 | import torch
5 | from diffco.model import RevolutePlanarRobot
6 |
7 | from generate_batch_data_2d import generate_data_planar_manipulators
8 |
# Predefined 2D obstacle sets keyed by environment name.
# Each obstacle is a tuple: ('circle', center, radius) or
# ('rect', center, (width, height)), optionally followed by a trailing
# integer class/instance label (see e.g. '2class_1', '2instance_big').
# '7d_narrow' and '3d_halfnarrow' start empty and are populated at import
# time by the setup functions defined below.
predefined_obstacles = {
    '2circle': [
        ('circle', (3, 2), 2),
        ('circle', (-2, 3), 0.5),
    ],
    '1rect_1circle': [
        ('rect', (4, 3), (2, 2)),
        ('circle', (-4, -3), 1)],
    '2rect': [
        ('rect', (4, 3), (2, 2)),
        ('rect', (-4, -3), (2, 2)),
    ],
    '1rect': [
        ('rect', (3, 2), (2, 2)),
    ],
    '3circle': [
        ('circle', (0, 4.5), 1),
        ('circle', (-2, -3), 2),
        ('circle', (-2, 2), 1.5),
    ],
    '1rect_1circle_7d': [
        ('circle', (-2, 3), 1),
        ('rect', (3, 2), (2, 2)),
    ],
    '2class_1': [
        ('rect', (5, 0), (2, 2), 0),
        ('circle', (-3, 6), 1, 1),
        ('rect', (-5, 2), (2, 1.5), 1),
        ('circle', (-5, -2), 1.5, 1),
        ('circle', (-3, -6), 1, 1),
    ],
    '2class_2': [
        ('rect', (0, 3), (16, 0.5), 1),
        ('rect', (0, -3), (16, 0.5), 0),
    ],
    '1rect_active': [
        ('rect', (-7, 3), (2, 2)),
    ],
    '3circle_7d': [
        ('circle', (-2, 2), 1),
        ('circle', (-3, 3), 1),
        ('circle', (-6, -3), 1),
    ],
    '2instance_big': [
        ('rect', (5, 4), (4, 4), 0),
        ('circle', (-5, -4), 2, 1),
    ],
    '7d_narrow': [],
    '3d_halfnarrow': [],
}
59 |
def setup_7d_narrow() -> None:
    """Generate obstacles for the 7d_narrow environment.

    Scatters 150 unit squares in the upper band (y in [1, 8]) and another
    150 in the lower band (y in [-8, -1]), leaving a narrow corridor near
    y = 0. Mutates ``predefined_obstacles['7d_narrow']`` in place and draws
    from the global numpy RNG, so results depend on the caller's seed.
    """
    bands = (
        (np.array([-8, 1.0], dtype=float), np.array([8, 8], dtype=float)),
        (np.array([-8, -8], dtype=float), np.array([8, -1.0], dtype=float)),
    )
    # Same RNG call sequence as before: 150 draws in the upper band, then
    # 150 in the lower band.
    for lb, ub in bands:
        for _ in range(150):
            pos = (np.random.rand(2,) * (ub - lb) + lb).tolist()
            predefined_obstacles['7d_narrow'].append(('rect', pos, (1, 1)))
77 |
def setup_3d_halfnarrow() -> None:
    """Generate obstacles for the 3d_halfnarrow environment.

    Scatters 150 unit squares in the upper band (y in [1, 8]) only, leaving
    the lower half free. Mutates ``predefined_obstacles['3d_halfnarrow']``
    in place and draws from the global numpy RNG.
    """
    lb = np.array([-8, 1.0], dtype=float)
    ub = np.array([8, 8], dtype=float)
    for _ in range(150):
        pos = (np.random.rand(2,) * (ub - lb) + lb).tolist()
        predefined_obstacles['3d_halfnarrow'].append(('rect', pos, (1, 1)))
87 |
88 | setup_7d_narrow()
89 | setup_3d_halfnarrow()
90 |
def main(
        env_name: str = '3d_halfnarrow',
        folder: str = 'data/landscape',
        label_type: str = 'binary',
        num_class: int = 2,
        dof: int = 3,
        num_init_points: int = 8000,
        width: float = 0.3,
        link_length: float = 1.0,
        generate_random_cfgs: bool = True,
        random_seed: int = 2021) -> None:
    """Build and save a 2D collision dataset for a planar manipulator.

    Seeds both torch and numpy RNGs, constructs a revolute planar robot,
    looks up the requested obstacle set, and delegates dataset generation
    (with visualization enabled) to generate_data_planar_manipulators().

    Args:
        env_name (str): Key into predefined_obstacles selecting the
            environment. Defaults to '3d_halfnarrow'.
        folder (str): Output directory for the dataset. Defaults to
            'data/landscape'.
        label_type (str): One of 'binary', 'class', or 'instance'.
            Defaults to 'binary'.
        num_class (int): Number of classes when label_type is 'class'.
            Defaults to 2.
        dof (int): Robot degrees of freedom. Defaults to 3.
        num_init_points (int): Number of sampled configurations.
            Defaults to 8000.
        width (float): Width of each robot link. Defaults to 0.3.
        link_length (float): Length of each robot link. Defaults to 1.0.
        generate_random_cfgs (bool): Set to False when doing a compare
            test with FCL on a fixed configuration set. Defaults to True.
        random_seed (int): Seed for reproducibility. Defaults to 2021.
    """
    torch.manual_seed(random_seed)
    np.random.seed(random_seed)
    planar_robot = RevolutePlanarRobot(link_length, width, dof)
    generate_data_planar_manipulators(
        planar_robot,
        folder,
        predefined_obstacles[env_name],
        label_type=label_type,
        num_class=num_class,
        num_points=num_init_points,
        env_id=env_name,
        vis=True,
        generate_random_cfgs=generate_random_cfgs,
    )
132 |
133 |
if __name__ == "__main__":
    desc = '2D data generation'
    parser = argparse.ArgumentParser(description=desc)
    env_choices = predefined_obstacles.keys()
    parser.add_argument('--env', dest='env_name', help='2D environment', choices=env_choices, default='3d_halfnarrow')
    parser.add_argument('-o', '--output-dir', dest='folder', default='data/landscape')
    parser.add_argument('-l', '--label-type', choices=['instance', 'class', 'binary'], default='binary')
    parser.add_argument('--num-classes', dest='num_class', default=2, type=int)
    parser.add_argument('--dof', help='degrees of freedom', choices=[2, 3, 7], default=3, type=int)
    parser.add_argument('--num-init-points', type=int, default=8000)
    parser.add_argument('--width', help='link width', type=float, default=0.3)
    # NOTE(review): this CLI default (2.0) disagrees with main()'s keyword
    # default link_length=1.0 -- confirm which value is intended.
    parser.add_argument('--link-length', help='link length', type=float, default=2.0)
    parser.add_argument('--random-seed', type=int, default=2021)
    parser.add_argument('--no-random-cfgs', dest='generate_random_cfgs', action='store_false', default=True)
    args = parser.parse_args()
    # Argument dest names match main()'s keyword parameters exactly.
    main(**vars(args))
--------------------------------------------------------------------------------
/scripts/2d_trajopt.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | import sys
4 | import json
5 | from diffco import DiffCo, MultiDiffCo
6 | from diffco import kernel
7 | from matplotlib import pyplot as plt
8 | import numpy as np
9 | import torch
10 | import seaborn as sns
11 | sns.set_theme()
12 | from diffco import utils, CollisionChecker
13 | from trajectory_optim import adam_traj_optimize
14 | from diffco.routines import fit_checker, get_estimator, train_checker, train_test_split, unpack_dataset, test_checker, autogenerate_2d_dataset
15 | from visualize_trajectory_2d import animation_demo, create_plots, single_plot
16 |
17 |
def main(
        dataset_filepath: str = None,
        checker_type: CollisionChecker = MultiDiffCo,
        start_cfg: torch.Tensor = None,
        target_cfg: torch.Tensor = None,
        num_waypoints: int = 12,
        safety_margin: torch.Tensor = None,
        cache: bool = False,
        random_seed: int = 100,):
    """Main entry point for the script. Loads a dataset, trains/loads a checker,
    calculates an optimal path from start to target configuration, and creates
    a plot and video animation saved in the `figs/` directory.

    Args:
        dataset_filepath (str): Path to dataset. If not provided, an
            autogenerated dataset will be used.
        checker_type (CollisionChecker): The collision checker class. Should be
            either MultiDiffCo or DiffCo. Defaults to MultiDiffCo.
        start_cfg (Tensor): The starting configuration. If not provided, a
            random configuration from the set of collision-free configurations
            will be chosen.
        target_cfg (Tensor): The target configuration. If not provided, a
            random configuration from the set of collision-free configurations
            will be chosen.
        num_waypoints (int): The number of intermediate steps in the path from
            start to target configuration. Defaults to 12.
        safety_margin (Tensor): The safety bias to apply to the different
            classes of obstacles. Defaults to None.
        cache (bool): Flag for loading cached trajectory corresponding to the
            current dataset. Defaults to False.
        random_seed (int): Random seed used to reproduce the same results,
            useful for debugging. Defaults to 100.
    """
    if dataset_filepath is None:
        dataset_filepath = autogenerate_2d_dataset(3, 5, 'class', '2class_1', link_length=2,
                                                   random_seed=random_seed)
    robot, cfgs, labels, dists, obstacles = unpack_dataset(dataset_filepath)
    # Append each obstacle's index as its trailing class/instance id.
    obstacles = [obs+(i, ) for i, obs in enumerate(obstacles)]
    fkine = robot.fkine

    # Train on 75% of the data or 10,000 instances, whichever is smaller
    train_indices, test_indices = train_test_split(len(cfgs), min(int(0.75 * len(cfgs)), 10000))
    if labels.dim() > 1 and checker_type != MultiDiffCo:
        raise ValueError(f'If data is nonbinary you must use MultiDiffCo, not {checker_type}')
    description = os.path.splitext(os.path.basename(dataset_filepath))[0]  # Remove the .pt extension
    checker = train_checker(checker_type, cfgs[train_indices], labels[train_indices],
                            dists[train_indices], fkine, obstacles, description)
    test_checker(checker, checker.score, cfgs[test_indices], labels[test_indices])
    fit_checker(checker, fitting_epsilon=1, fitting_target='label', fkine=fkine)
    dist_est = get_estimator(checker, scoring_method='rbf_score')
    print('MIN_SCORE = {:.6f}'.format(dist_est(cfgs[test_indices]).min()))

    # Only 2-DOF robots additionally get configuration-space path plots.
    cfg_path_plots = []
    if robot.dof > 2:
        fig, ax, link_plot, joint_plot, eff_plot = create_plots(robot, obstacles, dist_est, checker)
    elif robot.dof == 2:
        fig, ax, link_plot, joint_plot, eff_plot, cfg_path_plots = create_plots(robot, obstacles, dist_est, checker)

    # Configurations labeled -1 for every class are collision-free; pick
    # start/target from those when not supplied.
    free_cfgs = cfgs[(labels == -1).all(axis=1)]
    indices = np.random.default_rng(random_seed).choice(len(free_cfgs), 2, replace=False)
    if start_cfg is None:
        start_cfg = free_cfgs[indices[0]]
    else:
        assert len(start_cfg) == free_cfgs.shape[-1]
    if target_cfg is None:
        target_cfg = free_cfgs[indices[1]]
    else:
        assert len(target_cfg) == free_cfgs.shape[-1]

    path_dir = 'results/safetybias'
    os.makedirs(path_dir, exist_ok=True)
    traj_optim_cached_filepath = os.path.join(path_dir, f'path_{description}.json')
    if cache and os.path.exists(traj_optim_cached_filepath):
        with open(traj_optim_cached_filepath, 'r') as f:
            optim_rec = json.load(f)
    else:
        if safety_margin is None:
            safety_margin = torch.zeros(labels.shape[-1])
        else:
            assert labels.shape[-1] == len(safety_margin)
        optim_options = {
            'N_WAYPOINTS': num_waypoints,
            'NUM_RE_TRIALS': 10,
            'MAXITER': 200,
            'safety_margin': safety_margin,
            'max_speed': 0.3,
            'seed': random_seed,
            'history': False
        }
        optim_rec = adam_traj_optimize(robot, dist_est, start_cfg, target_cfg, options=optim_options)
        with open(traj_optim_cached_filepath, 'w') as f:
            json.dump(optim_rec, f, indent=4)
        print('Plan recorded in {}'.format(f.name))

    single_plot(robot, torch.FloatTensor(optim_rec['solution']), fig, link_plot, joint_plot, eff_plot, cfg_path_plots=cfg_path_plots, ax=ax)
    plt.tight_layout()
    fig_dir = 'figs/safetybias'
    os.makedirs(fig_dir, exist_ok=True)
    plt.savefig(os.path.join(fig_dir, f'path_{description}.png'), dpi=500)
    animation_demo(robot, torch.FloatTensor(optim_rec['solution']), fig, link_plot, joint_plot,
                   eff_plot, cfg_path_plots, save_dir=os.path.join(fig_dir, f'path_{description}.mp4'))
119 |
120 |
if __name__ == "__main__":
    desc = 'Tool for generating optimized trajectories for 2D workspaces.'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('-d', '--dataset', dest='dataset_filepath', help='Dataset filepath')
    parser.add_argument('--checker', dest='checker_type', help='Collision checker class',
                        choices=['diffco', 'multidiffco'], default='multidiffco')
    parser.add_argument('--start-cfg', nargs='*', type=float, help='Start configuration')
    parser.add_argument('--target-cfg', nargs='*', type=float, help='Final configuration')
    parser.add_argument('--num-waypoints', type=int, default=12, help='Number of waypoints')
    parser.add_argument('--safety-margin', nargs='*', type=float, help='Safety margin')
    parser.add_argument('--cache', action='store_true', default=False)
    parser.add_argument('--random-seed', type=int, default=100)
    args = parser.parse_args()

    # Resolve the CLI choice to its checker class; the `choices` constraint
    # above guarantees the key exists.
    args.checker_type = {'diffco': DiffCo, 'multidiffco': MultiDiffCo}[args.checker_type]

    # Convert any provided list-of-floats arguments into tensors.
    for tensor_arg in ('start_cfg', 'target_cfg', 'safety_margin'):
        raw = getattr(args, tensor_arg)
        if raw:
            setattr(args, tensor_arg, torch.Tensor(raw))

    main(**vars(args))
148 |
--------------------------------------------------------------------------------
/scripts/3d_data_generation.py:
--------------------------------------------------------------------------------
1 | import sys
2 | # sys.path.append('/home/yuheng/DiffCo/')
3 | from diffco import DiffCo
4 | from diffco import kernel
5 | from matplotlib import pyplot as plt
6 | import numpy as np
7 | import torch
8 | from diffco.model import BaxterLeftArmFK, PandaFK
9 |
10 | import rospy
11 | import moveit_commander
12 | import geometry_msgs.msg
13 | from moveit_msgs.srv import GetStateValidityRequest, GetStateValidity
14 | from moveit_msgs.msg import RobotState
15 | from tqdm import tqdm
16 |
17 |
18 |
19 |
if __name__ == "__main__":
    # Connect to MoveIt: RobotCommander/PlanningSceneInterface talk to a
    # running move_group node; /check_state_validity performs the actual
    # collision checks for sampled states.
    moveit_commander.roscpp_initialize(sys.argv)
    rospy.init_node('DiffCoplusDataGenerator', anonymous=True)
    robot = moveit_commander.RobotCommander()
    scene = moveit_commander.PlanningSceneInterface()
    group_name = "panda_arm"
    move_group = moveit_commander.MoveGroupCommander(group_name)
    sv_srv = rospy.ServiceProxy('/check_state_validity', GetStateValidity)

    # ========================== Data generation =========================
    def wait_for_state_update(box_name, box_is_known=False, box_is_attached=False, timeout=4):
        """Poll the planning scene until `box_name` reaches the requested
        known/attached state; return True on success, False after `timeout`
        seconds (or on ROS shutdown)."""
        start = rospy.get_time()
        seconds = rospy.get_time()
        while (seconds - start < timeout) and not rospy.is_shutdown():
            # Test if the box is in attached objects
            attached_objects = scene.get_attached_objects([box_name])
            is_attached = len(attached_objects.keys()) > 0

            # Test if the box is in the scene.
            # Note that attaching the box will remove it from known_objects
            is_known = box_name in scene.get_known_object_names()

            # Test if we are in the expected state
            if (box_is_attached == is_attached) and (box_is_known == is_known):
                return True

            # Sleep so that we give other threads time on the processor
            rospy.sleep(0.1)
            seconds = rospy.get_time()

        # If we exited the while loop without returning then we timed out
        return False

    # Obstacles are expected to already be loaded into the planning scene
    # externally (e.g. via a .scene file); the entries below are kept for
    # reference and the spawning code is left commented out.
    obstacles = [
        # ('circle', (3, 2), 2),
        # ('circle', (-2, 3), 1),
        # ('rect', (-2, 3), (1, 1)),
        # ('rect', (1, 0, 0), (0.3, 0.3, 0.3)),
        # ('rect', (-1.7, 3), (2, 3)),
        # ('rect', (0, -1), (10, 1)),
        # ('rect', (8, 7), 1),
    ]
    box_names = []
    rospy.sleep(2)
    # for i, obs in enumerate(obstacles):
    #     box_name = 'box_{}'.format(i)
    #     box_names.append(box_name)
    #     box_pose = geometry_msgs.msg.PoseStamped()
    #     # box_pose.header.frame_id = "base"
    #     box_pose.header.frame_id = robot.get_planning_frame()
    #     # box_pose.pose.orientation.w = 1.0
    #     box_pose.pose.position.x = obs[1][0]
    #     box_pose.pose.position.y = obs[1][1]
    #     box_pose.pose.position.z = obs[1][2]
    #     scene.add_box(box_name, box_pose, size=obs[2])
    #     wait_for_state_update(box_name, box_is_known=True)


    env_name = 'bookshelvessmall'

    robot_name = 'panda'
    DOF = 7
    # NOTE: rebinds `robot` from the RobotCommander above to DiffCo's FK
    # model; only the joint limits are used from here on.
    robot = PandaFK()

    np.random.seed(1917)
    torch.random.manual_seed(1917)
    num_init_points = 8000
    # Uniform samples in [0,1)^DOF, rescaled to the robot's joint limits.
    cfgs = torch.rand((num_init_points, DOF), dtype=torch.float32)
    cfgs = cfgs * (robot.limits[:, 1]-robot.limits[:, 0]) + robot.limits[:, 0]
    labels = torch.zeros(num_init_points, dtype=torch.float)
    # dists = torch.zeros(num_init_points, dtype=torch.float)
    # req = fcl.CollisionRequest(num_max_contacts=100, enable_contact=True)

    rs = RobotState()
    # rs.joint_state.name = ['left_s0', 'left_s1', 'left_e0', 'left_e1', 'left_w0', 'left_w1', 'left_w2'] # Baxter
    rs.joint_state.name = [f'panda_joint{j}' for j in range(1, 8)] # panda
    gsvr = GetStateValidityRequest()
    gsvr.robot_state = rs
    gsvr.group_name = group_name
    # Label each sampled configuration via the MoveIt validity service:
    # +1 = in collision, -1 = free.
    for i, cfg in enumerate(tqdm(cfgs)):
        rs.joint_state.position = cfg
        result = sv_srv.call(gsvr)

        in_collision = not result.valid
        labels[i] = 1 if in_collision else -1
        # if in_collision:
        #     depths = torch.tensor([c.penetration_depth for c in rdata.result.contacts])
        #     dists[i] = depths.abs().max()
        # else:
        #     ddata = fcl.DistanceData()
        #     robot_manager.distance(obs_manager, ddata, fcl.defaultDistanceCallback)
        #     dists[i] = -ddata.result.min_distance
    print('{} collisons, {} free'.format(torch.sum(labels==1), torch.sum(labels==-1)))
    dataset = {'data': cfgs, 'label': labels, 'obs': obstacles, 'robot': robot.__class__}
    # NOTE(review): hard-coded absolute output path -- adjust per machine.
    torch.save(dataset, '/home/yuheng/DiffCo/data/3d_{}_{}.pt'.format(robot_name, env_name))
    # input()
    # for box_name in box_names:
    #     scene.remove_world_object(box_name)
    #     wait_for_state_update(box_name, box_is_attached=False, box_is_known=False)
/scripts/collision_landscape.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 | import json
4 | from time import time
5 | from tqdm import tqdm
6 |
7 | import numpy as np
8 | import torch
9 |
10 | from diffco import DiffCo, MultiDiffCo
11 | from diffco import kernel
12 | from diffco import utils
13 | from diffco import FCLChecker
14 |
15 | from matplotlib import pyplot as plt
16 | from matplotlib.patches import Rectangle, FancyBboxPatch, Circle
17 | # import seaborn as sns
18 | # sns.set()
19 | import matplotlib.patheffects as path_effects
20 |
21 | from escape import *
22 |
def create_plots(dist_list, cfgs=None, labels=None):
    """Plot one or two per-configuration score curves over joint-1 angle.

    The first curve is drawn in red on the left y-axis; an optional second
    curve is drawn in blue on a twin right y-axis. Each y-axis is set
    symmetrically around zero with 10% headroom.

    Args:
        dist_list: list of one or two 1-D torch tensors of scores.
        cfgs: x-axis values; defaults to an even grid over [-pi, pi].
        labels: y-axis labels, one per curve; defaults to no labels.
    """
    plt.rcParams.update({
        "text.usetex": True,
        "font.family": "sans-serif",
        "font.sans-serif": ["Helvetica"]})

    if labels is None:
        labels = [None] * len(dist_list)
    if cfgs is None:
        cfgs = torch.linspace(-np.pi, np.pi, len(dist_list[0]))
    fig, ax = plt.subplots(figsize=(8, 3))

    color = 'tab:red'
    ax.plot(cfgs, dist_list[0], color=color)
    ax.set_xlabel('Joint 1')
    ax.set_xlim(-np.pi, np.pi)
    ax.set_xticks([-np.pi, 0, np.pi])
    # Fix: raw strings for the TeX labels -- '\p' is an invalid escape
    # sequence in a regular string literal (DeprecationWarning today, a
    # SyntaxError in future Python). The rendered text is unchanged.
    ax.set_xticklabels([r'$-\pi$', r'$0$', r'$\pi$'], fontsize=10)
    ax.axhline(0, linestyle='--', color='gray', alpha=0.5)

    ax.set_ylabel(labels[0], color=color)
    ax.tick_params(axis='y', labelcolor=color)
    ax.set_ylim(-dist_list[0].abs().max()*1.1, dist_list[0].abs().max()*1.1)
    # ax.legend()
    if len(dist_list) > 1:
        ax = ax.twinx()
        color = 'tab:blue'
        ax.plot(cfgs, dist_list[1], label=labels[1], color=color)
        ax.set_ylabel(labels[1], color=color)
        ax.tick_params(axis='y', labelcolor=color)
        ax.set_ylim(-dist_list[1].abs().max()*1.1, dist_list[1].abs().max()*1.1)
        # ax.legend()
57 |
# Commented out lines include convenient code for debugging purposes
def main(datapath, test_num=360, key=None):
    """Train DiffCo on a saved 2D dataset, sweep joint 1 over [-pi, pi],
    and record/plot the DiffCo collision score alongside the FCL distance
    for the same configurations.

    Args:
        datapath: path to a torch-saved dataset dict with keys
            'data', 'label', 'dist', 'obs', 'robot', 'rparam'.
        test_num: number of sweep configurations for joint 1.
        key: optional suffix appended to the environment name in the
            output filenames.
    """
    env_name = os.path.splitext(os.path.basename(datapath))[0]
    if key is not None:
        env_name = f'{env_name}_{key}'
    res_dir = 'results/collision_landscape'
    os.makedirs(res_dir, exist_ok=True)
    resfilename = os.path.join(res_dir, ''f'landscape_{env_name}.json')

    # =========================================================
    dataset = torch.load(datapath)
    cfgs = dataset['data']
    labels = dataset['label']
    dists = dataset['dist']
    obstacles = dataset['obs']
    # Reconstruct the robot from its stored class and constructor params.
    robot = dataset['robot'](*dataset['rparam'])
    width = robot.link_width
    train_num = 6000
    fkine = robot.fkine

    # Forward-kinematics kernel: configurations are compared in workspace.
    checker = DiffCo(obstacles, kernel_func=kernel.FKKernel(fkine, kernel.RQKernel(10)), beta=1.0)
    # checker = MultiDiffCo(obstacles, kernel_func=kernel.FKKernel(fkine, kernel.RQKernel(10)), beta=1.0)
    checker.train(cfgs[:train_num], labels[:train_num], max_iteration=len(cfgs[:train_num]), distance=dists[:train_num])

    # Check DiffCo test ACC
    test_preds = (checker.score(cfgs[train_num:]) > 0) * 2 - 1
    test_acc = torch.sum(test_preds == labels[train_num:], dtype=torch.float32)/len(test_preds.view(-1))
    test_tpr = torch.sum(test_preds[labels[train_num:]==1] == 1, dtype=torch.float32) / len(test_preds[labels[train_num:]==1])
    test_tnr = torch.sum(test_preds[labels[train_num:]==-1] == -1, dtype=torch.float32) / len(test_preds[labels[train_num:]==-1])
    print('Test acc: {}, TPR {}, TNR {}'.format(test_acc, test_tpr, test_tnr))
    assert(test_acc > 0.9)

    # Fit a smooth polyharmonic surrogate to the trained checker.
    fitting_target = 'dist' # {label, dist, hypo}
    Epsilon = 1 #0.01
    checker.fit_poly(kernel_func=kernel.Polyharmonic(1, Epsilon), target=fitting_target)
    dist_est = checker.rbf_score
    min_score = dist_est(cfgs[train_num:]).min().item()
    print('MIN_SCORE = {:.6f}'.format(min_score))

    fcl_checker = FCLChecker(obstacles, robot, label_type='binary')


    # Sweep only joint 1; all other joints stay at zero.
    j1 = torch.linspace(-np.pi, np.pi, test_num)
    test_cfgs = torch.zeros(test_num, robot.dof)
    test_cfgs[:, 0] = j1
    diffco_dist = dist_est(test_cfgs)
    fcl_dist = fcl_checker(test_cfgs, distance=True)[1]

    with open(resfilename, 'w') as f:
        json.dump({
            'dof': robot.dof,
            'cfgs': test_cfgs.tolist(),
            'diffco_dist': diffco_dist.tolist(),
            'fcl_dist': fcl_dist.tolist()
        }, f, indent=1)
        print('Distance estimations recorded in {}'.format(f.name))
    # ====================================================================

    ## This is for loading previously computed trajectory
    with open(resfilename, 'r') as f:
        rec = json.load(f)
    diffco_dist = torch.FloatTensor(rec['diffco_dist'])
    fcl_dist = torch.FloatTensor(rec['fcl_dist'])
    cfgs = torch.FloatTensor(rec['cfgs'])
    dof = rec['dof']

    create_plots([fcl_dist, diffco_dist], cfgs=cfgs[:, 0], labels=['FCL Distance', 'DiffCo Collision Score'])
    # create_plots([fcl_dist], labels=['FCL'])


    # (Recommended) This produces a single shot of the valid cfgs
    # single_plot(robot, torch.FloatTensor(valid_cfg_rec['cfgs']), fig, link_plot, joint_plot, eff_plot, cfg_path_plots=cfg_path_plots, ax=ax)
    plt.tight_layout()
    fig_dir = 'figs/collision_landscape'
    os.makedirs(fig_dir, exist_ok=True)
    # plt.show()
    plt.savefig(os.path.join(fig_dir, '2d_{}dof_{}.pdf'.format(dof, env_name)))
135 |
136 |
137 |
138 |
139 |
140 |
if __name__ == "__main__":
    # Alternate invocations (other datasets / keys) kept for reference.
    main('data/landscape/2d_3dof_150obs_binary_3d_halfnarrow.pt') #'equalmargin'
    # main('data/compare_escape/2d_7dof_300obs_binary_7d_narrow.pt', 1000, key='resampling') #'equalmargin'
    # main('2class_1', 3, key='equalmargin')
--------------------------------------------------------------------------------
/scripts/escape.py:
--------------------------------------------------------------------------------
1 | import torch
2 | from diffco.model import Model
3 |
class OptimSampler():
    """Gradient-based escape sampler.

    Starting from an in-collision configuration, repeatedly steps a torch
    optimizer down a differentiable collision-score estimate until the
    score (offset by ``safety_margin``) becomes non-positive or the step
    budget is exhausted.
    """

    def __init__(self, robot: Model, dist_est, args=None):
        """
        Args:
            robot: robot model (stored for reference; not used directly).
            dist_est: callable mapping a configuration tensor to a collision
                score, where positive means (predicted) collision.
            args: optional dict of overrides:
                N_WAYPOINTS (int): max optimization steps. Default 20.
                safety_margin (float): stop once score <= margin. Default -0.3.
                lr (float): learning rate. Default 5e-2.
                record_freq (int): record every k-th configuration; falsy
                    disables intermediate recording. Default 1.
                post_transform: optional callable applied to the configuration
                    after each step (e.g. angle wrapping).
                opt_args (dict): optimizer kwargs. Default {'lr': lr}.
                optimizer: torch optimizer class. Default torch.optim.Adam.
        """
        # dict.get keeps the original "default unless provided" behavior
        # without the repetitive `x if 'x' in args else ...` pattern.
        args = {} if args is None else args
        self.robot = robot
        self.dist_est = dist_est
        self.N_WAYPOINTS = args.get('N_WAYPOINTS', 20)
        self.safety_margin = args.get('safety_margin', -0.3)
        self.lr = args.get('lr', 5e-2)
        self.record_freq = args.get('record_freq', 1)
        self.post_transform = args.get('post_transform', None)

        self.opt_args = args.get('opt_args', {'lr': self.lr})
        self.optimizer = args.get('optimizer', torch.optim.Adam)

    def optim_escape(self, start_cfg):
        """Optimize ``start_cfg`` out of (predicted) collision.

        Args:
            start_cfg: configuration tensor; cloned, never modified in place.

        Returns:
            (history, num_checks): stacked recorded configurations (always
            including at least the final one) and the number of collision
            score evaluations performed.
        """
        cfg_history = []
        p = start_cfg.clone().requires_grad_(True)
        opt = self.optimizer([p], **self.opt_args)

        step = -1  # fix: was a NameError when N_WAYPOINTS == 0; now checks = 0
        for step in range(self.N_WAYPOINTS):
            collision_score = torch.sum(self.dist_est(p) - self.safety_margin)
            if collision_score <= 0:
                break
            if self.record_freq and step % self.record_freq == 0:
                cfg_history.append(p.data.clone())
            opt.zero_grad()
            collision_score.backward()
            opt.step()
            if self.post_transform:
                p.data = self.post_transform(p.data)
        cfg_history.append(p.data.clone())
        return torch.stack(cfg_history, dim=0), step + 1  # cfgs, num of diffco checks done
39 |
def resampling_escape(robot: Model, *args, **kwargs):
    """Escape by uniform resampling: draw one random configuration inside
    the robot's joint limits (any extra arguments are ignored)."""
    lo, hi = robot.limits[:, 0], robot.limits[:, 1]
    sample = torch.rand(1, robot.dof)
    return sample * (hi - lo) + lo
--------------------------------------------------------------------------------
/scripts/generate_batch_data_se2.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import matplotlib as mpl
4 | import numpy as np
5 | import torch
6 | from diffco import utils
7 | from diffco.model import RigidPlanarBody
8 | from matplotlib import pyplot as plt
9 | from matplotlib.patches import Circle, Rectangle
10 | from numpy.random import rand, randint
11 |
12 | from generate_batch_data_2d import build_dataset
13 |
14 |
def create_plots(robot, obstacles, cfg=None, label=None):
    """Draw the 2D workspace: obstacles plus (sampled or given) robot poses.

    Args:
        robot: a RigidPlanarBody-like model with ``fkine`` and ``parts``.
        obstacles: iterable of ('circle', center, radius[, category]) or
            ('rect', center, (w, h)[, category]) tuples.
        cfg: optional (N, 3) tensor of SE(2) configurations (x, y, theta);
            when None, one random in-limits configuration is sampled.
        label: optional per-configuration labels; negative means
            collision-free (drawn grey), otherwise red.
    """
    from matplotlib.cm import get_cmap
    cmaps = [get_cmap('Reds'), get_cmap('Blues')]
    plt.rcParams.update({
        "text.usetex": True,
        "font.family": "sans-serif",
        "font.sans-serif": ["Helvetica"]})

    fig = plt.figure(figsize=(6, 6))
    ax = fig.add_subplot(111) #, projection='3d'

    # Plot obstacles, colored by their (optional) category.
    ax.set_xlim(-16, 16)
    ax.set_ylim(-16, 16)
    ax.set_aspect('equal', adjustable='box')
    for obs in obstacles:
        cat = obs[3] if len(obs) >= 4 else 1
        if obs[0] == 'circle':
            ax.add_patch(Circle(obs[1], obs[2], color=cmaps[cat](0.5)))
        elif obs[0] == 'rect':
            ax.add_patch(Rectangle((obs[1][0]-float(obs[2][0])/2, obs[1][1]-float(obs[2][1])/2), obs[2][0], obs[2][1],
            color=cmaps[cat](0.5)))

    # Plot robot poses.
    if cfg is None:
        cfg = torch.rand(1, robot.dof, dtype=torch.float32)
        cfg = cfg * (robot.limits[:, 1]-robot.limits[:, 0]) + robot.limits[:, 0]
        label = torch.zeros(len(cfg))
    elif label is None:
        # BUGFIX: a user-supplied cfg without labels used to crash on
        # label.reshape below; default to "free" labels instead.
        label = torch.zeros(len(cfg))
    cfg = cfg.reshape(-1, 3)
    label = label.reshape(-1, 1)
    for q, l in zip(cfg, label):
        points = robot.fkine(q)[0]
        for p, trans in zip(robot.parts, points):
            # Color robot parts by collision label (grey = free, red = colliding).
            part_color = 'grey' if l < 0 else 'red'
            if p[0] == 'circle':
                # BUGFIX: this branch previously reused the leaked
                # obstacle-loop variable `cat` (NameError when there are no
                # obstacles, and inconsistent with the rect branch below).
                ax.add_patch(Circle(trans, p[2], color=part_color))
            elif p[0] == 'rect':
                lower_left = torch.FloatTensor([-float(p[2][0])/2, -float(p[2][1])/2])
                R = utils.rot_2d(q[2:3])[0]
                lower_left_position = R@lower_left + trans
                rect_patch = Rectangle([0, 0], p[2][0], p[2][1], color=part_color)
                tf = mpl.transforms.Affine2D().rotate(q[2].item()).translate(*lower_left_position) + ax.transData
                rect_patch.set_transform(tf)
                ax.add_patch(rect_patch)
62 |
def generate_obstacles_for_rigid_body(obs_num: int) -> list:
    """Randomly generate ``obs_num`` 2D obstacles.

    Rectangles have side lengths in [0.5, 3.5]; circles have radii in
    [0.25, 2]. Centers are drawn from [-5, 5]^2 for sparse environments
    (obs_num <= 2) and from [-7, 7]^2 otherwise.

    Returns:
        list of (type, position, size) tuples, where type is 'rect' or
        'circle', position is a length-2 array, and size is a length-2
        array (rect) or a scalar (circle).
    """
    obstacles = []
    types = ['rect', 'circle']
    for _ in range(obs_num):
        obs_t = types[randint(2)]
        # Draw the size first (preserves the RNG call order of the original).
        if obs_t == 'rect':
            s = rand(2) * 3 + 0.5
        else:  # circle
            s = rand() * 1.75 + 0.25
        # The position range only depends on the obstacle count, so sample
        # it once here instead of duplicating it in each shape branch.
        if obs_num <= 2:
            p = rand(2) * 10 - 5
        else:
            p = rand(2) * 14 - 7
        obstacles.append((obs_t, p, s))
    return obstacles
83 |
def generate_data_rigid_body(
        robot,
        folder: str,
        obs_num: int,
        label_type: str = 'binary',
        num_class: int = None,
        num_points: int = 8000,
        env_id: str = '',
        vis: bool = True):
    """Generate dataset for a 2D rigid body robot.
    """
    obstacles = generate_obstacles_for_rigid_body(obs_num)
    robot, cfgs, labels, dists = build_dataset(
        robot, obstacles, num_points, num_class, label_type, env_id)

    os.makedirs(folder, exist_ok=True)
    stem = 'se2_{}obs_{}_{}'.format(obs_num, label_type, env_id)
    torch.save(
        {
            'data': cfgs, 'label': labels, 'dist': dists, 'obs': obstacles,
            'robot': robot.__class__, 'rparam': [robot.parts]
        },
        os.path.join(folder, stem + '.pt'))

    if vis:
        # Preview the environment with the first 100 sampled configurations.
        create_plots(robot, obstacles, cfg=cfgs[:100], label=labels[:100])
        plt.savefig(os.path.join(folder, stem + '.png'))
        plt.close()

    return
113 |
# Entry point: generate SE(2) rigid-body datasets for a batch of random
# environments, one file (plus optional preview image) per environment.
if __name__ == "__main__":
    batch_name = 'exp1'

    label_type = 'binary' #[instance, class, binary]
    seed = 1917

    # Rigid body parts: (type, offset, dimensions)
    parts = [
        ('rect', (-1, 0), (0.3, 2)),
        ('rect', (1, 0), (0.3, 2)),
        ('rect', (0, 1), (2, 0.3)),
    ]
    robot = RigidPlanarBody(parts)

    folder_name = os.path.join('data', 'se2_{}'.format(batch_name))
    if os.path.isdir(folder_name):
        ans = input('Folder {} exists. Continue? (Y/n)'.format(folder_name))
        # BUGFIX: the prompt advertises "yes" as the default, but an empty
        # answer used to abort; now only an explicit non-"y" answer aborts.
        if ans.strip() and not ans.strip().lower().startswith('y'):
            exit(1)
    else:
        os.makedirs(folder_name)
    print('Start writing data in {}'.format(folder_name))

    # Seed both RNGs so each batch is reproducible.
    np.random.seed(seed)
    torch.random.manual_seed(seed)

    num_envs_per_num_obs = 1
    seq_num_obs = [10] # [5, 10] # [1, 2, 5, 10, 20]
    for num_obs in seq_num_obs:
        for env_id in range(num_envs_per_num_obs):
            # ==== may be used to regenerate some data with no collision ====
            # file_name = os.path.join(
            #     folder_name, '2d_{}dof_{}obs_{}_{}.pt'.format(robot.dof, num_obs, label_type, env_id))
            # if os.path.isfile(file_name):
            #     with open(file_name) as f:
            #         labels = torch.load(f)['labels']
            #     if torch.sum(labels==1) != 0:
            #         continue
            # ==============================================================
            generate_data_rigid_body(robot, folder_name, num_obs, num_points=8000, env_id='{:02d}'.format(env_id), vis=True)
--------------------------------------------------------------------------------
/scripts/generate_test_configs.py:
--------------------------------------------------------------------------------
1 | import json
2 | import torch
3 | import os
4 | from os.path import basename, splitext, join, isdir
5 | import sys
6 | # sys.path.append('/home/yuheng/DiffCo/')
7 | from diffco import DiffCo, MultiDiffCo
8 |
# Entry point: for every dataset in the experiment folder, sample 100
# start/target pairs from the collision-free configurations and save them
# next to the dataset as a *_testcfgs.json file.
if __name__ == "__main__":
    exp_name = '2d_7dof_exp1'
    folder = join('data', exp_name)
    from glob import glob
    envs = sorted(glob(join(folder, '*.pt'),))

    for env_name in envs:
        # Load the dataset and keep only collision-free configurations.
        dataset = torch.load(env_name)
        free_cfgs = dataset['data'][dataset['label'] == -1]
        s_cfgs, t_cfgs = [], []
        for _ in range(100):
            # Draw two distinct free configurations as a start/target pair.
            indices = torch.randint(0, len(free_cfgs), (2, ))
            while indices[0] == indices[1]:
                indices = torch.randint(0, len(free_cfgs), (2, ))
            print('Config indices: ', indices)
            s_cfgs.append(free_cfgs[indices[0]])
            t_cfgs.append(free_cfgs[indices[1]])
        with open(splitext(env_name)[0]+'_testcfgs.json', 'w') as f:
            json.dump({
                'env_name': splitext(basename(env_name))[0],
                'start_cfgs': torch.stack(s_cfgs).numpy().tolist(),
                'target_cfgs': torch.stack(t_cfgs).numpy().tolist(),
            }, f, indent=1)
37 |
38 |
39 |
--------------------------------------------------------------------------------
/scripts/line_query_eval.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import json
3 | # sys.path.append('/home/yuheng/DiffCo/')
4 | from diffco import DiffCo, MultiDiffCo
5 | from diffco import kernel
6 | from matplotlib import pyplot as plt
7 | import numpy as np
8 | import torch
9 | from diffco.model import RevolutePlanarRobot
10 | import fcl
11 | from scipy import ndimage
12 | from matplotlib import animation
13 | from matplotlib.patches import Rectangle, FancyBboxPatch, Circle
14 | import seaborn as sns
15 | sns.set()
16 | import matplotlib.patheffects as path_effects
17 | from time import time
18 |
def main(DOF, env_name, lmbda=10):
    """Train a DiffCo proxy checker with a line-query FK kernel on a saved
    2D dataset and report its test accuracy.

    Args:
        DOF: number of robot degrees of freedom (selects the dataset file).
        env_name: environment tag used in the dataset filename; names
            containing 'compare' are trained on a random subset instead of
            the first train_num samples.
        lmbda: rational-quadratic kernel parameter.

    Returns:
        None — the function currently returns right after the accuracy
        check; the correlation/plotting code below the early return is
        unreachable reference code for past experiments.
    """
    dataset = torch.load('data/2d_{}dof_{}.pt'.format(DOF, env_name))
    cfgs = dataset['data']
    cfgs = cfgs.view(len(cfgs), -1)
    labels = dataset['label']
    obstacles = dataset['obs']
    robot = dataset['robot'](*dataset['rparam'])
    train_num = 35000
    # Random training subset, used only for the 'compare' environments below.
    indices = torch.LongTensor(np.random.choice(len(cfgs), train_num, replace=False))
    fkine = robot.fkine
    checker = DiffCo(obstacles, kernel_func=kernel.LineFKKernel(fkine, kernel.RQKernel(lmbda)), beta=1.0) # kernel.LineKernel(kernel.FKKernel(fkine, kernel.RQKernel(lmbda)))
    # checker = MultiDiffCo(obstacles, kernel_func=kernel.FKKernel(fkine, kernel.RQKernel(10)), beta=1.0)
    keep_all = False
    if 'compare' not in env_name:
        checker.train(cfgs[:train_num], labels[:train_num], max_iteration=len(cfgs[:train_num]),
            keep_all=keep_all)
    else:
        checker.train(cfgs[indices], labels[indices], max_iteration=len(cfgs[indices]),
            keep_all=keep_all)

    # Check DiffCo test ACC: threshold raw scores into {-1, +1} predictions,
    # then measure accuracy, true-positive and true-negative rates.
    test_preds = (checker.score(cfgs[train_num:]) > 0) * 2 - 1
    test_acc = torch.sum(test_preds == labels[train_num:], dtype=torch.float32)/len(test_preds.view(-1))
    test_tpr = torch.sum(test_preds[labels[train_num:]==1] == 1, dtype=torch.float32) / len(test_preds[labels[train_num:]==1])
    test_tnr = torch.sum(test_preds[labels[train_num:]==-1] == -1, dtype=torch.float32) / len(test_preds[labels[train_num:]==-1])
    print('Test acc: {}, TPR {}, TNR {}'.format(test_acc, test_tpr, test_tnr))
    print(len(checker.gains), 'Support Points')
    # assert(test_acc > 0.9)

    # NOTE(review): deliberate early return — everything below is
    # unreachable and kept as reference for the correlation experiments.
    return

    fitting_target = 'label' # {label, dist, hypo}
    Epsilon = 0.01
    checker.fit_poly(kernel_func=kernel.Polyharmonic(1, Epsilon), target=fitting_target, fkine=fkine)
    # checker.fit_poly(kernel_func=kernel.MultiQuadratic(Epsilon), target=fitting_target, fkine=fkine)
    # checker.fit_full_poly(epsilon=Epsilon, target=fitting_target, fkine=fkine) #, lmbd=10)
    dist_est = checker.rbf_score
    # = checker.score
    # dist_est = checker.poly_score

    ''' ==================3-figure compare (work, c space 1, c space 2)==========
    size = [400, 400]
    env_name_gt = env_name if 'compare' in env_name else env_name+'_for_compare'
    # gt_grid = torch.load('data/2d_{}dof_{}.pt'.format(DOF, env_name_gt))['dist']
    # grid_points = torch.load('data/2d_{}dof_{}.pt'.format(DOF, env_name_gt))['data']
    raw_grid_score = checker.score(grid_points)

    est, c_axes = create_plots(robot, obstacles, dist_est, gt_grid) # raw_grid_score)#gt_grid)
    plt.show()
    # plt.savefig('figs/original_DiffCo_score_compared.pdf'.format(env_name), dpi=500)
    # plt.savefig('figs/vis_{}.png'.format(env_name), dpi=500)
    '''

    '''# =============== test error ============
    # est = est / est.std() * gt_grid.std()
    # print('{:.4f}, {:.4f}, {:.4f}'.format(
    #     (est-gt_grid).mean(), (est-gt_grid).std(), gt_grid.std()))
    '''

    # ''' =============== correlation ==============
    gt_grid = torch.load('data/2d_{}dof_{}.pt'.format(DOF, env_name))['dist']

    # yy, xx = torch.meshgrid(torch.linspace(-np.pi, np.pi, size[0]), torch.linspace(-np.pi, np.pi, size[1]))
    # grid_points = torch.stack([xx, yy], axis=2).reshape((-1, 2))
    est_grid = dist_est(cfgs[train_num:])

    # indices = np.random.choice(range(len(est_grid)), size=400, replace=False)
    gt_grid = gt_grid[train_num:]
    # est_grid = est_grid[indices]

    # ''' plot
    fig = plt.figure(figsize=(5, 5)) # temp
    plt.rcParams.update({
        "text.usetex": True,
        "font.family": "sans-serif",
        "font.sans-serif": ["Helvetica"]})
    # gs = fig.add_gridspec(1, 3)
    ax = fig.add_subplot(111)
    # ax.set_aspect('equal', adjustable='box')
    # ax.axis('equal')
    # ax.set_xlim((-4, 4))
    # ax.set_ylim((-3, 3))

    # Scatter ground-truth distance vs. estimated score, with symmetric axes.
    ax.scatter(gt_grid, est_grid, s=5)
    xlim_max = torch.FloatTensor(ax.get_xlim()).abs().max()
    ax.set_xlim(-xlim_max, xlim_max)
    ylim_max = torch.FloatTensor(ax.get_ylim()).abs().max()
    ax.set_ylim(-ylim_max, ylim_max)
    ax.axhline(0, linestyle='-', color='gray', alpha=0.5)
    ax.axvline(0, linestyle='-', color='gray', alpha=0.5)
    # ax.spines['left'].set_position('center')
    # ax.spines['bottom'].set_position('center')
    # ax.

    # Linear regression between estimate and ground truth; report R^2.
    from scipy import stats
    slope, intercept, r_value, p_value, std_err = stats.linregress(est_grid.numpy().reshape(-1), gt_grid.numpy().reshape(-1))
    print('{}DOF, environment {}, with FK {}, r-squared: {}'.format(DOF, env_name, checker.fkine is not None, r_value**2))
    ax.text(xlim_max/4, -7*ylim_max/8, '$\\mathrm{R}^2='+('{:.4f}$'.format(r_value**2)), fontsize=15,
            bbox=dict(boxstyle='round', facecolor='wheat', alpha=1))#, fontdict={"family": "Times New Roman",})

    # plt.show()
    # plt.savefig('figs/correlation/{}dof_{}_{}.pdf'.format(DOF, env_name, fitting_target))#, dpi=500)
    plt.savefig('figs/correlation/{}dof_{}_{}_{}_rsquare.png'.format(DOF, env_name, fitting_target, 'woFK' if checker.fkine is None else 'withFK'), dpi=300)
    # '''

    # '''

    ''' timing
    # times = []
    # st = time()
    # # for i, cfg in enumerate(cfgs):
    # #     st1 = time()
    # #     dist_est(cfg)
    # #     end1 = time()
    # #     times.append(end1-st1)
    # dist_est(cfgs)
    # end = time()
    # times = np.array(times)
    # print('std: {}, mean {}, avg {}'.format(times.std(), times.mean(), (end-st)/len(cfgs)))
    '''

    ''' decomposition
    env_name_gt = env_name if 'compare' in env_name else env_name+'_for_compare'
    gt_grid = torch.load('data/2d_{}dof_{}.pt'.format(DOF, env_name_gt))['label']
    est, c_axes = create_plots(robot, obstacles, dist_est, gt_grid) # raw_grid_score)#gt_grid)
    DiffCoClustering(cfgs, fkine, c_axes[0])
    plt.show()
    '''
    return r_value ** 2
148 |
149 |
# Entry point: run the line-query evaluation for each (DOF, environment)
# pair; alternative environments and a lambda-sweep experiment are kept
# commented out for reference.
if __name__ == "__main__":
    # DOF = 2
    # env_name = '1rect' # '2rect' # '1rect_1circle' '1rect' 'narrow' '2instance' 3circle
    envs = [
        # (2, '1rect'),
        # (2, '3circle'),
        # (7, '1rect_1circle_7d'),
        # (7, '3circle_7d'),
        (2, '1rect_1circle_line'),
    ]
    for DOF, env_name in envs:
        main(DOF, env_name, lmbda=10)
    # lmbdas = np.power(10, np.arange(-1, 3, step=0.1))
    # rs = []
    # for DOF, env_name in envs:
    #     for lmbda in lmbdas:
    #         rs.append(main(DOF, env_name, lmbda))
    # plt.plot(lmbdas, rs)
    # plt.xticks(lmbdas)
    # plt.yticks(rs)
    # plt.show()
    # with open('rvalue_tests.json', 'w') as f:
    #     json.dump(
    #         {'lambda': lmbdas.tolist(),
    #          'rvalues': rs}, f)
--------------------------------------------------------------------------------
/scripts/motion_planner.py:
--------------------------------------------------------------------------------
1 | from ompl import base as ob
2 | from ompl import geometric as og
3 | from diffco.model import Model
4 | from diffco.utils import dense_path
5 | import torch
6 | from time import time
7 |
class MyObjective(ob.OptimizationObjective):
    """OMPL optimization objective backed by a Python cost callable.

    ``func(s1, s2)`` receives two torch tensors of length ``dof`` and
    returns a scalar motion cost.
    """

    def __init__(self, si, func):
        super(MyObjective, self).__init__(si)
        self.func = func
        self.si_ = si
        self.dof = self.si_.getStateSpace().getDimension()

    def motionCost(self, s1, s2):
        # Convert OMPL states to tensors before delegating to the user cost.
        a = to_tensor(s1, self.dof)
        b = to_tensor(s2, self.dof)
        return ob.Cost(self.func(a, b))
19 |
def to_tensor(s, dof):
    """Copy the first ``dof`` scalar entries of an indexable state ``s``
    into a 1-D double-precision tensor."""
    values = [s[i] for i in range(dof)]
    return torch.tensor(values, dtype=torch.double)
22 |
class ValidityCheckerWrapper:
    """Callable wrapper around a state-validity function that counts how
    many validity queries have been issued."""

    def __init__(self, func, dof):
        self.func = func
        self.dof = dof
        self.counter = 0  # number of validity checks since the last reset

    def __call__(self, s):
        self.counter += 1
        # OMPL hands us raw states; convert them to tensors on demand.
        state = s if isinstance(s, torch.Tensor) else to_tensor(s, self.dof)
        return self.func(state)

    def reset_count(self):
        self.counter = 0
37 |
38 |
class MotionPlanner(object):
    """OMPL-based motion planner (RRTConnect) over a real-vector state space.

    Wraps a robot model, a state-validity function, and a custom pairwise
    motion-cost function into an OMPL problem definition, and exposes a
    single ``plan`` query that returns a result record dict.
    """

    def __init__(self, robot: Model, collision_checker_function, motion_cost_function):
        """
        Args:
            robot: model providing ``dof`` and per-joint ``limits``
                (iterable of (low, high) tensor pairs).
            collision_checker_function: callable(state) -> bool validity check.
            motion_cost_function: callable(s1, s2) -> float motion cost
                between two configurations.
        """
        self.robot = robot
        self.collision_checker_function = collision_checker_function

        # State space bounded by the robot's joint limits.
        self.space = ob.RealVectorStateSpace(robot.dof)
        self.bounds = ob.RealVectorBounds(robot.dof)
        for i, (l, h) in enumerate(robot.limits):
            self.bounds.setLow(i, l.item())
            self.bounds.setHigh(i, h.item())
        self.space.setBounds(self.bounds)
        # self.space.distance = motion_cost_function

        # Wrap the checker so each plan can report how many validity
        # queries it issued.
        self.valid_checker = ValidityCheckerWrapper(collision_checker_function, robot.dof)
        self.si = ob.SpaceInformation(self.space)
        self.si.setStateValidityChecker(ob.StateValidityCheckerFn(self.valid_checker))

        self.pdef = ob.ProblemDefinition(self.si)
        self.objetive = MyObjective(self.si, motion_cost_function) #ob.PathLengthOptimizationObjective(self.si))
        self.pdef.setOptimizationObjective(self.objetive)
        self.planner = og.RRTConnect(self.si)
        print(f'planner: {self.planner.getName()}')
        self.planner.setProblemDefinition(self.pdef)
        self.planner.setup()
        # Used to densify solution paths so consecutive waypoints are at
        # most one collision-checking resolution step apart.
        self.longest_valid_segment_length = self.space.getLongestValidSegmentLength()

    def plan(self, start_cfg, target_cfg, args):
        """Plan a path from ``start_cfg`` to ``target_cfg``.

        Args:
            start_cfg, target_cfg: 1-D tensors of length ``robot.dof``.
            args: dict with at least 'maxtime' (planning time budget, seconds).

        Returns:
            dict with keys start_cfg, target_cfg, cnt_check (number of
            validity queries), cost, time, success, and solution (a dense
            list of waypoints, or None on failure).
        """
        start = ob.State(self.space)
        target = ob.State(self.space)
        for i, (scfg, tcfg) in enumerate(zip(start_cfg, target_cfg)):
            start[i] = float(scfg)
            target[i] = float(tcfg)
        # Reset the planner and problem for a fresh query.
        self.pdef.clearSolutionPaths()
        self.pdef.setStartAndGoalStates(start, target)
        # self.planner.clearQuery()
        self.planner.clear()
        self.planner.setProblemDefinition(self.pdef)
        self.planner.setup()
        self.valid_checker.reset_count()

        maxtime = args['maxtime']
        plan_time = time()
        solved = self.planner.solve(maxtime)
        plan_time = time() - plan_time
        path = self.pdef.getSolutionPath()

        rec = {
            'start_cfg': start_cfg.numpy().tolist(),
            'target_cfg': target_cfg.numpy().tolist(),
            'cnt_check': self.valid_checker.counter,
            'cost': None,
            'time': plan_time,
            'success': None,
            'solution': None,
        }
        if path:
            rec['success'] = True
            # rec['cost'] = path.cost(self.objetive).value()
            path = path.getStates()
            path = torch.stack([to_tensor(s, self.robot.dof) for s in path], dim=0).to(dtype=start_cfg.dtype)
            assert torch.isclose(path[0], start_cfg).all() and torch.isclose(path[-1], target_cfg).all(), \
                f'path[0] = {path[0]}, start_cfg = {start_cfg}, path[-1] = {path[-1]}, target_cfg = {target_cfg}'
            # Densify, then accumulate the custom cost over consecutive pairs.
            path = dense_path(path, max_step=self.longest_valid_segment_length)
            rec['cost'] = sum([self.objetive.func(path[i], path[i+1]) for i in range(len(path)-1)])
            path = path.numpy().tolist()
            rec['solution'] = path
        else:
            rec['success'] = False

        return rec
109 |
def test_custom_motion_cost():
    """Smoke-test MotionPlanner with a random validity checker and a custom
    motion cost that only measures the first three joints."""
    from diffco.model import BaxterLeftArmFK
    robot = BaxterLeftArmFK()
    def foocheck(a):
        # Randomly invalidate ~5% of states to exercise the planner.
        return torch.rand(1).item() > 0.05
    def foodist(a, b):
        return float(torch.norm(a[:3]-b[:3]))
    mp = MotionPlanner(robot, foocheck, foodist)
    # BUGFIX: plan() requires an args dict with 'maxtime' (it was called
    # without one), and it returns a result record dict — the old code
    # treated that dict as the waypoint list itself.
    rec = mp.plan(torch.zeros(7)*0.0, torch.ones(7)*0.5, {'maxtime': 5.0})
    print(rec)
    assert rec['success'], 'planner failed to find a path'
    path = [torch.tensor(q) for q in rec['solution']]
    # print('path length = ', path.length())
    # print('path cost = ', path.cost(mp.objetive).value())
    print('check length = ', torch.sum(torch.stack([torch.norm(path[i+1]-path[i]) for i in range(len(path)-1)])))
    print('Check cost = ', torch.sum(torch.tensor([foodist(path[i+1], path[i]) for i in range(len(path)-1)])))
127 |
# Run the planner smoke test when executed directly.
if __name__ == "__main__":
    test_custom_motion_cost()
130 |
131 |
132 |
--------------------------------------------------------------------------------
/scripts/record_moveit_path.py:
--------------------------------------------------------------------------------
1 | import rospy
2 | import pickle
3 | import json
4 | from moveit_msgs.msg import DisplayTrajectory, RobotTrajectory
5 |
def record_path(msg):
    """Dump the most recently planned MoveIt trajectory to a JSON file."""
    path = [list(point.positions)
            for point in msg.trajectory[0].joint_trajectory.points]
    with open('data/moveit_latest_path.json', 'w') as f:
        json.dump({'path': path}, f, indent=1)
    print('Updated')
    print(path)
15 |
# Subscribe to MoveIt's planned-path topic and record every new plan until
# the node is shut down.
rospy.init_node('path_recorder')
path_sub = rospy.Subscriber('/move_group/display_planned_path', DisplayTrajectory, record_path)
rospy.spin()
--------------------------------------------------------------------------------
/scripts/temporal1d_data_generation.py:
--------------------------------------------------------------------------------
1 | from time import time
2 | import matplotlib.patheffects as path_effects
3 | import sys
4 | from diffco import Simple1DDynamicChecker
5 | from diffco import utils
6 | from diffco.Obstacles import Simple1DDynamicObstacle
7 | from matplotlib import pyplot as plt
8 | import numpy as np
9 | import torch
10 | from diffco.model import PointRobot1D
11 | from diffco.Obstacles import ObstacleMotion, LinearMotion, SineMotion
12 | import fcl
13 | from scipy import ndimage
14 | from matplotlib import animation
15 | from matplotlib.patches import Rectangle, FancyBboxPatch, Circle
16 | import seaborn as sns
17 | sns.set()
18 |
def main():
    """Generate a labeled dataset for a 1-DOF point robot moving among
    time-varying (dynamic) 1-D obstacles and save it to disk.

    NOTE: unlike previous scripts, starting from this one
    (temporal1d_data_generation.py) configurations (and time) are
    normalized to lie in [0, 1].
    """
    # ========================== Data generation =========================
    env_name = '1obs_sine'
    label_type = 'binary' # [instance, class, binary]
    num_class = 2
    DOF = 1

    # Obstacle specs keyed by environment name: (size, motion model).
    obstacles = {
        '1obs_linear': [(1, LinearMotion(A=1, B=1)), ],
        '1obs_sine': [(1, SineMotion(A=1, alpha=1, beta=0, bias=5))]
    }
    obstacles = obstacles[env_name]

    temporal_obs = [Simple1DDynamicObstacle(
        *obs) for obs in obstacles]

    # Joint range and the time horizon covered by the dataset.
    joint_limits = torch.FloatTensor([0, 20])
    t_range = torch.FloatTensor([0, 10])

    robot = PointRobot1D(limits=torch.vstack([joint_limits, t_range]))

    # Fixed seeds for reproducible datasets.
    np.random.seed(1917)
    torch.random.manual_seed(1917)
    num_init_points = 8000
    # Samples live in [0, 1]^(DOF+1); the last column is normalized time.
    cfgs = torch.rand((num_init_points, DOF+1), dtype=torch.float32)

    gt_checker = Simple1DDynamicChecker(temporal_obs, robot)

    # Label every sample with the ground-truth checker and report timing.
    st = time()
    labels, dists = gt_checker.predict(cfgs, distance=True)
    end = time()
    print('mean: {} secs.'.format((end-st)/num_init_points))
    print('{} collisions, {} free'.format(
        torch.sum(labels == 1), torch.sum(labels == -1)))
    dataset = {'data': cfgs, 'label': labels, 'dist': dists,
               'obs': obstacles, 'robot': robot.__class__, 'rparam': [robot.limits]}
    torch.save(dataset, 'data/temp1d_{}dof_{}.pt'.format(DOF, env_name))
58 |
59 |
# Generate the temporal 1-D dataset when executed directly.
if __name__ == "__main__":
    main()
62 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from setuptools import setup, find_packages

# Runtime dependencies are maintained in requirements.txt.
with open("requirements.txt") as f:
    requirements = f.read().splitlines()

# Read the long description with an explicit encoding and make sure the
# file handle is closed (the old code leaked an open README.md handle).
with open("README.md", encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="diffco",
    version="1.1.0",
    author="Yuheng Zhi",
    author_email="yzhi@ucsd.edu",
    description="Differentiable Proxy Model for Collision Checking in Robot Motion Generation",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/ucsdarclab/diffco",
    packages=find_packages(),  # automatically find and include packages
    install_requires=requirements,
    classifiers=[
        "Programming Language :: Python :: 3",
        # BUGFIX: "License :: BSD-3-Clause" is not a valid trove classifier.
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
    ],
)
--------------------------------------------------------------------------------