├── .gitignore
├── .readthedocs.yml
├── README.md
├── copy_rect_labels.py
├── docs
│   ├── .gitignore
│   ├── Makefile
│   ├── build_doc.sh
│   ├── make.bat
│   ├── requirements.txt
│   └── source
│       ├── _static
│       │   ├── 6d_example.png
│       │   ├── after_nms.png
│       │   ├── before_nms.png
│       │   ├── convert_6d_after.png
│       │   ├── convert_6d_before.png
│       │   ├── convert_rect_after.png
│       │   ├── convert_rect_before.png
│       │   ├── convert_single_after.png
│       │   ├── convert_single_before.png
│       │   ├── grasp_definition.png
│       │   ├── graspnetlogo1-blue.png
│       │   ├── rect_example.png
│       │   ├── rect_grasp_definition.png
│       │   ├── transformation.png
│       │   ├── vis_6d.png
│       │   ├── vis_grasp.png
│       │   ├── vis_rect.png
│       │   └── vis_single.png
│       ├── about.rst
│       ├── conf.py
│       ├── example_check_data.rst
│       ├── example_convert.rst
│       ├── example_eval.rst
│       ├── example_generate_rectangle_labels.rst
│       ├── example_loadGrasp.rst
│       ├── example_nms.rst
│       ├── example_vis.rst
│       ├── grasp_format.rst
│       ├── graspnetAPI.rst
│       ├── graspnetAPI.utils.dexnet.grasping.meshpy.rst
│       ├── graspnetAPI.utils.dexnet.grasping.rst
│       ├── graspnetAPI.utils.dexnet.rst
│       ├── graspnetAPI.utils.rst
│       ├── index.rst
│       ├── install.rst
│       └── modules.rst
├── examples
│   ├── exam_check_data.py
│   ├── exam_convert.py
│   ├── exam_eval.py
│   ├── exam_generate_rectangle_grasp.py
│   ├── exam_grasp_format.py
│   ├── exam_loadGrasp.py
│   ├── exam_nms.py
│   └── exam_vis.py
├── gen_pickle_dexmodel.py
├── grasp_definition.png
├── grasp_definition.vsdx
├── graspnetAPI
│   ├── __init__.py
│   ├── grasp.py
│   ├── graspnet.py
│   ├── graspnet_eval.py
│   └── utils
│       ├── __init__.py
│       ├── config.py
│       ├── dexnet
│       │   ├── LICENSE
│       │   ├── __init__.py
│       │   ├── abstractstatic.py
│       │   ├── constants.py
│       │   └── grasping
│       │       ├── __init__.py
│       │       ├── contacts.py
│       │       ├── grasp.py
│       │       ├── grasp_quality_config.py
│       │       ├── grasp_quality_function.py
│       │       ├── graspable_object.py
│       │       ├── meshpy
│       │       │   ├── LICENSE
│       │       │   ├── __init__.py
│       │       │   ├── mesh.py
│       │       │   ├── obj_file.py
│       │       │   ├── sdf.py
│       │       │   ├── sdf_file.py
│       │       │   └── stable_pose.py
│       │       └── quality.py
│       ├── eval_utils.py
│       ├── pose.py
│       ├── rotation.py
│       ├── trans3d.py
│       ├── utils.py
│       ├── vis.py
│       └── xmlhandler.py
└── setup.py
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.so
3 | *.o
4 | *.egg-info/
5 | /graspnetAPI/dump_full/
6 | /graspnetAPI/eval/acc_novel
7 | /dump_full/
8 | /dist/
9 | /build/
10 | /.vscode/
11 | /graspnms/build/
12 | *.npy
13 | /graspnms/grasp_nms.cpp
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | # Required
6 | version: 2
7 |
8 | # Build documentation in the docs/ directory with Sphinx
9 | sphinx:
10 | configuration: docs/source/conf.py
11 |
12 | # Build documentation with MkDocs
13 | #mkdocs:
14 | # configuration: mkdocs.yml
15 | build:
16 | image: stable
17 | # Optionally build your docs in additional formats such as PDF
18 | formats:
19 | - pdf
20 | - epub
21 |
22 | # Optionally set the version of Python and requirements required to build your docs
23 |
24 | python:
25 | version: 3.6
26 | install:
27 | - requirements: docs/requirements.txt
28 | - method: pip
29 | path: .
30 | system_packages: true
31 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # graspnetAPI
2 | [![PyPI version](https://badge.fury.io/py/graspnetAPI.svg)](https://badge.fury.io/py/graspnetAPI)
3 |
4 | ## Dataset
5 |
6 | Visit the [GraspNet Website](http://graspnet.net) to get the dataset.
7 |
8 | ## Install
9 | You can install graspnetAPI using pip. (Note: the PyPI version might be outdated; installing from source is recommended.)
10 | ```bash
11 | pip install graspnetAPI
12 | ```
13 |
14 | You can also install from source.
15 |
16 | ```bash
17 | git clone https://github.com/graspnet/graspnetAPI.git
18 | cd graspnetAPI
19 | pip install .
20 | ```
21 |
22 | ## Document
23 |
24 | Refer to the [online documentation](https://graspnetapi.readthedocs.io/en/latest/index.html) for more details.
25 | A [PDF version](https://graspnetapi.readthedocs.io/_/downloads/en/latest/pdf/) is also available.
26 |
27 | You can also build the documentation manually.
28 | ```bash
29 | cd docs
30 | pip install -r requirements.txt
31 | bash build_doc.sh
32 | ```
33 |
34 | LaTeX is required to build the PDF; the HTML version can be built without it.
35 |
36 | ## Grasp Definition
37 | The frame of our gripper is defined as follows:
38 |
39 | ![grasp definition](grasp_definition.png)
40 |
41 |
42 |
43 | ## Examples
44 | ```bash
45 | cd examples
46 |
47 | # change the path of graspnet root
48 |
49 | # How to load labels from graspnet.
50 | python3 exam_loadGrasp.py
51 |
52 | # How to convert between 6d and rectangle grasps.
53 | python3 exam_convert.py
54 |
55 | # Check the completeness of the data.
56 | python3 exam_check_data.py
57 |
58 | # you can also run other examples
59 | ```
60 |
61 | Please refer to our document for more examples.
62 |
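A minimal usage sketch (the dataset root is a placeholder; see `examples/exam_loadGrasp.py` and the documentation for complete examples):

```python
from graspnetAPI import GraspNet

graspnet_root = '/path/to/graspnet'  # placeholder: change to your dataset root
g = GraspNet(graspnet_root, camera='kinect', split='train')

# load 6d grasp labels for scene 1, annotation 3, keeping grasps with friction coefficient <= 0.2
grasp_group = g.loadGrasp(sceneId=1, annId=3, format='6d', camera='kinect', fric_coef_thresh=0.2)
print(grasp_group[0])
```
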
63 | ## Citation
64 | Please cite these papers in your publications if they help your research:
65 | ```
66 | @article{fang2023robust,
67 | title={Robust grasping across diverse sensor qualities: The GraspNet-1Billion dataset},
68 | author={Fang, Hao-Shu and Gou, Minghao and Wang, Chenxi and Lu, Cewu},
69 | journal={The International Journal of Robotics Research},
70 | year={2023},
71 | publisher={SAGE Publications Sage UK: London, England}
72 | }
73 |
74 | @inproceedings{fang2020graspnet,
75 | title={GraspNet-1Billion: A Large-Scale Benchmark for General Object Grasping},
76 | author={Fang, Hao-Shu and Wang, Chenxi and Gou, Minghao and Lu, Cewu},
77 |   booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
78 | pages={11444--11453},
79 | year={2020}
80 | }
81 | ```
82 |
83 | ## Change Log
84 |
85 | #### 1.2.6
86 |
87 | - Add transformation for Grasp and GraspGroup.
88 |
89 | #### 1.2.7
90 |
91 | - Add inpainting for depth image.
92 |
93 | #### 1.2.8
94 |
95 | - Minor bug fix on loadScenePointCloud.
96 |
--------------------------------------------------------------------------------
/copy_rect_labels.py:
--------------------------------------------------------------------------------
1 | import os
2 | from tqdm import tqdm
3 |
4 | ### change the root to your path ####
5 | graspnet_root = '/home/gmh/graspnet'
6 |
7 | ### change the root to the folder that contains rectangle grasp labels ###
8 | rect_labels_root = 'rect_labels'
9 |
10 | for sceneId in tqdm(range(190), 'Copying Rectangle Grasp Labels'):
11 | for camera in ['kinect', 'realsense']:
12 | dest_dir = os.path.join(graspnet_root, 'scenes', 'scene_%04d' % sceneId, camera, 'rect')
13 | src_dir = os.path.join(rect_labels_root, 'scene_%04d' % sceneId, camera)
14 | if not os.path.exists(dest_dir):
15 | os.mkdir(dest_dir)
16 | for annId in range(256):
17 | src_path = os.path.join(src_dir,'%04d.npy' % annId)
18 | assert os.path.exists(src_path)
19 | os.system('cp {} {}'.format(src_path, dest_dir))
20 |
--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
1 | /build/
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = source
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/docs/build_doc.sh:
--------------------------------------------------------------------------------
1 | rm source/graspnetAPI.*
2 | rm source/modules.rst
3 | sphinx-apidoc -o ./source ../graspnetAPI
4 | make clean
5 | make html
6 | make latex
7 | cd build/latex
8 | make
9 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | sphinx==3.0.3
2 | sphinx_rtd_theme
3 | open3d==0.11.0
4 |
--------------------------------------------------------------------------------
/docs/source/_static/6d_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/6d_example.png
--------------------------------------------------------------------------------
/docs/source/_static/after_nms.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/after_nms.png
--------------------------------------------------------------------------------
/docs/source/_static/before_nms.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/before_nms.png
--------------------------------------------------------------------------------
/docs/source/_static/convert_6d_after.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/convert_6d_after.png
--------------------------------------------------------------------------------
/docs/source/_static/convert_6d_before.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/convert_6d_before.png
--------------------------------------------------------------------------------
/docs/source/_static/convert_rect_after.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/convert_rect_after.png
--------------------------------------------------------------------------------
/docs/source/_static/convert_rect_before.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/convert_rect_before.png
--------------------------------------------------------------------------------
/docs/source/_static/convert_single_after.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/convert_single_after.png
--------------------------------------------------------------------------------
/docs/source/_static/convert_single_before.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/convert_single_before.png
--------------------------------------------------------------------------------
/docs/source/_static/grasp_definition.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/grasp_definition.png
--------------------------------------------------------------------------------
/docs/source/_static/graspnetlogo1-blue.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/graspnetlogo1-blue.png
--------------------------------------------------------------------------------
/docs/source/_static/rect_example.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/rect_example.png
--------------------------------------------------------------------------------
/docs/source/_static/rect_grasp_definition.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/rect_grasp_definition.png
--------------------------------------------------------------------------------
/docs/source/_static/transformation.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/transformation.png
--------------------------------------------------------------------------------
/docs/source/_static/vis_6d.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/vis_6d.png
--------------------------------------------------------------------------------
/docs/source/_static/vis_grasp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/vis_grasp.png
--------------------------------------------------------------------------------
/docs/source/_static/vis_rect.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/vis_rect.png
--------------------------------------------------------------------------------
/docs/source/_static/vis_single.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/docs/source/_static/vis_single.png
--------------------------------------------------------------------------------
/docs/source/about.rst:
--------------------------------------------------------------------------------
1 | About graspnetAPI
2 | =================
3 |
4 | .. image:: _static/graspnetlogo1-blue.png
5 |
6 | GraspNet is an open project for general object grasping that is continuously enriched. Currently we release GraspNet-1Billion, a large-scale benchmark for general object grasping, which also covers related areas (e.g. 6D pose estimation, unseen object segmentation, etc.). graspnetAPI is a Python API that assists in loading, parsing and visualizing the annotations in GraspNet. Please visit the GraspNet website_ for more information, including the data, paper, and tutorials. The exact format of the annotations is also described there. In addition to this API, please download both the GraspNet images and annotations in order to run the demo.
7 |
8 | .. _website: https://graspnet.net/
9 |
10 |
11 | Resources
12 | ---------
13 | - Documentations_
14 | - PDF_Documentations_
15 | - Website_
16 | - Code_
17 |
18 | .. _Code: https://github.com/graspnet/graspnetapi
19 |
20 | .. _Documentations: https://graspnetapi.readthedocs.io/en/latest/
21 |
22 | .. _PDF_Documentations: https://graspnetapi.readthedocs.io/_/downloads/en/latest/pdf/
23 |
24 | .. _Website: https://graspnet.net/
25 |
26 | License
27 | -------
28 | graspnetAPI is licensed under the non-commercial CC 4.0 license (see https://graspnet.net/about).
29 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | import os
14 | import sys
15 | sys.path.insert(0, os.path.abspath('../../graspnetAPI'))
16 |
17 |
18 | # -- Project information -----------------------------------------------------
19 |
20 | project = 'graspnetAPI'
21 | copyright = '2021, MVIG, Shanghai Jiao Tong University'
22 | author = 'graspnet'
23 |
24 | # The full version, including alpha/beta/rc tags
25 | release = '1.2.11'
26 |
27 |
28 | # -- General configuration ---------------------------------------------------
29 |
30 | # Add any Sphinx extension module names here, as strings. They can be
31 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
32 | # ones.
33 | extensions = ['sphinx.ext.autodoc',
34 | 'sphinx.ext.todo',
35 | 'sphinx.ext.viewcode'
36 | ]
37 |
38 | # Add any paths that contain templates here, relative to this directory.
39 | templates_path = ['_templates']
40 |
41 | # List of patterns, relative to source directory, that match files and
42 | # directories to ignore when looking for source files.
43 | # This pattern also affects html_static_path and html_extra_path.
44 | exclude_patterns = []
45 |
46 |
47 | # -- Options for HTML output -------------------------------------------------
48 |
49 | # The theme to use for HTML and HTML Help pages. See the documentation for
50 | # a list of builtin themes.
51 | #
52 | html_theme = 'sphinx_rtd_theme'
53 |
54 | # Add any paths that contain custom static files (such as style sheets) here,
55 | # relative to this directory. They are copied after the builtin static files,
56 | # so a file named "default.css" will overwrite the builtin "default.css".
57 | html_static_path = ['_static']
58 | master_doc = 'index'
59 |
--------------------------------------------------------------------------------
/docs/source/example_check_data.rst:
--------------------------------------------------------------------------------
1 | .. _example_check_data:
2 |
3 | Check Dataset Files
4 | ===================
5 |
6 | You can check whether any file is missing from the dataset with the following code.
7 |
8 | .. literalinclude:: ../../examples/exam_check_data.py
--------------------------------------------------------------------------------
/docs/source/example_convert.rst:
--------------------------------------------------------------------------------
1 | .. _example_convert:
2 |
3 | Convert Labels between rectangle format and 6d format
4 | =====================================================
5 |
6 | Get a GraspNet instance.
7 |
8 | .. literalinclude:: ../../examples/exam_convert.py
9 | :lines: 4-22
10 |
11 | Convert rectangle format to 6d format
12 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
13 |
14 | First, load rectangle labels from dataset.
15 |
16 | .. literalinclude:: ../../examples/exam_convert.py
17 | :lines: 24-25
18 |
19 | **Convert a single RectGrasp to Grasp.**
20 |
21 | .. note:: This conversion may fail due to invalid depth information.
22 |
23 | .. literalinclude:: ../../examples/exam_convert.py
24 | :lines: 27-42
25 |
26 | Before Conversion:
27 |
28 | .. image:: _static/convert_single_before.png
29 |
30 | After Conversion:
31 |
32 | .. image:: _static/convert_single_after.png
33 |
34 | **Convert RectGraspGroup to GraspGroup.**
35 |
36 | .. literalinclude:: ../../examples/exam_convert.py
37 | :lines: 44-56
38 |
39 | Before Conversion:
40 |
41 | .. image:: _static/convert_rect_before.png
42 |
43 | After Conversion:
44 |
45 | .. image:: _static/convert_rect_after.png
46 |
47 | Convert 6d format to rectangle format
48 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
49 |
50 | .. note:: Grasp to RectGrasp conversion is not applicable, as only very few 6D grasps can be converted to rectangle grasps.
51 |
52 | .. literalinclude:: ../../examples/exam_convert.py
53 | :lines: 58-
54 |
55 | Before Conversion:
56 |
57 | .. image:: _static/convert_6d_before.png
58 |
59 | After Conversion:
60 |
61 | .. image:: _static/convert_6d_after.png
62 |
63 |
--------------------------------------------------------------------------------
/docs/source/example_eval.rst:
--------------------------------------------------------------------------------
1 | .. _example_eval:
2 |
3 | Evaluation
4 | ==========
5 |
6 | Data Preparation
7 | ^^^^^^^^^^^^^^^^
8 |
9 | The first step of evaluation is to prepare your own results.
10 | You need to run your code and generate a `GraspGroup` for each image in each scene.
11 | Then call the `save_npy` function of `GraspGroup` to dump the results.
12 |
13 | To generate a `GraspGroup` and save it, you can directly pass a 2D numpy array to the `GraspGroup` constructor:
14 | ::
15 |
16 | gg=GraspGroup(np.array([[score_1, width_1, height_1, depth_1, rotation_matrix_1(9), translation_1(3), object_id_1],
17 | [score_2, width_2, height_2, depth_2, rotation_matrix_2(9), translation_2(3), object_id_2],
18 | ...,
19 | [score_N, width_N, height_N, depth_N, rotation_matrix_N(9), translation_N(3), object_id_N]]
20 | ))
21 | gg.save_npy(save_path)
22 |
23 | where your algorithm predicts N grasp poses for the image. For the `object_id`, you can simply input `0`. For the meaning of the other entries, refer to the API Loaded Labels section of the Grasp Label Format documentation.
24 |
25 | The file structure of dump folder should be as follows:
26 |
27 | ::
28 |
29 | |-- dump_folder
30 | |-- scene_0100
31 | | |-- kinect
32 | | | |
33 | | | --- 0000.npy to 0255.npy
34 | | |
35 | | --- realsense
36 | | |
37 | | --- 0000.npy to 0255.npy
38 | |
39 | |-- scene_0101
40 | |
41 | ...
42 | |
43 | --- scene_0189
44 |
45 | You can choose to generate dump files for only one camera; doing so will not cause an error.
46 |
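As a reference, a minimal sketch of dumping predictions in this layout is given below (`my_predict` is a hypothetical placeholder for your own inference code that returns a `GraspGroup`):

::

    import os
    from graspnetAPI import GraspGroup

    dump_folder = 'dump_folder'
    sceneId, camera = 100, 'kinect'
    save_dir = os.path.join(dump_folder, 'scene_%04d' % sceneId, camera)
    os.makedirs(save_dir, exist_ok=True)
    for annId in range(256):
        gg = my_predict(sceneId, camera, annId)  # hypothetical: returns a GraspGroup for this image
        gg.save_npy(os.path.join(save_dir, '%04d.npy' % annId))
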
47 | Evaluation API
48 | ^^^^^^^^^^^^^^
49 |
50 | Get GraspNetEval instances.
51 |
52 | .. literalinclude:: ../../examples/exam_eval.py
53 | :lines: 4-17
54 |
55 | Evaluate A Single Scene
56 | -----------------------
57 |
58 | .. literalinclude:: ../../examples/exam_eval.py
59 | :lines: 19-23
60 |
61 | Evaluate All Scenes
62 | -------------------
63 |
64 | .. literalinclude:: ../../examples/exam_eval.py
65 | :lines: 25-27
66 |
67 | Evaluate 'Seen' Split
68 | ---------------------
69 |
70 | .. literalinclude:: ../../examples/exam_eval.py
71 | :lines: 29-31
72 |
73 |
74 |
--------------------------------------------------------------------------------
/docs/source/example_generate_rectangle_labels.rst:
--------------------------------------------------------------------------------
1 | .. _example_generate_rectangle_labels:
2 |
3 | Generating Rectangle Grasp Labels
4 | =================================
5 |
6 | You can generate the rectangle grasp labels by yourself.
7 |
8 | Import the necessary libraries:
9 |
10 | .. literalinclude:: ../../examples/exam_generate_rectangle_grasp.py
11 | :lines: 4-11
12 |
13 | Set up how many processes to use when generating the labels.
14 |
15 | .. literalinclude:: ../../examples/exam_generate_rectangle_grasp.py
16 | :lines: 13-15
17 |
18 | The function to generate labels.
19 |
20 | .. literalinclude:: ../../examples/exam_generate_rectangle_grasp.py
21 | :lines: 17-31
22 |
23 | Run the function for each scene and camera.
24 |
25 | .. literalinclude:: ../../examples/exam_generate_rectangle_grasp.py
26 | :lines: 33-52
--------------------------------------------------------------------------------
/docs/source/example_loadGrasp.rst:
--------------------------------------------------------------------------------
1 | .. _example_loadGrasp:
2 |
3 | Loading Grasp Labels
4 | ====================
5 |
6 | Both `6d` and `rect` format labels can be loaded.
7 |
8 | First, import the relevant libraries.
9 |
10 | .. literalinclude:: ../../examples/exam_loadGrasp.py
11 | :lines: 4-7
12 |
13 | Then, get a GraspNet instance and setup parameters.
14 |
15 | .. literalinclude:: ../../examples/exam_loadGrasp.py
16 | :lines: 11-19
17 |
18 | Load grasp labels in `6d` format and visualize the result.
19 |
20 | .. literalinclude:: ../../examples/exam_loadGrasp.py
21 | :lines: 21-29
22 |
23 | .. image:: _static/6d_example.png
24 |
25 | Load grasp labels in `rect` format and visualize the result.
26 |
27 | .. literalinclude:: ../../examples/exam_loadGrasp.py
28 | :lines: 31-40
29 |
30 | .. image:: _static/rect_example.png
--------------------------------------------------------------------------------
/docs/source/example_nms.rst:
--------------------------------------------------------------------------------
1 | .. _example_nms:
2 |
3 | Apply NMS on Grasps
4 | ===================
5 |
6 |
7 | Get a GraspNet instance.
8 |
9 | .. literalinclude:: ../../examples/exam_nms.py
10 | :lines: 4-19
11 |
12 | Load and visualize grasp labels before NMS.
13 |
14 | .. literalinclude:: ../../examples/exam_nms.py
15 | :lines: 21-29
16 |
17 | ::
18 |
19 | 6d grasp:
20 | ----------
21 | Grasp Group, Number=90332:
22 | Grasp: score:0.9000000357627869, width:0.11247877031564713, height:0.019999999552965164, depth:0.029999999329447746, translation:[-0.09166837 -0.16910084 0.39480919]
23 | rotation:
24 | [[-0.81045675 -0.57493848 0.11227506]
25 | [ 0.49874267 -0.77775514 -0.38256073]
26 | [ 0.30727136 -0.25405255 0.91708326]]
27 | object id:66
28 | Grasp: score:0.9000000357627869, width:0.10030215978622437, height:0.019999999552965164, depth:0.019999999552965164, translation:[-0.09166837 -0.16910084 0.39480919]
29 | rotation:
30 | [[-0.73440629 -0.67870212 0.0033038 ]
31 | [ 0.64608938 -0.70059127 -0.3028869 ]
32 | [ 0.20788456 -0.22030747 0.95302087]]
33 | object id:66
34 | Grasp: score:0.9000000357627869, width:0.08487851172685623, height:0.019999999552965164, depth:0.019999999552965164, translation:[-0.10412319 -0.13797761 0.38312319]
35 | rotation:
36 | [[ 0.03316294 0.78667939 -0.61647028]
37 | [-0.47164679 0.55612743 0.68430364]
38 | [ 0.88116372 0.26806271 0.38947764]]
39 | object id:66
40 | ......
41 | Grasp: score:0.9000000357627869, width:0.11909123510122299, height:0.019999999552965164, depth:0.019999999552965164, translation:[-0.05140382 0.11790846 0.48782501]
42 | rotation:
43 | [[-0.71453273 0.63476181 -0.2941435 ]
44 | [-0.07400083 0.3495101 0.93400562]
45 | [ 0.69567728 0.68914449 -0.20276351]]
46 | object id:14
47 | Grasp: score:0.9000000357627869, width:0.10943549126386642, height:0.019999999552965164, depth:0.019999999552965164, translation:[-0.05140382 0.11790846 0.48782501]
48 | rotation:
49 | [[ 0.08162415 0.4604325 -0.88393396]
50 | [-0.52200603 0.77526748 0.3556262 ]
51 | [ 0.84902728 0.4323912 0.30362913]]
52 | object id:14
53 | Grasp: score:0.9000000357627869, width:0.11654743552207947, height:0.019999999552965164, depth:0.009999999776482582, translation:[-0.05140382 0.11790846 0.48782501]
54 | rotation:
55 | [[-0.18380146 0.39686993 -0.89928377]
56 | [-0.61254776 0.66926688 0.42055583]
57 | [ 0.76876676 0.62815309 0.12008961]]
58 | object id:14
59 | ------------
60 |
61 | .. image:: _static/before_nms.png
62 |
63 | Apply NMS to the GraspGroup and visualize the result.
64 |
65 | .. literalinclude:: ../../examples/exam_nms.py
66 | :lines: 31-38
67 |
68 | ::
69 |
70 | grasp after nms:
71 | ----------
72 | Grasp Group, Number=358:
73 | Grasp: score:1.0, width:0.11948642134666443, height:0.019999999552965164, depth:0.03999999910593033, translation:[-0.00363996 0.03692623 0.3311775 ]
74 | rotation:
75 | [[ 0.32641056 -0.8457799 0.42203382]
76 | [-0.68102902 -0.52005678 -0.51550031]
77 | [ 0.65548128 -0.11915252 -0.74575269]]
78 | object id:0
79 | Grasp: score:1.0, width:0.12185929715633392, height:0.019999999552965164, depth:0.009999999776482582, translation:[-0.03486454 0.08384828 0.35117128]
80 | rotation:
81 | [[-0.00487804 -0.8475557 0.53068405]
82 | [-0.27290785 -0.50941664 -0.81609803]
83 | [ 0.96202785 -0.14880882 -0.22881967]]
84 | object id:0
85 | Grasp: score:1.0, width:0.04842342436313629, height:0.019999999552965164, depth:0.019999999552965164, translation:[0.10816982 0.10254505 0.50272578]
86 | rotation:
87 | [[-0.98109186 -0.01696888 -0.19279723]
88 | [-0.1817532 0.42313483 0.88765001]
89 | [ 0.06651681 0.90590769 -0.41821831]]
90 | object id:20
91 | ......
92 | Grasp: score:0.9000000357627869, width:0.006192661356180906, height:0.019999999552965164, depth:0.009999999776482582, translation:[0.0122985 0.29616502 0.53319722]
93 | rotation:
94 | [[-0.26423979 0.39734706 0.87880182]
95 | [-0.95826042 -0.00504095 -0.28585231]
96 | [-0.10915259 -0.91765451 0.38209397]]
97 | object id:46
98 | Grasp: score:0.9000000357627869, width:0.024634981527924538, height:0.019999999552965164, depth:0.009999999776482582, translation:[0.11430283 0.18761221 0.51991153]
99 | rotation:
100 | [[-0.17379239 -0.96953499 0.17262182]
101 | [-0.9434278 0.11365268 -0.31149188]
102 | [ 0.28238329 -0.2169912 -0.93443805]]
103 | object id:70
104 | Grasp: score:0.9000000357627869, width:0.03459500893950462, height:0.019999999552965164, depth:0.009999999776482582, translation:[0.02079188 0.11184558 0.50796509]
105 | rotation:
106 | [[ 0.38108557 -0.27480939 0.88275337]
107 | [-0.92043257 -0.20266907 0.33425891]
108 | [ 0.08704928 -0.93989623 -0.33017775]]
109 | object id:20
110 | ----------
111 |
112 | .. image:: _static/after_nms.png
--------------------------------------------------------------------------------
/docs/source/example_vis.rst:
--------------------------------------------------------------------------------
1 | .. _example_vis:
2 |
3 | Visualization of Dataset
4 | ========================
5 |
6 |
7 | Get a GraspNet instance.
8 |
9 | .. literalinclude:: ../../examples/exam_vis.py
10 | :lines: 7-14
11 |
12 |
13 | Show grasp labels on an object.
14 |
15 | .. literalinclude:: ../../examples/exam_vis.py
16 | :lines: 16-17
17 |
18 | .. image:: _static/vis_single.png
19 |
20 | Show 6D poses of objects in a scene.
21 |
22 | .. literalinclude:: ../../examples/exam_vis.py
23 | :lines: 19-20
24 |
25 | .. image:: _static/vis_6d.png
26 |
27 | Show Rectangle grasp labels in a scene.
28 |
29 | .. literalinclude:: ../../examples/exam_vis.py
30 | :lines: 22-23
31 |
32 | .. image:: _static/vis_rect.png
33 |
34 | Show 6D grasp labels in a scene.
35 |
36 | .. literalinclude:: ../../examples/exam_vis.py
37 | :lines: 25-26
38 |
39 | .. image:: _static/vis_grasp.png
40 |
--------------------------------------------------------------------------------
/docs/source/grasp_format.rst:
--------------------------------------------------------------------------------
1 | .. grasp_format:
2 |
3 | Grasp Label Format
4 | ==================
5 |
6 | Raw Label Format
7 | ----------------
8 | The raw label is composed of two parts, i.e. labels for all grasp candidates on each object and collision masks for each scene.
9 |
10 |
11 |
12 | Labels on Objects
13 | ^^^^^^^^^^^^^^^^^
14 | The raw label on each object is a list of numpy arrays.
15 |
16 | ::
17 |
18 | >>> import numpy as np
19 | >>> l = np.load('000_labels.npz') # GRASPNET_ROOT/grasp_label/000_labels.npz
20 | >>> l.files
21 | ['points', 'offsets', 'collision', 'scores']
22 | >>> l['points'].shape
23 | (3459, 3)
24 | >>> l['offsets'].shape
25 | (3459, 300, 12, 4, 3)
26 | >>> l['collision'].shape
27 | (3459, 300, 12, 4)
28 | >>> l['collision'].dtype
29 | dtype('bool')
30 | >>> l['scores'].shape
31 | (3459, 300, 12, 4)
32 | >>> l['scores'][0][0][0][0]
33 | -1.0
34 |
35 | - 'points' records the grasp center point coordinates in the model frame.
36 |
37 | - 'offsets' records the in-plane rotation, depth and width of the gripper respectively in the last dimension.
38 |
39 | - 'collision' records the bool mask for if the grasp pose collides with the model.
40 |
41 | - 'scores' records the minimum coefficient of friction between the gripper and object to achieve a stable grasp.
42 |
43 | .. note::
44 |
45 |     In the raw label, the **lower** the score, the **better** the grasp. However, a score of -1.0 means the grasp pose is not acceptable at all.
46 |
47 | 300, 12 and 4 denote the view id, in-plane rotation id and depth id respectively. The views are defined in graspnetAPI/utils/utils.py as shown below.
48 |
49 | .. literalinclude:: ../../graspnetAPI/utils/utils.py
50 | :lines: 51-58
51 | :linenos:
52 |
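For illustration, a minimal sketch (an assumption for illustration, not an official dataset tool) of counting the acceptable grasp poses in a raw object label under a friction threshold:

::

    import numpy as np

    l = np.load('000_labels.npz')
    scores = l['scores']        # (num_points, 300, 12, 4), minimum friction coefficient, -1.0 means invalid
    collision = l['collision']  # same shape, boolean collision mask
    # keep poses that are collision-free, valid (score > 0) and need low friction (<= 0.2)
    mask = (~collision) & (scores > 0) & (scores <= 0.2)
    print('acceptable grasp poses:', mask.sum())
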
53 | Collision Masks on Each Scene
54 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
55 |
56 | Collision mask on each scene is a list of numpy arrays.
57 |
58 | ::
59 |
60 | >>> import numpy as np
61 | >>> c = np.load('collision_labels.npz') # GRASPNET_ROOT/collision_label/scene_0000/collision_labels.npz
62 | >>> c.files
63 | ['arr_0', 'arr_4', 'arr_5', 'arr_2', 'arr_3', 'arr_7', 'arr_1', 'arr_8', 'arr_6']
64 | >>> c['arr_0'].shape
65 | (487, 300, 12, 4)
66 | >>> c['arr_0'].dtype
67 | dtype('bool')
68 | >>> c['arr_0'][10][20][3]
69 | array([ True, True, True, True])
70 |
71 | 'arr_i' is the collision mask for the `i`-th object listed in each scene's `object_id_list.txt`; its shape is (num_points, 300, 12, 4).
72 | num_points, 300, 12 and 4 denote the number of points in the object, the view id, the in-plane rotation id and the depth id respectively.
73 |
74 | Users can refer to :py:func:`graspnetAPI.GraspNet.loadGrasp` for more details of how to use the labels.
75 |
76 | API Loaded Labels
77 | -----------------
78 |
79 | Dealing with the raw labels is time-consuming and requires high familiarity with graspnet,
80 | so the API also provides easy access to the labels.
81 |
82 | By calling :py:func:`graspnetAPI.GraspNet.loadGrasp`, users can get all the positive grasp labels in a scene with their parameters and scores.
83 |
84 | There are totally four kinds of data structures for loaded grasp labels: **Grasp**, **GraspGroup**, **RectGrasp** and **RectGraspGroup**.
85 | The internal data format of each class is a numpy array, which is more efficient than a Python list.
86 | Their definitions are given in grasp.py.
87 |
88 | Example Labels
89 | ^^^^^^^^^^^^^^
90 |
91 | Before looking into the details, an example is given below.
92 |
93 | Loading a GraspGroup instance.
94 |
95 | .. literalinclude:: ../../examples/exam_grasp_format.py
96 | :lines: 1-27
97 |
98 | Users can access elements by index or slice.
99 |
100 | .. literalinclude:: ../../examples/exam_grasp_format.py
101 | :lines: 29-35
102 |
103 | Each element of GraspGroup is a Grasp instance.
104 | The properties of Grasp can be accessed via provided methods.
105 |
106 | .. literalinclude:: ../../examples/exam_grasp_format.py
107 | :lines: 37-46
108 |
109 | RectGrasp is the class for rectangle grasps. The format is different from Grasp.
110 | But the provided APIs are similar.
111 |
112 | .. literalinclude:: ../../examples/exam_grasp_format.py
113 | :lines: 49-65
114 |
115 | 6D Grasp
116 | ^^^^^^^^
117 | In total, 17 float numbers are used to define a general 6D grasp: besides the rotation matrix (9 numbers) and the translation (3 numbers),
118 | the width, height, depth, score and attached object id are also part of the definition.
119 |
120 | .. note::
121 |
122 |     In the loaded labels, the **higher** the score, the **better** the grasp, which is the opposite of the raw labels. Concretely, score = 1.1 - raw_score, where raw_score is the friction coefficient (e.g. a raw score of 0.2 is loaded as 1.1 - 0.2 = 0.9).
123 |
124 | .. literalinclude:: ../../graspnetAPI/graspnet.py
125 | :lines: 635-637
126 | :emphasize-lines: 2
127 |
128 | The detailed definition of each parameter is shown in the figure below.
129 |
130 | .. image:: _static/grasp_definition.png
131 |
132 | .. literalinclude:: ../../graspnetAPI/grasp.py
133 | :lines: 14-36
134 |
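The same 17-number layout is used when constructing a `GraspGroup` from a numpy array (see the Evaluation example). A minimal sketch of one such row, with placeholder values:

::

    import numpy as np

    # [score, width, height, depth, rotation_matrix(9), translation(3), object_id]
    grasp_array = np.concatenate([
        [0.9, 0.08, 0.02, 0.02],   # score, width, height, depth (meters)
        np.eye(3).reshape(-1),     # 3x3 rotation matrix, flattened row-major
        [0.0, 0.0, 0.4],           # translation
        [0]                        # attached object id
    ]).astype(np.float32)
    print(grasp_array.shape)       # (17,)
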
135 | 6D Grasp Group
136 | ^^^^^^^^^^^^^^
137 |
138 | Usually, there are a lot of grasps in a scene, so :py:class:`GraspGroup` is a class for collections of grasps.
139 | Compared with :py:class:`Grasp`, :py:class:`GraspGroup` contains a 2D numpy array; the additional dimension is the index of each grasp.
140 |
141 | .. literalinclude:: ../../graspnetAPI/grasp.py
142 | :lines: 201-218
143 |
144 | Common operations on a list such as indexing, slicing and sorting are implemented.
145 | In addition, users can **dump** a GraspGroup into a numpy file and **load** it in another program by calling :py:func:`GraspGroup.save_npy` and :py:func:`GraspGroup.from_npy`.
146 |
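A minimal round-trip sketch (the file name is an arbitrary placeholder and `gg` is a previously loaded GraspGroup):

::

    from graspnetAPI import GraspGroup

    gg.save_npy('my_grasps.npy')          # dump the GraspGroup to disk
    gg_loaded = GraspGroup()
    gg_loaded.from_npy('my_grasps.npy')   # reload it in another program
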
147 | Rectangle Grasp
148 | ^^^^^^^^^^^^^^^
149 | 7 float numbers are used to define a general rectangle grasp, i.e. the center point (2 floats), the open point (2 floats), the height, the score and the attached object id.
150 | The detailed definition of each parameter is shown in the figures above and below; the coordinates of the center point and the open point are given in the pixel frame.
151 |
152 | .. image:: _static/rect_grasp_definition.png
153 |
154 | .. literalinclude:: ../../graspnetAPI/grasp.py
155 | :lines: 553-572
156 |
157 | Rectangle Grasp Group
158 | ^^^^^^^^^^^^^^^^^^^^^
159 |
160 | The format for :py:class:`RectGraspGroup` is similar to that of :py:class:`RectGrasp` and :py:class:`GraspGroup`.
161 |
162 | .. literalinclude:: ../../graspnetAPI/grasp.py
163 | :lines: 752-769
164 |
165 | .. note::
166 |
167 |     We recommend accessing and modifying the labels through the provided functions; manipulating the internal data directly is possible but **not recommended**.
168 | Please refer to the Python API for more details.
169 |
170 | Grasp and GraspGroup Transformation
171 | -----------------------------------
172 |
173 | Users can transform a Grasp or GraspGroup by giving a 4x4 transformation matrix.
174 |
175 | .. literalinclude:: ../../examples/exam_grasp_format.py
176 | :lines: 67-95
177 |
178 | .. image:: _static/transformation.png
179 |
--------------------------------------------------------------------------------
/docs/source/graspnetAPI.rst:
--------------------------------------------------------------------------------
1 | graspnetAPI package
2 | ===================
3 |
4 | Subpackages
5 | -----------
6 |
7 | .. toctree::
8 | :maxdepth: 4
9 |
10 | graspnetAPI.utils
11 |
12 | Submodules
13 | ----------
14 |
15 | graspnetAPI.grasp module
16 | ------------------------
17 |
18 | .. automodule:: graspnetAPI.grasp
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | graspnetAPI.graspnet module
24 | ---------------------------
25 |
26 | .. automodule:: graspnetAPI.graspnet
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 | graspnetAPI.graspnet\_eval module
32 | ---------------------------------
33 |
34 | .. automodule:: graspnetAPI.graspnet_eval
35 | :members:
36 | :undoc-members:
37 | :show-inheritance:
38 |
39 |
40 | Module contents
41 | ---------------
42 |
43 | .. automodule:: graspnetAPI
44 | :members:
45 | :undoc-members:
46 | :show-inheritance:
47 |
--------------------------------------------------------------------------------
/docs/source/graspnetAPI.utils.dexnet.grasping.meshpy.rst:
--------------------------------------------------------------------------------
1 | graspnetAPI.utils.dexnet.grasping.meshpy package
2 | ================================================
3 |
4 | Submodules
5 | ----------
6 |
7 | graspnetAPI.utils.dexnet.grasping.meshpy.mesh module
8 | ----------------------------------------------------
9 |
10 | .. automodule:: graspnetAPI.utils.dexnet.grasping.meshpy.mesh
11 | :members:
12 | :undoc-members:
13 | :show-inheritance:
14 |
15 | graspnetAPI.utils.dexnet.grasping.meshpy.obj\_file module
16 | ---------------------------------------------------------
17 |
18 | .. automodule:: graspnetAPI.utils.dexnet.grasping.meshpy.obj_file
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | graspnetAPI.utils.dexnet.grasping.meshpy.sdf module
24 | ---------------------------------------------------
25 |
26 | .. automodule:: graspnetAPI.utils.dexnet.grasping.meshpy.sdf
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 | graspnetAPI.utils.dexnet.grasping.meshpy.sdf\_file module
32 | ---------------------------------------------------------
33 |
34 | .. automodule:: graspnetAPI.utils.dexnet.grasping.meshpy.sdf_file
35 | :members:
36 | :undoc-members:
37 | :show-inheritance:
38 |
39 | graspnetAPI.utils.dexnet.grasping.meshpy.stable\_pose module
40 | ------------------------------------------------------------
41 |
42 | .. automodule:: graspnetAPI.utils.dexnet.grasping.meshpy.stable_pose
43 | :members:
44 | :undoc-members:
45 | :show-inheritance:
46 |
47 |
48 | Module contents
49 | ---------------
50 |
51 | .. automodule:: graspnetAPI.utils.dexnet.grasping.meshpy
52 | :members:
53 | :undoc-members:
54 | :show-inheritance:
55 |
--------------------------------------------------------------------------------
/docs/source/graspnetAPI.utils.dexnet.grasping.rst:
--------------------------------------------------------------------------------
1 | graspnetAPI.utils.dexnet.grasping package
2 | =========================================
3 |
4 | Subpackages
5 | -----------
6 |
7 | .. toctree::
8 | :maxdepth: 4
9 |
10 | graspnetAPI.utils.dexnet.grasping.meshpy
11 |
12 | Submodules
13 | ----------
14 |
15 | graspnetAPI.utils.dexnet.grasping.contacts module
16 | -------------------------------------------------
17 |
18 | .. automodule:: graspnetAPI.utils.dexnet.grasping.contacts
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | graspnetAPI.utils.dexnet.grasping.grasp module
24 | ----------------------------------------------
25 |
26 | .. automodule:: graspnetAPI.utils.dexnet.grasping.grasp
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 | graspnetAPI.utils.dexnet.grasping.grasp\_quality\_config module
32 | ---------------------------------------------------------------
33 |
34 | .. automodule:: graspnetAPI.utils.dexnet.grasping.grasp_quality_config
35 | :members:
36 | :undoc-members:
37 | :show-inheritance:
38 |
39 | graspnetAPI.utils.dexnet.grasping.grasp\_quality\_function module
40 | -----------------------------------------------------------------
41 |
42 | .. automodule:: graspnetAPI.utils.dexnet.grasping.grasp_quality_function
43 | :members:
44 | :undoc-members:
45 | :show-inheritance:
46 |
47 | graspnetAPI.utils.dexnet.grasping.graspable\_object module
48 | ----------------------------------------------------------
49 |
50 | .. automodule:: graspnetAPI.utils.dexnet.grasping.graspable_object
51 | :members:
52 | :undoc-members:
53 | :show-inheritance:
54 |
55 | graspnetAPI.utils.dexnet.grasping.quality module
56 | ------------------------------------------------
57 |
58 | .. automodule:: graspnetAPI.utils.dexnet.grasping.quality
59 | :members:
60 | :undoc-members:
61 | :show-inheritance:
62 |
63 |
64 | Module contents
65 | ---------------
66 |
67 | .. automodule:: graspnetAPI.utils.dexnet.grasping
68 | :members:
69 | :undoc-members:
70 | :show-inheritance:
71 |
--------------------------------------------------------------------------------
/docs/source/graspnetAPI.utils.dexnet.rst:
--------------------------------------------------------------------------------
1 | graspnetAPI.utils.dexnet package
2 | ================================
3 |
4 | Subpackages
5 | -----------
6 |
7 | .. toctree::
8 | :maxdepth: 4
9 |
10 | graspnetAPI.utils.dexnet.grasping
11 |
12 | Submodules
13 | ----------
14 |
15 | graspnetAPI.utils.dexnet.abstractstatic module
16 | ----------------------------------------------
17 |
18 | .. automodule:: graspnetAPI.utils.dexnet.abstractstatic
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | graspnetAPI.utils.dexnet.constants module
24 | -----------------------------------------
25 |
26 | .. automodule:: graspnetAPI.utils.dexnet.constants
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 |
32 | Module contents
33 | ---------------
34 |
35 | .. automodule:: graspnetAPI.utils.dexnet
36 | :members:
37 | :undoc-members:
38 | :show-inheritance:
39 |
--------------------------------------------------------------------------------
/docs/source/graspnetAPI.utils.rst:
--------------------------------------------------------------------------------
1 | graspnetAPI.utils package
2 | =========================
3 |
4 | Subpackages
5 | -----------
6 |
7 | .. toctree::
8 | :maxdepth: 4
9 |
10 | graspnetAPI.utils.dexnet
11 |
12 | Submodules
13 | ----------
14 |
15 | graspnetAPI.utils.config module
16 | -------------------------------
17 |
18 | .. automodule:: graspnetAPI.utils.config
19 | :members:
20 | :undoc-members:
21 | :show-inheritance:
22 |
23 | graspnetAPI.utils.eval\_utils module
24 | ------------------------------------
25 |
26 | .. automodule:: graspnetAPI.utils.eval_utils
27 | :members:
28 | :undoc-members:
29 | :show-inheritance:
30 |
31 | graspnetAPI.utils.pose module
32 | -----------------------------
33 |
34 | .. automodule:: graspnetAPI.utils.pose
35 | :members:
36 | :undoc-members:
37 | :show-inheritance:
38 |
39 | graspnetAPI.utils.rotation module
40 | ---------------------------------
41 |
42 | .. automodule:: graspnetAPI.utils.rotation
43 | :members:
44 | :undoc-members:
45 | :show-inheritance:
46 |
47 | graspnetAPI.utils.trans3d module
48 | --------------------------------
49 |
50 | .. automodule:: graspnetAPI.utils.trans3d
51 | :members:
52 | :undoc-members:
53 | :show-inheritance:
54 |
55 | graspnetAPI.utils.utils module
56 | ------------------------------
57 |
58 | .. automodule:: graspnetAPI.utils.utils
59 | :members:
60 | :undoc-members:
61 | :show-inheritance:
62 |
63 | graspnetAPI.utils.vis module
64 | ----------------------------
65 |
66 | .. automodule:: graspnetAPI.utils.vis
67 | :members:
68 | :undoc-members:
69 | :show-inheritance:
70 |
71 | graspnetAPI.utils.xmlhandler module
72 | -----------------------------------
73 |
74 | .. automodule:: graspnetAPI.utils.xmlhandler
75 | :members:
76 | :undoc-members:
77 | :show-inheritance:
78 |
79 |
80 | Module contents
81 | ---------------
82 |
83 | .. automodule:: graspnetAPI.utils
84 | :members:
85 | :undoc-members:
86 | :show-inheritance:
87 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. graspnetAPI documentation master file, created by
2 | sphinx-quickstart on Tue Nov 3 13:04:51 2020.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to graspnetAPI's documentation!
7 | =======================================
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 | :caption: Contents:
12 |
13 | about
14 | install
15 | grasp_format
16 |
17 | Examples
18 | =========
19 |
20 | .. toctree::
21 | :maxdepth: 1
22 | :caption: Examples
23 |
24 | example_check_data
25 | example_generate_rectangle_labels
26 | example_loadGrasp
27 | example_vis
28 | example_nms
29 | example_convert
30 | example_eval
31 |
32 |
33 | Python API
34 | ==========
35 |
36 | .. toctree::
37 | :maxdepth: 1
38 | :caption: Modules
39 |
40 | graspnetAPI
41 | graspnetAPI.utils
42 |
43 | Indices and tables
44 | ==================
45 |
46 | * :ref:`genindex`
47 | * :ref:`modindex`
48 | * :ref:`search`
--------------------------------------------------------------------------------
/docs/source/install.rst:
--------------------------------------------------------------------------------
1 | Installation
2 | ============
3 |
4 | .. note::
5 |
6 | Only Python 3 on Linux is supported.
7 |
8 | Prerequisites
9 | ^^^^^^^^^^^^^
10 |
11 | Python versions below 3.6 are not tested.
12 |
13 | Dataset
14 | ^^^^^^^
15 |
16 | Download
17 | --------
18 |
19 | Download the dataset at https://graspnet.net/datasets.html
20 |
21 | Unzip
22 | -----
23 |
24 | Unzip the files as shown in https://graspnet.net/datasets.html.
25 |
26 | Rectangle Grasp Labels
27 | ----------------------
28 | Rectangle grasp labels are optional; you only need them if you want labels in this format.
29 | You can either generate the labels yourself or download the file_.
30 |
31 | If you want to generate the labels by yourself, you may refer to :ref:`example_generate_rectangle_labels`.
32 |
33 | .. note::
34 |
35 | Generating rectangle grasp labels may take a long time.
36 |
37 | After generating or unzipping the labels, you need to run copy_rect_labels.py_ to copy the rectangle grasp labels to the corresponding folders.
38 |
39 | .. _copy_rect_labels.py: https://github.com/graspnet/graspnetAPI/blob/master/copy_rect_labels.py
40 |
41 | .. _file: https://graspnet.net/datasets.html
42 |
43 | Dexnet Model Cache
44 | ------------------
45 |
46 | The dexnet model cache is optional; without it, the evaluation will be much slower (about 10x).
47 | You can either download the file or generate it yourself by running gen_pickle_dexmodel.py_ (recommended).
48 |
49 | .. _gen_pickle_dexmodel.py: https://github.com/graspnet/graspnetAPI/blob/master/gen_pickle_dexmodel.py
50 |
51 | Install API
52 | ^^^^^^^^^^^
53 | You may install using pip::
54 |
55 | pip install graspnetAPI
56 |
57 | You can also install from source::
58 |
59 | git clone https://github.com/graspnet/graspnetAPI.git
60 | cd graspnetAPI/
61 | pip install .
62 |
--------------------------------------------------------------------------------
/docs/source/modules.rst:
--------------------------------------------------------------------------------
1 | graspnetAPI
2 | ===========
3 |
4 | .. toctree::
5 | :maxdepth: 4
6 |
7 | graspnetAPI
8 |
--------------------------------------------------------------------------------
/examples/exam_check_data.py:
--------------------------------------------------------------------------------
1 | __author__ = 'mhgou'
2 | __version__ = '1.0'
3 |
4 | from graspnetAPI import GraspNet
5 |
6 | # GraspNetAPI example for checking the data completeness.
7 | # change the graspnet_root path
8 |
9 | if __name__ == '__main__':
10 |
11 | ####################################################################
12 | graspnet_root = '/home/gmh/graspnet' ### ROOT PATH FOR GRASPNET ###
13 | ####################################################################
14 |
15 | g = GraspNet(graspnet_root, 'kinect', 'all')
16 | if g.checkDataCompleteness():
17 | print('Check for kinect passed')
18 |
19 |
20 | g = GraspNet(graspnet_root, 'realsense', 'all')
21 | if g.checkDataCompleteness():
22 | print('Check for realsense passed')
23 |
--------------------------------------------------------------------------------
/examples/exam_convert.py:
--------------------------------------------------------------------------------
1 | __author__ = 'mhgou'
2 | __version__ = '1.0'
3 |
4 | from graspnetAPI import GraspNet
5 | import cv2
6 | import open3d as o3d
7 |
8 | # GraspNetAPI example for converting grasp labels between rectangle and 6d formats.
9 | # change the graspnet_root path
10 |
11 | camera = 'kinect'
12 | sceneId = 5
13 | annId = 3
14 |
15 | ####################################################################
16 | graspnet_root = '/home/gmh/graspnet' # ROOT PATH FOR GRASPNET
17 | ####################################################################
18 |
19 | g = GraspNet(graspnet_root, camera = camera, split = 'all')
20 |
21 | bgr = g.loadBGR(sceneId = sceneId, camera = camera, annId = annId)
22 | depth = g.loadDepth(sceneId = sceneId, camera = camera, annId = annId)
23 |
24 | # Rect to 6d
25 | rect_grasp_group = g.loadGrasp(sceneId = sceneId, camera = camera, annId = annId, fric_coef_thresh = 0.2, format = 'rect')
26 |
27 | # RectGrasp to Grasp
28 | rect_grasp = rect_grasp_group.random_sample(1)[0]
29 | img = rect_grasp.to_opencv_image(bgr)
30 |
31 | cv2.imshow('rect grasp', img)
32 | cv2.waitKey(0)
33 | cv2.destroyAllWindows()
34 |
35 | grasp = rect_grasp.to_grasp(camera, depth)
36 | if grasp is not None:
37 | geometry = []
38 | geometry.append(g.loadScenePointCloud(sceneId, camera, annId))
39 | geometry.append(grasp.to_open3d_geometry())
40 | o3d.visualization.draw_geometries(geometry)
41 | else:
42 | print('No result because the depth is invalid, please try again!')
43 |
44 | # RectGraspGroup to GraspGroup
45 | sample_rect_grasp_group = rect_grasp_group.random_sample(20)
46 | img = sample_rect_grasp_group.to_opencv_image(bgr)
47 | cv2.imshow('rect grasp', img)
48 | cv2.waitKey(0)
49 | cv2.destroyAllWindows()
50 |
51 | grasp_group = sample_rect_grasp_group.to_grasp_group(camera, depth)
52 | if grasp_group is not None:
53 | geometry = []
54 | geometry.append(g.loadScenePointCloud(sceneId, camera, annId))
55 | geometry += grasp_group.to_open3d_geometry_list()
56 | o3d.visualization.draw_geometries(geometry)
57 |
58 | # 6d to Rect
59 | _6d_grasp_group = g.loadGrasp(sceneId = sceneId, camera = camera, annId = annId, fric_coef_thresh = 0.2, format = '6d')
60 |
61 | # Grasp to RectGrasp conversion is not applicable as only very few 6d grasp can be converted to rectangle grasp.
62 |
63 | # GraspGroup to RectGraspGroup
64 | sample_6d_grasp_group = _6d_grasp_group.random_sample(20)
65 | geometry = []
66 | geometry.append(g.loadScenePointCloud(sceneId, camera, annId))
67 | geometry += sample_6d_grasp_group.to_open3d_geometry_list()
68 | o3d.visualization.draw_geometries(geometry)
69 |
70 | rect_grasp_group = _6d_grasp_group.to_rect_grasp_group(camera)
71 | img = rect_grasp_group.to_opencv_image(bgr)
72 |
73 | cv2.imshow('rect grasps', img)
74 | cv2.waitKey(0)
75 | cv2.destroyAllWindows()
76 |
--------------------------------------------------------------------------------
/examples/exam_eval.py:
--------------------------------------------------------------------------------
1 | __author__ = 'mhgou'
2 | __version__ = '1.0'
3 |
4 | # GraspNetAPI example for evaluating grasps for a scene.
5 | # change the graspnet_root path
6 | import numpy as np
7 | from graspnetAPI import GraspNetEval
8 |
9 | ####################################################################
10 | graspnet_root = '/home/gmh/graspnet' # ROOT PATH FOR GRASPNET
11 | dump_folder = '/home/gmh/git/rgbd_graspnet/dump_affordance_iounan/' # ROOT PATH FOR DUMP
12 | ####################################################################
13 |
14 | sceneId = 121
15 | camera = 'kinect'
16 | ge_k = GraspNetEval(root = graspnet_root, camera = 'kinect', split = 'test')
17 | ge_r = GraspNetEval(root = graspnet_root, camera = 'realsense', split = 'test')
18 |
19 | # eval a single scene
20 | print('Evaluating scene:{}, camera:{}'.format(sceneId, camera))
21 | acc = ge_k.eval_scene(scene_id = sceneId, dump_folder = dump_folder)
22 | np_acc = np.array(acc)
23 | print('mean accuracy:{}'.format(np.mean(np_acc)))
24 |
25 | # # eval all data for kinect
26 | # print('Evaluating kinect')
27 | # res, ap = ge_k.eval_all(dump_folder, proc = 24)
28 |
29 | # # eval 'seen' split for realsense
30 | # print('Evaluating realsense')
31 | # res, ap = ge_r.eval_seen(dump_folder, proc = 24)
32 |
--------------------------------------------------------------------------------
/examples/exam_generate_rectangle_grasp.py:
--------------------------------------------------------------------------------
1 | __author__ = 'mhgou'
2 | __version__ = '1.0'
3 |
4 | # GraspNetAPI example for generating rectangle grasp from 6d grasp.
5 | # change the graspnet_root path and NUM_PROCESS
6 |
7 | from graspnetAPI import GraspNet
8 | from graspnetAPI.graspnet import TOTAL_SCENE_NUM
9 | import os
10 | import numpy as np
11 | from tqdm import tqdm
12 |
13 | ######################################################################
14 | NUM_PROCESS = 24 # change NUM_PROCESS to the number of cores to use. #
15 | ######################################################################
16 |
17 | def generate_scene_rectangle_grasp(sceneId, dump_folder, camera):
18 | g = GraspNet(graspnet_root, camera=camera, split='all')
19 | objIds = g.getObjIds(sceneIds = sceneId)
20 | grasp_labels = g.loadGraspLabels(objIds)
21 | collision_labels = g.loadCollisionLabels(sceneIds = sceneId)
22 | scene_dir = os.path.join(dump_folder,'scene_%04d' % sceneId)
23 | if not os.path.exists(scene_dir):
24 | os.mkdir(scene_dir)
25 | camera_dir = os.path.join(scene_dir, camera)
26 | if not os.path.exists(camera_dir):
27 | os.mkdir(camera_dir)
28 | for annId in tqdm(range(256), 'Scene:{}, Camera:{}'.format(sceneId, camera)):
29 | _6d_grasp = g.loadGrasp(sceneId = sceneId, annId = annId, format = '6d', camera = camera, grasp_labels = grasp_labels, collision_labels = collision_labels, fric_coef_thresh = 1.0)
30 | rect_grasp_group = _6d_grasp.to_rect_grasp_group(camera)
31 | rect_grasp_group.save_npy(os.path.join(camera_dir, '%04d.npy' % annId))
32 |
33 | if __name__ == '__main__':
34 | ####################################################################
35 | graspnet_root = '/home/minghao/graspnet' # ROOT PATH FOR GRASPNET ##
36 | ####################################################################
37 |
38 | dump_folder = 'rect_labels'
39 | if not os.path.exists(dump_folder):
40 | os.mkdir(dump_folder)
41 |
42 | if NUM_PROCESS > 1:
43 | from multiprocessing import Pool
44 |         pool = Pool(NUM_PROCESS)
45 | for camera in ['realsense', 'kinect']:
46 | for sceneId in range(120):
47 | pool.apply_async(func = generate_scene_rectangle_grasp, args = (sceneId, dump_folder, camera))
48 | pool.close()
49 | pool.join()
50 |
51 |     else:
52 |         for camera in ['realsense', 'kinect']:
53 |             for sceneId in range(120):
54 |                 generate_scene_rectangle_grasp(sceneId, dump_folder, camera)
55 |
--------------------------------------------------------------------------------
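
Each dumped file holds one annotation's rectangle grasps as a plain NumPy array, so a quick sanity check only needs np.load. A minimal sketch, with the path assumed to follow the layout written above (rect_labels/scene_xxxx/<camera>/<annId>.npy):

    import numpy as np
    rect_array = np.load('rect_labels/scene_0000/kinect/0000.npy')
    # one row per rectangle grasp
    print('loaded {} rectangle grasps, {} values per grasp'.format(*rect_array.shape))
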
/examples/exam_grasp_format.py:
--------------------------------------------------------------------------------
1 | __author__ = 'mhgou'
2 | __version__ = '1.0'
3 |
4 | from graspnetAPI import GraspNet, Grasp, GraspGroup
5 | import open3d as o3d
6 | import cv2
7 | import numpy as np
8 |
9 | # GraspNetAPI example for the grasp format classes (Grasp, GraspGroup, RectGrasp and RectGraspGroup).
10 | # change the graspnet_root path
11 |
12 | ####################################################################
13 | graspnet_root = '/disk1/graspnet' # ROOT PATH FOR GRASPNET
14 | ####################################################################
15 |
16 | sceneId = 1
17 | annId = 3
18 |
19 | # initialize a GraspNet instance
20 | g = GraspNet(graspnet_root, camera='kinect', split='train')
21 |
22 | # load grasps of scene 1 with annotation id = 3, camera = kinect and fric_coef_thresh = 0.2
23 | _6d_grasp = g.loadGrasp(sceneId = sceneId, annId = annId, format = '6d', camera = 'kinect', fric_coef_thresh = 0.2)
24 | print('6d grasp:\n{}'.format(_6d_grasp))
25 |
26 | # _6d_grasp is a GraspGroup instance defined in grasp.py
27 | print('_6d_grasp:\n{}'.format(_6d_grasp))
28 |
29 | # index
30 | grasp = _6d_grasp[0]
31 | print('_6d_grasp[0](grasp):\n{}'.format(grasp))
32 |
33 | # slice
34 | print('_6d_grasp[0:2]:\n{}'.format(_6d_grasp[0:2]))
35 | print('_6d_grasp[[0,1]]:\n{}'.format(_6d_grasp[[0,1]]))
36 |
37 | # grasp is a Grasp instance defined in grasp.py
38 | # access and set properties
39 | print('grasp.translation={}'.format(grasp.translation))
40 | grasp.translation = np.array([1.0, 2.0, 3.0])
41 | print('After modification, grasp.translation={}'.format(grasp.translation))
42 | print('grasp.rotation_matrix={}'.format(grasp.rotation_matrix))
43 | grasp.rotation_matrix = np.eye(3).reshape((9))
44 | print('After modification, grasp.rotation_matrix={}'.format(grasp.rotation_matrix))
45 | print('grasp.width={}, height:{}, depth:{}, score:{}'.format(grasp.width, grasp.height, grasp.depth, grasp.score))
46 | print('More operation on Grasp and GraspGroup can be seen in the API document')
47 |
48 |
49 | # load rectangle grasps of scene 1 with annotation id = 3, camera = realsense and fric_coef_thresh = 0.2
50 | rect_grasp_group = g.loadGrasp(sceneId = sceneId, annId = annId, format = 'rect', camera = 'realsense', fric_coef_thresh = 0.2)
51 | print('rectangle grasp group:\n{}'.format(rect_grasp_group))
52 |
53 | # rect_grasp_group is a RectGraspGroup instance defined in grasp.py
54 | print('rect_grasp_group:\n{}'.format(rect_grasp_group))
55 |
56 | # index
57 | rect_grasp = rect_grasp_group[0]
58 | print('rect_grasp_group[0](rect_grasp):\n{}'.format(rect_grasp))
59 |
60 | # slice
61 | print('rect_grasp_group[0:2]:\n{}'.format(rect_grasp_group[0:2]))
62 | print('rect_grasp_group[[0,1]]:\n{}'.format(rect_grasp_group[[0,1]]))
63 |
64 | # properties of rect_grasp
65 | print('rect_grasp.center_point:{}, open_point:{}, height:{}, score:{}'.format(rect_grasp.center_point, rect_grasp.open_point, rect_grasp.height, rect_grasp.score))
66 |
67 | # transform grasp
68 | g = Grasp() # simple Grasp
69 | frame = o3d.geometry.TriangleMesh.create_coordinate_frame(0.1)
70 |
71 | # Grasp before transformation
72 | o3d.visualization.draw_geometries([g.to_open3d_geometry(), frame])
73 | g.translation = np.array((0,0,0.01))
74 |
75 | # setup a transformation matrix
76 | T = np.eye(4)
77 | T[:3,3] = np.array((0.01, 0.02, 0.03))
78 | T[:3,:3] = np.array([[0,0,1.0],[1,0,0],[0,1,0]])
79 | g.transform(T)
80 |
81 | # Grasp after transformation
82 | o3d.visualization.draw_geometries([g.to_open3d_geometry(), frame])
83 |
84 | g1 = Grasp()
85 | gg = GraspGroup()
86 | gg.add(g)
87 | gg.add(g1)
88 |
89 | # GraspGroup before transformation
90 | o3d.visualization.draw_geometries([*gg.to_open3d_geometry_list(), frame])
91 |
92 | gg.transform(T)
93 |
94 | # GraspGroup after transformation
95 | o3d.visualization.draw_geometries([*gg.to_open3d_geometry_list(), frame])
--------------------------------------------------------------------------------
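
The 4x4 matrix T used above is a standard homogeneous transform: the upper-left 3x3 block rotates and the last column translates. A minimal NumPy sketch of the update that Grasp.transform is expected to apply under this convention (an illustration, not the library code itself):

    import numpy as np
    R, t = np.eye(3), np.array([0.0, 0.0, 0.01])        # a grasp pose (rotation, translation)
    T = np.eye(4)
    T[:3, 3] = (0.01, 0.02, 0.03)
    T[:3, :3] = np.array([[0, 0, 1.0], [1, 0, 0], [0, 1, 0]])
    R_new = T[:3, :3] @ R                               # rotate the grasp frame
    t_new = T[:3, :3] @ t + T[:3, 3]                    # rotate, then translate the grasp center
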
/examples/exam_loadGrasp.py:
--------------------------------------------------------------------------------
1 | __author__ = 'mhgou'
2 | __version__ = '1.0'
3 |
4 | from graspnetAPI import GraspNet
5 | import open3d as o3d
6 | import cv2
7 |
8 | # GraspNetAPI example for loading grasps for a scene.
9 | # change the graspnet_root path
10 |
11 | ####################################################################
12 | graspnet_root = '/home/gmh/graspnet' # ROOT PATH FOR GRASPNET
13 | ####################################################################
14 |
15 | sceneId = 1
16 | annId = 3
17 |
18 | # initialize a GraspNet instance
19 | g = GraspNet(graspnet_root, camera='kinect', split='train')
20 |
21 | # load grasps of scene 1 with annotation id = 3, camera = kinect and fric_coef_thresh = 0.2
22 | _6d_grasp = g.loadGrasp(sceneId = sceneId, annId = annId, format = '6d', camera = 'kinect', fric_coef_thresh = 0.2)
23 | print('6d grasp:\n{}'.format(_6d_grasp))
24 |
25 | # visualize the grasps using open3d
26 | geometries = []
27 | geometries.append(g.loadScenePointCloud(sceneId = sceneId, annId = annId, camera = 'kinect'))
28 | geometries += _6d_grasp.random_sample(numGrasp = 20).to_open3d_geometry_list()
29 | o3d.visualization.draw_geometries(geometries)
30 |
31 | # load rectangle grasps of scene 1 with annotation id = 3, camera = realsense and fric_coef_thresh = 0.2
32 | rect_grasp = g.loadGrasp(sceneId = sceneId, annId = annId, format = 'rect', camera = 'realsense', fric_coef_thresh = 0.2)
33 | print('rectangle grasp:\n{}'.format(rect_grasp))
34 |
35 | # visualize the rectangle grasps using OpenCV
36 | bgr = g.loadBGR(sceneId = sceneId, annId = annId, camera = 'realsense')
37 | img = rect_grasp.to_opencv_image(bgr, numGrasp = 20)
38 | cv2.imshow('rectangle grasps', img)
39 | cv2.waitKey(0)
40 | cv2.destroyAllWindows()
--------------------------------------------------------------------------------
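
Instead of random sampling, the highest-scored grasps can be selected explicitly: GraspGroup exposes a scores array and supports indexing with a list of indices (as shown in exam_grasp_format.py). A minimal sketch, continuing from the example above:

    import numpy as np
    top_k = 20
    order = np.argsort(-_6d_grasp.scores)                 # descending by score
    top_grasps = _6d_grasp[order[:top_k].tolist()]
    geometries = [g.loadScenePointCloud(sceneId = sceneId, annId = annId, camera = 'kinect')]
    geometries += top_grasps.to_open3d_geometry_list()
    o3d.visualization.draw_geometries(geometries)
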
/examples/exam_nms.py:
--------------------------------------------------------------------------------
1 | __author__ = 'mhgou'
2 | __version__ = '1.0'
3 |
4 | # GraspNetAPI example for grasp nms.
5 | # change the graspnet_root path
6 |
7 | ####################################################################
8 | graspnet_root = '/home/gmh/graspnet' # ROOT PATH FOR GRASPNET
9 | ####################################################################
10 |
11 | sceneId = 1
12 | annId = 3
13 |
14 | from graspnetAPI import GraspNet
15 | import open3d as o3d
16 | import cv2
17 |
18 | # initialize a GraspNet instance
19 | g = GraspNet(graspnet_root, camera='kinect', split='train')
20 |
21 | # load grasps of scene 1 with annotation id = 3, camera = kinect and fric_coef_thresh = 0.2
22 | _6d_grasp = g.loadGrasp(sceneId = sceneId, annId = annId, format = '6d', camera = 'kinect', fric_coef_thresh = 0.2)
23 | print('6d grasp:\n{}'.format(_6d_grasp))
24 |
25 | # visualize the grasps using open3d
26 | geometries = []
27 | geometries.append(g.loadScenePointCloud(sceneId = sceneId, annId = annId, camera = 'kinect'))
28 | geometries += _6d_grasp.random_sample(numGrasp = 1000).to_open3d_geometry_list()
29 | o3d.visualization.draw_geometries(geometries)
30 |
31 | nms_grasp = _6d_grasp.nms(translation_thresh = 0.1, rotation_thresh = 30 / 180.0 * 3.1416)
32 | print('grasp after nms:\n{}'.format(nms_grasp))
33 |
34 | # visualize the grasps using open3d
35 | geometries = []
36 | geometries.append(g.loadScenePointCloud(sceneId = sceneId, annId = annId, camera = 'kinect'))
37 | geometries += nms_grasp.to_open3d_geometry_list()
38 | o3d.visualization.draw_geometries(geometries)
--------------------------------------------------------------------------------
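
nms greedily keeps the highest-scored grasp and suppresses any remaining grasp whose pose lies within both thresholds of an already kept one (here 0.1 m and 30 degrees). A rough illustrative sketch of that idea in plain NumPy (not the library's implementation):

    import numpy as np

    def greedy_grasp_nms(translations, rotations, scores, trans_thresh=0.1, rot_thresh=30 / 180.0 * np.pi):
        """Keep grasps in score order, dropping any grasp too close to an already kept one."""
        order = np.argsort(-scores)
        kept = []
        for i in order:
            close = False
            for j in kept:
                d = np.linalg.norm(translations[i] - translations[j])
                # rotation distance: angle of R_i^T R_j
                cos_angle = (np.trace(rotations[i].T @ rotations[j]) - 1.0) / 2.0
                angle = np.arccos(np.clip(cos_angle, -1.0, 1.0))
                if d < trans_thresh and angle < rot_thresh:
                    close = True
                    break
            if not close:
                kept.append(i)
        return kept
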
/examples/exam_vis.py:
--------------------------------------------------------------------------------
1 | __author__ = 'mhgou'
2 | __version__ = '1.0'
3 |
4 | # GraspNetAPI example for visualization.
5 | # change the graspnet_root path
6 |
7 | ####################################################################
8 | graspnet_root = '/home/gmh/graspnet' # ROOT PATH FOR GRASPNET
9 | ####################################################################
10 |
11 | from graspnetAPI import GraspNet
12 |
13 | # initialize a GraspNet instance
14 | g = GraspNet(graspnet_root, camera='kinect', split='train')
15 |
16 | # show object grasps
17 | g.showObjGrasp(objIds = 0, show=True)
18 |
19 | # show 6d poses
20 | g.show6DPose(sceneIds = 0, show = True)
21 |
22 | # show scene rectangle grasps
23 | g.showSceneGrasp(sceneId = 0, camera = 'realsense', annId = 0, format = 'rect', numGrasp = 20)
24 |
25 | # show scene 6d grasps (this may take several minutes)
26 | g.showSceneGrasp(sceneId = 4, camera = 'kinect', annId = 2, format = '6d')
--------------------------------------------------------------------------------
/gen_pickle_dexmodel.py:
--------------------------------------------------------------------------------
1 | __author__ = 'mhgou'
2 |
3 | from graspnetAPI.utils.eval_utils import load_dexnet_model
4 | from tqdm import tqdm
5 | import pickle
6 | import os
7 |
8 | ##### Change the root to your path #####
9 | graspnet_root = '/home/gmh/graspnet'
10 |
11 | ##### Do NOT change this folder name #####
12 | dex_folder = 'dex_models'
13 | if not os.path.exists(dex_folder):
14 | os.makedirs(dex_folder)
15 |
16 | model_dir = os.path.join(graspnet_root, 'models')
17 | for obj_id in tqdm(range(88), 'dump models'):
18 | dex_model = load_dexnet_model(os.path.join(model_dir, '%03d' % obj_id, 'textured'))
19 | with open(os.path.join(dex_folder, '%03d.pkl' % obj_id), 'wb') as f:
20 | pickle.dump(dex_model, f)
21 |
22 |
--------------------------------------------------------------------------------
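
The evaluation code looks for these pickles under <graspnet_root>/dex_models (see get_scene_models in graspnet_eval.py), so the generated dex_models folder should be moved or copied there. A minimal sketch for checking one cached model, assuming the folder is already in place:

    import os, pickle
    with open(os.path.join(graspnet_root, 'dex_models', '000.pkl'), 'rb') as f:
        dex_model = pickle.load(f)
    print(type(dex_model))
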
/grasp_definition.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/grasp_definition.png
--------------------------------------------------------------------------------
/grasp_definition.vsdx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/grasp_definition.vsdx
--------------------------------------------------------------------------------
/graspnetAPI/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'mhgou'
2 | __version__ = '1.2.11'
3 |
4 | from .graspnet import GraspNet
5 | from .graspnet_eval import GraspNetEval
6 | from .grasp import Grasp, GraspGroup, RectGrasp, RectGraspGroup
7 |
--------------------------------------------------------------------------------
/graspnetAPI/graspnet_eval.py:
--------------------------------------------------------------------------------
1 | __author__ = 'mhgou, cxwang and hsfang'
2 |
3 | import numpy as np
4 | import os
5 | import time
6 | import pickle
7 | import open3d as o3d
8 |
9 | from .graspnet import GraspNet
10 | from .grasp import GraspGroup
11 | from .utils.config import get_config
12 | from .utils.eval_utils import get_scene_name, create_table_points, parse_posevector, load_dexnet_model, transform_points, compute_point_distance, compute_closest_points, voxel_sample_points, topk_grasps, get_grasp_score, collision_detection, eval_grasp
13 | from .utils.xmlhandler import xmlReader
14 | from .utils.utils import generate_scene_model
15 |
16 | class GraspNetEval(GraspNet):
17 | '''
18 | Class for evaluation on GraspNet dataset.
19 |
20 | **Input:**
21 |
22 | - root: string of root path for the dataset.
23 |
24 | - camera: string of type of the camera.
25 |
26 |     - split: string of the data split.
27 | '''
28 | def __init__(self, root, camera, split = 'test'):
29 | super(GraspNetEval, self).__init__(root, camera, split)
30 |
31 | def get_scene_models(self, scene_id, ann_id):
32 | '''
33 |         return models in the model (object) coordinate frame
34 | '''
35 | model_dir = os.path.join(self.root, 'models')
36 | # print('Scene {}, {}'.format(scene_id, camera))
37 | scene_reader = xmlReader(os.path.join(self.root, 'scenes', get_scene_name(scene_id), self.camera, 'annotations', '%04d.xml' % (ann_id,)))
38 | posevectors = scene_reader.getposevectorlist()
39 | obj_list = []
40 | model_list = []
41 | dexmodel_list = []
42 | for posevector in posevectors:
43 | obj_idx, _ = parse_posevector(posevector)
44 | obj_list.append(obj_idx)
45 | for obj_idx in obj_list:
46 | model = o3d.io.read_point_cloud(os.path.join(model_dir, '%03d' % obj_idx, 'nontextured.ply'))
47 | dex_cache_path = os.path.join(self.root, 'dex_models', '%03d.pkl' % obj_idx)
48 | if os.path.exists(dex_cache_path):
49 | with open(dex_cache_path, 'rb') as f:
50 | dexmodel = pickle.load(f)
51 | else:
52 | dexmodel = load_dexnet_model(os.path.join(model_dir, '%03d' % obj_idx, 'textured'))
53 | points = np.array(model.points)
54 | model_list.append(points)
55 | dexmodel_list.append(dexmodel)
56 | return model_list, dexmodel_list, obj_list
57 |
58 |
59 | def get_model_poses(self, scene_id, ann_id):
60 | '''
61 | **Input:**
62 |
63 |         - scene_id: int of the scene index.
64 |
65 | - ann_id: int of the annotation index.
66 |
67 | **Output:**
68 |
69 | - obj_list: list of int of object index.
70 |
71 | - pose_list: list of 4x4 matrices of object poses.
72 |
73 | - camera_pose: 4x4 matrix of the camera pose relative to the first frame.
74 |
75 |         - align_mat: 4x4 matrix of the first camera frame (cam0) relative to the table.
76 | '''
77 | scene_dir = os.path.join(self.root, 'scenes')
78 | camera_poses_path = os.path.join(self.root, 'scenes', get_scene_name(scene_id), self.camera, 'camera_poses.npy')
79 | camera_poses = np.load(camera_poses_path)
80 | camera_pose = camera_poses[ann_id]
81 | align_mat_path = os.path.join(self.root, 'scenes', get_scene_name(scene_id), self.camera, 'cam0_wrt_table.npy')
82 | align_mat = np.load(align_mat_path)
83 | # print('Scene {}, {}'.format(scene_id, camera))
84 | scene_reader = xmlReader(os.path.join(scene_dir, get_scene_name(scene_id), self.camera, 'annotations', '%04d.xml'% (ann_id,)))
85 | posevectors = scene_reader.getposevectorlist()
86 | obj_list = []
87 | pose_list = []
88 | for posevector in posevectors:
89 | obj_idx, mat = parse_posevector(posevector)
90 | obj_list.append(obj_idx)
91 | pose_list.append(mat)
92 | return obj_list, pose_list, camera_pose, align_mat
93 |
94 |     def eval_scene(self, scene_id, dump_folder, TOP_K = 50, return_list = False, vis = False, max_width = 0.1):
95 | '''
96 | **Input:**
97 |
98 | - scene_id: int of the scene index.
99 |
100 | - dump_folder: string of the folder that saves the dumped npy files.
101 |
102 |         - TOP_K: int of the number of top-ranked grasps to evaluate.
103 |
104 |         - return_list: bool of whether to return the result list.
105 |
106 |         - vis: bool of whether to show the result.
107 |
108 |         - max_width: float of the maximum gripper width used in evaluation.
109 |
110 | **Output:**
111 |
112 |         - scene_accuracy: np.array of shape (256, TOP_K, 6) of the accuracy tensor (annotations x top-k ranks x friction coefficients).
113 | '''
114 | config = get_config()
115 | table = create_table_points(1.0, 1.0, 0.05, dx=-0.5, dy=-0.5, dz=-0.05, grid_size=0.008)
116 |
117 | list_coe_of_friction = [0.2,0.4,0.6,0.8,1.0,1.2]
118 |
119 | model_list, dexmodel_list, _ = self.get_scene_models(scene_id, ann_id=0)
120 |
121 | model_sampled_list = list()
122 | for model in model_list:
123 | model_sampled = voxel_sample_points(model, 0.008)
124 | model_sampled_list.append(model_sampled)
125 |
126 | scene_accuracy = []
127 | grasp_list_list = []
128 | score_list_list = []
129 | collision_list_list = []
130 |
131 | for ann_id in range(256):
132 | grasp_group = GraspGroup().from_npy(os.path.join(dump_folder,get_scene_name(scene_id), self.camera, '%04d.npy' % (ann_id,)))
133 | _, pose_list, camera_pose, align_mat = self.get_model_poses(scene_id, ann_id)
134 | table_trans = transform_points(table, np.linalg.inv(np.matmul(align_mat, camera_pose)))
135 |
136 | # clip width to [0,max_width]
137 | gg_array = grasp_group.grasp_group_array
138 | min_width_mask = (gg_array[:,1] < 0)
139 | max_width_mask = (gg_array[:,1] > max_width)
140 | gg_array[min_width_mask,1] = 0
141 | gg_array[max_width_mask,1] = max_width
142 | grasp_group.grasp_group_array = gg_array
143 |
144 | grasp_list, score_list, collision_mask_list = eval_grasp(grasp_group, model_sampled_list, dexmodel_list, pose_list, config, table=table_trans, voxel_size=0.008, TOP_K = TOP_K)
145 |
146 | # remove empty
147 | grasp_list = [x for x in grasp_list if len(x) != 0]
148 | score_list = [x for x in score_list if len(x) != 0]
149 | collision_mask_list = [x for x in collision_mask_list if len(x)!=0]
150 |
151 | if len(grasp_list) == 0:
152 | grasp_accuracy = np.zeros((TOP_K,len(list_coe_of_friction)))
153 | scene_accuracy.append(grasp_accuracy)
154 | grasp_list_list.append([])
155 | score_list_list.append([])
156 | collision_list_list.append([])
157 | print('\rMean Accuracy for scene:{} ann:{}='.format(scene_id, ann_id),np.mean(grasp_accuracy[:,:]), end='')
158 | continue
159 |
160 | # concat into scene level
161 | grasp_list, score_list, collision_mask_list = np.concatenate(grasp_list), np.concatenate(score_list), np.concatenate(collision_mask_list)
162 |
163 | if vis:
164 | t = o3d.geometry.PointCloud()
165 | t.points = o3d.utility.Vector3dVector(table_trans)
166 | model_list = generate_scene_model(self.root, 'scene_%04d' % scene_id , ann_id, return_poses=False, align=False, camera=self.camera)
167 | import copy
168 | gg = GraspGroup(copy.deepcopy(grasp_list))
169 | scores = np.array(score_list)
170 | scores = scores / 2 + 0.5 # -1 -> 0, 0 -> 0.5, 1 -> 1
171 | scores[collision_mask_list] = 0.3
172 | gg.scores = scores
173 | gg.widths = 0.1 * np.ones((len(gg)), dtype = np.float32)
174 | grasps_geometry = gg.to_open3d_geometry_list()
175 | pcd = self.loadScenePointCloud(scene_id, self.camera, ann_id)
176 |
177 | o3d.visualization.draw_geometries([pcd, *grasps_geometry])
178 | o3d.visualization.draw_geometries([pcd, *grasps_geometry, *model_list])
179 | o3d.visualization.draw_geometries([*grasps_geometry, *model_list, t])
180 |
181 | # sort in scene level
182 | grasp_confidence = grasp_list[:,0]
183 | indices = np.argsort(-grasp_confidence)
184 | grasp_list, score_list, collision_mask_list = grasp_list[indices], score_list[indices], collision_mask_list[indices]
185 |
186 | grasp_list_list.append(grasp_list)
187 | score_list_list.append(score_list)
188 | collision_list_list.append(collision_mask_list)
189 |
190 | #calculate AP
191 | grasp_accuracy = np.zeros((TOP_K,len(list_coe_of_friction)))
192 | for fric_idx, fric in enumerate(list_coe_of_friction):
193 | for k in range(0,TOP_K):
194 | if k+1 > len(score_list):
195 | grasp_accuracy[k,fric_idx] = np.sum(((score_list<=fric) & (score_list>0)).astype(int))/(k+1)
196 | else:
197 | grasp_accuracy[k,fric_idx] = np.sum(((score_list[0:k+1]<=fric) & (score_list[0:k+1]>0)).astype(int))/(k+1)
198 |
199 | print('\rMean Accuracy for scene:%04d ann:%04d = %.3f' % (scene_id, ann_id, 100.0 * np.mean(grasp_accuracy[:,:])), end='', flush=True)
200 | scene_accuracy.append(grasp_accuracy)
201 | if not return_list:
202 | return scene_accuracy
203 | else:
204 | return scene_accuracy, grasp_list_list, score_list_list, collision_list_list
205 |
206 | def parallel_eval_scenes(self, scene_ids, dump_folder, proc = 2):
207 | '''
208 | **Input:**
209 |
210 | - scene_ids: list of int of scene index.
211 |
212 | - dump_folder: string of the folder that saves the npy files.
213 |
214 | - proc: int of the number of processes to use to evaluate.
215 |
216 | **Output:**
217 |
218 | - scene_acc_list: list of the scene accuracy.
219 | '''
220 | from multiprocessing import Pool
221 | p = Pool(processes = proc)
222 | res_list = []
223 | for scene_id in scene_ids:
224 | res_list.append(p.apply_async(self.eval_scene, (scene_id, dump_folder)))
225 | p.close()
226 | p.join()
227 | scene_acc_list = []
228 | for res in res_list:
229 | scene_acc_list.append(res.get())
230 | return scene_acc_list
231 |
232 | def eval_seen(self, dump_folder, proc = 2):
233 | '''
234 | **Input:**
235 |
236 | - dump_folder: string of the folder that saves the npy files.
237 |
238 | - proc: int of the number of processes to use to evaluate.
239 |
240 | **Output:**
241 |
242 | - res: numpy array of the detailed accuracy.
243 |
244 | - ap: float of the AP for seen split.
245 | '''
246 | res = np.array(self.parallel_eval_scenes(scene_ids = list(range(100, 130)), dump_folder = dump_folder, proc = proc))
247 | ap = np.mean(res)
248 | print('\nEvaluation Result:\n----------\n{}, AP Seen={}'.format(self.camera, ap))
249 | return res, ap
250 |
251 | def eval_similar(self, dump_folder, proc = 2):
252 | '''
253 | **Input:**
254 |
255 | - dump_folder: string of the folder that saves the npy files.
256 |
257 | - proc: int of the number of processes to use to evaluate.
258 |
259 | **Output:**
260 |
261 | - res: numpy array of the detailed accuracy.
262 |
263 | - ap: float of the AP for similar split.
264 | '''
265 | res = np.array(self.parallel_eval_scenes(scene_ids = list(range(130, 160)), dump_folder = dump_folder, proc = proc))
266 | ap = np.mean(res)
267 | print('\nEvaluation Result:\n----------\n{}, AP={}, AP Similar={}'.format(self.camera, ap, ap))
268 | return res, ap
269 |
270 | def eval_novel(self, dump_folder, proc = 2):
271 | '''
272 | **Input:**
273 |
274 | - dump_folder: string of the folder that saves the npy files.
275 |
276 | - proc: int of the number of processes to use to evaluate.
277 |
278 | **Output:**
279 |
280 | - res: numpy array of the detailed accuracy.
281 |
282 | - ap: float of the AP for novel split.
283 | '''
284 | res = np.array(self.parallel_eval_scenes(scene_ids = list(range(160, 190)), dump_folder = dump_folder, proc = proc))
285 | ap = np.mean(res)
286 | print('\nEvaluation Result:\n----------\n{}, AP={}, AP Novel={}'.format(self.camera, ap, ap))
287 | return res, ap
288 |
289 | def eval_all(self, dump_folder, proc = 2):
290 | '''
291 | **Input:**
292 |
293 | - dump_folder: string of the folder that saves the npy files.
294 |
295 | - proc: int of the number of processes to use to evaluate.
296 |
297 | **Output:**
298 |
299 | - res: numpy array of the detailed accuracy.
300 |
301 | - ap: float of the AP for all split.
302 | '''
303 | res = np.array(self.parallel_eval_scenes(scene_ids = list(range(100, 190)), dump_folder = dump_folder, proc = proc))
304 | ap = [np.mean(res), np.mean(res[0:30]), np.mean(res[30:60]), np.mean(res[60:90])]
305 | print('\nEvaluation Result:\n----------\n{}, AP={}, AP Seen={}, AP Similar={}, AP Novel={}'.format(self.camera, ap[0], ap[1], ap[2], ap[3]))
306 | return res, ap
307 |
--------------------------------------------------------------------------------
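
The arrays returned by eval_seen / eval_similar / eval_novel / eval_all stack per-scene accuracy tensors, so split-level or per-friction APs are just means over the appropriate axes. A minimal sketch, assuming res comes from eval_all and the last axis follows list_coe_of_friction = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2]:

    import numpy as np
    res = np.asarray(res)            # shape: (num_scenes, 256, TOP_K, 6)
    ap_all = np.mean(res)            # overall AP, as printed by eval_all
    ap_04 = np.mean(res[..., 1])     # AP at friction coefficient 0.4
    ap_08 = np.mean(res[..., 3])     # AP at friction coefficient 0.8
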
/graspnetAPI/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/graspnet/graspnetAPI/d931564a816aa6bc1e5e53932a1dc66e1d656113/graspnetAPI/utils/__init__.py
--------------------------------------------------------------------------------
/graspnetAPI/utils/config.py:
--------------------------------------------------------------------------------
1 | def get_config():
2 | '''
3 | - return the config dict
4 | '''
5 | config = dict()
6 | force_closure = dict()
7 | force_closure['quality_method'] = 'force_closure'
8 | force_closure['num_cone_faces'] = 8
9 | force_closure['soft_fingers'] = 1
10 | force_closure['quality_type'] = 'quasi_static'
11 | force_closure['all_contacts_required']= 1
12 | force_closure['check_approach'] = False
13 | force_closure['torque_scaling'] = 0.01
14 | force_closure['wrench_norm_thresh'] = 0.001
15 | force_closure['wrench_regularizer'] = 0.0000000001
16 | config['metrics'] = dict()
17 | config['metrics']['force_closure'] = force_closure
18 | return config
--------------------------------------------------------------------------------
/graspnetAPI/utils/dexnet/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
2 | Permission to use, copy, modify, and distribute this software and its documentation for educational,
3 | research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
4 | hereby granted, provided that the above copyright notice, this paragraph and the following two
5 | paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
6 | Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7 | 7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
8 |
9 | IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
10 | INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
11 | THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
12 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
13 |
14 | REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
15 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 | PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
17 | HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
18 | MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
--------------------------------------------------------------------------------
/graspnetAPI/utils/dexnet/__init__.py:
--------------------------------------------------------------------------------
1 | # # -*- coding: utf-8 -*-
2 | # """
3 | # Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
4 | # Permission to use, copy, modify, and distribute this software and its documentation for educational,
5 | # research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
6 | # hereby granted, provided that the above copyright notice, this paragraph and the following two
7 | # paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
8 | # Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
9 | # 7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
10 |
11 | # IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
12 | # INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
13 | # THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
14 | # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15 |
16 | # REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17 | # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 | # PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
19 | # HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
20 | # MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
21 | # """
22 | # # from .constants import *
23 | # # from .abstractstatic import abstractstatic
24 | # # from .api import DexNet
--------------------------------------------------------------------------------
/graspnetAPI/utils/dexnet/abstractstatic.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
4 | Permission to use, copy, modify, and distribute this software and its documentation for educational,
5 | research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
6 | hereby granted, provided that the above copyright notice, this paragraph and the following two
7 | paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
8 | Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
9 | 7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
10 |
11 | IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
12 | INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
13 | THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
14 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15 |
16 | REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 | PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
19 | HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
20 | MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
21 | """
22 | # Abstract static methods
23 | # Source: https://stackoverflow.com/questions/4474395/staticmethod-and-abc-abstractmethod-will-it-blend
24 |
25 | class abstractstatic(staticmethod):
26 | __slots__ = ()
27 | def __init__(self, function):
28 | super(abstractstatic, self).__init__(function)
29 | function.__isabstractmethod__ = True
30 | __isabstractmethod__ = True
31 |
--------------------------------------------------------------------------------
/graspnetAPI/utils/dexnet/constants.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
4 | Permission to use, copy, modify, and distribute this software and its documentation for educational,
5 | research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
6 | hereby granted, provided that the above copyright notice, this paragraph and the following two
7 | paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
8 | Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
9 | 7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
10 |
11 | IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
12 | INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
13 | THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
14 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15 |
16 | REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 | PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
19 | HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
20 | MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
21 | """
22 | # Grasp contact params
23 | NO_CONTACT_DIST = 0.2 # distance to points that are not in contact for window extraction
24 | WIN_DIST_LIM = 0.02 # limits for window plotting
25 |
26 | # File extensions
27 | HDF5_EXT = '.hdf5'
28 | OBJ_EXT = '.obj'
29 | OFF_EXT = '.off'
30 | STL_EXT = '.stl'
31 | SDF_EXT = '.sdf'
32 | URDF_EXT = '.urdf'
33 |
34 | # Tags for intermediate files
35 | DEC_TAG = '_dec'
36 | PROC_TAG = '_proc'
37 |
38 | # Solver default max iterations
39 | DEF_MAX_ITER = 100
40 |
41 | # Access levels for db
42 | READ_ONLY_ACCESS = 'READ_ONLY'
43 | READ_WRITE_ACCESS = 'READ_WRITE'
44 | WRITE_ACCESS = 'WRITE'
45 |
--------------------------------------------------------------------------------
/graspnetAPI/utils/dexnet/grasping/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
3 | Permission to use, copy, modify, and distribute this software and its documentation for educational,
4 | research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
5 | hereby granted, provided that the above copyright notice, this paragraph and the following two
6 | paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
7 | Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
8 | 7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
9 |
10 | IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
11 | INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
12 | THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
13 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
14 |
15 | REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
16 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
17 | PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
18 | HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
19 | MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
20 | """
21 |
--------------------------------------------------------------------------------
/graspnetAPI/utils/dexnet/grasping/grasp_quality_config.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
4 | Permission to use, copy, modify, and distribute this software and its documentation for educational,
5 | research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
6 | hereby granted, provided that the above copyright notice, this paragraph and the following two
7 | paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
8 | Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
9 | 7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
10 |
11 | IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
12 | INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
13 | THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
14 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15 |
16 | REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 | PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
19 | HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
20 | MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
21 | """
22 | """
23 | Configurations for grasp quality computation.
24 | Author: Jeff Mahler
25 | """
26 | from abc import ABCMeta, abstractmethod
27 |
28 | import copy
29 | import itertools as it
30 | import logging
31 | import matplotlib.pyplot as plt
32 | try:
33 | import mayavi.mlab as mlab
34 | except:
35 | # logging.warning('Failed to import mayavi')
36 | pass
37 |
38 | import numpy as np
39 | import os
40 | import sys
41 | import time
42 |
43 | import IPython
44 |
45 | # class GraspQualityConfig(object, metaclass=ABCMeta):
46 | class GraspQualityConfig(object):
47 | """
48 | Base wrapper class for parameters used in grasp quality computation.
49 | Used to elegantly enforce existence and type of required parameters.
50 |
51 | Attributes
52 | ----------
53 | config : :obj:`dict`
54 | dictionary mapping parameter names to parameter values
55 | """
56 | __metaclass__ = ABCMeta
57 | def __init__(self, config):
58 | # check valid config
59 | self.check_valid(config)
60 |
61 | # parse config
62 | for key, value in list(config.items()):
63 | setattr(self, key, value)
64 |
65 | def contains(self, key):
66 | """ Checks whether or not the key is supported """
67 | if key in list(self.__dict__.keys()):
68 | return True
69 | return False
70 |
71 | def __getattr__(self, key):
72 | if self.contains(key):
73 | return object.__getattribute__(self, key)
74 | return None
75 |
76 | def __getitem__(self, key):
77 | if self.contains(key):
78 | return object.__getattribute__(self, key)
79 | raise KeyError('Key %s not found' %(key))
80 |
81 | def keys(self):
82 | return list(self.__dict__.keys())
83 |
84 | @abstractmethod
85 | def check_valid(self, config):
86 | """ Raise an exception if the config is missing required keys """
87 | pass
88 |
89 | class QuasiStaticGraspQualityConfig(GraspQualityConfig):
90 | """
91 | Parameters for quasi-static grasp quality computation.
92 |
93 | Attributes
94 | ----------
95 | config : :obj:`dict`
96 | dictionary mapping parameter names to parameter values
97 |
98 | Notes
99 | -----
100 | Required configuration key-value pairs in Other Parameters.
101 |
102 | Other Parameters
103 | ----------------
104 | quality_method : :obj:`str`
105 | string name of quasi-static quality metric
106 | friction_coef : float
107 | coefficient of friction at contact point
108 | num_cone_faces : int
109 | number of faces to use in friction cone approximation
110 | soft_fingers : bool
111 | whether to use a soft finger model
112 | quality_type : :obj:`str`
113 | string name of grasp quality type (e.g. quasi-static, robust quasi-static)
114 | check_approach : bool
115 | whether or not to check the approach direction
116 | """
117 | REQUIRED_KEYS = ['quality_method',
118 | 'friction_coef',
119 | 'num_cone_faces',
120 | 'soft_fingers',
121 | 'quality_type',
122 | 'check_approach',
123 | 'all_contacts_required']
124 |
125 | def __init__(self, config):
126 | GraspQualityConfig.__init__(self, config)
127 |
128 | def __copy__(self):
129 | """ Makes a copy of the config """
130 | obj_copy = QuasiStaticGraspQualityConfig(self.__dict__)
131 | return obj_copy
132 |
133 | def check_valid(self, config):
134 | for key in QuasiStaticGraspQualityConfig.REQUIRED_KEYS:
135 | if key not in list(config.keys()):
136 | raise ValueError('Invalid configuration. Key %s must be specified' %(key))
137 |
138 | class RobustQuasiStaticGraspQualityConfig(GraspQualityConfig):
139 | """
140 | Parameters for quasi-static grasp quality computation.
141 |
142 | Attributes
143 | ----------
144 | config : :obj:`dict`
145 | dictionary mapping parameter names to parameter values
146 |
147 | Notes
148 | -----
149 | Required configuration key-value pairs in Other Parameters.
150 |
151 | Other Parameters
152 | ----------------
153 | quality_method : :obj:`str`
154 | string name of quasi-static quality metric
155 | friction_coef : float
156 | coefficient of friction at contact point
157 | num_cone_faces : int
158 | number of faces to use in friction cone approximation
159 | soft_fingers : bool
160 | whether to use a soft finger model
161 | quality_type : :obj:`str`
162 | string name of grasp quality type (e.g. quasi-static, robust quasi-static)
163 | check_approach : bool
164 | whether or not to check the approach direction
165 | num_quality_samples : int
166 | number of samples to use
167 | """
168 | ROBUST_REQUIRED_KEYS = ['num_quality_samples']
169 |
170 | def __init__(self, config):
171 | GraspQualityConfig.__init__(self, config)
172 |
173 | def __copy__(self):
174 | """ Makes a copy of the config """
175 | obj_copy = RobustQuasiStaticGraspQualityConfig(self.__dict__)
176 | return obj_copy
177 |
178 | def check_valid(self, config):
179 | required_keys = QuasiStaticGraspQualityConfig.REQUIRED_KEYS + \
180 | RobustQuasiStaticGraspQualityConfig.ROBUST_REQUIRED_KEYS
181 | for key in required_keys:
182 | if key not in list(config.keys()):
183 | raise ValueError('Invalid configuration. Key %s must be specified' %(key))
184 |
185 | class GraspQualityConfigFactory:
186 | """ Helper class to automatically create grasp quality configurations of different types. """
187 | @staticmethod
188 | def create_config(config):
189 | """ Automatically create a quality config from a dictionary.
190 |
191 | Parameters
192 | ----------
193 | config : :obj:`dict`
194 | dictionary mapping parameter names to parameter values
195 | """
196 | if config['quality_type'] == 'quasi_static':
197 | return QuasiStaticGraspQualityConfig(config)
198 | elif config['quality_type'] == 'robust_quasi_static':
199 | return RobustQuasiStaticGraspQualityConfig(config)
200 | else:
201 |             raise ValueError('Quality config type %s not supported' %(config['quality_type']))
202 |
--------------------------------------------------------------------------------
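
The force_closure dictionary returned by get_config (utils/config.py above) contains every entry in REQUIRED_KEYS except friction_coef, which the evaluation supplies per friction coefficient. A minimal sketch of building a quasi-static quality config from it, assuming the package-relative imports shown in the tree and that the optional dexnet dependencies are installed:

    from graspnetAPI.utils.config import get_config
    from graspnetAPI.utils.dexnet.grasping.grasp_quality_config import GraspQualityConfigFactory

    metric_config = dict(get_config()['metrics']['force_closure'])
    metric_config['friction_coef'] = 0.4                 # chosen friction coefficient
    quality_config = GraspQualityConfigFactory.create_config(metric_config)
    print(quality_config.quality_method, quality_config.friction_coef)
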
/graspnetAPI/utils/dexnet/grasping/grasp_quality_function.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
4 | Permission to use, copy, modify, and distribute this software and its documentation for educational,
5 | research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
6 | hereby granted, provided that the above copyright notice, this paragraph and the following two
7 | paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
8 | Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
9 | 7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
10 |
11 | IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
12 | INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
13 | THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
14 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15 |
16 | REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 | PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
19 | HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
20 | MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
21 | """
22 | """
23 | User-friendly functions for computing grasp quality metrics.
24 | Author: Jeff Mahler
25 | """
26 | from abc import ABCMeta, abstractmethod
27 |
28 | import copy
29 | import itertools as it
30 | import logging
31 | import matplotlib.pyplot as plt
32 |
33 | import numpy as np
34 | import os
35 | import scipy.stats
36 | import sys
37 | import time
38 |
39 | from .grasp import Grasp
40 | from .graspable_object import GraspableObject
41 | from .grasp_quality_config import GraspQualityConfig
42 | from .robust_grasp_quality import RobustPointGraspMetrics3D
43 | from .random_variables import GraspableObjectPoseGaussianRV, ParallelJawGraspPoseGaussianRV, ParamsGaussianRV
44 | from .quality import PointGraspMetrics3D
45 |
46 | from autolab_core import RigidTransform
47 | import IPython
48 |
49 | class GraspQualityResult:
50 | """ Stores the results of grasp quality computation.
51 |
52 | Attributes
53 | ----------
54 | quality : float
55 | value of quality
56 | uncertainty : float
57 | uncertainty estimate of the quality value
58 | quality_config : :obj:`GraspQualityConfig`
59 | """
60 | def __init__(self, quality, uncertainty=0.0, quality_config=None):
61 | self.quality = quality
62 | self.uncertainty = uncertainty
63 | self.quality_config = quality_config
64 |
65 | # class GraspQualityFunction(object, metaclass=ABCMeta):
66 | class GraspQualityFunction(object):
67 | """
68 | Abstraction for grasp quality functions to make scripts for labeling with quality functions simple and readable.
69 |
70 | Attributes
71 | ----------
72 | graspable : :obj:`GraspableObject3D`
73 | object to evaluate grasp quality on
74 | quality_config : :obj:`GraspQualityConfig`
75 | set of parameters to evaluate grasp quality
76 | """
77 | __metaclass__ = ABCMeta
78 |
79 |
80 | def __init__(self, graspable, quality_config):
81 | # check valid types
82 | if not isinstance(graspable, GraspableObject):
83 | raise ValueError('Must provide GraspableObject')
84 | if not isinstance(quality_config, GraspQualityConfig):
85 | raise ValueError('Must provide GraspQualityConfig')
86 |
87 | # set member variables
88 | self.graspable_ = graspable
89 | self.quality_config_ = quality_config
90 |
91 | self._setup()
92 |
93 | def __call__(self, grasp):
94 | return self.quality(grasp)
95 |
96 | @abstractmethod
97 | def _setup(self):
98 | """ Sets up common variables for grasp quality evaluations """
99 | pass
100 |
101 | @abstractmethod
102 | def quality(self, grasp):
103 | """ Compute grasp quality.
104 |
105 | Parameters
106 | ----------
107 |         grasp : :obj:`Grasp`
108 |             grasp to compute quality for
109 |
110 | Returns
111 | -------
112 | :obj:`GraspQualityResult`
113 | result of quality computation
114 | """
115 | pass
116 |
117 | class QuasiStaticQualityFunction(GraspQualityFunction):
118 | """ Grasp quality metric using a quasi-static model.
119 | """
120 | def __init__(self, graspable, quality_config):
121 | GraspQualityFunction.__init__(self, graspable, quality_config)
122 |
123 | @property
124 | def graspable(self):
125 | return self.graspable_
126 |
127 | @graspable.setter
128 | def graspable(self, obj):
129 | self.graspable_ = obj
130 |
131 | def _setup(self):
132 | if self.quality_config_.quality_type != 'quasi_static':
133 | raise ValueError('Quality configuration must be quasi static')
134 |
135 | def quality(self, grasp):
136 | """ Compute grasp quality using a quasistatic method.
137 |
138 | Parameters
139 | ----------
140 |         grasp : :obj:`Grasp`
141 |             grasp to compute quality for
142 |
143 | Returns
144 | -------
145 | :obj:`GraspQualityResult`
146 | result of quality computation
147 | """
148 | if not isinstance(grasp, Grasp):
149 | raise ValueError('Must provide Grasp object to compute quality')
150 |
151 | quality = PointGraspMetrics3D.grasp_quality(grasp, self.graspable_,
152 | self.quality_config_)
153 | return GraspQualityResult(quality, quality_config=self.quality_config_)
154 |
155 | class RobustQuasiStaticQualityFunction(GraspQualityFunction):
156 | """ Grasp quality metric using a robust quasi-static model (average over random perturbations)
157 | """
158 | def __init__(self, graspable, quality_config, T_obj_world=RigidTransform(from_frame='obj', to_frame='world')):
159 | self.T_obj_world_ = T_obj_world
160 | GraspQualityFunction.__init__(self, graspable, quality_config)
161 |
162 | @property
163 | def graspable(self):
164 | return self.graspable_
165 |
166 | @graspable.setter
167 | def graspable(self, obj):
168 | self.graspable_ = obj
169 | self._setup()
170 |
171 | def _setup(self):
172 | if self.quality_config_.quality_type != 'robust_quasi_static':
173 | raise ValueError('Quality configuration must be robust quasi static')
174 | self.graspable_rv_ = GraspableObjectPoseGaussianRV(self.graspable_,
175 | self.T_obj_world_,
176 | self.quality_config_.obj_uncertainty)
177 | self.params_rv_ = ParamsGaussianRV(self.quality_config_,
178 | self.quality_config_.params_uncertainty)
179 |
180 | def quality(self, grasp):
181 | """ Compute grasp quality using a robust quasistatic method.
182 |
183 | Parameters
184 | ----------
185 |         grasp : :obj:`Grasp`
186 |             grasp to compute quality for
187 |
188 | Returns
189 | -------
190 | :obj:`GraspQualityResult`
191 | result of quality computation
192 | """
193 | if not isinstance(grasp, Grasp):
194 | raise ValueError('Must provide Grasp object to compute quality')
195 | grasp_rv = ParallelJawGraspPoseGaussianRV(grasp,
196 | self.quality_config_.grasp_uncertainty)
197 | mean_q, std_q = RobustPointGraspMetrics3D.expected_quality(grasp_rv,
198 | self.graspable_rv_,
199 | self.params_rv_,
200 | self.quality_config_)
201 | return GraspQualityResult(mean_q, std_q, quality_config=self.quality_config_)
202 |
203 | class GraspQualityFunctionFactory:
204 | @staticmethod
205 | def create_quality_function(graspable, quality_config):
206 | """ Creates a quality function for a particular object based on a configuration, which can be passed directly from a configuration file.
207 |
208 | Parameters
209 | ----------
210 | graspable : :obj:`GraspableObject3D`
211 | object to create quality function for
212 | quality_config : :obj:`GraspQualityConfig`
213 | parameters for quality function
214 | """
215 | # check valid types
216 | if not isinstance(graspable, GraspableObject):
217 | raise ValueError('Must provide GraspableObject')
218 | if not isinstance(quality_config, GraspQualityConfig):
219 | raise ValueError('Must provide GraspQualityConfig')
220 |
221 | if quality_config.quality_type == 'quasi_static':
222 | return QuasiStaticQualityFunction(graspable, quality_config)
223 | elif quality_config.quality_type == 'robust_quasi_static':
224 | return RobustQuasiStaticQualityFunction(graspable, quality_config)
225 | else:
226 | raise ValueError('Grasp quality type %s not supported' %(quality_config.quality_type))
227 |
--------------------------------------------------------------------------------
/graspnetAPI/utils/dexnet/grasping/graspable_object.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
4 | Permission to use, copy, modify, and distribute this software and its documentation for educational,
5 | research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
6 | hereby granted, provided that the above copyright notice, this paragraph and the following two
7 | paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
8 | Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
9 | 7201, otl@berkeley.edu, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
10 |
11 | IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
12 | INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
13 | THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
14 | ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15 |
16 | REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
17 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
18 | PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
19 | HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
20 | MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
21 | """
22 | """
23 | Encapsulates data and operations on a 2D or 3D object that can be grasped
24 | Author: Jeff Mahler
25 | """
26 | from abc import ABCMeta, abstractmethod
27 |
28 | import copy
29 | import logging
30 | import numpy as np
31 |
32 | from .meshpy import mesh as m
33 | from .meshpy import sdf as s
34 |
35 | import IPython
36 | import matplotlib.pyplot as plt
37 |
38 | from autolab_core import RigidTransform, SimilarityTransform
39 |
40 |
41 | # class GraspableObject(metaclass=ABCMeta):
42 | class GraspableObject:
43 | """ Encapsulates geometric structures for computing contact in grasping.
44 |
45 | Attributes
46 | ----------
47 | sdf : :obj:`Sdf3D`
48 | signed distance field, for quickly computing contact points
49 | mesh : :obj:`Mesh3D`
50 | 3D triangular mesh to specify object geometry, should match SDF
51 | key : :obj:`str`
52 | object identifier, usually given from the database
53 | model_name : :obj:`str`
54 | name of the object mesh as a .obj file, for use in collision checking
55 | mass : float
56 | mass of the object
57 | convex_pieces : :obj:`list` of :obj:`Mesh3D`
58 | convex decomposition of the object geom for collision checking
59 | """
60 | __metaclass__ = ABCMeta
61 |
62 | def __init__(self, sdf, mesh, key='', model_name='', mass=1.0, convex_pieces=None):
63 | self.sdf_ = sdf
64 | self.mesh_ = mesh
65 |
66 | self.key_ = key
67 | self.model_name_ = model_name # for OpenRave usage, gross!
68 | self.mass_ = mass
69 | self.convex_pieces_ = convex_pieces
70 |
71 | @property
72 | def sdf(self):
73 | return self.sdf_
74 |
75 | @property
76 | def mesh(self):
77 | return self.mesh_
78 |
79 | @property
80 | def mass(self):
81 | return self.mass_
82 |
83 | @property
84 | def key(self):
85 | return self.key_
86 |
87 | @property
88 | def model_name(self):
89 | return self.model_name_
90 |
91 | @property
92 | def convex_pieces(self):
93 | return self.convex_pieces_
94 |
95 | class GraspableObject3D(GraspableObject):
96 | """ 3D Graspable object for computing contact in grasping.
97 |
98 | Attributes
99 | ----------
100 | sdf : :obj:`Sdf3D`
101 | signed distance field, for quickly computing contact points
102 | mesh : :obj:`Mesh3D`
103 | 3D triangular mesh to specify object geometry, should match SDF
104 | key : :obj:`str`
105 | object identifier, usually given from the database
106 | model_name : :obj:`str`
107 | name of the object mesh as a .obj file, for use in collision checking
108 | mass : float
109 | mass of the object
110 | convex_pieces : :obj:`list` of :obj:`Mesh3D`
111 | convex decomposition of the object geom for collision checking
112 | """
113 | def __init__(self, sdf, mesh, key='',
114 | model_name='', mass=1.0,
115 | convex_pieces=None):
116 | if not isinstance(sdf, s.Sdf3D):
117 | raise ValueError('Must initialize 3D graspable object with 3D sdf')
118 | if not isinstance(mesh, m.Mesh3D):
119 | raise ValueError('Must initialize 3D graspable object with 3D mesh')
120 |
121 | GraspableObject.__init__(self, sdf, mesh, key=key,
122 | model_name=model_name, mass=mass,
123 | convex_pieces=convex_pieces)
124 |
125 | def moment_arm(self, x):
126 | """ Computes the moment arm to a point x.
127 |
128 | Parameters
129 | ----------
130 | x : 3x1 :obj:`numpy.ndarray`
131 | point to get moment arm for
132 |
133 | Returns
134 | -------
135 | 3x1 :obj:`numpy.ndarray`
136 | """
137 | return x - self.mesh.center_of_mass
138 |
139 | def rescale(self, scale):
140 | """ Rescales uniformly by a given factor.
141 |
142 | Parameters
143 | ----------
144 | scale : float
145 | the amount to scale the object
146 |
147 | Returns
148 | -------
149 | :obj:`GraspableObject3D`
150 | the graspable object rescaled by the given factor
151 | """
152 | stf = SimilarityTransform(scale=scale)
153 | sdf_rescaled = self.sdf_.rescale(scale)
154 | mesh_rescaled = self.mesh_.transform(stf)
155 | convex_pieces_rescaled = None
156 | if self.convex_pieces_ is not None:
157 | convex_pieces_rescaled = []
158 | for convex_piece in self.convex_pieces_:
159 | convex_piece_rescaled = convex_piece.transform(stf)
160 | convex_pieces_rescaled.append(convex_piece_rescaled)
161 | return GraspableObject3D(sdf_rescaled, mesh_rescaled, key=self.key,
162 | model_name=self.model_name, mass=self.mass,
163 | convex_pieces=convex_pieces_rescaled)
164 |
165 | def transform(self, delta_T):
166 | """ Transform by a delta transform.
167 |
168 |
169 | Parameters
170 | ----------
171 | delta_T : :obj:`RigidTransform`
172 | the transformation from the current reference frame to the alternate reference frame
173 |
174 | Returns
175 | -------
176 | :obj:`GraspableObject3D`
177 | graspable object transformed by the delta transform
178 | """
179 | sdf_tf = self.sdf_.transform(delta_T)
180 | mesh_tf = self.mesh_.transform(delta_T)
181 | convex_pieces_tf = None
182 | if self.convex_pieces_ is not None:
183 | convex_pieces_tf = []
184 | for convex_piece in self.convex_pieces_:
185 | convex_piece_tf = convex_piece.transform(delta_T)
186 | convex_pieces_tf.append(convex_piece_tf)
187 | return GraspableObject3D(sdf_tf, mesh_tf, key=self.key,
188 | model_name=self.model_name, mass=self.mass,
189 | convex_pieces=convex_pieces_tf)
190 |
191 | def surface_information(self, grasp, width, num_steps, plot=False, direction1=None, direction2=None):
192 | """ Returns the patches on this object for a given grasp.
193 |
194 | Parameters
195 | ----------
196 | grasp : :obj:`ParallelJawPtGrasp3D`
197 | grasp to get the patch information for
198 | width : float
199 | width of jaw opening
200 | num_steps : int
201 | number of steps
202 | plot : bool
203 | whether to plot the intermediate computation, for debugging
204 | direction1 : normalized 3x1 :obj:`numpy.ndarray`
205 | direction along which to compute the surface information for the first jaw, if None then defaults to grasp axis
206 | direction2 : normalized 3x1 :obj:`numpy.ndarray`
207 | direction along which to compute the surface information for the second jaw, if None then defaults to grasp axis
208 |
209 | Returns
210 | -------
211 | :obj:`list` of :obj:`SurfaceWindow`
212 | surface patches, one for each contact
213 | """
214 | contacts_found, contacts = grasp.close_fingers(self)#, vis=True)
215 | if not contacts_found:
216 | raise ValueError('Failed to find contacts')
217 | contact1, contact2 = contacts
218 |
219 | if plot:
220 | plt.figure()
221 | contact1.plot_friction_cone()
222 | contact2.plot_friction_cone()
223 |
224 | ax = plt.gca(projection = '3d')
225 | ax.set_xlim3d(0, self.sdf.dims_[0])
226 | ax.set_ylim3d(0, self.sdf.dims_[1])
227 | ax.set_zlim3d(0, self.sdf.dims_[2])
228 |
229 | window1 = contact1.surface_information(width, num_steps, direction=direction1)
230 | window2 = contact2.surface_information(width, num_steps, direction=direction2)
231 | return window1, window2, contact1, contact2
232 |
233 |
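234 | # Minimal usage sketch (illustrative, not part of the upstream module; the
235 | # file prefix below is a hypothetical placeholder). A graspable object is
236 | # assembled from an .obj mesh plus a matching .sdf grid:
237 | #
238 | #     from .meshpy.obj_file import ObjFile
239 | #     from .meshpy.sdf_file import SdfFile
240 | #     mesh = ObjFile('/path/to/model.obj').read()
241 | #     sdf = SdfFile('/path/to/model.sdf').read()
242 | #     obj = GraspableObject3D(sdf, mesh, key='model', mass=1.0)
243 | #     obj_half = obj.rescale(0.5)  # uniformly rescales mesh, SDF and convex pieces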
--------------------------------------------------------------------------------
/graspnetAPI/utils/dexnet/grasping/meshpy/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "{}"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2017 Berkeley AUTOLAB & University of California, Berkeley
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
--------------------------------------------------------------------------------
/graspnetAPI/utils/dexnet/grasping/meshpy/__init__.py:
--------------------------------------------------------------------------------
1 | # try:
2 | # # from meshpy import meshrender
3 | # import meshrender
4 | # except:
5 | # print('Unable to import meshrender shared library! Rendering will not work. Likely due to missing Boost.Numpy')
6 | # print('Boost.Numpy can be installed following the instructions in https://github.com/ndarray/Boost.NumPy')
7 | from .mesh import Mesh3D
8 | # from .image_converter import ImageToMeshConverter
9 | from .obj_file import ObjFile
10 | # from .off_file import OffFile
11 | # from .render_modes import RenderMode
12 | from .sdf import Sdf, Sdf3D
13 | from .sdf_file import SdfFile
14 | from .stable_pose import StablePose
15 | from . import mesh
16 | from . import obj_file
17 | from . import sdf_file
18 | # from .stp_file import StablePoseFile
19 | # from .urdf_writer import UrdfWriter, convex_decomposition
20 | # from .lighting import MaterialProperties, LightingProperties
21 |
22 | # from .mesh_renderer import ViewsphereDiscretizer, PlanarWorksurfaceDiscretizer, VirtualCamera, SceneObject
23 | # from .random_variables import CameraSample, RenderSample, UniformViewsphereRandomVariable, \
24 | # UniformPlanarWorksurfaceRandomVariable, UniformPlanarWorksurfaceImageRandomVariable
25 |
26 | __all__ = ['Mesh3D','ObjFile','Sdf','Sdf3D','SdfFile','StablePose','mesh','obj_file','sdf_file']
27 |
--------------------------------------------------------------------------------
/graspnetAPI/utils/dexnet/grasping/meshpy/obj_file.py:
--------------------------------------------------------------------------------
1 | """
2 | File for loading and saving meshes from .OBJ files
3 | Author: Jeff Mahler
4 | """
5 | import os
6 | try:
7 | from . import mesh
8 | except ImportError:
9 | import mesh
10 |
11 |
12 | class ObjFile(object):
13 | """
14 | A Wavefront .obj file reader and writer.
15 |
16 | Attributes
17 | ----------
18 | filepath : :obj:`str`
19 | The full path to the .obj file associated with this reader/writer.
20 | """
21 |
22 | def __init__(self, filepath):
23 | """Construct and initialize a .obj file reader and writer.
24 |
25 | Parameters
26 | ----------
27 | filepath : :obj:`str`
28 | The full path to the desired .obj file
29 |
30 | Raises
31 | ------
32 | ValueError
33 | If the file extension is not .obj.
34 | """
35 | self.filepath_ = filepath
36 | file_root, file_ext = os.path.splitext(self.filepath_)
37 | if file_ext != '.obj':
38 | raise ValueError('Extension %s invalid for OBJs' %(file_ext))
39 |
40 | @property
41 | def filepath(self):
42 | """Returns the full path to the .obj file associated with this reader/writer.
43 |
44 | Returns
45 | -------
46 | :obj:`str`
47 | The full path to the .obj file associated with this reader/writer.
48 | """
49 | return self.filepath_
50 |
51 | def read(self):
52 | """Reads in the .obj file and returns a Mesh3D representation of that mesh.
53 |
54 | Returns
55 | -------
56 | :obj:`Mesh3D`
57 | A Mesh3D created from the data in the .obj file.
58 | """
59 | numVerts = 0
60 | verts = []
61 | norms = None
62 | faces = []
63 | tex_coords = []
64 | face_norms = []
65 | f = open(self.filepath_, 'r')
66 |
67 | for line in f:
68 | # Break up the line by whitespace
69 | vals = line.split()
70 | if len(vals) > 0:
71 | # Look for obj tags (see http://en.wikipedia.org/wiki/Wavefront_.obj_file)
72 | if vals[0] == 'v':
73 | # Add vertex
74 | v = list(map(float, vals[1:4]))
75 | verts.append(v)
76 | if vals[0] == 'vn':
77 | # Add normal
78 | if norms is None:
79 | norms = []
80 | n = list(map(float, vals[1:4]))
81 | norms.append(n)
82 | if vals[0] == 'f':
83 | # Add faces (includes vertex indices, texture coordinates, and normals)
84 | vi = []
85 | vti = []
86 | nti = []
87 | if vals[1].find('/') == -1:
88 | vi = list(map(int, vals[1:]))
89 | vi = [i - 1 for i in vi]
90 | else:
91 | for j in range(1, len(vals)):
92 | # Break up the token by / to read vertex indices, tex coords, and normal indices
93 | val = vals[j]
94 | tokens = val.split('/')
95 | for i in range(len(tokens)):
96 | if i == 0:
97 | vi.append(int(tokens[i]) - 1) # adjust for python 0 - indexing
98 | elif i == 1:
99 | if tokens[i] != '':
100 | vti.append(int(tokens[i]))
101 | elif i == 2:
102 | nti.append(int(tokens[i]))
103 | faces.append(vi)
104 | # Below two lists are currently not in use
105 | tex_coords.append(vti)
106 | face_norms.append(nti)
107 | f.close()
108 |
109 | return mesh.Mesh3D(verts, faces, norms)
110 |
111 | def write(self, mesh):
112 | """Writes a Mesh3D object out to a .obj file format
113 |
114 | Parameters
115 | ----------
116 | mesh : :obj:`Mesh3D`
117 | The Mesh3D object to write to the .obj file.
118 |
119 | Note
120 | ----
121 | Does not support material files or texture coordinates.
122 | """
123 | f = open(self.filepath_, 'w')
124 | vertices = mesh.vertices
125 | faces = mesh.triangles
126 | normals = mesh.normals
127 |
128 | # write human-readable header
129 | f.write('###########################################################\n')
130 | f.write('# OBJ file generated by UC Berkeley Automation Sciences Lab\n')
131 | f.write('#\n')
132 | f.write('# Num Vertices: %d\n' %(vertices.shape[0]))
133 | f.write('# Num Triangles: %d\n' %(faces.shape[0]))
134 | f.write('#\n')
135 | f.write('###########################################################\n')
136 | f.write('\n')
137 |
138 | for v in vertices:
139 | f.write('v %f %f %f\n' %(v[0], v[1], v[2]))
140 |
141 | # write the normals list
142 | if normals is not None and normals.shape[0] > 0:
143 | for n in normals:
144 | f.write('vn %f %f %f\n' %(n[0], n[1], n[2]))
145 |
146 | # write the faces list
147 | for t in faces:
148 | f.write('f %d %d %d\n' %(t[0]+1, t[1]+1, t[2]+1)) # convert back to 1-indexing
149 |
150 | f.close()
151 |
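152 | # Minimal round-trip sketch (illustrative; the paths are hypothetical
153 | # placeholders): read a mesh from disk and write an unmodified copy back out.
154 | #
155 | #     mesh = ObjFile('/path/to/model.obj').read()   # -> Mesh3D
156 | #     ObjFile('/tmp/model_copy.obj').write(mesh)    # faces written 1-indexed
157 | #
158 | # write() ignores material files and texture coordinates, so only vertices,
159 | # normals and triangle indices are preserved in the copy.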
--------------------------------------------------------------------------------
/graspnetAPI/utils/dexnet/grasping/meshpy/sdf_file.py:
--------------------------------------------------------------------------------
1 | '''
2 | Reads and writes sdfs to file
3 | Author: Jeff Mahler
4 | '''
5 | import numpy as np
6 | import os
7 |
8 | from . import sdf
9 |
10 | class SdfFile:
11 | """
12 | A Signed Distance Field .sdf file reader and writer.
13 |
14 | Attributes
15 | ----------
16 | filepath : :obj:`str`
17 | The full path to the .sdf or .csv file associated with this reader/writer.
18 | """
19 | def __init__(self, filepath):
20 | """Construct and initialize a .sdf file reader and writer.
21 |
22 | Parameters
23 | ----------
24 | filepath : :obj:`str`
25 | The full path to the desired .sdf or .csv file
26 |
27 | Raises
28 | ------
29 | ValueError
30 | If the file extension is not .sdf or .csv.
31 | """
32 | self.filepath_ = filepath
33 | file_root, file_ext = os.path.splitext(self.filepath_)
34 |
35 | if file_ext == '.sdf':
36 | self.use_3d_ = True
37 | elif file_ext == '.csv':
38 | self.use_3d_ = False
39 | else:
40 | raise ValueError('Extension %s invalid for SDFs' %(file_ext))
41 |
42 | @property
43 | def filepath(self):
44 | """Returns the full path to the file associated with this reader/writer.
45 |
46 | Returns
47 | -------
48 | :obj:`str`
49 | The full path to the file associated with this reader/writer.
50 | """
51 | return self.filepath_
52 |
53 | def read(self):
54 | """Reads in the SDF file and returns a Sdf object.
55 |
56 | Returns
57 | -------
58 | :obj:`Sdf`
59 | A Sdf created from the data in the file.
60 | """
61 | if self.use_3d_:
62 | return self._read_3d()
63 | else:
64 | return self._read_2d()
65 |
66 |
67 | def _read_3d(self):
68 | """Reads in a 3D SDF file and returns a Sdf object.
69 |
70 | Returns
71 | -------
72 | :obj:`Sdf3D`
73 | A Sdf3D created from the data in the file.
74 | """
75 | if not os.path.exists(self.filepath_):
76 | return None
77 |
78 | my_file = open(self.filepath_, 'r')
79 | nx, ny, nz = [int(i) for i in my_file.readline().split()] #dimension of each axis should all be equal for LSH
80 | ox, oy, oz = [float(i) for i in my_file.readline().split()] #shape origin
81 | dims = np.array([nx, ny, nz])
82 | origin = np.array([ox, oy, oz])
83 |
84 | resolution = float(my_file.readline()) # resolution of the grid cells in original mesh coords
85 | sdf_data = np.zeros(dims)
86 |
87 | # loop through file, getting each value
88 | count = 0
89 | for k in range(nz):
90 | for j in range(ny):
91 | for i in range(nx):
92 | sdf_data[i][j][k] = float(my_file.readline())
93 | count += 1
94 | my_file.close()
95 | return sdf.Sdf3D(sdf_data, origin, resolution)
96 |
97 | def _read_2d(self):
98 | """Reads in a 2D SDF file and returns a Sdf object.
99 |
100 | Returns
101 | -------
102 | :obj:`Sdf2D`
103 | A Sdf2D created from the data in the file.
104 | """
105 | if not os.path.exists(self.filepath_):
106 | return None
107 |
108 | sdf_data = np.loadtxt(self.filepath_, delimiter=',')
109 | return sdf.Sdf2D(sdf_data)
110 |
111 | def write(self, sdf):
112 | """Writes an SDF to a file.
113 |
114 | Parameters
115 | ----------
116 | sdf : :obj:`Sdf`
117 | An Sdf object to write out.
118 |
119 | Note
120 | ----
121 | This is not currently implemented or supported.
122 | """
123 | pass
124 |
125 | if __name__ == '__main__':
126 | pass
127 |
128 |
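129 | # Minimal usage sketch (illustrative; the paths are hypothetical placeholders).
130 | # The file extension selects between the 3D (.sdf) and 2D (.csv) readers:
131 | #
132 | #     sdf3d = SdfFile('/path/to/model.sdf').read()   # -> Sdf3D (None if file missing)
133 | #     sdf2d = SdfFile('/path/to/slice.csv').read()   # -> Sdf2D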
--------------------------------------------------------------------------------
/graspnetAPI/utils/dexnet/grasping/meshpy/stable_pose.py:
--------------------------------------------------------------------------------
1 | """
2 | A basic struct-like Stable Pose class to make accessing pose probability and rotation matrix easier
3 |
4 | Author: Matt Matl and Nikhil Sharma
5 | """
6 | import numpy as np
7 |
8 | from autolab_core import RigidTransform
9 |
10 | d_theta = np.deg2rad(1)
11 |
12 | class StablePose(object):
13 | """A representation of a mesh's stable pose.
14 |
15 | Attributes
16 | ----------
17 | p : float
18 | Probability associated with this stable pose.
19 | r : :obj:`numpy.ndarray` of :obj:`numpy.ndarray` of float
20 | 3x3 rotation matrix that rotates the mesh into the stable pose from
21 | standardized coordinates.
22 | x0 : :obj:`numpy.ndarray` of float
23 | 3D point in the mesh that is resting on the table.
24 | face : :obj:`numpy.ndarray`
25 | 3D vector of indices corresponding to vertices forming the resting face
26 | stp_id : :obj:`str`
27 | A string identifier for the stable pose
28 | T_obj_table : :obj:`RigidTransform`
29 | A RigidTransform representation of the pose's rotation matrix.
30 | """
31 | def __init__(self, p, r, x0, face=None, stp_id=-1):
32 | """Create a new stable pose object.
33 |
34 | Parameters
35 | ----------
36 | p : float
37 | Probability associated with this stable pose.
38 | r : :obj:`numpy.ndarray` of :obj:`numpy.ndarray` of float
39 | 3x3 rotation matrix that rotates the mesh into the stable pose from
40 | standardized coordinates.
41 | x0 : :obj:`numpy.ndarray` of float
42 | 3D point in the mesh that is resting on the table.
43 | face : :obj:`numpy.ndarray`
44 | 3D vector of indices corresponding to vertices forming the resting face
45 | stp_id : :obj:`str`
46 | A string identifier for the stable pose
47 | """
48 | self.p = p
49 | self.r = r
50 | self.x0 = x0
51 | self.face = face
52 | self.id = stp_id
53 |
54 | # fix stable pose bug
55 | if np.abs(np.linalg.det(self.r) + 1) < 0.01:
56 | self.r[1,:] = -self.r[1,:]
57 |
58 | def __eq__(self, other):
59 | """ Check equivalence by rotation about the z axis """
60 | if not isinstance(other, StablePose):
61 | raise ValueError('Can only compare stable pose objects')
62 | R0 = self.r
63 | R1 = other.r
64 | dR = R1.T.dot(R0)
65 | theta = 0
66 | while theta < 2 * np.pi:
67 | Rz = RigidTransform.z_axis_rotation(theta)
68 | dR = R1.T.dot(Rz).dot(R0)
69 | if np.linalg.norm(dR - np.eye(3)) < 1e-5:
70 | return True
71 | theta += d_theta
72 | return False
73 |
74 | @property
75 | def T_obj_table(self):
76 | return RigidTransform(rotation=self.r, from_frame='obj', to_frame='table')
77 |
78 |
79 | @property
80 | def T_obj_world(self):
81 | T_world_obj = RigidTransform(rotation=self.r.T,
82 | translation=self.x0,
83 | from_frame='world',
84 | to_frame='obj')
85 | return T_world_obj.inverse()
86 |
87 |
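88 | # Minimal usage sketch (illustrative; the numbers are made up): a stable pose
89 | # with an identity rotation resting at the origin, and its table-frame transform.
90 | #
91 | #     sp = StablePose(p=0.25, r=np.eye(3), x0=np.zeros(3))
92 | #     T_obj_table = sp.T_obj_table   # RigidTransform from frame 'obj' to 'table'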
--------------------------------------------------------------------------------
/graspnetAPI/utils/eval_utils.py:
--------------------------------------------------------------------------------
1 | __author__ = 'cxwang, mhgou'
2 | __version__ = '1.0'
3 |
4 | import os
5 | import time
6 | import numpy as np
7 | import open3d as o3d
8 | from transforms3d.euler import euler2mat, quat2mat
9 |
10 | from .rotation import batch_viewpoint_params_to_matrix, matrix_to_dexnet_params
11 |
12 | from .dexnet.grasping.quality import PointGraspMetrics3D
13 | from .dexnet.grasping.grasp import ParallelJawPtGrasp3D
14 | from .dexnet.grasping.graspable_object import GraspableObject3D
15 | from .dexnet.grasping.grasp_quality_config import GraspQualityConfigFactory
16 | from .dexnet.grasping.contacts import Contact3D
17 | from .dexnet.grasping.meshpy.obj_file import ObjFile
18 | from .dexnet.grasping.meshpy.sdf_file import SdfFile
19 |
20 | def get_scene_name(num):
21 | '''
22 | **Input:**
23 | - num: int of the scene number.
24 |
25 | **Output:**
26 | - string of the scene name.
27 | '''
28 | return ('scene_%04d' % (num,))
29 |
30 | def create_table_points(lx, ly, lz, dx=0, dy=0, dz=0, grid_size=0.01):
31 | '''
32 | **Input:**
33 | - lx/ly/lz: float, extents of the table along the x/y/z axes.
34 | - dx/dy/dz: float, offsets added to the generated points along each axis (default 0).
35 | - grid_size: float, spacing between grid points (default 0.01).
36 | **Output:**
37 | - numpy array of the points with shape (-1, 3).
38 | '''
39 | xmap = np.linspace(0, lx, int(lx/grid_size))
40 | ymap = np.linspace(0, ly, int(ly/grid_size))
41 | zmap = np.linspace(0, lz, int(lz/grid_size))
42 | xmap, ymap, zmap = np.meshgrid(xmap, ymap, zmap, indexing='xy')
43 | xmap += dx
44 | ymap += dy
45 | zmap += dz
46 | points = np.stack([xmap, ymap, zmap], axis=-1)
47 | points = points.reshape([-1, 3])
48 | return points
49 |
50 | def parse_posevector(posevector):
51 | '''
52 | **Input:**
53 | - posevector: list of pose
54 | **Output:**
55 | - obj_idx: int of the index of object.
56 | - mat: numpy array of shape (4, 4) of the 6D pose of object.
57 | '''
58 | mat = np.zeros([4,4],dtype=np.float32)
59 | alpha, beta, gamma = posevector[4:7]
60 | alpha = alpha / 180.0 * np.pi
61 | beta = beta / 180.0 * np.pi
62 | gamma = gamma / 180.0 * np.pi
63 | mat[:3,:3] = euler2mat(alpha, beta, gamma)
64 | mat[:3,3] = posevector[1:4]
65 | mat[3,3] = 1
66 | obj_idx = int(posevector[0])
67 | return obj_idx, mat
68 |
69 | def load_dexnet_model(data_path):
70 | '''
71 | **Input:**
72 |
73 | - data_path: path to load .obj & .sdf files
74 |
75 | **Output:**
76 | - obj: dexnet model
77 | '''
78 | of = ObjFile('{}.obj'.format(data_path))
79 | sf = SdfFile('{}.sdf'.format(data_path))
80 | mesh = of.read()
81 | sdf = sf.read()
82 | obj = GraspableObject3D(sdf, mesh)
83 | return obj
84 |
85 | def transform_points(points, trans):
86 | '''
87 | **Input:**
88 |
89 | - points: (N, 3)
90 |
91 | - trans: (4, 4)
92 |
93 | **Output:**
94 | - points_trans: (N, 3)
95 | '''
96 | ones = np.ones([points.shape[0],1], dtype=points.dtype)
97 | points_ = np.concatenate([points, ones], axis=-1)
98 | points_ = np.matmul(trans, points_.T).T
99 | points_trans = points_[:,:3]
100 | return points_trans
101 |
102 | def compute_point_distance(A, B):
103 | '''
104 | **Input:**
105 | - A: (N, 3)
106 |
107 | - B: (M, 3)
108 |
109 | **Output:**
110 | - dists: (N, M)
111 | '''
112 | A = A[:, np.newaxis, :]
113 | B = B[np.newaxis, :, :]
114 | dists = np.linalg.norm(A-B, axis=-1)
115 | return dists
116 |
117 | def compute_closest_points(A, B):
118 | '''
119 | **Input:**
120 |
121 | - A: (N, 3)
122 |
123 | - B: (M, 3)
124 |
125 | **Output:**
126 |
127 | - indices: (N,) closest point index in B for each point in A
128 | '''
129 | dists = compute_point_distance(A, B)
130 | indices = np.argmin(dists, axis=-1)
131 | return indices
132 |
133 | def voxel_sample_points(points, voxel_size=0.008):
134 | '''
135 | **Input:**
136 |
137 | - points: (N, 3)
138 |
139 | **Output:**
140 |
141 | - points: (n, 3)
142 | '''
143 | cloud = o3d.geometry.PointCloud()
144 | cloud.points = o3d.utility.Vector3dVector(points)
145 | cloud = cloud.voxel_down_sample(voxel_size)
146 | points = np.array(cloud.points)
147 | return points
148 |
149 | def topk_grasps(grasps, k=10):
150 | '''
151 | **Input:**
152 |
153 | - grasps: (N, 17)
154 |
155 | - k: int
156 |
157 | **Output:**
158 |
159 | - topk_grasps: (k, 17)
160 | '''
161 | assert(k > 0)
162 | grasp_confidence = grasps[:, 0]
163 | indices = np.argsort(-grasp_confidence)
164 | topk_indices = indices[:min(k, len(grasps))]
165 | topk_grasps = grasps[topk_indices]
166 | return topk_grasps
167 |
168 | def get_grasp_score(grasp, obj, fc_list, force_closure_quality_config):
169 | tmp, is_force_closure = False, False
170 | quality = -1
171 | for ind_, value_fc in enumerate(fc_list):
172 | value_fc = round(value_fc, 2)
173 | tmp = is_force_closure
174 | is_force_closure = PointGraspMetrics3D.grasp_quality(grasp, obj, force_closure_quality_config[value_fc])
175 | if tmp and not is_force_closure:
176 | quality = round(fc_list[ind_ - 1], 2)
177 | break
178 | elif is_force_closure and value_fc == fc_list[-1]:
179 | quality = value_fc
180 | break
181 | elif value_fc == fc_list[0] and not is_force_closure:
182 | break
183 | return quality
184 |
185 | def collision_detection(grasp_list, model_list, dexnet_models, poses, scene_points, outlier=0.05, empty_thresh=10, return_dexgrasps=False):
186 | '''
187 | **Input:**
188 |
189 | - grasp_list: [(k1, 17), (k2, 17), ..., (kn, 17)] in camera coordinate
190 |
191 | - model_list: [(N1, 3), (N2, 3), ..., (Nn, 3)] in camera coordinate
192 |
193 | - dexnet_models: [GraspableObject3D,] in object coordinate
194 |
195 | - poses: [(4, 4),] from model coordinate to camera coordinate
196 |
197 | - scene_points: (Ns, 3) in camera coordinate
198 |
199 | - outlier: float, used to compute workspace mask
200 |
201 | - empty_thresh: int, 'num_inner_points < empty_thresh' means empty grasp
202 |
203 | - return_dexgrasps: bool, return grasps in dex-net format while True
204 |
205 | **Output:**
206 |
207 | - collision_mask_list: [(k1,), (k2,), ..., (kn,)]
208 |
209 | - empty_mask_list: [(k1,), (k2,), ..., (kn,)]
210 |
211 | - dexgrasp_list: [[ParallelJawPtGrasp3D,],] in object coordinate
212 | '''
213 | height = 0.02
214 | depth_base = 0.02
215 | finger_width = 0.01
216 | collision_mask_list = list()
217 | num_models = len(model_list)
218 | empty_mask_list = list()
219 | dexgrasp_list = list()
220 |
221 | for i in range(num_models):
222 | if len(grasp_list[i]) == 0:
223 | collision_mask_list.append(list())
224 | empty_mask_list.append(list())
225 | if return_dexgrasps:
226 | dexgrasp_list.append(list())
227 | continue
228 |
229 | ## parse grasp parameters
230 | model = model_list[i]
231 | obj_pose = poses[i]
232 | dexnet_model = dexnet_models[i]
233 | grasps = grasp_list[i]
234 | grasp_points = grasps[:, 13:16]
235 | grasp_poses = grasps[:, 4:13].reshape([-1,3,3])
236 | grasp_depths = grasps[:, 3]
237 | grasp_widths = grasps[:, 1]
238 |
239 | ## crop scene, remove outlier
240 | xmin, xmax = model[:,0].min(), model[:,0].max()
241 | ymin, ymax = model[:,1].min(), model[:,1].max()
242 | zmin, zmax = model[:,2].min(), model[:,2].max()
243 | xlim = ((scene_points[:,0] > xmin-outlier) & (scene_points[:,0] < xmax+outlier))
244 | ylim = ((scene_points[:,1] > ymin-outlier) & (scene_points[:,1] < ymax+outlier))
245 | zlim = ((scene_points[:,2] > zmin-outlier) & (scene_points[:,2] < zmax+outlier))
246 | workspace = scene_points[xlim & ylim & zlim]
247 |
248 | # transform scene to gripper frame
249 | target = (workspace[np.newaxis,:,:] - grasp_points[:,np.newaxis,:])
250 | target = np.matmul(target, grasp_poses)
251 |
252 | # compute collision mask
253 | mask1 = ((target[:,:,2]>-height/2) & (target[:,:,2]<height/2))
254 | mask2 = ((target[:,:,0]>-depth_base) & (target[:,:,0]<grasp_depths[:,np.newaxis]))
255 | mask3 = (target[:,:,1]>-(grasp_widths[:,np.newaxis]/2+finger_width))
256 | mask4 = (target[:,:,1]<-grasp_widths[:,np.newaxis]/2)
257 | mask5 = (target[:,:,1]<(grasp_widths[:,np.newaxis]/2+finger_width))
258 | mask6 = (target[:,:,1]>grasp_widths[:,np.newaxis]/2)
259 | mask7 = ((target[:,:,0]>-(depth_base+finger_width)) & (target[:,:,0]<-depth_base))
260 |
261 | left_mask = (mask1 & mask2 & mask3 & mask4)
262 | right_mask = (mask1 & mask2 & mask5 & mask6)
263 | bottom_mask = (mask1 & mask3 & mask5 & mask7)
264 | inner_mask = (mask1 & mask2 &(~mask4) & (~mask6))
265 | collision_mask = np.any((left_mask | right_mask | bottom_mask), axis=-1)
266 | empty_mask = (np.sum(inner_mask, axis=-1) < empty_thresh)
267 | collision_mask = (collision_mask | empty_mask)
268 | collision_mask_list.append(collision_mask)
269 | empty_mask_list.append(empty_mask)
270 |
271 | ## generate grasps in dex-net format
272 | if return_dexgrasps:
273 | dexgrasps = list()
274 | for grasp_id,_ in enumerate(grasps):
275 | grasp_point = grasp_points[grasp_id]
276 | R = grasp_poses[grasp_id]
277 | width = grasp_widths[grasp_id]
278 | depth = grasp_depths[grasp_id]
279 | points_in_gripper = target[grasp_id][inner_mask[grasp_id]]
280 | if empty_mask[grasp_id]:
281 | dexgrasps.append(None)
282 | continue
283 | center = np.array([depth, 0, 0]).reshape([3, 1]) # gripper coordinate
284 | center = np.dot(grasp_poses[grasp_id], center).reshape([3])
285 | center = (center + grasp_point).reshape([1,3]) # camera coordinate
286 | center = transform_points(center, np.linalg.inv(obj_pose)).reshape([3]) # object coordinate
287 | R = np.dot(obj_pose[:3,:3].T, R)
288 | binormal, approach_angle = matrix_to_dexnet_params(R)
289 | grasp = ParallelJawPtGrasp3D(ParallelJawPtGrasp3D.configuration_from_params(
290 | center, binormal, width, approach_angle), depth)
291 | dexgrasps.append(grasp)
292 | dexgrasp_list.append(dexgrasps)
293 |
294 | if return_dexgrasps:
295 | return collision_mask_list, empty_mask_list, dexgrasp_list
296 | else:
297 | return collision_mask_list, empty_mask_list
298 |
299 | def eval_grasp(grasp_group, models, dexnet_models, poses, config, table=None, voxel_size=0.008, TOP_K = 50):
300 | '''
301 | **Input:**
302 |
303 | - grasp_group: GraspGroup instance for evaluation.
304 |
305 | - models: in model coordinate
306 |
307 | - dexnet_models: models in dexnet format
308 |
309 | - poses: from model to camera coordinate
310 |
311 | - config: dexnet config.
312 |
313 | - table: in camera coordinate
314 |
315 | - voxel_size: float of the voxel size.
316 |
317 | - TOP_K: int of the number of top grasps to evaluate.
318 | '''
319 | num_models = len(models)
320 | ## grasp nms
321 | grasp_group = grasp_group.nms(0.03, 30.0/180*np.pi)
322 |
323 | ## assign grasps to object
324 | # merge and sample scene
325 | model_trans_list = list()
326 | seg_mask = list()
327 | for i,model in enumerate(models):
328 | model_trans = transform_points(model, poses[i])
329 | seg = i * np.ones(model_trans.shape[0], dtype=np.int32)
330 | model_trans_list.append(model_trans)
331 | seg_mask.append(seg)
332 | seg_mask = np.concatenate(seg_mask, axis=0)
333 | scene = np.concatenate(model_trans_list, axis=0)
334 |
335 | # assign grasps
336 | indices = compute_closest_points(grasp_group.translations, scene)
337 | model_to_grasp = seg_mask[indices]
338 | pre_grasp_list = list()
339 | for i in range(num_models):
340 | grasp_i = grasp_group[model_to_grasp==i]
341 | grasp_i.sort_by_score()
342 | pre_grasp_list.append(grasp_i[:10].grasp_group_array)
343 | all_grasp_list = np.vstack(pre_grasp_list)
344 | remain_mask = np.argsort(all_grasp_list[:,0])[::-1]
345 | min_score = all_grasp_list[remain_mask[min(49,len(remain_mask) - 1)],0]
346 |
347 | grasp_list = []
348 | for i in range(num_models):
349 | remain_mask_i = pre_grasp_list[i][:,0] >= min_score
350 | grasp_list.append(pre_grasp_list[i][remain_mask_i])
351 | # grasp_list = pre_grasp_list
352 |
353 | ## collision detection
354 | if table is not None:
355 | scene = np.concatenate([scene, table])
356 |
357 | collision_mask_list, empty_list, dexgrasp_list = collision_detection(
358 | grasp_list, model_trans_list, dexnet_models, poses, scene, outlier=0.05, return_dexgrasps=True)
359 |
360 | ## evaluate grasps
361 | # score configurations
362 | force_closure_quality_config = dict()
363 | fc_list = np.array([1.2, 1.0, 0.8, 0.6, 0.4, 0.2])
364 | for value_fc in fc_list:
365 | value_fc = round(value_fc, 2)
366 | config['metrics']['force_closure']['friction_coef'] = value_fc
367 | force_closure_quality_config[value_fc] = GraspQualityConfigFactory.create_config(config['metrics']['force_closure'])
368 | # get grasp scores
369 | score_list = list()
370 |
371 | for i in range(num_models):
372 | dexnet_model = dexnet_models[i]
373 | collision_mask = collision_mask_list[i]
374 | dexgrasps = dexgrasp_list[i]
375 | scores = list()
376 | num_grasps = len(dexgrasps)
377 | for grasp_id in range(num_grasps):
378 | if collision_mask[grasp_id]:
379 | scores.append(-1.)
380 | continue
381 | if dexgrasps[grasp_id] is None:
382 | scores.append(-1.)
383 | continue
384 | grasp = dexgrasps[grasp_id]
385 | score = get_grasp_score(grasp, dexnet_model, fc_list, force_closure_quality_config)
386 | scores.append(score)
387 | score_list.append(np.array(scores))
388 |
389 | return grasp_list, score_list, collision_mask_list
390 |
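391 | # Minimal usage sketch of the geometry helpers above (illustrative only; the
392 | # arrays below are synthetic, not dataset data):
393 | #
394 | #     table = create_table_points(1.0, 1.0, 0.01, dx=-0.5, dy=-0.5, grid_size=0.01)
395 | #     pose = np.eye(4)
396 | #     pose[:3, 3] = [0.0, 0.0, 0.5]                       # shift 0.5 along camera z
397 | #     table_cam = transform_points(table, pose)            # model -> camera frame
398 | #     query = np.random.rand(100, 3).astype(np.float32)
399 | #     nn_idx = compute_closest_points(query, table_cam)    # (100,) indices into table_cam
400 | #     table_ds = voxel_sample_points(table_cam, voxel_size=0.008)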
--------------------------------------------------------------------------------
/graspnetAPI/utils/pose.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Minghao Gou'
2 | __version__ = '1.0'
3 | """
4 | define the pose class and functions associated with this class.
5 | """
6 |
7 | import numpy as np
8 | from . import trans3d
9 | from transforms3d.euler import euler2quat
10 |
11 | class Pose:
12 | def __init__(self,id,x,y,z,alpha,beta,gamma):
13 | self.id = id
14 | self.x = x
15 | self.y = y
16 | self.z = z
17 | # alpha, beta, gamma are in degrees
18 | self.alpha = alpha
19 | self.beta = beta
20 | self.gamma = gamma
21 | self.quat = self.get_quat()
22 | self.mat_4x4 = self.get_mat_4x4()
23 | self.translation = self.get_translation()
24 |
25 | def __repr__(self):
26 | return '\nPose id=%d,x=%f,y=%f,z=%f,alpha=%f,beta=%f,gamma=%f' %(self.id,self.x,self.y,self.z,self.alpha,self.beta,self.gamma)+'\n'+'translation:'+self.translation.__repr__() + '\nquat:'+self.quat.__repr__()+'\nmat_4x4:'+self.mat_4x4.__repr__()
27 |
28 | def get_id(self):
29 | """
30 | **Output:**
31 |
32 | - return the id of this object
33 | """
34 | return self.id
35 |
36 | def get_translation(self):
37 | """
38 | **Output:**
39 |
40 | - Convert self.x, self.y, self.z into self.translation
41 | """
42 | return np.array([self.x,self.y,self.z])
43 |
44 | def get_quat(self):
45 | """
46 | **Output:**
47 |
48 | - Convert self.alpha, self.beta, self.gamma into self.quat
49 | """
50 | euler = np.array([self.alpha, self.beta, self.gamma]) / 180.0 * np.pi
51 | quat = euler2quat(euler[0],euler[1],euler[2])
52 | return quat
53 |
54 | def get_mat_4x4(self):
55 | """
56 | **Output:**
57 |
58 | - Convert self.x, self.y, self.z, self.alpha, self.beta and self.gamma into mat_4x4 pose
59 | """
60 | mat_4x4 = trans3d.get_mat(self.x,self.y,self.z,self.alpha,self.beta,self.gamma)
61 | return mat_4x4
62 |
63 | def pose_from_pose_vector(pose_vector):
64 | """
65 | **Input:**
66 |
67 | - pose_vector: A list in the format of [id,x,y,z,alpha,beta,gamma]
68 |
69 | **Output:**
70 |
71 | - A pose class instance
72 | """
73 | return Pose(id = pose_vector[0],
74 | x = pose_vector[1],
75 | y = pose_vector[2],
76 | z = pose_vector[3],
77 | alpha = pose_vector[4],
78 | beta = pose_vector[5],
79 | gamma = pose_vector[6])
80 |
81 | def pose_list_from_pose_vector_list(pose_vector_list):
82 | """
83 | **Input:**
84 |
85 | - Pose vector list defined in xmlhandler.py
86 |
87 | **Output:**
88 |
89 | - list of poses.
90 | """
91 | pose_list = []
92 | for pose_vector in pose_vector_list:
93 | pose_list.append(pose_from_pose_vector(pose_vector))
94 | return pose_list
--------------------------------------------------------------------------------
/graspnetAPI/utils/rotation.py:
--------------------------------------------------------------------------------
1 | """ Author: chenxi-wang
2 | Transformation matrices from/to viewpoints and dexnet gripper params.
3 | """
4 |
5 | import numpy as np
6 | from math import pi
7 |
8 | def rotation_matrix(alpha, beta, gamma):
9 | '''
10 | **Input:**
11 |
12 | - alpha: float of alpha angle.
13 |
14 | - beta: float of beta angle.
15 |
16 | - gamma: float of the gamma angle.
17 |
18 | **Output:**
19 |
20 | - numpy array of shape (3, 3) of rotation matrix.
21 | '''
22 | Rx = np.array([[1, 0, 0],
23 | [0, np.cos(alpha), -np.sin(alpha)],
24 | [0, np.sin(alpha), np.cos(alpha)]])
25 | Ry = np.array([[np.cos(beta), 0, np.sin(beta)],
26 | [0, 1, 0],
27 | [-np.sin(beta), 0, np.cos(beta)]])
28 | Rz = np.array([[np.cos(gamma), -np.sin(gamma), 0],
29 | [np.sin(gamma), np.cos(gamma), 0],
30 | [0, 0, 1]])
31 | R = Rz.dot(Ry).dot(Rx)
32 | return R
33 |
34 | def matrix_to_dexnet_params(matrix):
35 | '''
36 | **Input:**
37 |
38 | - numpy array of shape (3, 3) of the rotation matrix.
39 |
40 | **Output:**
41 |
42 | - binormal: numpy array of shape (3,).
43 |
44 | - angle: float of the angle.
45 | '''
46 | approach = matrix[:, 0]
47 | binormal = matrix[:, 1]
48 | axis_y = binormal
49 | axis_x = np.array([axis_y[1], -axis_y[0], 0])
50 | if np.linalg.norm(axis_x) == 0:
51 | axis_x = np.array([1, 0, 0])
52 | axis_x = axis_x / np.linalg.norm(axis_x)
53 | axis_y = axis_y / np.linalg.norm(axis_y)
54 | axis_z = np.cross(axis_x, axis_y)
55 | R = np.c_[axis_x, np.c_[axis_y, axis_z]]
56 | approach = R.T.dot(approach)
57 | cos_t, sin_t = approach[0], -approach[2]
58 | angle = np.arccos(max(min(cos_t,1),-1))
59 | if sin_t < 0:
60 | angle = pi * 2 - angle
61 | return binormal, angle
62 |
63 | def viewpoint_params_to_matrix(towards, angle):
64 | '''
65 | **Input:**
66 |
67 | - towards: numpy array towards vector with shape (3,).
68 |
69 | - angle: float of in-plane rotation.
70 |
71 | **Output:**
72 |
73 | - numpy array of the rotation matrix with shape (3, 3).
74 | '''
75 | axis_x = towards
76 | axis_y = np.array([-axis_x[1], axis_x[0], 0])
77 | if np.linalg.norm(axis_y) == 0:
78 | axis_y = np.array([0, 1, 0])
79 | axis_x = axis_x / np.linalg.norm(axis_x)
80 | axis_y = axis_y / np.linalg.norm(axis_y)
81 | axis_z = np.cross(axis_x, axis_y)
82 | R1 = np.array([[1, 0, 0],
83 | [0, np.cos(angle), -np.sin(angle)],
84 | [0, np.sin(angle), np.cos(angle)]])
85 | R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
86 | matrix = R2.dot(R1)
87 | return matrix.astype(np.float32)
88 |
89 | def batch_viewpoint_params_to_matrix(batch_towards, batch_angle):
90 | '''
91 | **Input:**
92 |
93 | - batch_towards: numpy array of towards vectors with shape (n, 3).
94 | 
95 | - batch_angle: numpy array of in-plane rotations with shape (n,).
96 |
97 | **Output:**
98 |
99 | - numpy array of the rotation matrix with shape (n, 3, 3).
100 | '''
101 | axis_x = batch_towards
102 | ones = np.ones(axis_x.shape[0], dtype=axis_x.dtype)
103 | zeros = np.zeros(axis_x.shape[0], dtype=axis_x.dtype)
104 | axis_y = np.stack([-axis_x[:,1], axis_x[:,0], zeros], axis=-1)
105 | mask_y = (np.linalg.norm(axis_y, axis=-1) == 0)
106 | axis_y[mask_y] = np.array([0, 1, 0])
107 | axis_x = axis_x / np.linalg.norm(axis_x, axis=-1, keepdims=True)
108 | axis_y = axis_y / np.linalg.norm(axis_y, axis=-1, keepdims=True)
109 | axis_z = np.cross(axis_x, axis_y)
110 | sin = np.sin(batch_angle)
111 | cos = np.cos(batch_angle)
112 | R1 = np.stack([ones, zeros, zeros, zeros, cos, -sin, zeros, sin, cos], axis=-1)
113 | R1 = R1.reshape([-1,3,3])
114 | R2 = np.stack([axis_x, axis_y, axis_z], axis=-1)
115 | matrix = np.matmul(R2, R1)
116 | return matrix.astype(np.float32)
117 |
118 | def dexnet_params_to_matrix(binormal, angle):
119 | '''
120 | **Input:**
121 |
122 | - binormal: numpy array of shape (3,).
123 |
124 | - angle: float of the angle.
125 |
126 | **Output:**
127 |
128 | - numpy array of shape (3, 3) of the rotation matrix.
129 | '''
130 | axis_y = binormal
131 | axis_x = np.array([axis_y[1], -axis_y[0], 0])
132 | if np.linalg.norm(axis_x) == 0:
133 | axis_x = np.array([1, 0, 0])
134 | axis_x = axis_x / np.linalg.norm(axis_x)
135 | axis_y = axis_y / np.linalg.norm(axis_y)
136 | axis_z = np.cross(axis_x, axis_y)
137 | R1 = np.array([[np.cos(angle), 0, np.sin(angle)],
138 | [0, 1, 0],
139 | [-np.sin(angle), 0, np.cos(angle)]])
140 | R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
141 | matrix = R2.dot(R1)
142 | return matrix
143 |
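144 | # Minimal round-trip sketch (illustrative): build a rotation from a viewpoint and
145 | # an in-plane angle, convert it to dex-net (binormal, angle) parameters, and
146 | # rebuild it; the two matrices should agree up to numerical precision.
147 | #
148 | #     towards = np.array([0.0, 0.0, 1.0])
149 | #     R = viewpoint_params_to_matrix(towards, np.pi / 6)
150 | #     binormal, angle = matrix_to_dexnet_params(R)
151 | #     R_back = dexnet_params_to_matrix(binormal, angle)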
--------------------------------------------------------------------------------
/graspnetAPI/utils/trans3d.py:
--------------------------------------------------------------------------------
1 | from transforms3d.quaternions import mat2quat, quat2mat
2 | from transforms3d.euler import quat2euler, euler2quat
3 | import numpy as np
4 |
5 | def get_pose(pose):
6 | pos, quat = pose_4x4_to_pos_quat(pose)
7 | euler = np.array([quat2euler(quat)[0], quat2euler(quat)[1],quat2euler(quat)[2]])
8 | euler = euler * 180.0 / np.pi
9 | alpha, beta, gamma = euler[0], euler[1], euler[2]
10 | x, y, z = pos[0], pos[1], pos[2]
11 | return x,y,z, alpha, beta, gamma
12 |
13 | def get_mat(x,y,z, alpha, beta, gamma):
14 | """
15 | Convert x, y, z and alpha, beta, gamma (in degrees) into a 4x4 pose matrix
16 | """
17 | try:
18 | euler = np.array([alpha, beta, gamma]) / 180.0 * np.pi
19 | quat = np.array(euler2quat(euler[0],euler[1],euler[2]))
20 | pose = pos_quat_to_pose_4x4(np.array([x,y,z]), quat)
21 | return pose
22 | except Exception as e:
23 | print(str(e))
24 | pass
25 |
26 | def pos_quat_to_pose_4x4(pos, quat):
27 | """pose = pos_quat_to_pose_4x4(pos, quat)
28 | Convert pos and quat into pose, 4x4 format
29 |
30 | Args:
31 | pos: length-3 position
32 | quat: length-4 quaternion
33 |
34 | Returns:
35 | pose: numpy array, 4x4
36 | """
37 | pose = np.zeros([4, 4])
38 | mat = quat2mat(quat)
39 | pose[0:3, 0:3] = mat[:, :]
40 | pose[0:3, -1] = pos[:]
41 | pose[-1, -1] = 1
42 | return pose
43 |
44 |
45 | def pose_4x4_to_pos_quat(pose):
46 | """
47 | Convert pose, 4x4 format into pos and quat
48 |
49 | Args:
50 | pose: numpy array, 4x4
51 | Returns:
52 | pos: length-3 position
53 | quat: length-4 quaternion
54 |
55 | """
56 | mat = pose[:3, :3]
57 | quat = mat2quat(mat)
58 | pos = np.zeros([3])
59 | pos[0] = pose[0, 3]
60 | pos[1] = pose[1, 3]
61 | pos[2] = pose[2, 3]
62 | return pos, quat
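63 | 
64 | # Minimal round-trip sketch (illustrative; the numbers are made up):
65 | #
66 | #     pose = pos_quat_to_pose_4x4(np.array([0.1, 0.2, 0.3]), np.array([1.0, 0.0, 0.0, 0.0]))
67 | #     pos, quat = pose_4x4_to_pos_quat(pose)            # recovers the same pos / quat
68 | #     x, y, z, alpha, beta, gamma = get_pose(pose)      # Euler angles in degrees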
--------------------------------------------------------------------------------
/graspnetAPI/utils/utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import open3d as o3d
3 | import numpy as np
4 | from PIL import Image
5 | from transforms3d.euler import euler2mat
6 |
7 | from .rotation import batch_viewpoint_params_to_matrix
8 | from .xmlhandler import xmlReader
9 |
10 | class CameraInfo():
11 | ''' Author: chenxi-wang
12 | Camera intrinsics for point cloud generation.
13 | '''
14 | def __init__(self, width, height, fx, fy, cx, cy, scale):
15 | self.width = width
16 | self.height = height
17 | self.fx = fx
18 | self.fy = fy
19 | self.cx = cx
20 | self.cy = cy
21 | self.scale = scale
22 |
23 | def get_camera_intrinsic(camera):
24 | '''
25 | **Input:**
26 |
27 | - camera: string of type of camera, "realsense" or "kinect".
28 |
29 | **Output:**
30 |
31 | - numpy array of shape (3, 3) of the camera intrinsic matrix.
32 | '''
33 | param = o3d.camera.PinholeCameraParameters()
34 | if camera == 'kinect':
35 | param.intrinsic.set_intrinsics(1280,720,631.55,631.21,638.43,366.50)
36 | elif camera == 'realsense':
37 | param.intrinsic.set_intrinsics(1280,720,927.17,927.37,651.32,349.62)
38 | intrinsic = param.intrinsic.intrinsic_matrix
39 | return intrinsic
40 |
41 | def create_point_cloud_from_depth_image(depth, camera, organized=True):
42 | assert(depth.shape[0] == camera.height and depth.shape[1] == camera.width)
43 | xmap = np.arange(camera.width)
44 | ymap = np.arange(camera.height)
45 | xmap, ymap = np.meshgrid(xmap, ymap)
46 | points_z = depth / camera.scale
47 | points_x = (xmap - camera.cx) * points_z / camera.fx
48 | points_y = (ymap - camera.cy) * points_z / camera.fy
49 | cloud = np.stack([points_x, points_y, points_z], axis=-1)
50 | if not organized:
51 | cloud = cloud.reshape([-1, 3])
52 | return cloud
53 |
54 | def generate_views(N, phi=(np.sqrt(5)-1)/2, center=np.zeros(3, dtype=np.float32), R=1):
55 | ''' Author: chenxi-wang
56 | View sampling on a sphere using Fibonacci lattices.
57 |
58 | **Input:**
59 |
60 | - N: int, number of viewpoints.
61 |
62 | - phi: float, constant angle to sample views, usually 0.618.
63 |
64 | - center: numpy array of (3,), sphere center.
65 |
66 | - R: float, sphere radius.
67 |
68 | **Output:**
69 |
70 | - numpy array of (N, 3), coordinates of viewpoints.
71 | '''
72 | idxs = np.arange(N, dtype=np.float32)
73 | Z = (2 * idxs + 1) / N - 1
74 | X = np.sqrt(1 - Z**2) * np.cos(2 * idxs * np.pi * phi)
75 | Y = np.sqrt(1 - Z**2) * np.sin(2 * idxs * np.pi * phi)
76 | views = np.stack([X,Y,Z], axis=1)
77 | views = R * np.array(views) + center
78 | return views
79 |
80 | def generate_scene_model(dataset_root, scene_name, anno_idx, return_poses=False, align=False, camera='realsense'):
81 | '''
82 | Author: chenxi-wang
83 |
84 | **Input:**
85 |
86 | - dataset_root: str, graspnet dataset root
87 |
88 | - scene_name: str, name of scene folder, e.g. scene_0000
89 |
90 | - anno_idx: int, frame index from 0-255
91 |
92 | - return_poses: bool, return object ids and 6D poses if set to True
93 |
94 | - align: bool, transform to table coordinates if set to True
95 |
96 | - camera: str, camera name (realsense or kinect)
97 |
98 | **Output:**
99 |
100 | - list of open3d.geometry.PointCloud.
101 | '''
102 | if align:
103 | camera_poses = np.load(os.path.join(dataset_root, 'scenes', scene_name, camera, 'camera_poses.npy'))
104 | camera_pose = camera_poses[anno_idx]
105 | align_mat = np.load(os.path.join(dataset_root, 'scenes', scene_name, camera, 'cam0_wrt_table.npy'))
106 | camera_pose = np.matmul(align_mat,camera_pose)
107 | print('Scene {}, {}'.format(scene_name, camera))
108 | scene_reader = xmlReader(os.path.join(dataset_root, 'scenes', scene_name, camera, 'annotations', '%04d.xml'%anno_idx))
109 | posevectors = scene_reader.getposevectorlist()
110 | obj_list = []
111 | mat_list = []
112 | model_list = []
113 | pose_list = []
114 | for posevector in posevectors:
115 | obj_idx, pose = parse_posevector(posevector)
116 | obj_list.append(obj_idx)
117 | mat_list.append(pose)
118 |
119 | for obj_idx, pose in zip(obj_list, mat_list):
120 | plyfile = os.path.join(dataset_root, 'models', '%03d'%obj_idx, 'nontextured.ply')
121 | model = o3d.io.read_point_cloud(plyfile)
122 | points = np.array(model.points)
123 | if align:
124 | pose = np.dot(camera_pose, pose)
125 | points = transform_points(points, pose)
126 | model.points = o3d.utility.Vector3dVector(points)
127 | model_list.append(model)
128 | pose_list.append(pose)
129 |
130 | if return_poses:
131 | return model_list, obj_list, pose_list
132 | else:
133 | return model_list
134 |
135 | def generate_scene_pointcloud(dataset_root, scene_name, anno_idx, align=False, camera='kinect'):
136 | '''
137 | Author: chenxi-wang
138 |
139 | **Input:**
140 |
141 | - dataset_root: str, graspnet dataset root
142 |
143 | - scene_name: str, name of scene folder, e.g. scene_0000
144 |
145 | - anno_idx: int, frame index from 0-255
146 |
147 | - align: bool, transform to table coordinates if set to True
148 |
149 | - camera: str, camera name (realsense or kinect)
150 |
151 | **Output:**
152 |
153 | - open3d.geometry.PointCloud.
154 | '''
155 | colors = np.array(Image.open(os.path.join(dataset_root, 'scenes', scene_name, camera, 'rgb', '%04d.png'%anno_idx)), dtype=np.float32) / 255.0
156 | depths = np.array(Image.open(os.path.join(dataset_root, 'scenes', scene_name, camera, 'depth', '%04d.png'%anno_idx)))
157 | intrinsics = np.load(os.path.join(dataset_root, 'scenes', scene_name, camera, 'camK.npy'))
158 | fx, fy = intrinsics[0,0], intrinsics[1,1]
159 | cx, cy = intrinsics[0,2], intrinsics[1,2]
160 | s = 1000.0
161 |
162 | if align:
163 | camera_poses = np.load(os.path.join(dataset_root, 'scenes', scene_name, camera, 'camera_poses.npy'))
164 | camera_pose = camera_poses[anno_idx]
165 | align_mat = np.load(os.path.join(dataset_root, 'scenes', scene_name, camera, 'cam0_wrt_table.npy'))
166 | camera_pose = align_mat.dot(camera_pose)
167 |
168 | xmap, ymap = np.arange(colors.shape[1]), np.arange(colors.shape[0])
169 | xmap, ymap = np.meshgrid(xmap, ymap)
170 |
171 | points_z = depths / s
172 | points_x = (xmap - cx) / fx * points_z
173 | points_y = (ymap - cy) / fy * points_z
174 |
175 | mask = (points_z > 0)
176 | points = np.stack([points_x, points_y, points_z], axis=-1)
177 | points = points[mask]
178 | colors = colors[mask]
179 | if align:
180 | points = transform_points(points, camera_pose)
181 |
182 | cloud = o3d.geometry.PointCloud()
183 | cloud.points = o3d.utility.Vector3dVector(points)
184 | cloud.colors = o3d.utility.Vector3dVector(colors)
185 |
186 | return cloud
187 |
188 | def rotation_matrix(rx, ry, rz):
189 | '''
190 | Author: chenxi-wang
191 |
192 | **Input:**
193 |
194 | - rx/ry/rz: float, rotation angle along x/y/z-axis
195 |
196 | **Output:**
197 |
198 | - numpy array of (3, 3), rotation matrix.
199 | '''
200 | Rx = np.array([[1, 0, 0],
201 | [0, np.cos(rx), -np.sin(rx)],
202 | [0, np.sin(rx), np.cos(rx)]])
203 | Ry = np.array([[ np.cos(ry), 0, np.sin(ry)],
204 | [ 0, 1, 0],
205 | [-np.sin(ry), 0, np.cos(ry)]])
206 | Rz = np.array([[np.cos(rz), -np.sin(rz), 0],
207 | [np.sin(rz), np.cos(rz), 0],
208 | [ 0, 0, 1]])
209 | R = Rz.dot(Ry).dot(Rx)
210 | return R
211 |
212 | def transform_matrix(tx, ty, tz, rx, ry, rz):
213 | '''
214 | Author: chenxi-wang
215 |
216 | **Input:**
217 |
218 | - tx/ty/tz: float, translation along x/y/z-axis
219 |
220 | - rx/ry/rz: float, rotation angle along x/y/z-axis
221 |
222 | **Output:**
223 |
224 | - numpy array of (4, 4), transformation matrix.
225 | '''
226 | trans = np.eye(4)
227 | trans[:3,3] = np.array([tx, ty, tz])
228 | rot_x = np.array([[1, 0, 0],
229 | [0, np.cos(rx), -np.sin(rx)],
230 | [0, np.sin(rx), np.cos(rx)]])
231 | rot_y = np.array([[ np.cos(ry), 0, np.sin(ry)],
232 | [ 0, 1, 0],
233 | [-np.sin(ry), 0, np.cos(ry)]])
234 | rot_z = np.array([[np.cos(rz), -np.sin(rz), 0],
235 | [np.sin(rz), np.cos(rz), 0],
236 | [ 0, 0, 1]])
237 | trans[:3,:3] = rot_x.dot(rot_y).dot(rot_z)
238 | return trans
239 |
240 | def matrix_to_dexnet_params(matrix):
241 | '''
242 | Author: chenxi-wang
243 |
244 | **Input:**
245 |
246 | - numpy array of shape (3, 3) of the rotation matrix.
247 |
248 | **Output:**
249 |
250 | - binormal: numpy array of shape (3,).
251 |
252 | - angle: float of the angle.
253 | '''
254 | approach = matrix[:, 0]
255 | binormal = matrix[:, 1]
256 | axis_y = binormal
257 | axis_x = np.array([axis_y[1], -axis_y[0], 0])
258 | if np.linalg.norm(axis_x) == 0:
259 | axis_x = np.array([1, 0, 0])
260 | axis_x = axis_x / np.linalg.norm(axis_x)
261 | axis_y = axis_y / np.linalg.norm(axis_y)
262 | axis_z = np.cross(axis_x, axis_y)
263 | R = np.c_[axis_x, np.c_[axis_y, axis_z]]
264 | approach = R.T.dot(approach)
265 | cos_t, sin_t = approach[0], -approach[2]
266 | angle = np.arccos(cos_t)
267 | if sin_t < 0:
268 | angle = np.pi * 2 - angle
269 | return binormal, angle
270 |
271 | def viewpoint_params_to_matrix(towards, angle):
272 | '''
273 | Author: chenxi-wang
274 |
275 | **Input:**
276 |
277 |     - towards: numpy array of shape (3,), the approach (towards) vector.
278 |
279 | - angle: float of in-plane rotation.
280 |
281 | **Output:**
282 |
283 | - numpy array of the rotation matrix with shape (3, 3).
284 | '''
285 | axis_x = towards
286 | axis_y = np.array([-axis_x[1], axis_x[0], 0])
287 | if np.linalg.norm(axis_y) == 0:
288 | axis_y = np.array([0, 1, 0])
289 | axis_x = axis_x / np.linalg.norm(axis_x)
290 | axis_y = axis_y / np.linalg.norm(axis_y)
291 | axis_z = np.cross(axis_x, axis_y)
292 | R1 = np.array([[1, 0, 0],
293 | [0, np.cos(angle), -np.sin(angle)],
294 | [0, np.sin(angle), np.cos(angle)]])
295 | R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
296 | matrix = R2.dot(R1)
297 | return matrix
298 |
299 | def dexnet_params_to_matrix(binormal, angle):
300 | '''
301 | Author: chenxi-wang
302 |
303 | **Input:**
304 |
305 | - binormal: numpy array of shape (3,).
306 |
307 | - angle: float of the angle.
308 |
309 | **Output:**
310 |
311 | - numpy array of shape (3, 3) of the rotation matrix.
312 | '''
313 | axis_y = binormal
314 | axis_x = np.array([axis_y[1], -axis_y[0], 0])
315 | if np.linalg.norm(axis_x) == 0:
316 | axis_x = np.array([1, 0, 0])
317 | axis_x = axis_x / np.linalg.norm(axis_x)
318 | axis_y = axis_y / np.linalg.norm(axis_y)
319 | axis_z = np.cross(axis_x, axis_y)
320 | R1 = np.array([[np.cos(angle), 0, np.sin(angle)],
321 | [0, 1, 0],
322 | [-np.sin(angle), 0, np.cos(angle)]])
323 | R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
324 | matrix = R2.dot(R1)
325 | return matrix
326 |
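A small sketch of the conversion chain between the (view, in-plane angle) parameterization and the dex-net (binormal, angle) parameterization, assuming the three helpers above are in scope:

import numpy as np

towards = np.array([0.0, 0.0, 1.0])                      # approaching direction
R = viewpoint_params_to_matrix(towards, np.pi / 4)       # (3, 3) grasp rotation
binormal, angle = matrix_to_dexnet_params(R)             # dex-net style parameters
R_dexnet = dexnet_params_to_matrix(binormal, angle)      # rotation rebuilt from them
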
327 | def transform_points(points, trans):
328 | '''
329 | Author: chenxi-wang
330 |
331 | **Input:**
332 |
333 | - points: numpy array of (N,3), point cloud
334 |
335 | - trans: numpy array of (4,4), transformation matrix
336 |
337 | **Output:**
338 |
339 | - numpy array of (N,3), transformed points.
340 | '''
341 | ones = np.ones([points.shape[0],1], dtype=points.dtype)
342 | points_ = np.concatenate([points, ones], axis=-1)
343 | points_ = np.matmul(trans, points_.T).T
344 | return points_[:,:3]
345 |
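A quick sketch combining transform_matrix and transform_points to move an (N, 3) point cloud into another frame (the extrinsic below is made up):

import numpy as np

cloud_cam = np.random.rand(1024, 3)                               # points in the camera frame
cam_to_table = transform_matrix(0.0, 0.0, 0.5, 0.0, 0.0, np.pi)   # hypothetical extrinsic
cloud_table = transform_points(cloud_cam, cam_to_table)           # still (1024, 3)
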
346 | def get_model_grasps(datapath):
347 | ''' Author: chenxi-wang
348 | Load grasp labels from .npz files.
349 | '''
350 | label = np.load(datapath)
351 | points = label['points']
352 | offsets = label['offsets']
353 | scores = label['scores']
354 | collision = label['collision']
355 | return points, offsets, scores, collision
356 |
357 | def parse_posevector(posevector):
358 | ''' Author: chenxi-wang
359 | Decode posevector to object id and transformation matrix.
360 | '''
361 | mat = np.zeros([4,4],dtype=np.float32)
362 | alpha, beta, gamma = posevector[4:7]
363 | alpha = alpha / 180.0 * np.pi
364 | beta = beta / 180.0 * np.pi
365 | gamma = gamma / 180.0 * np.pi
366 | mat[:3,:3] = euler2mat(alpha, beta, gamma)
367 | mat[:3,3] = posevector[1:4]
368 | mat[3,3] = 1
369 | obj_idx = int(posevector[0])
370 | return obj_idx, mat
371 |
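A hedged sketch of how parse_posevector is typically fed (the annotation path is a placeholder; imports assume the graspnetAPI package layout): read a scene annotation with xmlReader and decode each pose vector:

import os
from graspnetAPI.utils.xmlhandler import xmlReader
from graspnetAPI.utils.utils import parse_posevector

anno_path = os.path.join('/path/to/graspnet', 'scenes', 'scene_0000', 'kinect', 'annotations', '0000.xml')
scene_reader = xmlReader(anno_path)
for posevector in scene_reader.getposevectorlist():
    obj_idx, mat = parse_posevector(posevector)   # int object id, (4, 4) pose in the camera frame
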
372 | def create_mesh_box(width, height, depth, dx=0, dy=0, dz=0):
373 | ''' Author: chenxi-wang
374 | Create box instance with mesh representation.
375 | '''
376 | box = o3d.geometry.TriangleMesh()
377 | vertices = np.array([[0,0,0],
378 | [width,0,0],
379 | [0,0,depth],
380 | [width,0,depth],
381 | [0,height,0],
382 | [width,height,0],
383 | [0,height,depth],
384 | [width,height,depth]])
385 | vertices[:,0] += dx
386 | vertices[:,1] += dy
387 | vertices[:,2] += dz
388 | triangles = np.array([[4,7,5],[4,6,7],[0,2,4],[2,6,4],
389 | [0,1,2],[1,3,2],[1,5,7],[1,7,3],
390 | [2,3,7],[2,7,6],[0,4,1],[1,4,5]])
391 | box.vertices = o3d.utility.Vector3dVector(vertices)
392 | box.triangles = o3d.utility.Vector3iVector(triangles)
393 | return box
394 |
395 | def create_table_cloud(width, height, depth, dx=0, dy=0, dz=0, grid_size=0.01):
396 | '''
397 | Author: chenxi-wang
398 |
399 | **Input:**
400 |
401 | - width/height/depth: float, table width/height/depth along x/z/y-axis in meters
402 |
403 | - dx/dy/dz: float, offset along x/y/z-axis in meters
404 |
405 | - grid_size: float, point distance along x/y/z-axis in meters
406 |
407 | **Output:**
408 |
409 | - open3d.geometry.PointCloud
410 | '''
411 | xmap = np.linspace(0, width, int(width/grid_size))
412 | ymap = np.linspace(0, depth, int(depth/grid_size))
413 | zmap = np.linspace(0, height, int(height/grid_size))
414 | xmap, ymap, zmap = np.meshgrid(xmap, ymap, zmap, indexing='xy')
415 | xmap += dx
416 | ymap += dy
417 | zmap += dz
418 | points = np.stack([xmap, ymap, zmap], axis=-1)
419 | points = points.reshape([-1, 3])
420 | cloud = o3d.geometry.PointCloud()
421 | cloud.points = o3d.utility.Vector3dVector(points)
422 | return cloud
423 |
424 | def create_axis(length,grid_size = 0.01):
425 | num = int(length / grid_size)
426 | xmap = np.linspace(0,length,num)
427 | ymap = np.linspace(0,2*length,num)
428 | zmap = np.linspace(0,3*length,num)
429 | x_p = np.vstack([xmap.T,np.zeros((1,num)),np.zeros((1,num))])
430 | y_p = np.vstack([np.zeros((1,num)),ymap.T,np.zeros((1,num))])
431 | z_p = np.vstack([np.zeros((1,num)),np.zeros((1,num)),zmap.T])
432 | p = np.hstack([x_p,y_p,z_p])
433 | # print('p',p.shape)
434 | cloud = o3d.geometry.PointCloud()
435 | cloud.points = o3d.utility.Vector3dVector(p.T)
436 | return cloud
437 |
438 | def plot_axis(R,center,length,grid_size = 0.01):
439 | num = int(length / grid_size)
440 | xmap = np.linspace(0,length,num)
441 | ymap = np.linspace(0,2*length,num)
442 | zmap = np.linspace(0,3*length,num)
443 | x_p = np.vstack([xmap.T,np.zeros((1,num)),np.zeros((1,num))])
444 | y_p = np.vstack([np.zeros((1,num)),ymap.T,np.zeros((1,num))])
445 | z_p = np.vstack([np.zeros((1,num)),np.zeros((1,num)),zmap.T])
446 | p = np.hstack([x_p,y_p,z_p])
447 | # print('p',p.shape)
448 | p = np.dot(R, p).T + center
449 | cloud = o3d.geometry.PointCloud()
450 | cloud.points = o3d.utility.Vector3dVector(p)
451 | return cloud
452 |
453 | def plot_gripper_pro_max(center, R, width, depth, score=1, color=None):
454 | '''
455 | Author: chenxi-wang
456 |
457 | **Input:**
458 |
459 | - center: numpy array of (3,), target point as gripper center
460 |
461 | - R: numpy array of (3,3), rotation matrix of gripper
462 |
463 |     - width/depth: float, gripper width and depth
464 | 
465 |     - score: float, grasp quality score; color: optional (r, g, b) tuple, overrides the score-based coloring
466 |
467 | **Output:**
468 |
469 | - open3d.geometry.TriangleMesh
470 | '''
471 | x, y, z = center
472 | height=0.004
473 | finger_width = 0.004
474 | tail_length = 0.04
475 | depth_base = 0.02
476 |
477 | if color is not None:
478 | color_r, color_g, color_b = color
479 | else:
480 | color_r = score # red for high score
481 | color_g = 0
482 | color_b = 1 - score # blue for low score
483 |
484 | left = create_mesh_box(depth+depth_base+finger_width, finger_width, height)
485 | right = create_mesh_box(depth+depth_base+finger_width, finger_width, height)
486 | bottom = create_mesh_box(finger_width, width, height)
487 | tail = create_mesh_box(tail_length, finger_width, height)
488 |
489 | left_points = np.array(left.vertices)
490 | left_triangles = np.array(left.triangles)
491 | left_points[:,0] -= depth_base + finger_width
492 | left_points[:,1] -= width/2 + finger_width
493 | left_points[:,2] -= height/2
494 |
495 | right_points = np.array(right.vertices)
496 | right_triangles = np.array(right.triangles) + 8
497 | right_points[:,0] -= depth_base + finger_width
498 | right_points[:,1] += width/2
499 | right_points[:,2] -= height/2
500 |
501 | bottom_points = np.array(bottom.vertices)
502 | bottom_triangles = np.array(bottom.triangles) + 16
503 | bottom_points[:,0] -= finger_width + depth_base
504 | bottom_points[:,1] -= width/2
505 | bottom_points[:,2] -= height/2
506 |
507 | tail_points = np.array(tail.vertices)
508 | tail_triangles = np.array(tail.triangles) + 24
509 | tail_points[:,0] -= tail_length + finger_width + depth_base
510 | tail_points[:,1] -= finger_width / 2
511 | tail_points[:,2] -= height/2
512 |
513 | vertices = np.concatenate([left_points, right_points, bottom_points, tail_points], axis=0)
514 | vertices = np.dot(R, vertices.T).T + center
515 | triangles = np.concatenate([left_triangles, right_triangles, bottom_triangles, tail_triangles], axis=0)
516 | colors = np.array([ [color_r,color_g,color_b] for _ in range(len(vertices))])
517 |
518 | gripper = o3d.geometry.TriangleMesh()
519 | gripper.vertices = o3d.utility.Vector3dVector(vertices)
520 | gripper.triangles = o3d.utility.Vector3iVector(triangles)
521 | gripper.vertex_colors = o3d.utility.Vector3dVector(colors)
522 | return gripper
523 |
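A minimal sketch rendering a single gripper mesh at the origin; score drives the red-to-blue coloring unless an explicit color is passed:

import numpy as np
import open3d as o3d

gripper = plot_gripper_pro_max(np.zeros(3), np.eye(3), width=0.06, depth=0.02, score=0.9)
o3d.visualization.draw_geometries([gripper])
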
524 |
525 | def find_scene_by_model_id(dataset_root, model_id_list):
526 | picked_scene_names = []
527 | scene_names = ['scene_'+str(i).zfill(4) for i in range(190)]
528 | for scene_name in scene_names:
529 | try:
530 | scene_reader = xmlReader(os.path.join(dataset_root, 'scenes', scene_name, 'kinect', 'annotations', '0000.xml'))
531 | except:
532 | continue
533 | posevectors = scene_reader.getposevectorlist()
534 | for posevector in posevectors:
535 | obj_idx, _ = parse_posevector(posevector)
536 | if obj_idx in model_id_list:
537 | picked_scene_names.append(scene_name)
538 | print(obj_idx, scene_name)
539 | break
540 | return picked_scene_names
541 |
542 | def generate_scene(scene_idx, anno_idx, return_poses=False, align=False, camera='realsense'):
543 | camera_poses = np.load(os.path.join('scenes','scene_%04d' %(scene_idx,),camera, 'camera_poses.npy'))
544 | camera_pose = camera_poses[anno_idx]
545 | if align:
546 | align_mat = np.load(os.path.join('camera_poses', '{}_alignment.npy'.format(camera)))
547 | camera_pose = align_mat.dot(camera_pose)
548 | camera_split = 'data' if camera == 'realsense' else 'data_kinect'
549 | # print('Scene {}, {}'.format(scene_idx, camera_split))
550 | scene_reader = xmlReader(os.path.join(scenedir % (scene_idx, camera), 'annotations', '%04d.xml'%(anno_idx)))
551 | posevectors = scene_reader.getposevectorlist()
552 | obj_list = []
553 | mat_list = []
554 | model_list = []
555 | pose_list = []
556 | for posevector in posevectors:
557 | obj_idx, mat = parse_posevector(posevector)
558 | obj_list.append(obj_idx)
559 | mat_list.append(mat)
560 |
561 | for obj_idx, mat in zip(obj_list, mat_list):
562 | model = o3d.io.read_point_cloud(os.path.join(modeldir, '%03d'%obj_idx, 'nontextured.ply'))
563 | points = np.array(model.points)
564 | pose = np.dot(camera_pose, mat)
565 | points = transform_points(points, pose)
566 | model.points = o3d.utility.Vector3dVector(points)
567 | model_list.append(model)
568 | pose_list.append(pose)
569 |
570 | if return_poses:
571 | return model_list, obj_list, pose_list
572 | else:
573 | return model_list
574 |
575 | def get_obj_pose_list(camera_pose, pose_vectors):
576 | import numpy as np
577 | obj_list = []
578 | mat_list = []
579 | pose_list = []
580 | for posevector in pose_vectors:
581 | obj_idx, mat = parse_posevector(posevector)
582 | obj_list.append(obj_idx)
583 | mat_list.append(mat)
584 |
585 | for obj_idx, mat in zip(obj_list, mat_list):
586 | pose = np.dot(camera_pose, mat)
587 | pose_list.append(pose)
588 |
589 | return obj_list, pose_list
590 |
591 | def batch_rgbdxyz_2_rgbxy_depth(points, camera):
592 | '''
593 | **Input:**
594 |
595 | - points: np.array(-1,3) of the points in camera frame
596 |
597 | - camera: string of the camera type
598 |
599 | **Output:**
600 |
601 |     - coords: numpy array of shape (-1, 2), the xy coordinates in the pixel frame
602 | 
603 |     - depths: numpy array of shape (-1,), the depths in millimeters
604 | '''
605 | intrinsics = get_camera_intrinsic(camera)
606 | fx, fy = intrinsics[0,0], intrinsics[1,1]
607 | cx, cy = intrinsics[0,2], intrinsics[1,2]
608 | s = 1000.0
609 | depths = s * points[:,2] # point_z
610 | ###################################
611 | # x and y should be inverted here #
612 | ###################################
613 | # y = point[0] / point[2] * fx + cx
614 | # x = point[1] / point[2] * fy + cy
615 | # cx = 640, cy = 360
616 | coords_x = points[:,0] / points[:,2] * fx + cx
617 | coords_y = points[:,1] / points[:,2] * fy + cy
618 | coords = np.stack([coords_x, coords_y], axis=-1)
619 | return coords, depths
620 |
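A short sketch projecting camera-frame points (meters) to pixel coordinates and depths (millimeters):

import numpy as np

pts = np.array([[0.00,  0.00, 0.50],
                [0.10, -0.05, 0.70]])                        # camera frame, meters
coords, depths = batch_rgbdxyz_2_rgbxy_depth(pts, 'kinect')  # (2, 2) pixels, (2,) millimeters
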
621 | def get_batch_key_points(centers, Rs, widths):
622 | '''
623 | **Input:**
624 |
625 | - centers: np.array(-1,3) of the translation
626 |
627 | - Rs: np.array(-1,3,3) of the rotation matrix
628 |
629 | - widths: np.array(-1) of the grasp width
630 |
631 | **Output:**
632 |
633 | - key_points: np.array(-1,4,3) of the key point of the grasp
634 | '''
635 | import numpy as np
636 | depth_base = 0.02
637 | height = 0.02
638 | key_points = np.zeros((centers.shape[0],4,3),dtype = np.float32)
639 | key_points[:,:,0] -= depth_base
640 | key_points[:,1:,1] -= widths[:,np.newaxis] / 2
641 | key_points[:,2,2] += height / 2
642 | key_points[:,3,2] -= height / 2
643 | key_points = np.matmul(Rs, key_points.transpose(0,2,1)).transpose(0,2,1)
644 | key_points = key_points + centers[:,np.newaxis,:]
645 | return key_points
646 |
647 | def batch_key_points_2_tuple(key_points, scores, object_ids, camera):
648 | '''
649 | **Input:**
650 |
651 | - key_points: np.array(-1,4,3) of grasp key points, definition is shown in key_points.png
652 |
653 |     - scores: numpy array of batch grasp scores; object_ids: numpy array of batch object ids.
654 |
655 | - camera: string of 'realsense' or 'kinect'.
656 |
657 | **Output:**
658 |
659 |     - np.array of shape (-1, 7): [center_x, center_y, open_x, open_y, height, score, object_id] per grasp.
660 | '''
661 | import numpy as np
662 | centers, _ = batch_rgbdxyz_2_rgbxy_depth(key_points[:,0,:], camera)
663 | opens, _ = batch_rgbdxyz_2_rgbxy_depth(key_points[:,1,:], camera)
664 | lefts, _ = batch_rgbdxyz_2_rgbxy_depth(key_points[:,2,:], camera)
665 | rights, _ = batch_rgbdxyz_2_rgbxy_depth(key_points[:,3,:], camera)
666 | heights = np.linalg.norm(lefts - rights, axis=-1, keepdims=True)
667 | tuples = np.concatenate([centers, opens, heights, scores[:, np.newaxis], object_ids[:, np.newaxis]], axis=-1).astype(np.float32)
668 | return tuples
669 |
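Together, the two helpers above convert 6D grasps into image-space rectangle tuples; a hedged single-grasp sketch:

import numpy as np

centers = np.array([[0.0, 0.0, 0.5]])        # grasp translation (meters, camera frame)
Rs = np.eye(3)[np.newaxis, :, :]             # grasp rotation
widths = np.array([0.06])
scores = np.array([0.9])
object_ids = np.array([0])

key_points = get_batch_key_points(centers, Rs, widths)                             # (1, 4, 3)
rect = batch_key_points_2_tuple(key_points, scores, object_ids, camera='kinect')   # (1, 7)
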
670 | def framexy_depth_2_xyz(pixel_x, pixel_y, depth, camera):
671 | '''
672 | **Input:**
673 |
674 | - pixel_x: int of the pixel x coordinate.
675 |
676 |     - pixel_y: int of the pixel y coordinate.
677 |
678 | - depth: float of depth. The unit is millimeter.
679 |
680 | - camera: string of type of camera. "realsense" or "kinect".
681 |
682 | **Output:**
683 |
684 | - x, y, z: float of x, y and z coordinates in camera frame. The unit is millimeter.
685 | '''
686 | intrinsics = get_camera_intrinsic(camera)
687 | fx, fy = intrinsics[0,0], intrinsics[1,1]
688 | cx, cy = intrinsics[0,2], intrinsics[1,2]
689 | z = depth # mm
690 | x = z / fx * (pixel_x - cx) # mm
691 | y = z / fy * (pixel_y - cy) # mm
692 | return x, y, z
693 |
694 | def batch_framexy_depth_2_xyz(pixel_x, pixel_y, depth, camera):
695 | '''
696 | **Input:**
697 |
698 | - pixel_x: numpy array of int of the pixel x coordinate. shape: (-1,)
699 |
700 |     - pixel_y: numpy array of int of the pixel y coordinate. shape: (-1,)
701 |
702 | - depth: numpy array of float of depth. The unit is millimeter. shape: (-1,)
703 |
704 | - camera: string of type of camera. "realsense" or "kinect".
705 |
706 | **Output:**
707 |
708 |     - x, y, z: numpy array of float of x, y and z coordinates in camera frame. The unit is millimeter.
709 | '''
710 | intrinsics = get_camera_intrinsic(camera)
711 | fx, fy = intrinsics[0,0], intrinsics[1,1]
712 | cx, cy = intrinsics[0,2], intrinsics[1,2]
713 | z = depth # mm
714 | x = z / fx * (pixel_x - cx) # mm
715 | y = z / fy * (pixel_y - cy) # mm
716 | return x, y, z
717 |
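The inverse direction: deproject pixels with known depths (millimeters) back to camera-frame coordinates, also in millimeters:

import numpy as np

px = np.array([640.0, 700.0])
py = np.array([360.0, 400.0])
d = np.array([500.0, 650.0])                                 # depths in millimeters
x, y, z = batch_framexy_depth_2_xyz(px, py, d, 'realsense')
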
718 | def center_depth(depths, center, open_point, upper_point):
719 | '''
720 | **Input:**
721 |
722 | - depths: numpy array of the depths.
723 |
724 | - center: numpy array of the center point.
725 |
726 | - open_point: numpy array of the open point.
727 |
728 | - upper_point: numpy array of the upper point.
729 |
730 | **Output:**
731 |
732 | - depth: float of the grasp depth.
733 | '''
734 | return depths[int(round(center[1])), int(round(center[0]))]
735 |
736 | def batch_center_depth(depths, centers, open_points, upper_points):
737 | '''
738 | **Input:**
739 |
740 | - depths: numpy array of the depths.
741 |
742 | - centers: numpy array of the center points of shape(-1, 2).
743 |
744 | - open_points: numpy array of the open points of shape(-1, 2).
745 |
746 | - upper_points: numpy array of the upper points of shape(-1, 2).
747 |
748 | **Output:**
749 |
750 | - depths: numpy array of the grasp depth of shape (-1).
751 | '''
752 | x = np.round(centers[:,0]).astype(np.int32)
753 | y = np.round(centers[:,1]).astype(np.int32)
754 | return depths[y, x]
755 |
756 | def key_point_2_rotation(center_xyz, open_point_xyz, upper_point_xyz):
757 | '''
758 | **Input:**
759 |
760 | - center_xyz: numpy array of the center point.
761 |
762 | - open_point_xyz: numpy array of the open point.
763 |
764 | - upper_point_xyz: numpy array of the upper point.
765 |
766 | **Output:**
767 |
768 | - rotation: numpy array of the rotation matrix.
769 | '''
770 | open_point_vector = open_point_xyz - center_xyz
771 | upper_point_vector = upper_point_xyz - center_xyz
772 | unit_open_point_vector = open_point_vector / np.linalg.norm(open_point_vector)
773 | unit_upper_point_vector = upper_point_vector / np.linalg.norm(upper_point_vector)
774 | rotation = np.hstack((
775 | np.array([[0],[0],[1.0]]),
776 | unit_open_point_vector.reshape((-1, 1)),
777 | unit_upper_point_vector.reshape((-1, 1))
778 | ))
779 | return rotation
780 |
781 | def batch_key_point_2_rotation(centers_xyz, open_points_xyz, upper_points_xyz):
782 | '''
783 | **Input:**
784 |
785 | - centers_xyz: numpy array of the center points of shape (-1, 3).
786 |
787 | - open_points_xyz: numpy array of the open points of shape (-1, 3).
788 |
789 | - upper_points_xyz: numpy array of the upper points of shape (-1, 3).
790 |
791 | **Output:**
792 |
793 | - rotations: numpy array of the rotation matrix of shape (-1, 3, 3).
794 | '''
795 | # print('open_points_xyz:{}'.format(open_points_xyz))
796 | # print('upper_points_xyz:{}'.format(upper_points_xyz))
797 | open_points_vector = open_points_xyz - centers_xyz # (-1, 3)
798 | upper_points_vector = upper_points_xyz - centers_xyz # (-1, 3)
799 | open_point_norm = np.linalg.norm(open_points_vector, axis = 1).reshape(-1, 1)
800 | upper_point_norm = np.linalg.norm(upper_points_vector, axis = 1).reshape(-1, 1)
801 | # print('open_point_norm:{}, upper_point_norm:{}'.format(open_point_norm, upper_point_norm))
802 | unit_open_points_vector = open_points_vector / np.hstack((open_point_norm, open_point_norm, open_point_norm)) # (-1, 3)
803 | unit_upper_points_vector = upper_points_vector / np.hstack((upper_point_norm, upper_point_norm, upper_point_norm)) # (-1, 3)
804 | num = open_points_vector.shape[0]
805 | x_axis = np.hstack((np.zeros((num, 1)), np.zeros((num, 1)), np.ones((num, 1)))).astype(np.float32).reshape(-1, 3, 1)
806 | rotations = np.dstack((x_axis, unit_open_points_vector.reshape((-1, 3, 1)), unit_upper_points_vector.reshape((-1, 3, 1))))
807 | return rotations
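A small sketch recovering per-grasp rotation matrices from rectangle key points lifted back to 3D (values are illustrative):

import numpy as np

centers = np.array([[0.00, 0.00, 0.50]])
opens   = np.array([[0.03, 0.00, 0.50]])
uppers  = np.array([[0.00, 0.02, 0.50]])
Rs = batch_key_point_2_rotation(centers, opens, uppers)   # (1, 3, 3)
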
--------------------------------------------------------------------------------
/graspnetAPI/utils/vis.py:
--------------------------------------------------------------------------------
1 | import os
2 | import time
3 | import numpy as np
4 | import open3d as o3d
5 | from transforms3d.euler import euler2mat, quat2mat
6 | from .utils import generate_scene_model, generate_scene_pointcloud, generate_views, get_model_grasps, plot_gripper_pro_max, transform_points
7 | from .rotation import viewpoint_params_to_matrix, batch_viewpoint_params_to_matrix
8 |
9 | def create_table_cloud(width, height, depth, dx=0, dy=0, dz=0, grid_size=0.01):
10 | '''
11 | Author: chenxi-wang
12 |
13 | **Input:**
14 |
15 | - width/height/depth: float, table width/height/depth along x/z/y-axis in meters
16 |
17 | - dx/dy/dz: float, offset along x/y/z-axis in meters
18 |
19 | - grid_size: float, point distance along x/y/z-axis in meters
20 |
21 | **Output:**
22 |
23 | - open3d.geometry.PointCloud
24 | '''
25 | xmap = np.linspace(0, width, int(width/grid_size))
26 | ymap = np.linspace(0, depth, int(depth/grid_size))
27 | zmap = np.linspace(0, height, int(height/grid_size))
28 | xmap, ymap, zmap = np.meshgrid(xmap, ymap, zmap, indexing='xy')
29 | xmap += dx
30 | ymap += dy
31 | zmap += dz
32 | points = np.stack([xmap, -ymap, -zmap], axis=-1)
33 | points = points.reshape([-1, 3])
34 | cloud = o3d.geometry.PointCloud()
35 | cloud.points = o3d.utility.Vector3dVector(points)
36 | return cloud
37 |
38 |
39 | def get_camera_parameters(camera='kinect'):
40 | '''
41 | author: Minghao Gou
42 |
43 | **Input:**
44 |
45 | - camera: string of type of camera: 'kinect' or 'realsense'
46 |
47 | **Output:**
48 |
49 | - open3d.camera.PinholeCameraParameters
50 | '''
51 | import open3d as o3d
52 | param = o3d.camera.PinholeCameraParameters()
53 | param.extrinsic = np.eye(4,dtype=np.float64)
54 | # param.intrinsic = o3d.camera.PinholeCameraIntrinsic()
55 | if camera == 'kinect':
56 | param.intrinsic.set_intrinsics(1280,720,631.5,631.2,639.5,359.5)
57 | elif camera == 'realsense':
58 | param.intrinsic.set_intrinsics(1280,720,927.17,927.37,639.5,359.5)
59 | return param
60 |
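A minimal sketch of applying these parameters to an Open3D visualizer, mirroring the usage later in this file:

import open3d as o3d

vis = o3d.visualization.Visualizer()
vis.create_window(width=1280, height=720)
param = get_camera_parameters(camera='realsense')
vis.get_view_control().convert_from_pinhole_camera_parameters(param)
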
61 | def visAnno(dataset_root, scene_name, anno_idx, camera, num_grasp=10, th=0.3, align_to_table=True, max_width=0.08, save_folder='save_fig', show=False, per_obj=False):
62 | '''
63 | Author: chenxi-wang
64 |
65 | **Input:**
66 |
67 | - dataset_root: str, graspnet dataset root
68 |
69 | - scene_name: str, name of scene folder, e.g. scene_0000
70 |
71 | - anno_idx: int, frame index from 0-255
72 |
73 | - camera: str, camera name (realsense or kinect)
74 |
75 | - num_grasp: int, number of sampled grasps
76 |
77 | - th: float, threshold of friction coefficient
78 |
79 | - align_to_table: bool, transform to table coordinates if set to True
80 |
81 | - max_width: float, only visualize grasps with width<=max_width
82 |
83 | - save_folder: str, folder to save screen captures
84 |
85 | - show: bool, show visualization in open3d window if set to True
86 |
87 | - per_obj: bool, show grasps on each object
88 | '''
89 | model_list, obj_list, pose_list = generate_scene_model(dataset_root, scene_name, anno_idx, return_poses=True, align=align_to_table, camera=camera)
90 | point_cloud = generate_scene_pointcloud(dataset_root, scene_name, anno_idx, align=align_to_table, camera=camera)
91 |
92 | table = create_table_cloud(1.0, 0.02, 1.0, dx=-0.5, dy=-0.5, dz=0, grid_size=0.01)
93 | num_views, num_angles, num_depths = 300, 12, 4
94 | views = generate_views(num_views)
95 | collision_label = np.load('{}/collision_label/{}/collision_labels.npz'.format(dataset_root,scene_name))
96 |
97 | vis = o3d.visualization.Visualizer()
98 | vis.create_window(width = 1280, height = 720)
99 | ctr = vis.get_view_control()
100 | param = get_camera_parameters(camera=camera)
101 |
102 | if align_to_table:
103 | cam_pos = np.load(os.path.join(dataset_root, 'scenes', scene_name, camera, 'cam0_wrt_table.npy'))
104 | param.extrinsic = np.linalg.inv(cam_pos).tolist()
105 |
106 | grippers = []
107 | vis.add_geometry(point_cloud)
108 | for i, (obj_idx, trans) in enumerate(zip(obj_list, pose_list)):
109 | sampled_points, offsets, scores, _ = get_model_grasps('%s/grasp_label/%03d_labels.npz'%(dataset_root, obj_idx))
110 | collision = collision_label['arr_{}'.format(i)]
111 |
112 | cnt = 0
113 | point_inds = np.arange(sampled_points.shape[0])
114 | np.random.shuffle(point_inds)
115 |
116 | for point_ind in point_inds:
117 | target_point = sampled_points[point_ind]
118 | offset = offsets[point_ind]
119 | score = scores[point_ind]
120 | view_inds = np.arange(300)
121 | np.random.shuffle(view_inds)
122 | flag = False
123 | for v in view_inds:
124 | if flag: break
125 | view = views[v]
126 | angle_inds = np.arange(12)
127 | np.random.shuffle(angle_inds)
128 | for a in angle_inds:
129 | if flag: break
130 | depth_inds = np.arange(4)
131 | np.random.shuffle(depth_inds)
132 | for d in depth_inds:
133 | if flag: break
134 | angle, depth, width = offset[v, a, d]
135 | if score[v, a, d] > th or score[v, a, d] < 0:
136 | continue
137 | if width > max_width:
138 | continue
139 | if collision[point_ind, v, a, d]:
140 | continue
141 | R = viewpoint_params_to_matrix(-view, angle)
142 | t = transform_points(target_point[np.newaxis,:], trans).squeeze()
143 | R = np.dot(trans[:3,:3], R)
144 | gripper = plot_gripper_pro_max(t, R, width, depth, 1.1-score[v, a, d])
145 | grippers.append(gripper)
146 | flag = True
147 | if flag:
148 | cnt += 1
149 | if cnt == num_grasp:
150 | break
151 |
152 | if per_obj:
153 | for gripper in grippers:
154 | vis.add_geometry(gripper)
155 | ctr.convert_from_pinhole_camera_parameters(param)
156 | vis.poll_events()
157 | filename = os.path.join(save_folder, '{}_{}_pointcloud_{}.png'.format(scene_name, camera, obj_idx))
158 | if not os.path.exists(save_folder):
159 | os.mkdir(save_folder)
160 | vis.capture_screen_image(filename, do_render=True)
161 |
162 | for gripper in grippers:
163 | vis.remove_geometry(gripper)
164 | grippers = []
165 |
166 | if not per_obj:
167 | for gripper in grippers:
168 | vis.add_geometry(gripper)
169 | ctr.convert_from_pinhole_camera_parameters(param)
170 | vis.poll_events()
171 | filename = os.path.join(save_folder, '{}_{}_pointcloud.png'.format(scene_name, camera))
172 | if not os.path.exists(save_folder):
173 | os.mkdir(save_folder)
174 | vis.capture_screen_image(filename, do_render=True)
175 | if show:
176 | o3d.visualization.draw_geometries([point_cloud, *grippers])
177 |
178 | vis.remove_geometry(point_cloud)
179 | vis.add_geometry(table)
180 | for model in model_list:
181 | vis.add_geometry(model)
182 | ctr.convert_from_pinhole_camera_parameters(param)
183 | vis.poll_events()
184 | filename = os.path.join(save_folder, '{}_{}_model.png'.format(scene_name, camera))
185 | vis.capture_screen_image(filename, do_render=True)
186 | if show:
187 | o3d.visualization.draw_geometries([table, *model_list, *grippers])
188 |
189 |
190 | def vis6D(dataset_root, scene_name, anno_idx, camera, align_to_table=True, save_folder='save_fig', show=False, per_obj=False):
191 | '''
192 | **Input:**
193 |
194 | - dataset_root: str, graspnet dataset root
195 |
196 | - scene_name: str, name of scene folder, e.g. scene_0000
197 |
198 | - anno_idx: int, frame index from 0-255
199 |
200 | - camera: str, camera name (realsense or kinect)
201 |
202 | - align_to_table: bool, transform to table coordinates if set to True
203 |
204 | - save_folder: str, folder to save screen captures
205 |
206 | - show: bool, show visualization in open3d window if set to True
207 |
208 | - per_obj: bool, show pose of each object
209 | '''
210 | model_list, obj_list, pose_list = generate_scene_model(dataset_root, scene_name, anno_idx, return_poses=True, align=align_to_table, camera=camera)
211 | point_cloud = generate_scene_pointcloud(dataset_root, scene_name, anno_idx, align=align_to_table, camera=camera)
212 | point_cloud = point_cloud.voxel_down_sample(voxel_size=0.005)
213 |
214 | vis = o3d.visualization.Visualizer()
215 | vis.create_window(width = 1280, height = 720)
216 | ctr = vis.get_view_control()
217 | param = get_camera_parameters(camera=camera)
218 |
219 | if align_to_table:
220 | cam_pos = np.load(os.path.join(dataset_root, 'scenes', scene_name, camera, 'cam0_wrt_table.npy'))
221 | param.extrinsic = np.linalg.inv(cam_pos).tolist()
222 |
223 | vis.add_geometry(point_cloud)
224 | if per_obj:
225 | for i,model in zip(obj_list,model_list):
226 | vis.add_geometry(model)
227 | ctr.convert_from_pinhole_camera_parameters(param)
228 | vis.poll_events()
229 | filename = os.path.join(save_folder, '{}_{}_6d_{}.png'.format(scene_name, camera, i))
230 | vis.capture_screen_image(filename, do_render=True)
231 | vis.remove_geometry(model)
232 | else:
233 | for model in model_list:
234 | vis.add_geometry(model)
235 | ctr.convert_from_pinhole_camera_parameters(param)
236 | vis.poll_events()
237 | filename = os.path.join(save_folder, '{}_{}_6d.png'.format(scene_name, camera))
238 | vis.capture_screen_image(filename, do_render=True)
239 | if show:
240 | o3d.visualization.draw_geometries([point_cloud, *model_list])
241 |
242 |
243 |
244 | def visObjGrasp(dataset_root, obj_idx, num_grasp=10, th=0.5, max_width=0.08, save_folder='save_fig', show=False):
245 | '''
246 | Author: chenxi-wang
247 |
248 | **Input:**
249 |
250 | - dataset_root: str, graspnet dataset root
251 |
252 | - obj_idx: int, index of object model
253 |
254 | - num_grasp: int, number of sampled grasps
255 |
256 | - th: float, threshold of friction coefficient
257 |
258 | - max_width: float, only visualize grasps with width<=max_width
259 |
260 | - save_folder: str, folder to save screen captures
261 |
262 | - show: bool, show visualization in open3d window if set to True
263 | '''
264 | plyfile = os.path.join(dataset_root, 'models', '%03d'%obj_idx, 'nontextured.ply')
265 | model = o3d.io.read_point_cloud(plyfile)
266 |
267 | num_views, num_angles, num_depths = 300, 12, 4
268 | views = generate_views(num_views)
269 |
270 | vis = o3d.visualization.Visualizer()
271 | vis.create_window(width = 1280, height = 720)
272 | ctr = vis.get_view_control()
273 | param = get_camera_parameters(camera='kinect')
274 |
275 | cam_pos = np.load(os.path.join(dataset_root, 'scenes', 'scene_0000', 'kinect', 'cam0_wrt_table.npy'))
276 | param.extrinsic = np.linalg.inv(cam_pos).tolist()
277 |
278 | sampled_points, offsets, scores, _ = get_model_grasps('%s/grasp_label/%03d_labels.npz'%(dataset_root, obj_idx))
279 |
280 | cnt = 0
281 | point_inds = np.arange(sampled_points.shape[0])
282 | np.random.shuffle(point_inds)
283 | grippers = []
284 |
285 | for point_ind in point_inds:
286 | target_point = sampled_points[point_ind]
287 | offset = offsets[point_ind]
288 | score = scores[point_ind]
289 | view_inds = np.arange(300)
290 | np.random.shuffle(view_inds)
291 | flag = False
292 | for v in view_inds:
293 | if flag: break
294 | view = views[v]
295 | angle_inds = np.arange(12)
296 | np.random.shuffle(angle_inds)
297 | for a in angle_inds:
298 | if flag: break
299 | depth_inds = np.arange(4)
300 | np.random.shuffle(depth_inds)
301 | for d in depth_inds:
302 | if flag: break
303 | angle, depth, width = offset[v, a, d]
304 | if score[v, a, d] > th or score[v, a, d] < 0 or width > max_width:
305 | continue
306 | R = viewpoint_params_to_matrix(-view, angle)
307 | t = target_point
308 | gripper = plot_gripper_pro_max(t, R, width, depth, 1.1-score[v, a, d])
309 | grippers.append(gripper)
310 | flag = True
311 | if flag:
312 | cnt += 1
313 | if cnt == num_grasp:
314 | break
315 |
316 | vis.add_geometry(model)
317 | for gripper in grippers:
318 | vis.add_geometry(gripper)
319 | ctr.convert_from_pinhole_camera_parameters(param)
320 | vis.poll_events()
321 | filename = os.path.join(save_folder, 'object_{}_grasp.png'.format(obj_idx))
322 | vis.capture_screen_image(filename, do_render=True)
323 | if show:
324 | o3d.visualization.draw_geometries([model, *grippers])
325 |
326 | def vis_rec_grasp(rec_grasp_tuples,numGrasp,image_path,save_path,show=False):
327 | '''
328 | author: Minghao Gou
329 |
330 | **Input:**
331 |
332 | - rec_grasp_tuples: np.array of rectangle grasps
333 |
334 | - numGrasp: int of total grasps number to show
335 |
336 | - image_path: string of path of the image
337 |
338 |     - save_path: string of the path to save the image
339 |
340 | - show: bool of whether to show the image
341 |
342 | **Output:**
343 |
344 |     - no output, but the image with rectangle grasps drawn on it is saved to save_path and optionally shown
345 | '''
346 | import cv2
347 | import numpy as np
348 | import os
349 | img = cv2.imread(image_path)
350 | if len(rec_grasp_tuples) > numGrasp:
351 | np.random.shuffle(rec_grasp_tuples)
352 | rec_grasp_tuples = rec_grasp_tuples[0:numGrasp]
353 | for rec_grasp_tuple in rec_grasp_tuples:
354 | center_x,center_y,open_x,open_y,height,score = rec_grasp_tuple
355 | center = np.array([center_x,center_y])
356 | left = np.array([open_x,open_y])
357 | axis = left - center
358 | normal = np.array([-axis[1],axis[0]])
359 | normal = normal / np.linalg.norm(normal) * height / 2
360 | p1 = center + normal + axis
361 | p2 = center + normal - axis
362 | p3 = center - normal - axis
363 | p4 = center - normal + axis
364 | cv2.line(img, (int(p1[0]),int(p1[1])), (int(p2[0]),int(p2[1])), (0,0,255), 1, 8)
365 | cv2.line(img, (int(p2[0]),int(p2[1])), (int(p3[0]),int(p3[1])), (255,0,0), 3, 8)
366 | cv2.line(img, (int(p3[0]),int(p3[1])), (int(p4[0]),int(p4[1])), (0,0,255), 1, 8)
367 | cv2.line(img, (int(p4[0]),int(p4[1])), (int(p1[0]),int(p1[1])), (255,0,0), 3, 8)
368 | cv2.imwrite(save_path,img)
369 | if show:
370 | cv2.imshow('grasp',img)
371 | cv2.waitKey(0)
372 | cv2.destroyAllWindows()
373 |
374 |
375 | if __name__ == '__main__':
376 | camera = 'kinect'
377 | dataset_root = '../'
378 | scene_name = 'scene_0000'
379 | anno_idx = 0
380 | obj_idx = 0
381 | visAnno(dataset_root, scene_name, anno_idx, camera, num_grasp=1, th=0.5, align_to_table=True, max_width=0.08, save_folder='save_fig', show=False)
382 | vis6D(dataset_root, scene_name, anno_idx, camera, align_to_table=True, save_folder='save_fig', show=False)
383 | visObjGrasp(dataset_root, obj_idx, num_grasp=10, th=0.5, save_folder='save_fig', show=False)
384 |
--------------------------------------------------------------------------------
/graspnetAPI/utils/xmlhandler.py:
--------------------------------------------------------------------------------
1 | __author__ = 'Minghao Gou'
2 | __version__ = '1.0'
3 |
4 | from xml.etree.ElementTree import Element, SubElement, tostring
5 | import xml.etree.ElementTree as ET
6 | import xml.dom.minidom
7 | from transforms3d.quaternions import mat2quat, quat2axangle
8 | from transforms3d.euler import quat2euler
9 | import numpy as np
10 | from .trans3d import get_mat, pos_quat_to_pose_4x4
11 | import os
12 | from .pose import pose_list_from_pose_vector_list
13 |
14 |
15 | class xmlWriter():
16 | def __init__(self, topfromreader=None):
17 | self.topfromreader = topfromreader
18 | self.poselist = []
19 | self.objnamelist = []
20 | self.objpathlist = []
21 | self.objidlist = []
22 | def addobject(self, pose, objname, objpath, objid):
23 | # pose is the 4x4 matrix representation of 6d pose
24 | self.poselist.append(pose)
25 | self.objnamelist.append(objname)
26 | self.objpathlist.append(objpath)
27 | self.objidlist.append(objid)
28 |
29 | def objectlistfromposevectorlist(self, posevectorlist, objdir, objnamelist, objidlist):
30 | self.poselist = []
31 | self.objnamelist = []
32 | self.objidlist = []
33 | self.objpathlist = []
34 | for i in range(len(posevectorlist)):
35 | id, x, y, z, alpha, beta, gamma = posevectorlist[i]
36 | objname = objnamelist[objidlist[i]]
37 | self.addobject(get_mat(x, y, z, alpha, beta, gamma),
38 | objname, os.path.join(objdir, objname), id)
39 |
40 | def writexml(self, xmlfilename='scene.xml'):
41 | if self.topfromreader is not None:
42 | self.top = self.topfromreader
43 | else:
44 | self.top = Element('scene')
45 | for i in range(len(self.poselist)):
46 | obj_entry = SubElement(self.top, 'obj')
47 |
48 | obj_name = SubElement(obj_entry, 'obj_id')
49 | obj_name.text = str(self.objidlist[i])
50 |
51 | obj_name = SubElement(obj_entry, 'obj_name')
52 | obj_name.text = self.objnamelist[i]
53 |
54 | obj_path = SubElement(obj_entry, 'obj_path')
55 | obj_path.text = self.objpathlist[i]
56 | pose = self.poselist[i]
57 | pose_in_world = SubElement(obj_entry, 'pos_in_world')
58 | pose_in_world.text = '{:.4f} {:.4f} {:.4f}'.format(
59 | pose[0, 3], pose[1, 3], pose[2, 3])
60 |
61 | rotationMatrix = pose[0:3, 0:3]
62 | quat = mat2quat(rotationMatrix)
63 |
64 | ori_in_world = SubElement(obj_entry, 'ori_in_world')
65 | ori_in_world.text = '{:.4f} {:.4f} {:.4f} {:.4f}'.format(
66 | quat[0], quat[1], quat[2], quat[3])
67 | xmlstr = xml.dom.minidom.parseString(
68 | tostring(self.top)).toprettyxml(indent=' ')
69 | # remove blank line
70 | xmlstr = "".join([s for s in xmlstr.splitlines(True) if s.strip()])
71 | with open(xmlfilename, 'w') as f:
72 | f.write(xmlstr)
73 | #print('log:write annotation file '+xmlfilename)
74 |
75 |
76 | class xmlReader():
77 | def __init__(self, xmlfilename):
78 | self.xmlfilename = xmlfilename
79 | etree = ET.parse(self.xmlfilename)
80 | self.top = etree.getroot()
81 |
82 | def showinfo(self):
83 | print('Resumed object(s) already stored in '+self.xmlfilename+':')
84 | for i in range(len(self.top)):
85 | print(self.top[i][1].text)
86 |
87 | def gettop(self):
88 | return self.top
89 |
90 | def getposevectorlist(self):
91 |         # posevector format: [objectid,x,y,z,alpha,beta,gamma]
92 | posevectorlist = []
93 | for i in range(len(self.top)):
94 | objectid = int(self.top[i][0].text)
95 | objectname = self.top[i][1].text
96 | objectpath = self.top[i][2].text
97 | translationtext = self.top[i][3].text.split()
98 | translation = []
99 | for text in translationtext:
100 | translation.append(float(text))
101 | quattext = self.top[i][4].text.split()
102 | quat = []
103 | for text in quattext:
104 | quat.append(float(text))
105 | alpha, beta, gamma = quat2euler(quat)
106 | x, y, z = translation
107 | alpha *= (180.0 / np.pi)
108 | beta *= (180.0 / np.pi)
109 | gamma *= (180.0 / np.pi)
110 | posevectorlist.append([objectid, x, y, z, alpha, beta, gamma])
111 | return posevectorlist
112 |
113 | def get_pose_list(self):
114 | pose_vector_list = self.getposevectorlist()
115 | return pose_list_from_pose_vector_list(pose_vector_list)
116 |
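A hedged round-trip sketch (the file name is a placeholder): write one object pose with xmlWriter and read it back with xmlReader:

import numpy as np

writer = xmlWriter()
writer.addobject(np.eye(4), 'object_000', 'models/object_000', 0)
writer.writexml('example_scene.xml')

reader = xmlReader('example_scene.xml')
print(reader.getposevectorlist())   # [[obj_id, x, y, z, alpha, beta, gamma], ...]
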
117 | def empty_pose_vector(objectid):
118 | # [object id,x,y,z,alpha,beta,gamma]
119 | # alpha, beta and gamma are in degree
120 | return [objectid, 0.0, 0.0, 0.4, 0.0, 0.0, 0.0]
121 |
122 |
123 | def empty_pose_vector_list(objectidlist):
124 | pose_vector_list = []
125 | for id in objectidlist:
126 | pose_vector_list.append(empty_pose_vector(id))
127 | return pose_vector_list
128 |
129 |
130 | def getposevectorlist(objectidlist, is_resume, num_frame, frame_number, xml_dir):
131 | if not is_resume or (not os.path.exists(os.path.join(xml_dir, '%04d.xml' % num_frame))):
132 | print('log:create empty pose vector list')
133 | return empty_pose_vector_list(objectidlist)
134 | else:
135 | print('log:resume pose vector from ' +
136 | os.path.join(xml_dir, '%04d.xml' % num_frame))
137 | xmlfile = os.path.join(xml_dir, '%04d.xml' % num_frame)
138 | mainxmlReader = xmlReader(xmlfile)
139 | xmlposevectorlist = mainxmlReader.getposevectorlist()
140 | posevectorlist = []
141 | for objectid in objectidlist:
142 | posevector = [objectid, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
143 | for xmlposevector in xmlposevectorlist:
144 | if xmlposevector[0] == objectid:
145 | posevector = xmlposevector
146 | posevectorlist.append(posevector)
147 | return posevectorlist
148 |
149 |
150 | def getframeposevectorlist(objectidlist, is_resume, frame_number, xml_dir):
151 | frameposevectorlist = []
152 | for num_frame in range(frame_number):
153 | if not is_resume or (not os.path.exists(os.path.join(xml_dir,'%04d.xml' % num_frame))):
154 | posevectorlist=getposevectorlist(objectidlist,False,num_frame,frame_number,xml_dir)
155 | else:
156 | posevectorlist=getposevectorlist(objectidlist,True,num_frame,frame_number,xml_dir)
157 | frameposevectorlist.append(posevectorlist)
158 | return frameposevectorlist
159 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from distutils.core import setup
2 | from setuptools import find_packages
3 | from setuptools.command.install import install
4 | import os
5 |
6 | setup(
7 | name='graspnetAPI',
8 | version='1.2.11',
9 | description='graspnet API',
10 | author='Hao-Shu Fang, Chenxi Wang, Minghao Gou',
11 | author_email='gouminghao@gmail.com',
12 | url='https://graspnet.net',
13 | packages=find_packages(),
14 | install_requires=[
15 | 'numpy==1.23.4',
16 | 'scipy',
17 | 'transforms3d==0.3.1',
18 | 'open3d>=0.8.0.0',
19 | 'trimesh',
20 | 'tqdm',
21 | 'Pillow',
22 | 'opencv-python',
24 | 'matplotlib',
25 | 'pywavefront',
27 | 'scikit-image',
28 | 'autolab_core',
29 | 'autolab-perception',
30 | 'cvxopt',
31 | 'dill',
32 | 'h5py',
33 |         'scikit-learn',
34 | 'grasp_nms'
35 | ]
36 | )
37 |
--------------------------------------------------------------------------------