├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ └── question.md
└── workflows
│ └── build.yml
├── .gitignore
├── .readthedocs.yml
├── LICENSE
├── Makefile
├── README.md
├── docs
├── _static
│ ├── img
│ │ ├── flip.png
│ │ ├── opengl.png
│ │ ├── pickandplace.png
│ │ ├── push.png
│ │ ├── reach.png
│ │ ├── slide.png
│ │ ├── stack.png
│ │ ├── tiny.png
│ │ └── top_view.png
│ └── notebook
│ │ └── PickAndPlace.ipynb
├── base_class
│ ├── pybullet.rst
│ ├── robot.rst
│ ├── robot_task.rst
│ └── task.rst
├── conf.py
├── custom
│ ├── custom_env.rst
│ ├── custom_robot.rst
│ └── custom_task.rst
├── guide
│ ├── install.rst
│ └── quick_start.rst
├── index.rst
├── robots
│ └── panda.rst
└── usage
│ ├── advanced_rendering.rst
│ ├── environments.rst
│ ├── manual_control.rst
│ ├── save_restore_state.rst
│ └── train_with_sb3.rst
├── examples
├── PickAndPlace.ipynb
├── reach.py
├── rgb_rendering.py
└── train_push.py
├── panda_gym
├── __init__.py
├── assets
│ ├── __init__.py
│ └── colored_cube.png
├── envs
│ ├── __init__.py
│ ├── core.py
│ ├── panda_tasks.py
│ ├── robots
│ │ ├── __init__.py
│ │ └── panda.py
│ └── tasks
│ │ ├── __init__.py
│ │ ├── flip.py
│ │ ├── pick_and_place.py
│ │ ├── push.py
│ │ ├── reach.py
│ │ ├── slide.py
│ │ └── stack.py
├── pybullet.py
├── utils.py
└── version.txt
├── setup.py
└── test
├── envs_test.py
├── import_test.py
├── pybullet_test.py
├── render_test.py
├── save_and_restore_test.py
└── seed_test.py
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | ## Describe the bug
11 |
12 | A clear and concise description of what the bug is.
13 |
14 | ## To Reproduce
15 |
16 | Provide a **minimal** code :
17 |
18 | ```python
19 | import panda_gym
20 | ...
21 | ```
22 |
23 | ## System
24 |
25 | - OS:
26 | - Python version (`python --version`):
27 | - Package version (`pip list | grep panda-gym`):
28 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/question.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Question
3 | about: Ask your question about panda-gym
4 | title: ''
5 | labels: question
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 |
--------------------------------------------------------------------------------
/.github/workflows/build.yml:
--------------------------------------------------------------------------------
1 | name: build
2 |
3 | on: [pull_request, workflow_dispatch]
4 |
5 | jobs:
6 | test:
7 | runs-on: ${{ matrix.os }}
8 | strategy:
9 | matrix:
10 | os: [ubuntu-latest, macos-latest, macos-13, windows-latest]
11 | python-version: ['3.7', '3.8', '3.9', '3.10']
12 | exclude:
13 | # Exclude the combination of macOS-latest and Python 3.7 as arm64 doesn't support Python 3.7
14 | - os: macos-latest
15 | python-version: '3.7'
16 | steps:
17 | # Check out the repository code
18 | - uses: actions/checkout@v4
19 |
20 | # Set up the specified Python version
21 | - name: Set up Python ${{ matrix.python-version }}
22 | uses: actions/setup-python@v5
23 | with:
24 | python-version: ${{ matrix.python-version }}
25 |
26 | # Install dependencies
27 | - name: Install dependencies
28 | run: |
29 | python -m pip install --upgrade pip
30 | pip install -e .
31 | pip install pytest-cov
32 |
33 | # Run tests with pytest and generate coverage report
34 | - name: Test with pytest
35 | run: |
36 | pytest --cov=./ --cov-report=xml
37 |
38 | # Additional steps only for ubuntu-latest and Python 3.10
39 | # Upload the coverage report as an artifact
40 | - name: Save coverage report
41 | if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.10'
42 | uses: actions/upload-artifact@v4
43 | with:
44 | name: coverage-report
45 | path: ./coverage.xml
46 |
47 | # Install development dependencies
48 | - name: Install dev dependencies
49 | if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.10'
50 | run: |
51 | pip install -e .[develop]
52 |
53 | # Run Pytype for type checking
54 | - name: Pytype
55 | if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.10'
56 | run: |
57 | pytype panda_gym
58 |
59 | # Check code style with black and isort
60 | - name: Check codestyle
61 | if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.10'
62 | run: |
63 | black -l 127 --check panda_gym test
64 | isort -l 127 --profile black --check panda_gym test
65 |
66 | # Build documentation
67 | - name: Make docs
68 | if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.10'
69 | run: |
70 | make html
71 |
72 | # Upload coverage to Codecov
73 | - name: Upload coverage to Codecov
74 | if: matrix.os == 'ubuntu-latest' && matrix.python-version == '3.10'
75 | uses: codecov/codecov-action@v4
76 | with:
77 | files: ./coverage.xml
78 | fail_ci_if_error: true
79 | name: codecov-umbrella
80 | verbose: true
81 | token: ${{ secrets.CODECOV_TOKEN }} # required
82 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | env
2 | *__pycache__
3 | *.egg-info
4 | *.db
5 | .vscode
6 | _build/
7 | build/
8 | .DS_Store
9 | sandbox
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | # .readthedocs.yaml
2 | # Read the Docs configuration file
3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
4 |
5 | version: 2
6 |
7 | # Set the version of Python and other tools you might need
8 | build:
9 | os: ubuntu-20.04
10 | tools:
11 | python: "3.9"
12 |
13 |
14 | # Build documentation in the docs/ directory with Sphinx
15 | sphinx:
16 | configuration: docs/conf.py
17 |
18 | # If using Sphinx, optionally build your docs in additional formats such as PDF
19 | formats:
20 | - pdf
21 |
22 | # Optionally declare the Python requirements required to build your docs
23 | python:
24 | install:
25 | - method: pip
26 | path: .
27 | extra_requirements:
28 | - docs
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Quentin Gallouédec
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SOURCEDIR = docs
9 | BUILDDIR = build
10 |
11 | # Put it first so that "make" without argument is like "make help".
12 | help:
13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
14 |
15 | .PHONY: help Makefile
16 |
17 | # Catch-all target: route all unknown targets to Sphinx using the new
18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
19 | %: Makefile
20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
21 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # panda-gym
2 |
3 | Set of robotic environments based on the PyBullet physics engine and Gymnasium.
4 |
5 | [](https://pypi.org/project/panda-gym/)
6 | [](https://pepy.tech/project/panda-gym)
7 | [](LICENSE)
8 | [](https://github.com/qgallouedec/panda-gym/actions/workflows/build.yml)
9 | [](https://codecov.io/gh/qgallouedec/panda-gym)
10 | [](https://github.com/psf/black)
11 | [](https://arxiv.org/abs/2106.13687)
12 |
13 | ## Documentation
14 |
15 | Check out the [documentation](https://panda-gym.readthedocs.io/en/latest/).
16 |
17 | ## Installation
18 |
19 | ### Using PyPI
20 |
21 | ```bash
22 | pip install panda-gym
23 | ```
24 |
25 | ### From source
26 |
27 | ```bash
28 | git clone https://github.com/qgallouedec/panda-gym.git
29 | pip install -e panda-gym
30 | ```
31 |
32 | ## Usage
33 |
34 | ```python
35 | import gymnasium as gym
36 | import panda_gym
37 |
38 | env = gym.make('PandaReach-v3', render_mode="human")
39 |
40 | observation, info = env.reset()
41 |
42 | for _ in range(1000):
43 | action = env.action_space.sample() # random action
44 | observation, reward, terminated, truncated, info = env.step(action)
45 |
46 | if terminated or truncated:
47 | observation, info = env.reset()
48 |
49 | env.close()
50 | ```
51 |
52 | You can also [](https://colab.research.google.com/github/qgallouedec/panda-gym/blob/master/examples/PickAndPlace.ipynb)
53 |
54 | ## Environments
55 |
56 | | | |
57 | | :------------------------------: | :--------------------------------------------: |
58 | | `PandaReach-v3` | `PandaPush-v3` |
59 | |  |  |
60 | | `PandaSlide-v3` | `PandaPickAndPlace-v3` |
61 | |  |  |
62 | | `PandaStack-v3` | `PandaFlip-v3` |
63 | |  |  |
64 |
65 | ## Baselines results
66 |
67 | Baselines results are available in [rl-baselines3-zoo](https://github.com/DLR-RM/rl-baselines3-zoo) and the pre-trained agents in the [Hugging Face Hub](https://huggingface.co/sb3).
68 |
69 | ## Citation
70 |
71 | Cite as
72 |
73 | ```bib
74 | @article{gallouedec2021pandagym,
75 | title = {{panda-gym: Open-Source Goal-Conditioned Environments for Robotic Learning}},
76 | author = {Gallou{\'e}dec, Quentin and Cazin, Nicolas and Dellandr{\'e}a, Emmanuel and Chen, Liming},
77 | year = 2021,
78 | journal = {4th Robot Learning Workshop: Self-Supervised and Lifelong Learning at NeurIPS},
79 | }
80 | ```
81 |
82 | Environments are widely inspired from [OpenAI Fetch environments](https://openai.com/blog/ingredients-for-robotics-research/).
83 |
--------------------------------------------------------------------------------
/docs/_static/img/flip.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qgallouedec/panda-gym/10c4d8adaab07d8a29b2f8dc0aa85f8edd8532ae/docs/_static/img/flip.png
--------------------------------------------------------------------------------
/docs/_static/img/opengl.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qgallouedec/panda-gym/10c4d8adaab07d8a29b2f8dc0aa85f8edd8532ae/docs/_static/img/opengl.png
--------------------------------------------------------------------------------
/docs/_static/img/pickandplace.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qgallouedec/panda-gym/10c4d8adaab07d8a29b2f8dc0aa85f8edd8532ae/docs/_static/img/pickandplace.png
--------------------------------------------------------------------------------
/docs/_static/img/push.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qgallouedec/panda-gym/10c4d8adaab07d8a29b2f8dc0aa85f8edd8532ae/docs/_static/img/push.png
--------------------------------------------------------------------------------
/docs/_static/img/reach.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qgallouedec/panda-gym/10c4d8adaab07d8a29b2f8dc0aa85f8edd8532ae/docs/_static/img/reach.png
--------------------------------------------------------------------------------
/docs/_static/img/slide.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qgallouedec/panda-gym/10c4d8adaab07d8a29b2f8dc0aa85f8edd8532ae/docs/_static/img/slide.png
--------------------------------------------------------------------------------
/docs/_static/img/stack.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qgallouedec/panda-gym/10c4d8adaab07d8a29b2f8dc0aa85f8edd8532ae/docs/_static/img/stack.png
--------------------------------------------------------------------------------
/docs/_static/img/tiny.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qgallouedec/panda-gym/10c4d8adaab07d8a29b2f8dc0aa85f8edd8532ae/docs/_static/img/tiny.png
--------------------------------------------------------------------------------
/docs/_static/img/top_view.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qgallouedec/panda-gym/10c4d8adaab07d8a29b2f8dc0aa85f8edd8532ae/docs/_static/img/top_view.png
--------------------------------------------------------------------------------
/docs/_static/notebook/PickAndPlace.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "colab_type": "text",
7 | "id": "view-in-github"
8 | },
9 | "source": [
10 | "
"
11 | ]
12 | },
13 | {
14 | "cell_type": "markdown",
15 | "metadata": {
16 | "id": "TFFVaHq_7fSe"
17 | },
18 | "source": [
19 | "# `panda-gym` code example\n",
20 | "\n",
21 | "## Install `panda-gym`"
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": null,
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "%pip install panda-gym"
31 | ]
32 | },
33 | {
34 | "cell_type": "markdown",
35 | "metadata": {
36 | "id": "qE8atPQm870Q"
37 | },
38 | "source": [
39 | "## Interact"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": null,
45 | "metadata": {},
46 | "outputs": [],
47 | "source": [
48 | "import gymnasium as gym\n",
49 | "import panda_gym\n",
50 | "\n",
51 | "env = gym.make(\"PandaPickAndPlace-v3\", render_mode=\"rgb_array\")\n",
52 | "observation, info = env.reset()\n",
53 | "\n",
54 | "images = [env.render()]\n",
55 | "for _ in range(1000):\n",
56 | " action = env.action_space.sample()\n",
57 | " observation, reward, terminated, truncated, info = env.step(action)\n",
58 | " images.append(env.render())\n",
59 | "\n",
60 | " if terminated or truncated:\n",
61 | " observation, info = env.reset()\n",
62 | " images.append(env.render())\n",
63 | "\n",
64 | "env.close()"
65 | ]
66 | },
67 | {
68 | "cell_type": "markdown",
69 | "metadata": {
70 | "id": "Nkl-EtEZ_EQH"
71 | },
72 | "source": [
73 | "## Convert into animation\n"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "metadata": {},
80 | "outputs": [],
81 | "source": [
82 | "%pip3 install numpngw\n",
83 | "from numpngw import write_apng\n",
84 | "\n",
85 | "write_apng(\"anim.png\", images, delay=40) # real-time rendering = 40 ms between frames"
86 | ]
87 | },
88 | {
89 | "cell_type": "markdown",
90 | "metadata": {
91 | "id": "dFadL1ne_NG8"
92 | },
93 | "source": [
94 | "## Show animation"
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": null,
100 | "metadata": {},
101 | "outputs": [],
102 | "source": [
103 | "from IPython.display import Image\n",
104 | "\n",
105 | "Image(filename=\"anim.png\")"
106 | ]
107 | }
108 | ],
109 | "metadata": {
110 | "kernelspec": {
111 | "display_name": "Python 3",
112 | "language": "python",
113 | "name": "python3"
114 | },
115 | "language_info": {
116 | "name": "python",
117 | "version": "3.9.6 (default, Sep 26 2022, 11:37:49) \n[Clang 14.0.0 (clang-1400.0.29.202)]"
118 | },
119 | "vscode": {
120 | "interpreter": {
121 | "hash": "31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6"
122 | }
123 | }
124 | },
125 | "nbformat": 4,
126 | "nbformat_minor": 0
127 | }
128 |
--------------------------------------------------------------------------------
/docs/base_class/pybullet.rst:
--------------------------------------------------------------------------------
1 | .. _pybullet:
2 |
3 | PyBullet Class
4 | ==============
5 |
6 |
7 | .. automodule:: panda_gym.pybullet
8 | :members:
--------------------------------------------------------------------------------
/docs/base_class/robot.rst:
--------------------------------------------------------------------------------
1 | .. _robot:
2 |
3 | Robot
4 | =====
5 |
6 |
7 | .. autoclass:: panda_gym.envs.core.PyBulletRobot
8 | :members:
--------------------------------------------------------------------------------
/docs/base_class/robot_task.rst:
--------------------------------------------------------------------------------
1 | .. _robot_task:
2 |
3 | Robot-Task
4 | ==========
5 |
6 |
7 | .. autoclass:: panda_gym.envs.core.RobotTaskEnv
8 | :members:
--------------------------------------------------------------------------------
/docs/base_class/task.rst:
--------------------------------------------------------------------------------
1 | .. _task:
2 |
3 | Task
4 | =====
5 |
6 |
7 | .. autoclass:: panda_gym.envs.core.Task
8 | :members:
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | import os
14 | import sys
15 |
16 | sys.path.insert(0, os.path.abspath("."))
17 |
18 |
19 | # -- Project information -----------------------------------------------------
20 |
21 | project = "panda-gym"
22 | copyright = "2021, Quentin Gallouédec"
23 | author = "Quentin Gallouédec"
24 |
25 | # The full version, including alpha/beta/rc tags
26 | release = "v3.0.8"
27 |
28 |
29 | # -- General configuration ---------------------------------------------------
30 |
31 | # Add any Sphinx extension module names here, as strings. They can be
32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
33 | # ones.
34 | extensions = ["sphinx.ext.autodoc", "sphinx.ext.coverage", "sphinx.ext.napoleon"]
35 | napoleon_custom_sections = [("Returns", "params_style")]
36 |
37 | # Add any paths that contain templates here, relative to this directory.
38 | templates_path = ["_templates"]
39 |
40 | # List of patterns, relative to source directory, that match files and
41 | # directories to ignore when looking for source files.
42 | # This pattern also affects html_static_path and html_extra_path.
43 | exclude_patterns = []
44 |
45 |
46 | # -- Options for HTML output -------------------------------------------------
47 |
48 | # The theme to use for HTML and HTML Help pages. See the documentation for
49 | # a list of builtin themes.
50 | #
51 | html_theme = "sphinx_rtd_theme"
52 |
53 | # Add any paths that contain custom static files (such as style sheets) here,
54 | # relative to this directory. They are copied after the builtin static files,
55 | # so a file named "default.css" will overwrite the builtin "default.css".
56 | html_static_path = ["_static"]
57 |
--------------------------------------------------------------------------------
/docs/custom/custom_env.rst:
--------------------------------------------------------------------------------
1 | .. _custom_env:
2 |
3 | Custom environment
4 | ==================
5 |
6 | A customized environment is the junction of a **task** and a **robot**.
7 | You can choose to :ref:`define your own task`, or use one of the tasks present in the package. Similarly, you can choose to :ref:`define your own robot`, or use one of the robots present in the package.
8 |
9 | Then, you have to inherit from the :py:class:`RobotTaskEnv` class, in the following way.
10 |
11 | .. code-block:: python
12 |
13 | from panda_gym.envs.core import RobotTaskEnv
14 | from panda_gym.pybullet import PyBullet
15 |
16 |
17 | class MyRobotTaskEnv(RobotTaskEnv):
18 | """My robot-task environment."""
19 |
20 | def __init__(self, render_mode):
21 | sim = PyBullet(render_mode=render_mode)
22 | robot = MyRobot(sim)
23 | task = MyTask(sim)
24 | super().__init__(robot, task)
25 |
26 | That's it.
27 |
28 | Test it
29 | -------
30 |
31 | You can now test your environment by running the following code.
32 |
33 | .. code-block:: python
34 |
35 | env = MyRobotTaskEnv(render_mode="human")
36 |
37 | observation, info = env.reset()
38 |
39 | for _ in range(1000):
40 | action = env.action_space.sample() # random action
41 | observation, reward, terminated, truncated, info = env.step(action)
42 |
43 | if terminated or truncated:
44 | observation, info = env.reset()
45 |
46 |
--------------------------------------------------------------------------------
/docs/custom/custom_robot.rst:
--------------------------------------------------------------------------------
1 | .. _custom_robot:
2 |
3 | Custom robot
4 | ============
5 |
6 | Prerequisites
7 | -------------
8 |
9 | To create your own robot, you will need a URDF file describing the robot.
10 |
11 | Code
12 | ----
13 |
14 | To define your own robot, you need to define the following methods and attributes:
15 |
16 | - the joint indices,
17 | - the joint forces,
18 | - the ``set_action(action)`` method,
19 | - the ``get_obs()`` method and
20 | - the ``reset()`` method.
21 |
22 | For the purpose of the example, let's use a very simple robot, whose URDF file is given below. It consists of two links and a single joint.
23 |
24 | .. code-block:: xml
25 |
26 |
27 |
28 | ...
29 | ...
30 |
31 |
32 |
33 | ...
34 |
35 |
36 |
37 |
38 | Joint indices
39 | ~~~~~~~~~~~~~
40 |
41 | The first step is to identify the joints you want to be able to control with the agent. These joints will be identified by their index in the URDF file. Here, it is the index 0 joint that you want to control (it is also the only one in the URDF file). For the following, you will use ``joint_indices=np.array([0])``.
42 |
43 | Joint forces
44 | ~~~~~~~~~~~~~
45 |
46 | For each joint, you must define a maximum force. This data is usually found in the technical specifications of the robot, and sometimes in the URDF file (```` for a maximum effort of 1.0 Nm). Here, let's consider that the maximum effort is 1.0 Nm.
47 | For the following, you will use ``joint_forces=np.array([1.0])``.
48 |
49 | ``set_action`` method
50 | ~~~~~~~~~~~~~~~~~~~~~
51 |
52 | The ``set_action`` method specifies what the robot must do with the action. In the example, the robot only uses the action as a target angle for its single joint. Thus:
53 |
54 | .. code-block:: python
55 |
56 | def set_action(self, action):
57 | self.control_joints(target_angles=action)
58 |
59 |
60 | ``get_obs`` method
61 | ~~~~~~~~~~~~~~~~~~
62 |
63 | The ``get_obs`` method returns the observation associated with the robot. In the example, the robot only returns the position of its single joint.
64 |
65 | .. code-block:: python
66 |
67 | def get_obs(self):
68 | return self.get_joint_angle(joint=0)
69 |
70 |
71 | ``reset`` method
72 | ~~~~~~~~~~~~~~~~
73 |
74 | The ``reset`` method specifies how to reset the robot. In the example, the robot resets its single joint to an angle of 0.
75 |
76 | .. code-block:: python
77 |
78 | def reset(self):
79 | neutral_angle = np.array([0.0])
80 | self.set_joint_angles(angles=neutral_angle)
81 |
82 | Full code
83 | ~~~~~~~~~
84 |
85 | You now have everything you need to define your custom robot. You only have to inherit the class :py:class:`PyBulletRobot` in the following way.
86 |
87 | .. code-block:: python
88 |
89 | import numpy as np
90 | from gymnasium import spaces
91 |
92 | from panda_gym.envs.core import PyBulletRobot
93 |
94 |
95 | class MyRobot(PyBulletRobot):
96 | """My robot"""
97 |
98 | def __init__(self, sim):
99 | action_dim = 1 # = number of joints; here, 1 joint, so dimension = 1
100 | action_space = spaces.Box(-1.0, 1.0, shape=(action_dim,), dtype=np.float32)
101 | super().__init__(
102 | sim,
103 | body_name="my_robot", # choose the name you want
104 | file_name="my_robot.urdf", # the path of the URDF file
105 | base_position=np.zeros(3), # the position of the base
106 | action_space=action_space,
107 | joint_indices=np.array([0]), # list of the indices, as defined in the URDF
108 | joint_forces=np.array([1.0]), # force applied when robot is controlled (Nm)
109 | )
110 |
111 | def set_action(self, action):
112 | self.control_joints(target_angles=action)
113 |
114 | def get_obs(self):
115 | return self.get_joint_angle(joint=0)
116 |
117 | def reset(self):
118 | neutral_angle = np.array([0.0])
119 | self.set_joint_angles(angles=neutral_angle)
120 |
121 |
122 | Obviously, you have to adapt the example to your robot, especially concerning the number and the indices of the joints, as well as the forces applied for the control.
123 |
124 | You can also use other types of control, using all the methods of the parent class :py:class:`PyBulletRobot` and the simulation instance :py:class:`PyBullet`. For example for inverse kinematics you can use the method :py:meth:`PyBulletRobot.inverse_kinematics`.
125 |
126 | Test it
127 | -------
128 |
129 | The robot is ready. To see it move, execute the following code.
130 |
131 | .. code-block:: python
132 |
133 | from panda_gym.pybullet import PyBullet
134 |
135 | sim = PyBullet(render_mode="human")
136 | robot = MyRobot(sim)
137 |
138 | for _ in range(50):
139 | robot.set_action(np.array([1.0]))
140 | sim.step()
141 |
142 | To see how to use this robot to define a new environment, see the :ref:`custom environment` section.
--------------------------------------------------------------------------------
/docs/custom/custom_task.rst:
--------------------------------------------------------------------------------
1 | .. _custom_task:
2 |
3 | Custom task
4 | ===========
5 |
6 | Prerequisites
7 | -------------
8 |
9 | To create your own task, you will need the URDF files of any bodies it uses.
10 |
11 | Code
12 | ----
13 |
14 | To define your own task, you need to inherit from :py:class:`Task`, and define the following 5 methods:
15 |
16 | - ``reset()``: how the task is reset; you must define `self.goal` in this function
17 | - ``get_obs()``: returns the observation
18 | - ``get_achieved_goal()``: returns the achieved goal
19 | - ``is_success(achieved_goal, desired_goal, info)``: returns whether the task is successful
20 | - ``compute_reward(achieved_goal, desired_goal, info)``: returns the reward
21 |
22 | For the purpose of the example, let's consider here a very simple task, consisting in moving a cube toward a target position. The goal position is sampled within a volume of 10 m x 10 m x 10 m.
23 |
24 | .. code-block:: python
25 |
26 | import numpy as np
27 |
28 | from panda_gym.envs.core import Task
29 | from panda_gym.utils import distance
30 |
31 |
32 | class MyTask(Task):
33 | def __init__(self, sim):
34 | super().__init__(sim)
35 | # create a cube
36 | self.sim.create_box(body_name="object", half_extents=np.array([1, 1, 1]), mass=1.0, position=np.array([0.0, 0.0, 0.0]))
37 |
38 | def reset(self):
39 | # randomly sample a goal position
40 | self.goal = np.random.uniform(-10, 10, 3)
41 | # reset the position of the object
42 | self.sim.set_base_pose("object", position=np.array([0.0, 0.0, 0.0]), orientation=np.array([1.0, 0.0, 0.0, 0.0]))
43 |
44 | def get_obs(self):
45 | # the observation is the position of the object
46 | observation = self.sim.get_base_position("object")
47 | return observation
48 |
49 | def get_achieved_goal(self):
50 | # the achieved goal is the current position of the object
51 | achieved_goal = self.sim.get_base_position("object")
52 | return achieved_goal
53 |
54 | def is_success(self, achieved_goal, desired_goal, info={}): # info is here for consistency
55 | # compute the distance between the goal position and the current object position
56 | d = distance(achieved_goal, desired_goal)
57 | # return True if the distance is < 1.0, and False otherwise
58 | return np.array(d < 1.0, dtype=bool)
59 |
60 | def compute_reward(self, achieved_goal, desired_goal, info={}): # info is here for consistency
61 | # for this example, reward = 1.0 if the task is successful, 0.0 otherwise
62 | return self.is_success(achieved_goal, desired_goal, info).astype(np.float32)
63 |
64 |
65 |
66 | Obviously, you have to adapt the example to your task.
67 |
68 | Test it
69 | -------
70 |
71 | The task is ready. To test it, execute the following code.
72 |
73 | .. code-block:: python
74 |
75 | from panda_gym.pybullet import PyBullet
76 |
77 | sim = PyBullet(render_mode="human")
78 | task = MyTask(sim)
79 |
80 | task.reset()
81 | print(task.get_obs())
82 | print(task.get_achieved_goal())
83 | print(task.is_success(task.get_achieved_goal(), task.get_goal()))
84 | print(task.compute_reward(task.get_achieved_goal(), task.get_goal()))
85 |
--------------------------------------------------------------------------------
/docs/guide/install.rst:
--------------------------------------------------------------------------------
1 | .. _install:
2 |
3 | Installation
4 | ============
5 |
6 | ``panda-gym`` is at least compatible with Windows, MacOS and Ubuntu, and python 3.7, 3.8, 3.9 and 3.10.
7 |
8 | With pip
9 | --------
10 |
11 | To install ``panda-gym`` with pip, run:
12 |
13 | .. code-block:: bash
14 |
15 | pip install panda-gym
16 |
17 | From source
18 | -----------
19 |
20 | To install ``panda-gym`` from source, run:
21 |
22 | .. code-block:: bash
23 |
24 | git clone https://github.com/qgallouedec/panda-gym/
25 | cd panda-gym
26 | pip install -e .
27 |
--------------------------------------------------------------------------------
/docs/guide/quick_start.rst:
--------------------------------------------------------------------------------
1 | .. _quick_start:
2 |
3 | Quick Start
4 | ===========
5 |
6 | Once ``panda-gym`` is installed, you can start the "Reach" task by executing the following lines.
7 |
8 | .. code-block:: python
9 |
10 | import gymnasium as gym
11 | import panda_gym
12 |
13 | env = gym.make('PandaReach-v3', render_mode="human")
14 |
15 | observation, info = env.reset()
16 |
17 | for _ in range(1000):
18 | action = env.action_space.sample() # random action
19 | observation, reward, terminated, truncated, info = env.step(action)
20 |
21 | if terminated or truncated:
22 | observation, info = env.reset()
23 |
24 |
25 | Obviously, since the chosen actions are random, you will not see any learning. To access the section dedicated to the learning of the tasks, refer to the section :ref:`Train with stable-baselines3`.
26 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. panda-gym documentation master file, created by
2 | sphinx-quickstart on Tue Oct 19 19:29:42 2021.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to ``panda-gym``'s documentation!
7 | =========================================
8 |
9 | .. toctree::
10 | :maxdepth: 1
11 | :caption: Getting started
12 |
13 | guide/install
14 | guide/quick_start
15 |
16 | .. toctree::
17 | :maxdepth: 1
18 | :caption: Usage
19 |
20 | usage/environments
21 | usage/manual_control
22 | usage/advanced_rendering
23 | usage/save_restore_state
24 | usage/train_with_sb3
25 |
26 | .. toctree::
27 | :maxdepth: 1
28 | :caption: Your custom environment
29 |
30 | custom/custom_robot
31 | custom/custom_task
32 | custom/custom_env
33 |
34 | .. toctree::
35 | :maxdepth: 1
36 | :caption: Base classes
37 |
38 | base_class/pybullet
39 | base_class/robot
40 | base_class/task
41 | base_class/robot_task
42 |
43 | .. toctree::
44 | :maxdepth: 1
45 | :caption: Robots
46 |
47 | robots/panda
48 |
49 |
50 | Citing
51 | ------
52 | To cite this project in publications:
53 |
54 | .. code-block:: bibtex
55 |
56 | @article{gallouedec2021pandagym,
57 | title = {{panda-gym: Open-Source Goal-Conditioned Environments for Robotic Learning}},
58 | author = {Gallou{\'e}dec, Quentin and Cazin, Nicolas and Dellandr{\'e}a, Emmanuel and Chen, Liming},
59 | year = 2021,
60 | journal = {4th Robot Learning Workshop: Self-Supervised and Lifelong Learning at NeurIPS},
61 | }
62 |
63 |
64 | Indices and tables
65 | ------------------
66 |
67 | * :ref:`genindex`
68 | * :ref:`modindex`
69 | * :ref:`search`
70 |
--------------------------------------------------------------------------------
/docs/robots/panda.rst:
--------------------------------------------------------------------------------
1 | .. _panda:
2 |
3 | Panda
4 | =====
5 |
6 | .. automodule:: panda_gym.envs.robots.panda
7 | :members:
--------------------------------------------------------------------------------
/docs/usage/advanced_rendering.rst:
--------------------------------------------------------------------------------
1 | .. _advanced_rendering:
2 |
3 | Advanced rendering
4 | ==================
5 |
6 | Renderer
7 | --------
8 |
9 | There are two render modes available - ``"human"`` and ``"rgb_array"``. The ``"human"`` mode opens a window to display the live scene, while the ``"rgb_array"`` mode renders the scene as an RGB array.
10 |
11 | When it comes to renderers, there are two options: OpenGL and Tiny Renderer. The OpenGL engine is used when the render mode is set to ``"human"``. However, when the render mode is set to ``"rgb_array"`` you have the choice between using either the OpenGL or Tiny Renderer engine. The Tiny Renderer can work in headless mode, but it may not produce as high-quality results as the OpenGL engine, particularly in regards to transparency, background and shadows. If headless mode is not a requirement, it is recommended to use the OpenGL engine. To do this, pass ``renderer="OpenGL"`` to the ``gymnasium.make`` function:
12 |
13 | .. code-block:: python
14 |
15 | import gymnasium as gym
16 | import panda_gym
17 |
18 | env = gym.make("PandaReach-v3", render_mode="rgb_array", renderer="OpenGL")
19 | env.reset()
20 | image = env.render() # RGB rendering of shape (480, 720, 3)
21 | env.close()
22 |
23 | .. |tiny| image:: ../_static/img/tiny.png
24 | .. |opengl| image:: ../_static/img/opengl.png
25 |
26 | .. list-table::
27 | :widths: 50 50
28 | :header-rows: 1
29 | :align: center
30 |
31 | * - ``renderer="Tiny"``
32 | - ``renderer="OpenGL"``
33 | * - |tiny|
34 | - |opengl|
35 |
36 |
37 | Viewpoint
38 | ---------
39 |
40 | You can render from a different point of view than the default. For this, the following arguments are available:
41 |
42 | - ``render_width`` (int, optional): Image width. Defaults to 720.
43 | - ``render_height`` (int, optional): Image height. Defaults to 480.
44 | - ``render_target_position`` (np.ndarray, optional): Camera targeting this position, as (x, y, z). Defaults to [0., 0., 0.].
45 | - ``render_distance`` (float, optional): Distance of the camera. Defaults to 1.4.
46 | - ``render_yaw`` (float, optional): Yaw of the camera. Defaults to 45.
47 | - ``render_pitch`` (float, optional): Pitch of the camera. Defaults to -30.
48 | - ``render_roll`` (int, optional): Roll of the camera. Defaults to 0.
49 |
50 | Example
51 | ~~~~~~~
52 |
53 | .. code-block:: python
54 |
55 | import gymnasium as gym
56 | import panda_gym
57 |
58 |
59 | env = gym.make(
60 | "PandaSlide-v3",
61 | render_mode="rgb_array",
62 | renderer="OpenGL",
63 | render_width=480,
64 | render_height=480,
65 | render_target_position=[0.2, 0, 0],
66 | render_distance=1.0,
67 | render_yaw=90,
68 | render_pitch=-70,
69 | render_roll=0,
70 | )
71 | env.reset()
72 | image = env.render() # RGB rendering of shape (480, 480, 3)
73 | env.close()
74 |
75 | .. figure:: ../_static/img/top_view.png
76 |
--------------------------------------------------------------------------------
/docs/usage/environments.rst:
--------------------------------------------------------------------------------
1 | .. _environments:
2 |
3 | Environments
4 | ============
5 |
6 | ``panda-gym`` includes:
7 |
8 | - 1 robot:
9 | - the Franka Emika Panda robot,
10 | - 6 tasks:
11 | - **Reach**: the robot must place its end-effector at a target position,
12 | - **Push**: the robot has to push a cube to a target position,
13 | - **Slide**: the robot has to slide an object to a target position,
14 | - **Pick and place**: the robot has to pick up and place an object at a target position,
15 | - **Stack**: the robot has to stack two cubes at a target position,
16 | - **Flip**: the robot must flip the cube to a target orientation,
17 | - 2 control modes:
18 | - **End-effector displacement control**: the action corresponds to the displacement of the end-effector.
19 | - **Joints control**: the action corresponds to the individual motion of each joint,
20 | - 2 reward types:
21 |     - **Sparse**: the environment returns a reward if and only if the task is completed,
22 | - **Dense**: the closer the agent is to completing the task, the higher the reward.
23 |
24 | By default, the reward is sparse and the control mode is the end-effector displacement.
25 | The complete set of environments present in the package is presented in the following list.
26 |
27 | Sparse reward, end-effector control (default setting)
28 | -----------------------------------------------------
29 |
30 | * ``PandaReach-v3``
31 | * ``PandaPush-v3``
32 | * ``PandaSlide-v3``
33 | * ``PandaPickAndPlace-v3``
34 | * ``PandaStack-v3``
35 | * ``PandaFlip-v3``
36 |
37 | Dense reward, end-effector control
38 | ----------------------------------
39 |
40 | * ``PandaReachDense-v3``
41 | * ``PandaPushDense-v3``
42 | * ``PandaSlideDense-v3``
43 | * ``PandaPickAndPlaceDense-v3``
44 | * ``PandaStackDense-v3``
45 | * ``PandaFlipDense-v3``
46 |
47 | Sparse reward, joints control
48 | -----------------------------
49 |
50 | * ``PandaReachJoints-v3``
51 | * ``PandaPushJoints-v3``
52 | * ``PandaSlideJoints-v3``
53 | * ``PandaPickAndPlaceJoints-v3``
54 | * ``PandaStackJoints-v3``
55 | * ``PandaFlipJoints-v3``
56 |
57 | Dense reward, joints control
58 | ----------------------------
59 |
60 | * ``PandaReachJointsDense-v3``
61 | * ``PandaPushJointsDense-v3``
62 | * ``PandaSlideJointsDense-v3``
63 | * ``PandaPickAndPlaceJointsDense-v3``
64 | * ``PandaStackJointsDense-v3``
65 | * ``PandaFlipJointsDense-v3``
66 |
--------------------------------------------------------------------------------
/docs/usage/manual_control.rst:
--------------------------------------------------------------------------------
1 | .. _manual_control:
2 |
3 | Manual control
4 | ==============
5 |
6 | It is possible to manually control the robot by giving it deterministic actions based on the observations. For example, here is one way to solve the Reach task.
7 |
8 | .. code-block:: python
9 |
10 | import gymnasium as gym
11 | import panda_gym
12 |
13 | env = gym.make("PandaReach-v3", render_mode="human")
14 | observation, info = env.reset()
15 |
16 | for _ in range(1000):
17 | current_position = observation["observation"][0:3]
18 | desired_position = observation["desired_goal"][0:3]
19 | action = 5.0 * (desired_position - current_position)
20 | observation, reward, terminated, truncated, info = env.step(action)
21 |
22 | if terminated or truncated:
23 | observation, info = env.reset()
24 |
25 | env.close()
26 |
27 | The result is as follows.
28 |
29 | .. image:: https://gallouedec.com/uploads/img/manual_reach.png
30 | :alt: Manual push rendering
--------------------------------------------------------------------------------
/docs/usage/save_restore_state.rst:
--------------------------------------------------------------------------------
1 | .. _save_restore_states:
2 |
3 | Save and Restore States
4 | =======================
5 |
6 | It is possible to save a state of the entire simulation environment. This is useful if your application requires lookahead search. Below is an example of a greedy random search.
7 |
8 | .. code-block:: python
9 |
10 | import gymnasium as gym
11 | import numpy as np
12 |
13 | import panda_gym
14 |
15 | env = gym.make("PandaReachDense-v3", render_mode="human")
16 | observation, _ = env.reset()
17 |
18 | for _ in range(1000):
19 | state_id = env.save_state()
20 |
21 | # Sample 5 actions and choose the one that yields the best reward.
22 | best_reward = -np.inf
23 | best_action = None
24 | for _ in range(5):
25 | env.restore_state(state_id)
26 | action = env.action_space.sample()
27 | observation, reward, _, _, _ = env.step(action)
28 | if reward > best_reward:
29 | best_reward = reward
30 | best_action = action
31 |
32 | env.restore_state(state_id)
33 | env.remove_state(state_id) # discard the state, as it is no longer needed
34 |
35 | # Step with the best action
36 | observation, reward, terminated, truncated, info = env.step(best_action)
37 |
38 | if terminated:
39 | observation, info = env.reset()
40 |
41 | env.close()
42 |
--------------------------------------------------------------------------------
/docs/usage/train_with_sb3.rst:
--------------------------------------------------------------------------------
1 | .. _train_with_sb3:
2 |
3 | Train with stable-baselines3
4 | ============================
5 |
6 | .. warning::
7 |     SB3 is not compatible with ``panda-gym`` v3 for the moment. (See `SB3/PR#780 <https://github.com/DLR-RM/stable-baselines3/pull/780>`_). The following documentation is therefore not yet valid. To use ``panda-gym`` with SB3, you will have to use ``panda-gym==2.0.0``.
8 |
9 | You can train the environments with any gymnasium compatible library. In this documentation we explain how to use one of them: `stable-baselines3 (SB3) `_.
10 |
11 | Install SB3
12 | -----------
13 |
14 | To install SB3, follow the instructions from its documentation `Install stable-baselines3 `_.
15 |
16 | Train
17 | -----
18 |
19 | Now that SB3 is installed, you can run the following code to train an agent. You can use every algorithm compatible with ``Box`` action space (see `stable-baselines3/RL Algorithm `_). In the following example, a DDPG agent is trained to solve the Reach task.
20 |
21 | .. code-block:: python
22 |
23 | import gymnasium as gym
24 | import panda_gym
25 | from stable_baselines3 import DDPG
26 |
27 | env = gym.make("PandaReach-v2")
28 | model = DDPG(policy="MultiInputPolicy", env=env)
29 | model.learn(30_000)
30 |
31 | .. note::
32 |
33 | Here we provide the canonical code for training with SB3. For any information on the setting of hyperparameters, verbosity, saving the model and more please read the `SB3 documentation `_.
34 |
35 |
36 | Bonus: Train with RL Baselines3 Zoo
37 | -----------------------------------
38 |
39 | `RL Baselines3 Zoo `_ is the training framework associated with SB3.
40 | It provides scripts for training, evaluating agents, setting hyperparameters, plotting results and recording video. It also contains already optimized hyperparameters, including for some ``panda-gym`` environments.
41 |
42 | .. warning::
43 | The current version of RL Baselines3 Zoo provides hyperparameters for version 1 of ``panda-gym``, but not for version 2. Before training with RL Baselines3 Zoo, you will have to set your own hyperparameters by editing ``hyperparameters/.yml``. For more information, please read the `README of RL Baselines3 Zoo `_.
44 |
45 | Train
46 | ~~~~~
47 |
48 | To use it, follow the `instructions for its installation `_, then use the following command.
49 |
50 | .. code-block:: bash
51 |
52 | python train.py --algo --env
53 |
54 | For example, to train an agent with TQC on ``PandaPickAndPlace-v3``:
55 |
56 | .. code-block:: bash
57 |
58 | python train.py --algo tqc --env PandaPickAndPlace-v3
59 |
60 | Enjoy
61 | ~~~~~
62 |
63 | To visualize the trained agent, follow the `instructions `_ in the SB3 documentation. It is necessary to add ``--env-kwargs render_mode:human`` when running the enjoy script.
64 |
65 | .. code-block:: bash
66 |
67 | python enjoy.py --algo --env --folder --env-kwargs render_mode:human
--------------------------------------------------------------------------------
/examples/PickAndPlace.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "colab_type": "text",
7 | "id": "view-in-github"
8 | },
9 | "source": [
10 | "
"
11 | ]
12 | },
13 | {
14 | "cell_type": "markdown",
15 | "metadata": {
16 | "id": "TFFVaHq_7fSe"
17 | },
18 | "source": [
19 | "# `panda-gym` code example\n",
20 | "\n",
21 | "## Install `panda-gym`"
22 | ]
23 | },
24 | {
25 | "cell_type": "code",
26 | "execution_count": null,
27 | "metadata": {},
28 | "outputs": [],
29 | "source": [
30 | "%pip install panda-gym"
31 | ]
32 | },
33 | {
34 | "cell_type": "markdown",
35 | "metadata": {
36 | "id": "qE8atPQm870Q"
37 | },
38 | "source": [
39 |     "## Interact"
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": null,
45 | "metadata": {},
46 | "outputs": [],
47 | "source": [
48 | "import gymnasium as gym\n",
49 | "import panda_gym\n",
50 | "\n",
51 | "env = gym.make(\"PandaPickAndPlace-v3\", render_mode=\"rgb_array\")\n",
52 | "observation, info = env.reset()\n",
53 | "\n",
54 | "images = [env.render()]\n",
55 | "for _ in range(200):\n",
56 | " action = env.action_space.sample()\n",
57 | " observation, reward, terminated, truncated, info = env.step(action)\n",
58 | " images.append(env.render())\n",
59 | "\n",
60 | " if terminated or truncated:\n",
61 | " observation, info = env.reset()\n",
62 | " images.append(env.render())\n",
63 | "\n",
64 | "env.close()"
65 | ]
66 | },
67 | {
68 | "cell_type": "markdown",
69 | "metadata": {
70 | "id": "Nkl-EtEZ_EQH"
71 | },
72 | "source": [
73 | "## Convert into animation\n"
74 | ]
75 | },
76 | {
77 | "cell_type": "code",
78 | "execution_count": null,
79 | "metadata": {},
80 | "outputs": [],
81 | "source": [
82 | "%pip install numpngw\n",
83 | "from numpngw import write_apng\n",
84 | "\n",
85 | "write_apng(\"anim.png\", images, delay=40) # real-time rendering = 40 ms between frames"
86 | ]
87 | },
88 | {
89 | "cell_type": "markdown",
90 | "metadata": {
91 | "id": "dFadL1ne_NG8"
92 | },
93 | "source": [
94 | "## Show animation"
95 | ]
96 | },
97 | {
98 | "cell_type": "code",
99 | "execution_count": null,
100 | "metadata": {},
101 | "outputs": [],
102 | "source": [
103 | "from IPython.display import Image\n",
104 | "\n",
105 | "Image(filename=\"anim.png\")"
106 | ]
107 | }
108 | ],
109 | "metadata": {
110 | "language_info": {
111 | "name": "python"
112 | }
113 | },
114 | "nbformat": 4,
115 | "nbformat_minor": 0
116 | }
117 |
--------------------------------------------------------------------------------
/examples/reach.py:
--------------------------------------------------------------------------------
# Minimal example: run the Reach task with random actions in a GUI window.
import gymnasium as gym

import panda_gym  # noqa: F401  (import registers the Panda environments)

env = gym.make("PandaReach-v3", render_mode="human")
obs, info = env.reset()

for _ in range(1000):
    # Sample a random action and advance the simulation by one step.
    obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
    if terminated or truncated:
        obs, info = env.reset()

env.close()
17 |
--------------------------------------------------------------------------------
/examples/rgb_rendering.py:
--------------------------------------------------------------------------------
# Record a random-policy rollout of the Stack task as an animated PNG.
import gymnasium as gym
from numpngw import write_apng  # pip install numpngw

import panda_gym  # noqa: F401  (import registers the Panda environments)

env = gym.make("PandaStack-v3", render_mode="rgb_array")

obs, info = env.reset()
frames = [env.render()]

for _ in range(1000):
    obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
    frames.append(env.render())
    if terminated or truncated:
        obs, info = env.reset()
        frames.append(env.render())

env.close()

# 40 ms between frames matches the simulation timestep (real-time playback).
write_apng("stack.png", frames, delay=40)
25 |
--------------------------------------------------------------------------------
/examples/train_push.py:
--------------------------------------------------------------------------------
# WARNING: this script will not work until stable-baselines3 is compatible with
# gymnasium. See https://github.com/DLR-RM/stable-baselines3/pull/780 for more information.
import gymnasium as gym
from stable_baselines3 import DDPG, HerReplayBuffer

import panda_gym  # noqa: F401  (import registers the Panda environments)

# Train a DDPG agent with hindsight experience replay (HER) on the Push task.
env = gym.make("PandaPush-v3")
agent = DDPG(policy="MultiInputPolicy", env=env, replay_buffer_class=HerReplayBuffer, verbose=1)
agent.learn(total_timesteps=100000)
13 |
--------------------------------------------------------------------------------
/panda_gym/__init__.py:
--------------------------------------------------------------------------------
import os

from gymnasium.envs.registration import register

# The package version lives in ``version.txt`` next to this file.
with open(os.path.join(os.path.dirname(__file__), "version.txt"), "r") as file_handler:
    __version__ = file_handler.read().strip()

ENV_IDS = []

# Register every (task, reward type, control mode) combination under its own id.
for task in ["Reach", "Slide", "Push", "PickAndPlace", "Stack", "Flip"]:
    for reward_type in ["sparse", "dense"]:
        for control_type in ["ee", "joints"]:
            # The default variant (sparse reward, end-effector control) has no suffix.
            control_suffix = {"ee": "", "joints": "Joints"}[control_type]
            reward_suffix = {"sparse": "", "dense": "Dense"}[reward_type]
            env_id = "Panda" + task + control_suffix + reward_suffix + "-v3"

            register(
                id=env_id,
                entry_point="panda_gym.envs:Panda{}Env".format(task),
                kwargs={"reward_type": reward_type, "control_type": control_type},
                max_episode_steps=100 if task == "Stack" else 50,
            )

            ENV_IDS.append(env_id)
25 |
--------------------------------------------------------------------------------
/panda_gym/assets/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 |
def get_data_path():
    """Return the path of the directory containing this package's asset files (this directory)."""
    return os.path.dirname(__file__)
7 |
--------------------------------------------------------------------------------
/panda_gym/assets/colored_cube.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qgallouedec/panda-gym/10c4d8adaab07d8a29b2f8dc0aa85f8edd8532ae/panda_gym/assets/colored_cube.png
--------------------------------------------------------------------------------
/panda_gym/envs/__init__.py:
--------------------------------------------------------------------------------
1 | from panda_gym.envs.panda_tasks import (
2 | PandaFlipEnv,
3 | PandaPickAndPlaceEnv,
4 | PandaPushEnv,
5 | PandaReachEnv,
6 | PandaSlideEnv,
7 | PandaStackEnv,
8 | )
9 |
--------------------------------------------------------------------------------
/panda_gym/envs/core.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Any, Dict, Optional, Tuple
3 |
4 | import gymnasium as gym
5 | import numpy as np
6 | from gymnasium import spaces
7 | from gymnasium.utils import seeding
8 |
9 | from panda_gym.pybullet import PyBullet
10 |
11 |
class PyBulletRobot(ABC):
    """Base class for robot env.

    Args:
        sim (PyBullet): Simulation instance.
        body_name (str): The name of the robot within the simulation.
        file_name (str): Path of the urdf file.
        base_position (np.ndarray): Position of the base of the robot as (x, y, z).
        action_space (spaces.Space): Action space of the robot.
        joint_indices (np.ndarray): Indices of the joints controlled by `control_joints` and `set_joint_angles`.
        joint_forces (np.ndarray): Maximum motor forces used when controlling the joints.
    """

    def __init__(
        self,
        sim: PyBullet,
        body_name: str,
        file_name: str,
        base_position: np.ndarray,
        action_space: spaces.Space,
        joint_indices: np.ndarray,
        joint_forces: np.ndarray,
    ) -> None:
        self.sim = sim
        self.body_name = body_name
        # Load the URDF and run the subclass setup hook with rendering disabled.
        with self.sim.no_rendering():
            self._load_robot(file_name, base_position)
            self.setup()
        self.action_space = action_space
        self.joint_indices = joint_indices
        self.joint_forces = joint_forces

    def _load_robot(self, file_name: str, base_position: np.ndarray) -> None:
        """Load the robot.

        Args:
            file_name (str): The URDF file name of the robot.
            base_position (np.ndarray): The position of the robot, as (x, y, z).
        """
        self.sim.loadURDF(
            body_name=self.body_name,
            fileName=file_name,
            basePosition=base_position,
            useFixedBase=True,
        )

    def setup(self) -> None:
        """Called after robot loading. Does nothing by default; subclasses may override."""
        pass

    @abstractmethod
    def set_action(self, action: np.ndarray) -> None:
        """Set the action. Must be called just before sim.step().

        Args:
            action (np.ndarray): The action.
        """

    @abstractmethod
    def get_obs(self) -> np.ndarray:
        """Return the observation associated to the robot.

        Returns:
            np.ndarray: The observation.
        """

    @abstractmethod
    def reset(self) -> None:
        """Reset the robot and return the observation."""

    def get_link_position(self, link: int) -> np.ndarray:
        """Returns the position of a link as (x, y, z)

        Args:
            link (int): The link index.

        Returns:
            np.ndarray: Position as (x, y, z)
        """
        return self.sim.get_link_position(self.body_name, link)

    def get_link_velocity(self, link: int) -> np.ndarray:
        """Returns the velocity of a link as (vx, vy, vz)

        Args:
            link (int): The link index.

        Returns:
            np.ndarray: Velocity as (vx, vy, vz)
        """
        return self.sim.get_link_velocity(self.body_name, link)

    def get_joint_angle(self, joint: int) -> float:
        """Returns the angle of a joint

        Args:
            joint (int): The joint index.

        Returns:
            float: Joint angle
        """
        return self.sim.get_joint_angle(self.body_name, joint)

    def get_joint_velocity(self, joint: int) -> float:
        """Returns the velocity of a joint.

        Args:
            joint (int): The joint index.

        Returns:
            float: Joint velocity.
        """
        return self.sim.get_joint_velocity(self.body_name, joint)

    def control_joints(self, target_angles: np.ndarray) -> None:
        """Control the joints of the robot.

        Args:
            target_angles (np.ndarray): The target angles. The length of the array must equal to the number of joints.
        """
        self.sim.control_joints(
            body=self.body_name,
            joints=self.joint_indices,
            target_angles=target_angles,
            forces=self.joint_forces,
        )

    def set_joint_angles(self, angles: np.ndarray) -> None:
        """Set the joint position of a body. Can induce collisions.

        Args:
            angles (np.ndarray): Joint angles, one per controlled joint.
        """
        self.sim.set_joint_angles(self.body_name, joints=self.joint_indices, angles=angles)

    def inverse_kinematics(self, link: int, position: np.ndarray, orientation: np.ndarray) -> np.ndarray:
        """Compute the inverse kinematics and return the new joint values.

        Args:
            link (int): The link index.
            position (np.ndarray): Desired position of the link, as (x, y, z).
            orientation (np.ndarray): Desired orientation of the link, as quaternion (x, y, z, w).

        Returns:
            np.ndarray: The new joint values.
        """
        inverse_kinematics = self.sim.inverse_kinematics(self.body_name, link=link, position=position, orientation=orientation)
        return inverse_kinematics
157 |
158 |
class Task(ABC):
    """Base class for tasks.

    Args:
        sim (PyBullet): Simulation instance.
    """

    def __init__(self, sim: PyBullet) -> None:
        self.sim = sim
        self.goal = None  # sampled on the first call to reset()

    @abstractmethod
    def reset(self) -> None:
        """Reset the task: sample a new goal."""

    @abstractmethod
    def get_obs(self) -> np.ndarray:
        """Return the observation associated to the task."""

    @abstractmethod
    def get_achieved_goal(self) -> np.ndarray:
        """Return the achieved goal."""

    def get_goal(self) -> np.ndarray:
        """Return a copy of the current goal.

        Raises:
            RuntimeError: If no goal has been sampled yet (reset() was never called).
        """
        if self.goal is None:
            raise RuntimeError("No goal yet, call reset() first")
        return self.goal.copy()

    @abstractmethod
    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
        """Returns whether the achieved goal match the desired goal."""

    @abstractmethod
    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
        """Compute reward associated to the achieved and the desired goal."""
195 |
196 |
class RobotTaskEnv(gym.Env):
    """Robotic task goal env, as the junction of a task and a robot.

    Args:
        robot (PyBulletRobot): The robot.
        task (Task): The task.
        render_width (int, optional): Image width. Defaults to 720.
        render_height (int, optional): Image height. Defaults to 480.
        render_target_position (np.ndarray, optional): Camera targeting this position, as (x, y, z).
            Defaults to [0., 0., 0.].
        render_distance (float, optional): Distance of the camera. Defaults to 1.4.
        render_yaw (float, optional): Yaw of the camera. Defaults to 45.
        render_pitch (float, optional): Pitch of the camera. Defaults to -30.
        render_roll (int, optional): Roll of the camera. Defaults to 0.
    """

    metadata = {"render_modes": ["human", "rgb_array"]}

    def __init__(
        self,
        robot: PyBulletRobot,
        task: Task,
        render_width: int = 720,
        render_height: int = 480,
        render_target_position: Optional[np.ndarray] = None,
        render_distance: float = 1.4,
        render_yaw: float = 45,
        render_pitch: float = -30,
        render_roll: float = 0,
    ) -> None:
        assert robot.sim == task.sim, "The robot and the task must belong to the same simulation."
        self.sim = robot.sim
        self.render_mode = self.sim.render_mode
        # Shadow the class-level ``metadata`` with a per-instance copy before adding
        # ``render_fps``. Writing into the class dict (the previous behavior) leaked
        # the fps of one environment into every other RobotTaskEnv instance, which
        # is wrong when simulations use different timesteps.
        self.metadata = {**self.metadata, "render_fps": 1 / self.sim.dt}
        self.robot = robot
        self.task = task
        observation, _ = self.reset()  # required for init; seed can be changed later
        # Observation space bounds are fixed to [-10, 10]; the shapes are inferred
        # from the first observation.
        observation_shape = observation["observation"].shape
        achieved_goal_shape = observation["achieved_goal"].shape
        desired_goal_shape = observation["desired_goal"].shape
        self.observation_space = spaces.Dict(
            dict(
                observation=spaces.Box(-10.0, 10.0, shape=observation_shape, dtype=np.float32),
                desired_goal=spaces.Box(-10.0, 10.0, shape=desired_goal_shape, dtype=np.float32),
                achieved_goal=spaces.Box(-10.0, 10.0, shape=achieved_goal_shape, dtype=np.float32),
            )
        )
        self.action_space = self.robot.action_space
        self.compute_reward = self.task.compute_reward
        self._saved_goal = dict()  # maps state_id -> goal, for state saving and restoring

        self.render_width = render_width
        self.render_height = render_height
        self.render_target_position = (
            render_target_position if render_target_position is not None else np.array([0.0, 0.0, 0.0])
        )
        self.render_distance = render_distance
        self.render_yaw = render_yaw
        self.render_pitch = render_pitch
        self.render_roll = render_roll
        with self.sim.no_rendering():
            self.sim.place_visualizer(
                target_position=self.render_target_position,
                distance=self.render_distance,
                yaw=self.render_yaw,
                pitch=self.render_pitch,
            )

    def _get_obs(self) -> Dict[str, np.ndarray]:
        """Assemble the goal-conditioned observation dict from robot and task states."""
        robot_obs = self.robot.get_obs().astype(np.float32)  # robot state
        task_obs = self.task.get_obs().astype(np.float32)  # object position, velocity, etc...
        observation = np.concatenate([robot_obs, task_obs])
        achieved_goal = self.task.get_achieved_goal().astype(np.float32)
        return {
            "observation": observation,
            "achieved_goal": achieved_goal,
            "desired_goal": self.task.get_goal().astype(np.float32),
        }

    def reset(
        self, seed: Optional[int] = None, options: Optional[dict] = None
    ) -> Tuple[Dict[str, np.ndarray], Dict[str, Any]]:
        """Reset the robot and the task (sampling a new goal) and return the first observation."""
        super().reset(seed=seed, options=options)
        self.task.np_random = self.np_random  # share the seeded RNG with the task
        with self.sim.no_rendering():
            self.robot.reset()
            self.task.reset()
        observation = self._get_obs()
        info = {"is_success": self.task.is_success(observation["achieved_goal"], self.task.get_goal())}
        return observation, info

    def save_state(self) -> int:
        """Save the current state of the environment. Restore with `restore_state`.

        Returns:
            int: State unique identifier.
        """
        state_id = self.sim.save_state()
        # The goal is not part of the simulation state, so it is stored separately.
        self._saved_goal[state_id] = self.task.goal
        return state_id

    def restore_state(self, state_id: int) -> None:
        """Restore the state associated with the unique identifier.

        Args:
            state_id (int): State unique identifier.
        """
        self.sim.restore_state(state_id)
        self.task.goal = self._saved_goal[state_id]

    def remove_state(self, state_id: int) -> None:
        """Remove a saved state.

        Args:
            state_id (int): State unique identifier.
        """
        self._saved_goal.pop(state_id)
        self.sim.remove_state(state_id)

    def step(self, action: np.ndarray) -> Tuple[Dict[str, np.ndarray], float, bool, bool, Dict[str, Any]]:
        """Apply the action, advance the simulation, and return (obs, reward, terminated, truncated, info)."""
        self.robot.set_action(action)
        self.sim.step()
        observation = self._get_obs()
        # An episode is terminated iff the agent has reached the target
        terminated = bool(self.task.is_success(observation["achieved_goal"], self.task.get_goal()))
        # Truncation (time limit) is handled by the gymnasium TimeLimit wrapper.
        truncated = False
        info = {"is_success": terminated}
        reward = float(self.task.compute_reward(observation["achieved_goal"], self.task.get_goal(), info))
        return observation, reward, terminated, truncated, info

    def close(self) -> None:
        """Disconnect the simulation."""
        self.sim.close()

    def render(self) -> Optional[np.ndarray]:
        """Render.

        If render mode is "rgb_array", return an RGB array of the scene. Else, do nothing and return None.

        Returns:
            RGB np.ndarray or None: An RGB array if mode is 'rgb_array', else None.
        """
        return self.sim.render(
            width=self.render_width,
            height=self.render_height,
            target_position=self.render_target_position,
            distance=self.render_distance,
            yaw=self.render_yaw,
            pitch=self.render_pitch,
            roll=self.render_roll,
        )
347 |
--------------------------------------------------------------------------------
/panda_gym/envs/panda_tasks.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | import numpy as np
4 |
5 | from panda_gym.envs.core import RobotTaskEnv
6 | from panda_gym.envs.robots.panda import Panda
7 | from panda_gym.envs.tasks.flip import Flip
8 | from panda_gym.envs.tasks.pick_and_place import PickAndPlace
9 | from panda_gym.envs.tasks.push import Push
10 | from panda_gym.envs.tasks.reach import Reach
11 | from panda_gym.envs.tasks.slide import Slide
12 | from panda_gym.envs.tasks.stack import Stack
13 | from panda_gym.pybullet import PyBullet
14 |
15 |
class PandaFlipEnv(RobotTaskEnv):
    """Flip task with Panda robot.

    Args:
        render_mode (str, optional): Render mode. Defaults to "rgb_array".
        reward_type (str, optional): "sparse" or "dense". Defaults to "sparse".
        control_type (str, optional): "ee" to control end-effector position or "joints" to control joint values.
            Defaults to "ee".
        renderer (str, optional): Renderer, either "Tiny" or "OpenGL". Defaults to "Tiny".
            Only "OpenGL" is available for the "human" render mode.
        render_width (int, optional): Image width. Defaults to 720.
        render_height (int, optional): Image height. Defaults to 480.
        render_target_position (np.ndarray, optional): Camera targeting this position, as (x, y, z).
            Defaults to [0., 0., 0.].
        render_distance (float, optional): Distance of the camera. Defaults to 1.4.
        render_yaw (float, optional): Yaw of the camera. Defaults to 45.
        render_pitch (float, optional): Pitch of the camera. Defaults to -30.
        render_roll (float, optional): Roll of the camera. Defaults to 0.
    """

    def __init__(
        self,
        render_mode: str = "rgb_array",
        reward_type: str = "sparse",
        control_type: str = "ee",
        renderer: str = "Tiny",
        render_width: int = 720,
        render_height: int = 480,
        render_target_position: Optional[np.ndarray] = None,
        render_distance: float = 1.4,
        render_yaw: float = 45,
        render_pitch: float = -30,
        render_roll: float = 0,
    ) -> None:
        sim = PyBullet(render_mode=render_mode, renderer=renderer)
        # Gripper is needed (not blocked) to grasp the object before flipping it.
        robot = Panda(sim, block_gripper=False, base_position=np.array([-0.6, 0.0, 0.0]), control_type=control_type)
        task = Flip(sim, reward_type=reward_type)
        super().__init__(
            robot,
            task,
            render_width=render_width,
            render_height=render_height,
            render_target_position=render_target_position,
            render_distance=render_distance,
            render_yaw=render_yaw,
            render_pitch=render_pitch,
            render_roll=render_roll,
        )
65 |
66 |
class PandaPickAndPlaceEnv(RobotTaskEnv):
    """Pick and Place task with Panda robot.

    Args:
        render_mode (str, optional): Render mode. Defaults to "rgb_array".
        reward_type (str, optional): "sparse" or "dense". Defaults to "sparse".
        control_type (str, optional): "ee" to control end-effector position or "joints" to control joint values.
            Defaults to "ee".
        renderer (str, optional): Renderer, either "Tiny" or "OpenGL". Defaults to "Tiny".
            Only "OpenGL" is available for the "human" render mode.
        render_width (int, optional): Image width. Defaults to 720.
        render_height (int, optional): Image height. Defaults to 480.
        render_target_position (np.ndarray, optional): Camera targeting this position, as (x, y, z).
            Defaults to [0., 0., 0.].
        render_distance (float, optional): Distance of the camera. Defaults to 1.4.
        render_yaw (float, optional): Yaw of the camera. Defaults to 45.
        render_pitch (float, optional): Pitch of the camera. Defaults to -30.
        render_roll (float, optional): Roll of the camera. Defaults to 0.
    """

    def __init__(
        self,
        render_mode: str = "rgb_array",
        reward_type: str = "sparse",
        control_type: str = "ee",
        renderer: str = "Tiny",
        render_width: int = 720,
        render_height: int = 480,
        render_target_position: Optional[np.ndarray] = None,
        render_distance: float = 1.4,
        render_yaw: float = 45,
        render_pitch: float = -30,
        render_roll: float = 0,
    ) -> None:
        sim = PyBullet(render_mode=render_mode, renderer=renderer)
        # Gripper is needed (not blocked) to grasp the object.
        robot = Panda(sim, block_gripper=False, base_position=np.array([-0.6, 0.0, 0.0]), control_type=control_type)
        task = PickAndPlace(sim, reward_type=reward_type)
        super().__init__(
            robot,
            task,
            render_width=render_width,
            render_height=render_height,
            render_target_position=render_target_position,
            render_distance=render_distance,
            render_yaw=render_yaw,
            render_pitch=render_pitch,
            render_roll=render_roll,
        )
115 |
116 |
class PandaPushEnv(RobotTaskEnv):
    """Push task with Panda robot.

    Args:
        render_mode (str, optional): Render mode. Defaults to "rgb_array".
        reward_type (str, optional): "sparse" or "dense". Defaults to "sparse".
        control_type (str, optional): "ee" to control end-effector position or "joints" to control joint values.
            Defaults to "ee".
        renderer (str, optional): Renderer, either "Tiny" or "OpenGL". Defaults to "Tiny".
            Only "OpenGL" is available for the "human" render mode.
        render_width (int, optional): Image width. Defaults to 720.
        render_height (int, optional): Image height. Defaults to 480.
        render_target_position (np.ndarray, optional): Camera targeting this position, as (x, y, z).
            Defaults to [0., 0., 0.].
        render_distance (float, optional): Distance of the camera. Defaults to 1.4.
        render_yaw (float, optional): Yaw of the camera. Defaults to 45.
        render_pitch (float, optional): Pitch of the camera. Defaults to -30.
        render_roll (float, optional): Roll of the camera. Defaults to 0.
    """

    def __init__(
        self,
        render_mode: str = "rgb_array",
        reward_type: str = "sparse",
        control_type: str = "ee",
        renderer: str = "Tiny",
        render_width: int = 720,
        render_height: int = 480,
        render_target_position: Optional[np.ndarray] = None,
        render_distance: float = 1.4,
        render_yaw: float = 45,
        render_pitch: float = -30,
        render_roll: float = 0,
    ) -> None:
        sim = PyBullet(render_mode=render_mode, renderer=renderer)
        # The gripper is blocked: pushing only requires the closed end-effector.
        robot = Panda(sim, block_gripper=True, base_position=np.array([-0.6, 0.0, 0.0]), control_type=control_type)
        task = Push(sim, reward_type=reward_type)
        super().__init__(
            robot,
            task,
            render_width=render_width,
            render_height=render_height,
            render_target_position=render_target_position,
            render_distance=render_distance,
            render_yaw=render_yaw,
            render_pitch=render_pitch,
            render_roll=render_roll,
        )
165 |
166 |
class PandaReachEnv(RobotTaskEnv):
    """Reach task with Panda robot.

    Args:
        render_mode (str, optional): Render mode. Defaults to "rgb_array".
        reward_type (str, optional): "sparse" or "dense". Defaults to "sparse".
        control_type (str, optional): "ee" to control end-effector position or "joints" to control joint values.
            Defaults to "ee".
        renderer (str, optional): Renderer, either "Tiny" or "OpenGL". Defaults to "Tiny".
            Only "OpenGL" is available for the "human" render mode.
        render_width (int, optional): Image width. Defaults to 720.
        render_height (int, optional): Image height. Defaults to 480.
        render_target_position (np.ndarray, optional): Camera targeting this position, as (x, y, z).
            Defaults to [0., 0., 0.].
        render_distance (float, optional): Distance of the camera. Defaults to 1.4.
        render_yaw (float, optional): Yaw of the camera. Defaults to 45.
        render_pitch (float, optional): Pitch of the camera. Defaults to -30.
        render_roll (float, optional): Roll of the camera. Defaults to 0.
    """

    def __init__(
        self,
        render_mode: str = "rgb_array",
        reward_type: str = "sparse",
        control_type: str = "ee",
        renderer: str = "Tiny",
        render_width: int = 720,
        render_height: int = 480,
        render_target_position: Optional[np.ndarray] = None,
        render_distance: float = 1.4,
        render_yaw: float = 45,
        render_pitch: float = -30,
        render_roll: float = 0,
    ) -> None:
        sim = PyBullet(render_mode=render_mode, renderer=renderer)
        # The gripper is blocked: reaching does not require grasping.
        robot = Panda(sim, block_gripper=True, base_position=np.array([-0.6, 0.0, 0.0]), control_type=control_type)
        # The task tracks the end-effector position through the robot accessor.
        task = Reach(sim, reward_type=reward_type, get_ee_position=robot.get_ee_position)
        super().__init__(
            robot,
            task,
            render_width=render_width,
            render_height=render_height,
            render_target_position=render_target_position,
            render_distance=render_distance,
            render_yaw=render_yaw,
            render_pitch=render_pitch,
            render_roll=render_roll,
        )
215 |
216 |
class PandaSlideEnv(RobotTaskEnv):
    """Slide task with Panda robot.

    Args:
        render_mode (str, optional): Render mode. Defaults to "rgb_array".
        reward_type (str, optional): "sparse" or "dense". Defaults to "sparse".
        control_type (str, optional): "ee" to control end-effector position or "joints" to control joint values.
            Defaults to "ee".
        renderer (str, optional): Renderer, either "Tiny" or "OpenGL". Defaults to "Tiny".
            Only "OpenGL" is available for the "human" render mode.
        render_width (int, optional): Image width. Defaults to 720.
        render_height (int, optional): Image height. Defaults to 480.
        render_target_position (np.ndarray, optional): Camera targeting this position, as (x, y, z).
            Defaults to [0., 0., 0.].
        render_distance (float, optional): Distance of the camera. Defaults to 1.4.
        render_yaw (float, optional): Yaw of the camera. Defaults to 45.
        render_pitch (float, optional): Pitch of the camera. Defaults to -30.
        render_roll (float, optional): Roll of the camera. Defaults to 0.
    """

    def __init__(
        self,
        render_mode: str = "rgb_array",
        reward_type: str = "sparse",
        control_type: str = "ee",
        renderer: str = "Tiny",
        render_width: int = 720,
        render_height: int = 480,
        render_target_position: Optional[np.ndarray] = None,
        render_distance: float = 1.4,
        render_yaw: float = 45,
        render_pitch: float = -30,
        render_roll: float = 0,
    ) -> None:
        sim = PyBullet(render_mode=render_mode, renderer=renderer)
        # The gripper is blocked: sliding only requires the closed end-effector.
        robot = Panda(sim, block_gripper=True, base_position=np.array([-0.6, 0.0, 0.0]), control_type=control_type)
        task = Slide(sim, reward_type=reward_type)
        super().__init__(
            robot,
            task,
            render_width=render_width,
            render_height=render_height,
            render_target_position=render_target_position,
            render_distance=render_distance,
            render_yaw=render_yaw,
            render_pitch=render_pitch,
            render_roll=render_roll,
        )
265 |
266 |
class PandaStackEnv(RobotTaskEnv):
    """Stack task with Panda robot.

    Args:
        render_mode (str, optional): Render mode. Defaults to "rgb_array".
        reward_type (str, optional): "sparse" or "dense". Defaults to "sparse".
        control_type (str, optional): "ee" to control end-effector position or "joints" to control joint values.
            Defaults to "ee".
        renderer (str, optional): Renderer, either "Tiny" or "OpenGL". Defaults to "Tiny".
            Only "OpenGL" is available for the "human" render mode.
        render_width (int, optional): Image width. Defaults to 720.
        render_height (int, optional): Image height. Defaults to 480.
        render_target_position (np.ndarray, optional): Camera targeting this position, as (x, y, z).
            Defaults to [0., 0., 0.].
        render_distance (float, optional): Distance of the camera. Defaults to 1.4.
        render_yaw (float, optional): Yaw of the camera. Defaults to 45.
        render_pitch (float, optional): Pitch of the camera. Defaults to -30.
        render_roll (float, optional): Roll of the camera. Defaults to 0.
    """

    def __init__(
        self,
        render_mode: str = "rgb_array",
        reward_type: str = "sparse",
        control_type: str = "ee",
        renderer: str = "Tiny",
        render_width: int = 720,
        render_height: int = 480,
        render_target_position: Optional[np.ndarray] = None,
        render_distance: float = 1.4,
        render_yaw: float = 45,
        render_pitch: float = -30,
        render_roll: float = 0,
    ) -> None:
        sim = PyBullet(render_mode=render_mode, renderer=renderer)
        # Gripper is needed (not blocked) to pick up and stack the objects.
        robot = Panda(sim, block_gripper=False, base_position=np.array([-0.6, 0.0, 0.0]), control_type=control_type)
        task = Stack(sim, reward_type=reward_type)
        super().__init__(
            robot,
            task,
            render_width=render_width,
            render_height=render_height,
            render_target_position=render_target_position,
            render_distance=render_distance,
            render_yaw=render_yaw,
            render_pitch=render_pitch,
            render_roll=render_roll,
        )
315 |
--------------------------------------------------------------------------------
/panda_gym/envs/robots/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qgallouedec/panda-gym/10c4d8adaab07d8a29b2f8dc0aa85f8edd8532ae/panda_gym/envs/robots/__init__.py
--------------------------------------------------------------------------------
/panda_gym/envs/robots/panda.py:
--------------------------------------------------------------------------------
1 | from typing import Optional
2 |
3 | import numpy as np
4 | from gymnasium import spaces
5 |
6 | from panda_gym.envs.core import PyBulletRobot
7 | from panda_gym.pybullet import PyBullet
8 |
9 |
class Panda(PyBulletRobot):
    """Panda robot in PyBullet.

    Args:
        sim (PyBullet): Simulation instance.
        block_gripper (bool, optional): Whether the gripper is blocked. Defaults to False.
        base_position (np.ndarray, optional): Position of the base of the robot, as (x, y, z). Defaults to (0, 0, 0).
        control_type (str, optional): "ee" to control end-effector displacement or "joints" to control joint angles.
            Defaults to "ee".
    """

    def __init__(
        self,
        sim: PyBullet,
        block_gripper: bool = False,
        base_position: Optional[np.ndarray] = None,
        control_type: str = "ee",
    ) -> None:
        base_position = base_position if base_position is not None else np.zeros(3)
        self.block_gripper = block_gripper
        self.control_type = control_type
        n_action = 3 if self.control_type == "ee" else 7  # control (x, y, z) if "ee", else control the 7 joints
        n_action += 0 if self.block_gripper else 1  # one extra action for the gripper, unless blocked
        action_space = spaces.Box(-1.0, 1.0, shape=(n_action,), dtype=np.float32)
        super().__init__(
            sim,
            body_name="panda",
            file_name="franka_panda/panda.urdf",
            base_position=base_position,
            action_space=action_space,
            joint_indices=np.array([0, 1, 2, 3, 4, 5, 6, 9, 10]),  # 7 arm joints + 2 finger joints
            joint_forces=np.array([87.0, 87.0, 87.0, 87.0, 12.0, 120.0, 120.0, 170.0, 170.0]),
        )

        self.fingers_indices = np.array([9, 10])
        self.neutral_joint_values = np.array([0.00, 0.41, 0.00, -1.85, 0.00, 2.26, 0.79, 0.00, 0.00])
        self.ee_link = 11
        # Fingertip friction settings, presumably tuned for stable grasping — confirm against upstream.
        self.sim.set_lateral_friction(self.body_name, self.fingers_indices[0], lateral_friction=1.0)
        self.sim.set_lateral_friction(self.body_name, self.fingers_indices[1], lateral_friction=1.0)
        self.sim.set_spinning_friction(self.body_name, self.fingers_indices[0], spinning_friction=0.001)
        self.sim.set_spinning_friction(self.body_name, self.fingers_indices[1], spinning_friction=0.001)

    def set_action(self, action: np.ndarray) -> None:
        """Apply the action: drive the arm joints and, unless the gripper is blocked, the fingers.

        Args:
            action (np.ndarray): Action; clipped to the action space bounds before use.
        """
        action = action.copy()  # ensure the caller's array is not modified
        action = np.clip(action, self.action_space.low, self.action_space.high)
        if self.control_type == "ee":
            ee_displacement = action[:3]
            target_arm_angles = self.ee_displacement_to_target_arm_angles(ee_displacement)
        else:
            arm_joint_ctrl = action[:7]
            target_arm_angles = self.arm_joint_ctrl_to_target_arm_angles(arm_joint_ctrl)

        if self.block_gripper:
            target_fingers_width = 0
        else:
            fingers_ctrl = action[-1] * 0.2  # limit maximum change in position
            fingers_width = self.get_fingers_width()
            target_fingers_width = fingers_width + fingers_ctrl

        # Each finger takes half of the target width (symmetric gripper).
        target_angles = np.concatenate((target_arm_angles, [target_fingers_width / 2, target_fingers_width / 2]))
        self.control_joints(target_angles=target_angles)

    def ee_displacement_to_target_arm_angles(self, ee_displacement: np.ndarray) -> np.ndarray:
        """Compute the target arm angles from the end-effector displacement.

        Args:
            ee_displacement (np.ndarray): End-effector displacement, as (dx, dy, dz).

        Returns:
            np.ndarray: Target arm angles, as the angles of the 7 arm joints.
        """
        ee_displacement = ee_displacement[:3] * 0.05  # limit maximum change in position
        # get the current position and the target position
        ee_position = self.get_ee_position()
        target_ee_position = ee_position + ee_displacement
        # Clip the height target. For some reason, it has a great impact on learning
        target_ee_position[2] = np.max((0, target_ee_position[2]))
        # compute the new joint angles
        target_arm_angles = self.inverse_kinematics(
            link=self.ee_link, position=target_ee_position, orientation=np.array([1.0, 0.0, 0.0, 0.0])
        )
        target_arm_angles = target_arm_angles[:7]  # remove fingers angles
        return target_arm_angles

    def arm_joint_ctrl_to_target_arm_angles(self, arm_joint_ctrl: np.ndarray) -> np.ndarray:
        """Compute the target arm angles from the arm joint control.

        Args:
            arm_joint_ctrl (np.ndarray): Control of the 7 joints.

        Returns:
            np.ndarray: Target arm angles, as the angles of the 7 arm joints.
        """
        arm_joint_ctrl = arm_joint_ctrl * 0.05  # limit maximum change in position
        # get the current position and the target position
        current_arm_joint_angles = np.array([self.get_joint_angle(joint=i) for i in range(7)])
        target_arm_angles = current_arm_joint_angles + arm_joint_ctrl
        return target_arm_angles

    def get_obs(self) -> np.ndarray:
        """Return the robot observation: end-effector position and velocity, plus the fingers
        opening when the gripper is not blocked."""
        # end-effector position and velocity
        ee_position = np.array(self.get_ee_position())
        ee_velocity = np.array(self.get_ee_velocity())
        # fingers opening
        if not self.block_gripper:
            fingers_width = self.get_fingers_width()
            observation = np.concatenate((ee_position, ee_velocity, [fingers_width]))
        else:
            observation = np.concatenate((ee_position, ee_velocity))
        return observation

    def reset(self) -> None:
        """Reset the robot to its neutral pose."""
        self.set_joint_neutral()

    def set_joint_neutral(self) -> None:
        """Set the robot to its neutral pose."""
        self.set_joint_angles(self.neutral_joint_values)

    def get_fingers_width(self) -> float:
        """Get the distance between the fingers."""
        finger1 = self.sim.get_joint_angle(self.body_name, self.fingers_indices[0])
        finger2 = self.sim.get_joint_angle(self.body_name, self.fingers_indices[1])
        return finger1 + finger2

    def get_ee_position(self) -> np.ndarray:
        """Returns the position of the end-effector as (x, y, z)"""
        return self.get_link_position(self.ee_link)

    def get_ee_velocity(self) -> np.ndarray:
        """Returns the velocity of the end-effector as (vx, vy, vz)"""
        return self.get_link_velocity(self.ee_link)
--------------------------------------------------------------------------------
/panda_gym/envs/tasks/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/qgallouedec/panda-gym/10c4d8adaab07d8a29b2f8dc0aa85f8edd8532ae/panda_gym/envs/tasks/__init__.py
--------------------------------------------------------------------------------
/panda_gym/envs/tasks/flip.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, Tuple
2 |
3 | import numpy as np
4 | from scipy.spatial.transform import Rotation as R
5 |
6 | from panda_gym.envs.core import Task
7 | from panda_gym.pybullet import PyBullet
8 | from panda_gym.utils import angle_distance
9 |
10 |
class Flip(Task):
    """Flip task: bring a cube to a randomly sampled target orientation.

    Args:
        sim (PyBullet): Simulation instance.
        reward_type (str, optional): "sparse" or "dense". Defaults to "sparse".
        distance_threshold (float, optional): Angular distance to the goal rotation under which
            the task is considered successful (unit defined by `angle_distance`, presumably
            radians — confirm in panda_gym.utils). Defaults to 0.2.
        obj_xy_range (float, optional): Range of the object start position in the xy plane.
            Defaults to 0.3.
    """

    def __init__(
        self,
        sim: PyBullet,
        reward_type: str = "sparse",
        distance_threshold: float = 0.2,
        obj_xy_range: float = 0.3,
    ) -> None:
        super().__init__(sim)
        self.reward_type = reward_type
        self.distance_threshold = distance_threshold
        self.object_size = 0.04
        self.obj_range_low = np.array([-obj_xy_range / 2, -obj_xy_range / 2, 0])
        self.obj_range_high = np.array([obj_xy_range / 2, obj_xy_range / 2, 0])
        with self.sim.no_rendering():
            self._create_scene()

    def _create_scene(self) -> None:
        """Create the scene: plane, table, the cube to flip and a ghost cube showing the goal rotation."""
        self.sim.create_plane(z_offset=-0.4)
        self.sim.create_table(length=1.1, width=0.7, height=0.4, x_offset=-0.3)
        self.sim.create_box(
            body_name="object",
            half_extents=np.ones(3) * self.object_size / 2,
            mass=1.0,
            position=np.array([0.0, 0.0, self.object_size / 2]),
            texture="colored_cube.png",
        )
        # Semi-transparent, massless ghost cube that visualizes the target rotation.
        self.sim.create_box(
            body_name="target",
            half_extents=np.ones(3) * self.object_size / 2,
            mass=0.0,
            ghost=True,
            position=np.array([0.0, 0.0, 3 * self.object_size / 2]),
            rgba_color=np.array([1.0, 1.0, 1.0, 0.5]),
            texture="colored_cube.png",
        )

    def get_obs(self) -> np.ndarray:
        """Return the task observation: object position, rotation (quaternion) and velocities."""
        # position, rotation of the object
        object_position = self.sim.get_base_position("object")
        object_rotation = self.sim.get_base_rotation("object", "quaternion")
        object_velocity = self.sim.get_base_velocity("object")
        object_angular_velocity = self.sim.get_base_angular_velocity("object")
        observation = np.concatenate([object_position, object_rotation, object_velocity, object_angular_velocity])
        return observation

    def get_achieved_goal(self) -> np.ndarray:
        """Return the object rotation as a quaternion, which is the achieved goal."""
        object_rotation = np.array(self.sim.get_base_rotation("object", "quaternion"))
        return object_rotation

    def reset(self) -> None:
        """Sample a new goal rotation and reset the object and target marker poses."""
        self.goal = self._sample_goal()
        object_position, object_orientation = self._sample_object()
        self.sim.set_base_pose("target", np.array([0.0, 0.0, 3 * self.object_size / 2]), self.goal)
        self.sim.set_base_pose("object", object_position, object_orientation)

    def _sample_goal(self) -> np.ndarray:
        """Randomize goal: a uniformly random rotation, returned as a quaternion."""
        goal = R.random(random_state=self.np_random).as_quat()
        return goal

    def _sample_object(self) -> Tuple[np.ndarray, np.ndarray]:
        """Randomize start position of object."""
        object_position = np.array([0.0, 0.0, self.object_size / 2])
        noise = self.np_random.uniform(self.obj_range_low, self.obj_range_high)
        object_position += noise
        # Identity start rotation, as a 3-vector of zeros.
        object_rotation = np.zeros(3)
        return object_position, object_rotation

    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
        """Return whether the angular distance to the goal is below the threshold."""
        d = angle_distance(achieved_goal, desired_goal)
        return np.array(d < self.distance_threshold, dtype=bool)

    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
        """Return the reward: 0/-1 success indicator if sparse, else the negative angular distance."""
        d = angle_distance(achieved_goal, desired_goal)
        if self.reward_type == "sparse":
            return -np.array(d > self.distance_threshold, dtype=np.float32)
        else:
            return -d.astype(np.float32)
91 |
--------------------------------------------------------------------------------
/panda_gym/envs/tasks/pick_and_place.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict
2 |
3 | import numpy as np
4 |
5 | from panda_gym.envs.core import Task
6 | from panda_gym.pybullet import PyBullet
7 | from panda_gym.utils import distance
8 |
9 |
class PickAndPlace(Task):
    """Pick-and-place task: bring a cube to a target position, possibly above the table.

    Args:
        sim (PyBullet): Simulation instance.
        reward_type (str, optional): "sparse" or "dense". Defaults to "sparse".
        distance_threshold (float, optional): Distance to the goal under which the task is
            considered successful. Defaults to 0.05.
        goal_xy_range (float, optional): Range of the goal position in the xy plane. Defaults to 0.3.
        goal_z_range (float, optional): Range of the goal height. Defaults to 0.2.
        obj_xy_range (float, optional): Range of the object start position in the xy plane.
            Defaults to 0.3.
    """

    def __init__(
        self,
        sim: PyBullet,
        reward_type: str = "sparse",
        distance_threshold: float = 0.05,
        goal_xy_range: float = 0.3,
        goal_z_range: float = 0.2,
        obj_xy_range: float = 0.3,
    ) -> None:
        super().__init__(sim)
        self.reward_type = reward_type
        self.distance_threshold = distance_threshold
        self.object_size = 0.04
        self.goal_range_low = np.array([-goal_xy_range / 2, -goal_xy_range / 2, 0])
        self.goal_range_high = np.array([goal_xy_range / 2, goal_xy_range / 2, goal_z_range])
        self.obj_range_low = np.array([-obj_xy_range / 2, -obj_xy_range / 2, 0])
        self.obj_range_high = np.array([obj_xy_range / 2, obj_xy_range / 2, 0])
        with self.sim.no_rendering():
            self._create_scene()

    def _create_scene(self) -> None:
        """Create the scene: plane, table, the cube to move and a ghost cube marking the goal."""
        self.sim.create_plane(z_offset=-0.4)
        self.sim.create_table(length=1.1, width=0.7, height=0.4, x_offset=-0.3)
        self.sim.create_box(
            body_name="object",
            half_extents=np.ones(3) * self.object_size / 2,
            mass=1.0,
            position=np.array([0.0, 0.0, self.object_size / 2]),
            rgba_color=np.array([0.1, 0.9, 0.1, 1.0]),
        )
        # Semi-transparent, massless ghost cube that visualizes the goal position.
        self.sim.create_box(
            body_name="target",
            half_extents=np.ones(3) * self.object_size / 2,
            mass=0.0,
            ghost=True,
            position=np.array([0.0, 0.0, 0.05]),
            rgba_color=np.array([0.1, 0.9, 0.1, 0.3]),
        )

    def get_obs(self) -> np.ndarray:
        """Return the task observation: object position, rotation and velocities."""
        # position, rotation of the object
        object_position = self.sim.get_base_position("object")
        object_rotation = self.sim.get_base_rotation("object")
        object_velocity = self.sim.get_base_velocity("object")
        object_angular_velocity = self.sim.get_base_angular_velocity("object")
        observation = np.concatenate([object_position, object_rotation, object_velocity, object_angular_velocity])
        return observation

    def get_achieved_goal(self) -> np.ndarray:
        """Return the object position, which is the achieved goal."""
        object_position = np.array(self.sim.get_base_position("object"))
        return object_position

    def reset(self) -> None:
        """Sample a new goal and object position and move both bodies accordingly."""
        self.goal = self._sample_goal()
        object_position = self._sample_object()
        self.sim.set_base_pose("target", self.goal, np.array([0.0, 0.0, 0.0, 1.0]))
        self.sim.set_base_pose("object", object_position, np.array([0.0, 0.0, 0.0, 1.0]))

    def _sample_goal(self) -> np.ndarray:
        """Sample a goal."""
        goal = np.array([0.0, 0.0, self.object_size / 2])  # z offset for the cube center
        noise = self.np_random.uniform(self.goal_range_low, self.goal_range_high)
        # With 30% probability, force the goal onto the table surface (z noise = 0).
        if self.np_random.random() < 0.3:
            noise[2] = 0.0
        goal += noise
        return goal

    def _sample_object(self) -> np.ndarray:
        """Randomize start position of object."""
        object_position = np.array([0.0, 0.0, self.object_size / 2])
        noise = self.np_random.uniform(self.obj_range_low, self.obj_range_high)
        object_position += noise
        return object_position

    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
        """Return whether the distance to the goal is below the threshold."""
        d = distance(achieved_goal, desired_goal)
        return np.array(d < self.distance_threshold, dtype=bool)

    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
        """Return the reward: 0/-1 success indicator if sparse, else the negative distance."""
        d = distance(achieved_goal, desired_goal)
        if self.reward_type == "sparse":
            return -np.array(d > self.distance_threshold, dtype=np.float32)
        else:
            return -d.astype(np.float32)
96 |
--------------------------------------------------------------------------------
/panda_gym/envs/tasks/push.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict
2 |
3 | import numpy as np
4 |
5 | from panda_gym.envs.core import Task
6 | from panda_gym.utils import distance
7 |
8 |
class Push(Task):
    """Push task: push a cube to a target position on the table surface.

    Args:
        sim: Simulation instance.
        reward_type (str, optional): "sparse" or "dense". Defaults to "sparse".
        distance_threshold (float, optional): Distance to the goal under which the task is
            considered successful. Defaults to 0.05.
        goal_xy_range (float, optional): Range of the goal position in the xy plane. Defaults to 0.3.
        obj_xy_range (float, optional): Range of the object start position in the xy plane.
            Defaults to 0.3.
    """

    def __init__(
        self,
        sim,
        reward_type: str = "sparse",
        distance_threshold: float = 0.05,
        goal_xy_range: float = 0.3,
        obj_xy_range: float = 0.3,
    ) -> None:
        super().__init__(sim)
        self.reward_type = reward_type
        self.distance_threshold = distance_threshold
        self.object_size = 0.04
        self.goal_range_low = np.array([-goal_xy_range / 2, -goal_xy_range / 2, 0])
        self.goal_range_high = np.array([goal_xy_range / 2, goal_xy_range / 2, 0])
        self.obj_range_low = np.array([-obj_xy_range / 2, -obj_xy_range / 2, 0])
        self.obj_range_high = np.array([obj_xy_range / 2, obj_xy_range / 2, 0])
        with self.sim.no_rendering():
            self._create_scene()

    def _create_scene(self) -> None:
        """Create the scene: plane, table, the cube to push and a ghost cube marking the goal."""
        self.sim.create_plane(z_offset=-0.4)
        self.sim.create_table(length=1.1, width=0.7, height=0.4, x_offset=-0.3)
        self.sim.create_box(
            body_name="object",
            half_extents=np.ones(3) * self.object_size / 2,
            mass=1.0,
            position=np.array([0.0, 0.0, self.object_size / 2]),
            rgba_color=np.array([0.1, 0.9, 0.1, 1.0]),
        )
        # Semi-transparent, massless ghost cube that visualizes the goal position.
        self.sim.create_box(
            body_name="target",
            half_extents=np.ones(3) * self.object_size / 2,
            mass=0.0,
            ghost=True,
            position=np.array([0.0, 0.0, self.object_size / 2]),
            rgba_color=np.array([0.1, 0.9, 0.1, 0.3]),
        )

    def get_obs(self) -> np.ndarray:
        """Return the task observation: object position, rotation and velocities."""
        # position, rotation of the object
        object_position = np.array(self.sim.get_base_position("object"))
        object_rotation = np.array(self.sim.get_base_rotation("object"))
        object_velocity = np.array(self.sim.get_base_velocity("object"))
        object_angular_velocity = np.array(self.sim.get_base_angular_velocity("object"))
        observation = np.concatenate(
            [
                object_position,
                object_rotation,
                object_velocity,
                object_angular_velocity,
            ]
        )
        return observation

    def get_achieved_goal(self) -> np.ndarray:
        """Return the object position, which is the achieved goal."""
        object_position = np.array(self.sim.get_base_position("object"))
        return object_position

    def reset(self) -> None:
        """Sample a new goal and object position and move both bodies accordingly."""
        self.goal = self._sample_goal()
        object_position = self._sample_object()
        self.sim.set_base_pose("target", self.goal, np.array([0.0, 0.0, 0.0, 1.0]))
        self.sim.set_base_pose("object", object_position, np.array([0.0, 0.0, 0.0, 1.0]))

    def _sample_goal(self) -> np.ndarray:
        """Randomize goal."""
        goal = np.array([0.0, 0.0, self.object_size / 2])  # z offset for the cube center
        noise = self.np_random.uniform(self.goal_range_low, self.goal_range_high)
        goal += noise
        return goal

    def _sample_object(self) -> np.ndarray:
        """Randomize start position of object."""
        object_position = np.array([0.0, 0.0, self.object_size / 2])
        noise = self.np_random.uniform(self.obj_range_low, self.obj_range_high)
        object_position += noise
        return object_position

    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
        """Return whether the distance to the goal is below the threshold."""
        d = distance(achieved_goal, desired_goal)
        return np.array(d < self.distance_threshold, dtype=bool)

    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
        """Return the reward: 0/-1 success indicator if sparse, else the negative distance."""
        d = distance(achieved_goal, desired_goal)
        if self.reward_type == "sparse":
            return -np.array(d > self.distance_threshold, dtype=np.float32)
        else:
            return -d.astype(np.float32)
98 |
--------------------------------------------------------------------------------
/panda_gym/envs/tasks/reach.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict
2 |
3 | import numpy as np
4 |
5 | from panda_gym.envs.core import Task
6 | from panda_gym.utils import distance
7 |
8 |
class Reach(Task):
    """Reach task: bring the robot end-effector to a randomly sampled target sphere."""

    def __init__(
        self,
        sim,
        get_ee_position,
        reward_type="sparse",
        distance_threshold=0.05,
        goal_range=0.3,
    ) -> None:
        super().__init__(sim)
        self.reward_type = reward_type
        self.distance_threshold = distance_threshold
        self.get_ee_position = get_ee_position
        half_range = goal_range / 2
        self.goal_range_low = np.array([-half_range, -half_range, 0])
        self.goal_range_high = np.array([half_range, half_range, goal_range])
        with self.sim.no_rendering():
            self._create_scene()

    def _create_scene(self) -> None:
        """Build the static scene: plane, table and a ghost target sphere."""
        self.sim.create_plane(z_offset=-0.4)
        self.sim.create_table(length=1.1, width=0.7, height=0.4, x_offset=-0.3)
        self.sim.create_sphere(
            body_name="target",
            radius=0.02,
            mass=0.0,
            ghost=True,
            position=np.zeros(3),
            rgba_color=np.array([0.1, 0.9, 0.1, 0.3]),
        )

    def get_obs(self) -> np.ndarray:
        """Reach has no task-specific observation."""
        return np.array([])

    def get_achieved_goal(self) -> np.ndarray:
        """The achieved goal is the current end-effector position."""
        return np.array(self.get_ee_position())

    def reset(self) -> None:
        """Sample a new goal and move the target marker there."""
        self.goal = self._sample_goal()
        self.sim.set_base_pose("target", self.goal, np.array([0.0, 0.0, 0.0, 1.0]))

    def _sample_goal(self) -> np.ndarray:
        """Uniformly sample a goal within the configured range."""
        return self.np_random.uniform(self.goal_range_low, self.goal_range_high)

    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
        """Success when the end-effector is within distance_threshold of the goal."""
        return np.array(distance(achieved_goal, desired_goal) < self.distance_threshold, dtype=bool)

    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
        """Sparse: -1 while off-target, 0 on success. Dense: negative goal distance."""
        d = distance(achieved_goal, desired_goal)
        if self.reward_type == "sparse":
            return -np.array(d > self.distance_threshold, dtype=np.float32)
        return -d.astype(np.float32)
65 |
--------------------------------------------------------------------------------
/panda_gym/envs/tasks/slide.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict
2 |
3 | import numpy as np
4 |
5 | from panda_gym.envs.core import Task
6 | from panda_gym.utils import distance
7 |
8 |
class Slide(Task):
    """Slide task: push a low-friction puck so that it slides onto a target position."""

    def __init__(
        self,
        sim,
        reward_type="sparse",
        distance_threshold=0.05,
        goal_xy_range=0.3,
        goal_x_offset=0.4,
        obj_xy_range=0.3,
    ) -> None:
        super().__init__(sim)
        self.reward_type = reward_type
        self.distance_threshold = distance_threshold
        self.object_size = 0.06
        half_goal = goal_xy_range / 2
        half_obj = obj_xy_range / 2
        # Goal range is shifted along +x by goal_x_offset; object spawns around the origin.
        self.goal_range_low = np.array([-half_goal + goal_x_offset, -half_goal, 0])
        self.goal_range_high = np.array([half_goal + goal_x_offset, half_goal, 0])
        self.obj_range_low = np.array([-half_obj, -half_obj, 0])
        self.obj_range_high = np.array([half_obj, half_obj, 0])
        with self.sim.no_rendering():
            self._create_scene()

    def _create_scene(self) -> None:
        """Build the scene: plane, long table, sliding puck and ghost target."""
        self.sim.create_plane(z_offset=-0.4)
        self.sim.create_table(length=1.4, width=0.7, height=0.4, x_offset=-0.1)
        self.sim.create_cylinder(
            body_name="object",
            mass=1.0,
            radius=self.object_size / 2,
            height=self.object_size / 2,
            position=np.array([0.0, 0.0, self.object_size / 2]),
            rgba_color=np.array([0.1, 0.9, 0.1, 1.0]),
            lateral_friction=0.04,  # low friction so the puck slides
        )
        self.sim.create_cylinder(
            body_name="target",
            mass=0.0,
            ghost=True,
            radius=self.object_size / 2,
            height=self.object_size / 2,
            position=np.array([0.0, 0.0, self.object_size / 2]),
            rgba_color=np.array([0.1, 0.9, 0.1, 0.3]),
        )

    def get_obs(self) -> np.ndarray:
        """Puck pose (position + euler rotation) and twist, concatenated."""
        return np.concatenate(
            [
                np.array(self.sim.get_base_position("object")),
                np.array(self.sim.get_base_rotation("object")),
                np.array(self.sim.get_base_velocity("object")),
                np.array(self.sim.get_base_angular_velocity("object")),
            ]
        )

    def get_achieved_goal(self) -> np.ndarray:
        """The achieved goal is the current puck position, as (x, y, z)."""
        return np.array(self.sim.get_base_position("object"))

    def reset(self) -> None:
        """Sample a new goal and puck start pose and write them into the simulation."""
        identity_quat = np.array([0.0, 0.0, 0.0, 1.0])
        self.goal = self._sample_goal()
        object_position = self._sample_object()
        self.sim.set_base_pose("target", self.goal, identity_quat)
        self.sim.set_base_pose("object", object_position, identity_quat)

    def _sample_goal(self) -> np.ndarray:
        """Sample a goal at puck-center height with uniform noise in the goal range."""
        base = np.array([0.0, 0.0, self.object_size / 2])
        return base + self.np_random.uniform(self.goal_range_low, self.goal_range_high)

    def _sample_object(self) -> np.ndarray:
        """Sample the puck start position on the table."""
        base = np.array([0.0, 0.0, self.object_size / 2])
        return base + self.np_random.uniform(self.obj_range_low, self.obj_range_high)

    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
        """Success when the puck lies within distance_threshold of the goal."""
        return np.array(distance(achieved_goal, desired_goal) < self.distance_threshold, dtype=bool)

    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
        """Sparse: -1 while off-target, 0 on success. Dense: negative goal distance."""
        d = distance(achieved_goal, desired_goal)
        if self.reward_type == "sparse":
            return -np.array(d > self.distance_threshold, dtype=np.float32)
        return -d.astype(np.float32)
102 |
--------------------------------------------------------------------------------
/panda_gym/envs/tasks/stack.py:
--------------------------------------------------------------------------------
1 | from typing import Any, Dict, Tuple
2 |
3 | import numpy as np
4 |
5 | from panda_gym.envs.core import Task
6 | from panda_gym.utils import distance
7 |
8 |
class Stack(Task):
    """Stack task: place two cubes at their targets, the second stacked on the first."""

    def __init__(
        self,
        sim,
        reward_type="sparse",
        distance_threshold=0.1,
        goal_xy_range=0.3,
        obj_xy_range=0.3,
    ) -> None:
        super().__init__(sim)
        self.reward_type = reward_type
        self.distance_threshold = distance_threshold
        self.object_size = 0.04
        half_goal = goal_xy_range / 2
        half_obj = obj_xy_range / 2
        self.goal_range_low = np.array([-half_goal, -half_goal, 0])
        self.goal_range_high = np.array([half_goal, half_goal, 0])
        self.obj_range_low = np.array([-half_obj, -half_obj, 0])
        self.obj_range_high = np.array([half_obj, half_obj, 0])
        with self.sim.no_rendering():
            self._create_scene()

    def _create_scene(self) -> None:
        """Build the scene: plane, table, two cubes and their ghost targets."""
        self.sim.create_plane(z_offset=-0.4)
        self.sim.create_table(length=1.1, width=0.7, height=0.4, x_offset=-0.3)
        half_extents = np.ones(3) * self.object_size / 2
        self.sim.create_box(
            body_name="object1",
            half_extents=half_extents,
            mass=2.0,
            position=np.array([0.0, 0.0, self.object_size / 2]),
            rgba_color=np.array([0.1, 0.1, 0.9, 1.0]),
        )
        self.sim.create_box(
            body_name="target1",
            half_extents=half_extents,
            mass=0.0,
            ghost=True,
            position=np.array([0.0, 0.0, 0.05]),
            rgba_color=np.array([0.1, 0.1, 0.9, 0.3]),
        )
        self.sim.create_box(
            body_name="object2",
            half_extents=half_extents,
            mass=1.0,
            position=np.array([0.5, 0.0, self.object_size / 2]),
            rgba_color=np.array([0.1, 0.9, 0.1, 1.0]),
        )
        self.sim.create_box(
            body_name="target2",
            half_extents=half_extents,
            mass=0.0,
            ghost=True,
            position=np.array([0.5, 0.0, 0.05]),
            rgba_color=np.array([0.1, 0.9, 0.1, 0.3]),
        )

    def get_obs(self) -> np.ndarray:
        """Pose and twist of both cubes, concatenated into one flat array."""
        parts = []
        for name in ("object1", "object2"):
            parts.extend(
                [
                    np.array(self.sim.get_base_position(name)),
                    np.array(self.sim.get_base_rotation(name)),
                    np.array(self.sim.get_base_velocity(name)),
                    np.array(self.sim.get_base_angular_velocity(name)),
                ]
            )
        return np.concatenate(parts)

    def get_achieved_goal(self) -> np.ndarray:
        """Achieved goal: positions of both cubes, as a 6-vector."""
        return np.concatenate(
            (self.sim.get_base_position("object1"), self.sim.get_base_position("object2"))
        )

    def reset(self) -> None:
        """Sample goal and start poses, then write them into the simulation."""
        identity_quat = np.array([0.0, 0.0, 0.0, 1.0])
        self.goal = self._sample_goal()
        object1_position, object2_position = self._sample_objects()
        self.sim.set_base_pose("target1", self.goal[:3], identity_quat)
        self.sim.set_base_pose("target2", self.goal[3:], identity_quat)
        self.sim.set_base_pose("object1", object1_position, identity_quat)
        self.sim.set_base_pose("object2", object2_position, identity_quat)

    def _sample_goal(self) -> np.ndarray:
        """Sample the stacked goal: both cubes share the same xy noise, cube 2 directly above cube 1."""
        noise = self.np_random.uniform(self.goal_range_low, self.goal_range_high)
        goal1 = np.array([0.0, 0.0, self.object_size / 2]) + noise  # bottom cube center
        goal2 = np.array([0.0, 0.0, 3 * self.object_size / 2]) + noise  # top cube center
        return np.concatenate((goal1, goal2))

    def _sample_objects(self) -> Tuple[np.ndarray, np.ndarray]:
        """Sample independent start positions for the two cubes.

        NOTE(review): nothing guarantees the cubes spawn apart from each other;
        a rejection loop was present upstream but is disabled.
        """
        noise1 = self.np_random.uniform(self.obj_range_low, self.obj_range_high)
        noise2 = self.np_random.uniform(self.obj_range_low, self.obj_range_high)
        object1_position = np.array([0.0, 0.0, self.object_size / 2]) + noise1
        object2_position = np.array([0.0, 0.0, 3 * self.object_size / 2]) + noise2
        return object1_position, object2_position

    def is_success(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
        # Must stay vectorized: achieved/desired goals may be batched (HER relabeling).
        d = distance(achieved_goal, desired_goal)
        return np.array(d < self.distance_threshold, dtype=bool)

    def compute_reward(self, achieved_goal: np.ndarray, desired_goal: np.ndarray, info: Dict[str, Any] = {}) -> np.ndarray:
        """Sparse: -1 while off-target, 0 on success. Dense: negative goal distance."""
        d = distance(achieved_goal, desired_goal)
        if self.reward_type == "sparse":
            return -np.array(d > self.distance_threshold, dtype=np.float32)
        return -d.astype(np.float32)
131 |
--------------------------------------------------------------------------------
/panda_gym/pybullet.py:
--------------------------------------------------------------------------------
1 | import os
2 | from contextlib import contextmanager
3 | from typing import Any, Dict, Iterator, Optional
4 |
5 | import numpy as np
6 | import pybullet as p
7 | import pybullet_data
8 | import pybullet_utils.bullet_client as bc
9 |
10 | import panda_gym.assets
11 |
12 |
class PyBullet:
    """Convenient class to use PyBullet physics engine.

    Args:
        render_mode (str, optional): Render mode, either "rgb_array" or "human". Defaults to "rgb_array".
        n_substeps (int, optional): Number of sim substeps when step() is called. Defaults to 20.
        background_color (np.ndarray, optional): The background color as (red, green, blue).
            Defaults to np.array([223, 54, 45]).
        renderer (str, optional): Renderer, either "Tiny" or "OpenGL". Only used when render mode is
            "rgb_array"; "human" render mode always uses the OpenGL (GUI) renderer. Defaults to "Tiny".
    """
24 |
25 | def __init__(
26 | self,
27 | render_mode: str = "rgb_array",
28 | n_substeps: int = 20,
29 | background_color: Optional[np.ndarray] = None,
30 | renderer: str = "Tiny",
31 | ) -> None:
32 | self.render_mode = render_mode
33 | background_color = background_color if background_color is not None else np.array([223.0, 54.0, 45.0])
34 | self.background_color = background_color.astype(np.float32) / 255
35 | options = "--background_color_red={} --background_color_green={} --background_color_blue={}".format(
36 | *self.background_color
37 | )
38 | if self.render_mode == "human":
39 | self.connection_mode = p.GUI
40 | elif self.render_mode == "rgb_array":
41 | if renderer == "OpenGL":
42 | self.connection_mode = p.GUI
43 | elif renderer == "Tiny":
44 | self.connection_mode = p.DIRECT
45 | else:
46 | raise ValueError("The 'renderer' argument is must be in {'Tiny', 'OpenGL'}")
47 | else:
48 | raise ValueError("The 'render' argument is must be in {'rgb_array', 'human'}")
49 | self.physics_client = bc.BulletClient(connection_mode=self.connection_mode, options=options)
50 | self.physics_client.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
51 | self.physics_client.configureDebugVisualizer(p.COV_ENABLE_MOUSE_PICKING, 0)
52 |
53 | self.n_substeps = n_substeps
54 | self.timestep = 1.0 / 500
55 | self.physics_client.setTimeStep(self.timestep)
56 | self.physics_client.resetSimulation()
57 | self.physics_client.setAdditionalSearchPath(pybullet_data.getDataPath())
58 | self.physics_client.setGravity(0, 0, -9.81)
59 | self._bodies_idx = {}
60 |
61 | @property
62 | def dt(self):
63 | """Timestep."""
64 | return self.timestep * self.n_substeps
65 |
    def step(self) -> None:
        """Step the simulation: run all `n_substeps` physics substeps at once."""
        for _ in range(self.n_substeps):
            self.physics_client.stepSimulation()
70 |
    def close(self) -> None:
        """Disconnect from the physics server if still connected (safe to call repeatedly)."""
        if self.physics_client.isConnected():
            self.physics_client.disconnect()
75 |
    def save_state(self) -> int:
        """Save the current simulation state in memory.

        Returns:
            int: A state id assigned by PyBullet, which is the first non-negative
                integer available for indexing.
        """
        return self.physics_client.saveState()
84 |
    def restore_state(self, state_id: int) -> None:
        """Restore a previously saved simulation state.

        Args:
            state_id (int): The simulation state id returned by save_state().
        """
        self.physics_client.restoreState(state_id)
92 |
    def remove_state(self, state_id: int) -> None:
        """Remove a saved simulation state, making its id available again for save_state().

        Args:
            state_id (int): The simulation state id returned by save_state().
        """
        self.physics_client.removeState(state_id)
100 |
    def render(
        self,
        width: int = 720,
        height: int = 480,
        target_position: Optional[np.ndarray] = None,
        distance: float = 1.4,
        yaw: float = 45,
        pitch: float = -30,
        roll: float = 0,
    ) -> Optional[np.ndarray]:
        """Render.

        If render mode is "rgb_array", return an RGB array of the scene. Else, do nothing and return None.

        Args:
            width (int, optional): Image width. Defaults to 720.
            height (int, optional): Image height. Defaults to 480.
            target_position (np.ndarray, optional): Camera targeting this position, as (x, y, z).
                Defaults to [0., 0., 0.].
            distance (float, optional): Distance of the camera. Defaults to 1.4.
            yaw (float, optional): Yaw of the camera. Defaults to 45.
            pitch (float, optional): Pitch of the camera. Defaults to -30.
            roll (float, optional): Roll of the camera. Defaults to 0.

        Returns:
            RGB np.ndarray or None: An RGB array if render mode is "rgb_array", else None.
        """
        if self.render_mode == "rgb_array":
            target_position = target_position if target_position is not None else np.zeros(3)
            view_matrix = self.physics_client.computeViewMatrixFromYawPitchRoll(
                cameraTargetPosition=target_position,
                distance=distance,
                yaw=yaw,
                pitch=pitch,
                roll=roll,
                upAxisIndex=2,
            )
            proj_matrix = self.physics_client.computeProjectionMatrixFOV(
                fov=60, aspect=float(width) / height, nearVal=0.1, farVal=100.0
            )
            (_, _, rgba, _, _) = self.physics_client.getCameraImage(
                width=width,
                height=height,
                viewMatrix=view_matrix,
                projectionMatrix=proj_matrix,
                shadow=True,
                renderer=p.ER_BULLET_HARDWARE_OPENGL,
            )
            # With Python 3.10, pybullet returns a flat tuple instead of an array, so build the array here.
            rgba = np.array(rgba, dtype=np.uint8).reshape((height, width, 4))
            return rgba[..., :3]
154 |
155 | def get_base_position(self, body: str) -> np.ndarray:
156 | """Get the position of the body.
157 |
158 | Args:
159 | body (str): Body unique name.
160 |
161 | Returns:
162 | np.ndarray: The position, as (x, y, z).
163 | """
164 | position = self.physics_client.getBasePositionAndOrientation(self._bodies_idx[body])[0]
165 | return np.array(position)
166 |
167 | def get_base_orientation(self, body: str) -> np.ndarray:
168 | """Get the orientation of the body.
169 |
170 | Args:
171 | body (str): Body unique name.
172 |
173 | Returns:
174 | np.ndarray: The orientation, as quaternion (x, y, z, w).
175 | """
176 | orientation = self.physics_client.getBasePositionAndOrientation(self._bodies_idx[body])[1]
177 | return np.array(orientation)
178 |
179 | def get_base_rotation(self, body: str, type: str = "euler") -> np.ndarray:
180 | """Get the rotation of the body.
181 |
182 | Args:
183 | body (str): Body unique name.
184 | type (str): Type of angle, either "euler" or "quaternion"
185 |
186 | Returns:
187 | np.ndarray: The rotation.
188 | """
189 | quaternion = self.get_base_orientation(body)
190 | if type == "euler":
191 | rotation = self.physics_client.getEulerFromQuaternion(quaternion)
192 | return np.array(rotation)
193 | elif type == "quaternion":
194 | return np.array(quaternion)
195 | else:
196 | raise ValueError("""type must be "euler" or "quaternion".""")
197 |
198 | def get_base_velocity(self, body: str) -> np.ndarray:
199 | """Get the velocity of the body.
200 |
201 | Args:
202 | body (str): Body unique name.
203 |
204 | Returns:
205 | np.ndarray: The velocity, as (vx, vy, vz).
206 | """
207 | velocity = self.physics_client.getBaseVelocity(self._bodies_idx[body])[0]
208 | return np.array(velocity)
209 |
210 | def get_base_angular_velocity(self, body: str) -> np.ndarray:
211 | """Get the angular velocity of the body.
212 |
213 | Args:
214 | body (str): Body unique name.
215 |
216 | Returns:
217 | np.ndarray: The angular velocity, as (wx, wy, wz).
218 | """
219 | angular_velocity = self.physics_client.getBaseVelocity(self._bodies_idx[body])[1]
220 | return np.array(angular_velocity)
221 |
222 | def get_link_position(self, body: str, link: int) -> np.ndarray:
223 | """Get the position of the link of the body.
224 |
225 | Args:
226 | body (str): Body unique name.
227 | link (int): Link index in the body.
228 |
229 | Returns:
230 | np.ndarray: The position, as (x, y, z).
231 | """
232 | position = self.physics_client.getLinkState(self._bodies_idx[body], link)[0]
233 | return np.array(position)
234 |
    def get_link_orientation(self, body: str, link: int) -> np.ndarray:
        """Get the orientation of the link of the body.

        Args:
            body (str): Body unique name.
            link (int): Link index in the body.

        Returns:
            np.ndarray: The orientation, as quaternion (x, y, z, w).
        """
        orientation = self.physics_client.getLinkState(self._bodies_idx[body], link)[1]
        return np.array(orientation)
247 |
248 | def get_link_velocity(self, body: str, link: int) -> np.ndarray:
249 | """Get the velocity of the link of the body.
250 |
251 | Args:
252 | body (str): Body unique name.
253 | link (int): Link index in the body.
254 |
255 | Returns:
256 | np.ndarray: The velocity, as (vx, vy, vz).
257 | """
258 | velocity = self.physics_client.getLinkState(self._bodies_idx[body], link, computeLinkVelocity=True)[6]
259 | return np.array(velocity)
260 |
261 | def get_link_angular_velocity(self, body: str, link: int) -> np.ndarray:
262 | """Get the angular velocity of the link of the body.
263 |
264 | Args:
265 | body (str): Body unique name.
266 | link (int): Link index in the body.
267 |
268 | Returns:
269 | np.ndarray: The angular velocity, as (wx, wy, wz).
270 | """
271 | angular_velocity = self.physics_client.getLinkState(self._bodies_idx[body], link, computeLinkVelocity=True)[7]
272 | return np.array(angular_velocity)
273 |
274 | def get_joint_angle(self, body: str, joint: int) -> float:
275 | """Get the angle of the joint of the body.
276 |
277 | Args:
278 | body (str): Body unique name.
279 | joint (int): Joint index in the body
280 |
281 | Returns:
282 | float: The angle.
283 | """
284 | return self.physics_client.getJointState(self._bodies_idx[body], joint)[0]
285 |
286 | def get_joint_velocity(self, body: str, joint: int) -> float:
287 | """Get the velocity of the joint of the body.
288 |
289 | Args:
290 | body (str): Body unique name.
291 | joint (int): Joint index in the body
292 |
293 | Returns:
294 | float: The velocity.
295 | """
296 | return self.physics_client.getJointState(self._bodies_idx[body], joint)[1]
297 |
    def set_base_pose(self, body: str, position: np.ndarray, orientation: np.ndarray) -> None:
        """Set the position and orientation of the body.

        Args:
            body (str): Body unique name.
            position (np.ndarray): The position, as (x, y, z).
            orientation (np.ndarray): The target orientation as quaternion (x, y, z, w),
                or as Euler angles (rx, ry, rz), which are converted to a quaternion.
        """
        if len(orientation) == 3:  # Euler angles were given; convert to quaternion
            orientation = self.physics_client.getQuaternionFromEuler(orientation)
        self.physics_client.resetBasePositionAndOrientation(
            bodyUniqueId=self._bodies_idx[body], posObj=position, ornObj=orientation
        )
311 |
312 | def set_joint_angles(self, body: str, joints: np.ndarray, angles: np.ndarray) -> None:
313 | """Set the angles of the joints of the body.
314 |
315 | Args:
316 | body (str): Body unique name.
317 | joints (np.ndarray): List of joint indices, as a list of ints.
318 | angles (np.ndarray): List of target angles, as a list of floats.
319 | """
320 | for joint, angle in zip(joints, angles):
321 | self.set_joint_angle(body=body, joint=joint, angle=angle)
322 |
323 | def set_joint_angle(self, body: str, joint: int, angle: float) -> None:
324 | """Set the angle of the joint of the body.
325 |
326 | Args:
327 | body (str): Body unique name.
328 | joint (int): Joint index in the body.
329 | angle (float): Target angle.
330 | """
331 | self.physics_client.resetJointState(bodyUniqueId=self._bodies_idx[body], jointIndex=joint, targetValue=angle)
332 |
333 | def control_joints(self, body: str, joints: np.ndarray, target_angles: np.ndarray, forces: np.ndarray) -> None:
334 | """Control the joints motor.
335 |
336 | Args:
337 | body (str): Body unique name.
338 | joints (np.ndarray): List of joint indices, as a list of ints.
339 | target_angles (np.ndarray): List of target angles, as a list of floats.
340 | forces (np.ndarray): Forces to apply, as a list of floats.
341 | """
342 | self.physics_client.setJointMotorControlArray(
343 | self._bodies_idx[body],
344 | jointIndices=joints,
345 | controlMode=self.physics_client.POSITION_CONTROL,
346 | targetPositions=target_angles,
347 | forces=forces,
348 | )
349 |
350 | def inverse_kinematics(self, body: str, link: int, position: np.ndarray, orientation: np.ndarray) -> np.ndarray:
351 | """Compute the inverse kinematics and return the new joint state.
352 |
353 | Args:
354 | body (str): Body unique name.
355 | link (int): Link index in the body.
356 | position (np.ndarray): Desired position of the end-effector, as (x, y, z).
357 | orientation (np.ndarray): Desired orientation of the end-effector as quaternion (x, y, z, w).
358 |
359 | Returns:
360 | np.ndarray: The new joint state.
361 | """
362 | joint_state = self.physics_client.calculateInverseKinematics(
363 | bodyIndex=self._bodies_idx[body],
364 | endEffectorLinkIndex=link,
365 | targetPosition=position,
366 | targetOrientation=orientation,
367 | )
368 | return np.array(joint_state)
369 |
    def place_visualizer(self, target_position: np.ndarray, distance: float, yaw: float, pitch: float) -> None:
        """Orient the debug-visualizer camera used for rendering.

        Args:
            target_position (np.ndarray): Target position, as (x, y, z).
            distance (float): Distance from the target position.
            yaw (float): Yaw.
            pitch (float): Pitch.
        """
        self.physics_client.resetDebugVisualizerCamera(
            cameraDistance=distance,
            cameraYaw=yaw,
            cameraPitch=pitch,
            cameraTargetPosition=target_position,
        )
385 |
386 | @contextmanager
387 | def no_rendering(self) -> Iterator[None]:
388 | """Disable rendering within this context."""
389 | self.physics_client.configureDebugVisualizer(self.physics_client.COV_ENABLE_RENDERING, 0)
390 | yield
391 | self.physics_client.configureDebugVisualizer(self.physics_client.COV_ENABLE_RENDERING, 1)
392 |
    def loadURDF(self, body_name: str, **kwargs: Any) -> None:
        """Load a URDF file and register the resulting body under `body_name`.

        Args:
            body_name (str): The name of the body. Must be unique in the sim.
            **kwargs: Forwarded verbatim to `physics_client.loadURDF`.
        """
        self._bodies_idx[body_name] = self.physics_client.loadURDF(**kwargs)
400 |
401 | def create_box(
402 | self,
403 | body_name: str,
404 | half_extents: np.ndarray,
405 | mass: float,
406 | position: np.ndarray,
407 | rgba_color: Optional[np.ndarray] = None,
408 | specular_color: Optional[np.ndarray] = None,
409 | ghost: bool = False,
410 | lateral_friction: Optional[float] = None,
411 | spinning_friction: Optional[float] = None,
412 | texture: Optional[str] = None,
413 | ) -> None:
414 | """Create a box.
415 |
416 | Args:
417 | body_name (str): The name of the body. Must be unique in the sim.
418 | half_extents (np.ndarray): Half size of the box in meters, as (x, y, z).
419 | mass (float): The mass in kg.
420 | position (np.ndarray): The position, as (x, y, z).
421 | rgba_color (np.ndarray, optional): Body color, as (r, g, b, a). Defaults as [0, 0, 0, 0]
422 | specular_color (np.ndarray, optional): Specular color, as (r, g, b). Defaults to [0, 0, 0].
423 | ghost (bool, optional): Whether the body can collide. Defaults to False.
424 | lateral_friction (float or None, optional): Lateral friction. If None, use the default pybullet
425 | value. Defaults to None.
426 | spinning_friction (float or None, optional): Spinning friction. If None, use the default pybullet
427 | value. Defaults to None.
428 | texture (str or None, optional): Texture file name. Defaults to None.
429 | """
430 | rgba_color = rgba_color if rgba_color is not None else np.zeros(4)
431 | specular_color = specular_color if specular_color is not None else np.zeros(3)
432 | visual_kwargs = {
433 | "halfExtents": half_extents,
434 | "specularColor": specular_color,
435 | "rgbaColor": rgba_color,
436 | }
437 | collision_kwargs = {"halfExtents": half_extents}
438 | self._create_geometry(
439 | body_name,
440 | geom_type=self.physics_client.GEOM_BOX,
441 | mass=mass,
442 | position=position,
443 | ghost=ghost,
444 | lateral_friction=lateral_friction,
445 | spinning_friction=spinning_friction,
446 | visual_kwargs=visual_kwargs,
447 | collision_kwargs=collision_kwargs,
448 | )
449 | if texture is not None:
450 | texture_path = os.path.join(panda_gym.assets.get_data_path(), texture)
451 | texture_uid = self.physics_client.loadTexture(texture_path)
452 | self.physics_client.changeVisualShape(self._bodies_idx[body_name], -1, textureUniqueId=texture_uid)
453 |
454 | def create_cylinder(
455 | self,
456 | body_name: str,
457 | radius: float,
458 | height: float,
459 | mass: float,
460 | position: np.ndarray,
461 | rgba_color: Optional[np.ndarray] = None,
462 | specular_color: Optional[np.ndarray] = None,
463 | ghost: bool = False,
464 | lateral_friction: Optional[float] = None,
465 | spinning_friction: Optional[float] = None,
466 | ) -> None:
467 | """Create a cylinder.
468 |
469 | Args:
470 | body_name (str): The name of the body. Must be unique in the sim.
471 | radius (float): The radius in meter.
472 | height (float): The height in meter.
473 | mass (float): The mass in kg.
474 | position (np.ndarray): The position, as (x, y, z).
475 | rgba_color (np.ndarray, optional): Body color, as (r, g, b, a). Defaults as [0, 0, 0, 0]
476 | specular_color (np.ndarray, optional): Specular color, as (r, g, b). Defaults to [0, 0, 0].
477 | ghost (bool, optional): Whether the body can collide. Defaults to False.
478 | lateral_friction (float or None, optional): Lateral friction. If None, use the default pybullet
479 | value. Defaults to None.
480 | spinning_friction (float or None, optional): Spinning friction. If None, use the default pybullet
481 | value. Defaults to None.
482 | """
483 | rgba_color = rgba_color if rgba_color is not None else np.zeros(4)
484 | specular_color = specular_color if specular_color is not None else np.zeros(3)
485 | visual_kwargs = {
486 | "radius": radius,
487 | "length": height,
488 | "specularColor": specular_color,
489 | "rgbaColor": rgba_color,
490 | }
491 | collision_kwargs = {"radius": radius, "height": height}
492 | self._create_geometry(
493 | body_name,
494 | geom_type=self.physics_client.GEOM_CYLINDER,
495 | mass=mass,
496 | position=position,
497 | ghost=ghost,
498 | lateral_friction=lateral_friction,
499 | spinning_friction=spinning_friction,
500 | visual_kwargs=visual_kwargs,
501 | collision_kwargs=collision_kwargs,
502 | )
503 |
def create_sphere(
    self,
    body_name: str,
    radius: float,
    mass: float,
    position: np.ndarray,
    rgba_color: Optional[np.ndarray] = None,
    specular_color: Optional[np.ndarray] = None,
    ghost: bool = False,
    lateral_friction: Optional[float] = None,
    spinning_friction: Optional[float] = None,
) -> None:
    """Create a sphere.

    Args:
        body_name (str): The name of the body. Must be unique in the sim.
        radius (float): The radius in meter.
        mass (float): The mass in kg.
        position (np.ndarray): The position, as (x, y, z).
        rgba_color (np.ndarray, optional): Body color, as (r, g, b, a). Defaults to [0, 0, 0, 0].
        specular_color (np.ndarray, optional): Specular color, as (r, g, b). Defaults to [0, 0, 0].
        ghost (bool, optional): Whether the body can collide. Defaults to False.
        lateral_friction (float or None, optional): Lateral friction. If None, use the default pybullet
            value. Defaults to None.
        spinning_friction (float or None, optional): Spinning friction. If None, use the default pybullet
            value. Defaults to None.
    """
    # Fall back to fully transparent black when no colors are given.
    if rgba_color is None:
        rgba_color = np.zeros(4)
    if specular_color is None:
        specular_color = np.zeros(3)
    self._create_geometry(
        body_name,
        geom_type=self.physics_client.GEOM_SPHERE,
        mass=mass,
        position=position,
        ghost=ghost,
        lateral_friction=lateral_friction,
        spinning_friction=spinning_friction,
        visual_kwargs={"radius": radius, "specularColor": specular_color, "rgbaColor": rgba_color},
        collision_kwargs={"radius": radius},
    )
550 |
def _create_geometry(
    self,
    body_name: str,
    geom_type: int,
    mass: float = 0.0,
    position: Optional[np.ndarray] = None,
    ghost: bool = False,
    lateral_friction: Optional[float] = None,
    spinning_friction: Optional[float] = None,
    visual_kwargs: Optional[Dict[str, Any]] = None,
    collision_kwargs: Optional[Dict[str, Any]] = None,
) -> None:
    """Create a geometry.

    Args:
        body_name (str): The name of the body. Must be unique in the sim.
        geom_type (int): The geometry type. See self.physics_client.GEOM_.
        mass (float, optional): The mass in kg. Defaults to 0.
        position (np.ndarray, optional): The position, as (x, y, z). Defaults to [0, 0, 0].
        ghost (bool, optional): Whether the body can collide. Defaults to False.
        lateral_friction (float or None, optional): Lateral friction. If None, use the default pybullet
            value. Defaults to None.
        spinning_friction (float or None, optional): Spinning friction. If None, use the default pybullet
            value. Defaults to None.
        visual_kwargs (dict or None, optional): Visual kwargs. Defaults to None (treated as {}).
        collision_kwargs (dict or None, optional): Collision kwargs. Defaults to None (treated as {}).
    """
    # Use None sentinels instead of mutable {} defaults: a shared dict default
    # is evaluated once and reused by every call, which can silently leak state.
    visual_kwargs = {} if visual_kwargs is None else visual_kwargs
    collision_kwargs = {} if collision_kwargs is None else collision_kwargs
    position = position if position is not None else np.zeros(3)
    baseVisualShapeIndex = self.physics_client.createVisualShape(geom_type, **visual_kwargs)
    if not ghost:
        baseCollisionShapeIndex = self.physics_client.createCollisionShape(geom_type, **collision_kwargs)
    else:
        # Ghost bodies get no collision shape: they are purely visual.
        baseCollisionShapeIndex = -1
    self._bodies_idx[body_name] = self.physics_client.createMultiBody(
        baseVisualShapeIndex=baseVisualShapeIndex,
        baseCollisionShapeIndex=baseCollisionShapeIndex,
        baseMass=mass,
        basePosition=position,
    )

    if lateral_friction is not None:
        self.set_lateral_friction(body=body_name, link=-1, lateral_friction=lateral_friction)
    if spinning_friction is not None:
        self.set_spinning_friction(body=body_name, link=-1, spinning_friction=spinning_friction)
595 |
def create_plane(self, z_offset: float) -> None:
    """Create a plane. (Actually, it is a thin box.)

    Args:
        z_offset (float): Offset of the plane.
    """
    # Shift the thin box down by its half-thickness (0.01) so its top
    # face sits exactly at z_offset.
    plane_position = np.array([0.0, 0.0, z_offset - 0.01])
    self.create_box(
        body_name="plane",
        half_extents=np.array([3.0, 3.0, 0.01]),
        mass=0.0,
        position=plane_position,
        specular_color=np.zeros(3),
        rgba_color=np.array([0.15, 0.15, 0.15, 1.0]),
    )
610 |
def create_table(
    self,
    length: float,
    width: float,
    height: float,
    x_offset: float = 0.0,
    lateral_friction: Optional[float] = None,
    spinning_friction: Optional[float] = None,
) -> None:
    """Create a fixed table. Top is z=0, centered in y.

    Args:
        length (float): The length of the table (x direction).
        width (float): The width of the table (y direction).
        height (float): The height of the table.
        x_offset (float, optional): The offset in the x direction. Defaults to 0.0.
        lateral_friction (float or None, optional): Lateral friction. If None, use the default pybullet
            value. Defaults to None.
        spinning_friction (float or None, optional): Spinning friction. If None, use the default pybullet
            value. Defaults to None.
    """
    half_extents = np.array([length, width, height]) / 2
    # Center the box half a height below z=0 so the tabletop lies at z=0.
    table_position = np.array([x_offset, 0.0, -height / 2])
    self.create_box(
        body_name="table",
        half_extents=half_extents,
        mass=0.0,  # zero mass makes the body static/fixed
        position=table_position,
        specular_color=np.zeros(3),
        rgba_color=np.array([0.95, 0.95, 0.95, 1]),
        lateral_friction=lateral_friction,
        spinning_friction=spinning_friction,
    )
642 |
def set_lateral_friction(self, body: str, link: int, lateral_friction: float) -> None:
    """Set the lateral friction of a link.

    Args:
        body (str): Body unique name.
        link (int): Link index in the body (-1 for the base).
        lateral_friction (float): Lateral friction.
    """
    body_id = self._bodies_idx[body]
    self.physics_client.changeDynamics(bodyUniqueId=body_id, linkIndex=link, lateralFriction=lateral_friction)
656 |
def set_spinning_friction(self, body: str, link: int, spinning_friction: float) -> None:
    """Set the spinning friction of a link.

    Args:
        body (str): Body unique name.
        link (int): Link index in the body (-1 for the base).
        spinning_friction (float): Spinning friction.
    """
    body_id = self._bodies_idx[body]
    self.physics_client.changeDynamics(bodyUniqueId=body_id, linkIndex=link, spinningFriction=spinning_friction)
670 |
--------------------------------------------------------------------------------
/panda_gym/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
def distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Compute the Euclidean distance between two arrays. This function is vectorized.

    Args:
        a (np.ndarray): First array.
        b (np.ndarray): Second array.

    Returns:
        np.ndarray: The distance between the arrays.
    """
    assert a.shape == b.shape
    # Norm over the last axis, so batched inputs yield one distance per row.
    # Rounding at 1e-6 ensures determinism and avoids numerical noise.
    return np.round(np.linalg.norm(a - b, axis=-1), 6)
18 |
19 |
def angle_distance(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Compute the geodesic distance between two arrays of angles. This function is vectorized.

    Args:
        a (np.ndarray): First array.
        b (np.ndarray): Second array.

    Returns:
        np.ndarray: The geodesic distance between the angles, element-wise over
        any leading (batch) dimensions.
    """
    assert a.shape == b.shape
    # Bug fix: np.inner(a, b) computes an *outer* product over the leading
    # dimensions (an (N, 4) pair would yield an (N, N) matrix), so the function
    # was not actually vectorized. Reduce explicitly over the last axis instead;
    # for 1-D inputs the result is identical to np.inner.
    inner = np.sum(a * b, axis=-1)
    dist = 1 - inner**2
    return dist
33 |
--------------------------------------------------------------------------------
/panda_gym/version.txt:
--------------------------------------------------------------------------------
1 | 3.0.8
2 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
import os

from setuptools import find_packages, setup

# Single source of truth for the version: panda_gym/version.txt.
with open(os.path.join("panda_gym", "version.txt"), "r", encoding="utf-8") as file_handler:
    __version__ = file_handler.read().strip()

# The README becomes the PyPI long description. Read it as UTF-8 explicitly:
# it contains non-ASCII characters (e.g. "GALLOUÉDEC"), which would fail to
# decode under a cp1252 locale default (e.g. on Windows).
with open("README.md", "r", encoding="utf-8") as f:
    long_description = f.read()

setup(
    name="panda_gym",
    description="Set of robotic environments based on PyBullet physics engine and gymnasium.",
    author="Quentin GALLOUÉDEC",
    author_email="gallouedec.quentin@gmail.com",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/qgallouedec/panda-gym",
    packages=find_packages(),
    include_package_data=True,
    package_data={"panda_gym": ["version.txt"]},
    version=__version__,
    install_requires=["gymnasium>=0.26", "pybullet", "numpy<2", "scipy"],
    extras_require={
        "develop": ["pytest-cov", "black", "isort", "pytype", "sphinx", "sphinx-rtd-theme"],
    },
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
    ],
)
35 |
--------------------------------------------------------------------------------
/test/envs_test.py:
--------------------------------------------------------------------------------
1 | import gymnasium as gym
2 | import pytest
3 | from gymnasium.utils.env_checker import check_env
4 |
5 | import panda_gym
6 |
7 |
def run_env(env):
    """Roll out 1000 random steps on a panda-gym env, resetting on episode end."""
    env.reset()
    for _ in range(1000):
        _, _, terminated, truncated, _ = env.step(env.action_space.sample())
        if terminated or truncated:
            env.reset()
    # Closing twice must be supported without raising.
    env.close()
    env.close()
19 |
20 |
@pytest.mark.parametrize("env_id", panda_gym.ENV_IDS)
def test_env(env_id):
    """Smoke-test every registered panda-gym env with random actions."""
    run_env(gym.make(env_id))


@pytest.mark.parametrize("env_id", panda_gym.ENV_IDS)
def test_check_env(env_id):
    """Validate every env against the gymnasium env checker."""
    unwrapped_env = gym.make(env_id).unwrapped
    check_env(unwrapped_env, skip_render_check=True)
32 |
--------------------------------------------------------------------------------
/test/import_test.py:
--------------------------------------------------------------------------------
def test_import():
    # Smoke test: the package must import cleanly, without errors.
    import panda_gym
3 |
--------------------------------------------------------------------------------
/test/pybullet_test.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 |
4 |
def test_import():
    # Importing the wrapper class alone must not start a simulation.
    from panda_gym.pybullet import PyBullet


def test_construct():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()


def test_close():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.close()


def test_step():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.step()
    sim.close()


def test_dt():
    from panda_gym.pybullet import PyBullet

    # One control step is expected to advance the simulation by 0.04 s.
    sim = PyBullet()
    assert sim.dt == 0.04
    sim.close()
36 |
37 |
def test_create_box():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.create_box("my_box", [0.5, 0.5, 0.5], 1.0, [0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 1.0])
    sim.close()


def test_get_base_position():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.create_box("my_box", [0.5, 0.5, 0.5], 1.0, [0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 1.0])
    position = sim.get_base_position("my_box")
    sim.close()
    # The box was spawned at the origin and no step has been simulated yet.
    assert np.allclose(position, np.zeros(3), atol=1e-7)


def test_get_base_velocity():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.create_box("my_box", [0.5, 0.5, 0.5], 1.0, [0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 1.0])
    sim.step()
    velocity = sim.get_base_velocity("my_box")
    sim.close()
    # After a single step the free box is falling (presumably one dt of
    # gravity, -9.81 * 0.04 ≈ -0.392 — consistent with the dt test above).
    assert np.allclose(velocity, [0.0, 0.0, -0.392], atol=1e-3)


def test_get_base_orientation():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.create_box("my_box", [0.5, 0.5, 0.5], 1.0, [0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 1.0])
    orientation = sim.get_base_orientation("my_box")
    sim.close()
    # Identity quaternion in (x, y, z, w) convention.
    assert np.allclose(orientation, [0.0, 0.0, 0.0, 1.0], atol=1e-3)


def test_get_base_rotation():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.create_box("my_box", [0.5, 0.5, 0.5], 1.0, [0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 1.0])
    rotation = sim.get_base_rotation("my_box")
    sim.close()
    assert np.allclose(rotation, [0.0, 0.0, 0.0], atol=1e-3)


def test_get_base_angular_velocity():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.create_box("my_box", [0.5, 0.5, 0.5], 1.0, [0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 1.0])
    angular_velocity = sim.get_base_angular_velocity("my_box")
    sim.close()
    assert np.allclose(angular_velocity, [0.0, 0.0, 0.0], atol=1e-3)
95 |
96 |
def test_load_URDF():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.loadURDF(
        body_name="panda",
        fileName="franka_panda/panda.urdf",
        basePosition=[0.0, 0.0, 0.0],
        useFixedBase=True,
    )
    sim.close()


def test_control_joints():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.loadURDF(
        body_name="panda",
        fileName="franka_panda/panda.urdf",
        basePosition=[0.0, 0.0, 0.0],
        useFixedBase=True,
    )
    sim.control_joints("panda", [5], [0.3], [5.0])
    sim.step()
    # Fix: the client was never closed here, leaking the physics connection
    # into subsequent tests. Close it like every other test in this file.
    sim.close()


def test_get_link_position():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.loadURDF(
        body_name="panda",
        fileName="franka_panda/panda.urdf",
        basePosition=[0.0, 0.0, 0.0],
        useFixedBase=True,
    )
    link_position = sim.get_link_position("panda", 1)
    assert np.allclose(link_position, [0.000, 0.060, 0.373], atol=1e-3)
    sim.close()


def test_get_link_orientation():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.loadURDF(
        body_name="panda",
        fileName="franka_panda/panda.urdf",
        basePosition=[0.0, 0.0, 0.0],
        useFixedBase=True,
    )
    sim.control_joints("panda", [5], [0.3], [5.0])
    sim.step()
    link_orientation = sim.get_link_orientation("panda", 5)
    assert np.allclose(link_orientation, [0.707, -0.02, 0.02, 0.707], atol=1e-3)
    sim.close()


def test_get_link_velocity():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.loadURDF(
        body_name="panda",
        fileName="franka_panda/panda.urdf",
        basePosition=[0.0, 0.0, 0.0],
        useFixedBase=True,
    )
    sim.control_joints("panda", [5], [0.3], [5.0])
    sim.step()
    link_velocity = sim.get_link_velocity("panda", 5)
    assert np.allclose(link_velocity, [-0.0068, 0.0000, 0.1186], atol=1e-3)
    sim.close()


def test_get_link_angular_velocity():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.loadURDF(
        body_name="panda",
        fileName="franka_panda/panda.urdf",
        basePosition=[0.0, 0.0, 0.0],
        useFixedBase=True,
    )
    sim.control_joints("panda", [5], [0.3], [5.0])
    sim.step()
    link_angular_velocity = sim.get_link_angular_velocity("panda", 5)
    assert np.allclose(link_angular_velocity, [0.000, -2.969, 0.000], atol=1e-3)
    sim.close()
188 |
189 |
def test_get_joint_angle():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.loadURDF(
        body_name="panda",
        fileName="franka_panda/panda.urdf",
        basePosition=[0.0, 0.0, 0.0],
        useFixedBase=True,
    )
    sim.control_joints("panda", [5], [0.3], [5.0])
    sim.step()
    joint_angle = sim.get_joint_angle("panda", 5)
    assert np.allclose(joint_angle, 0.063, atol=1e-3)
    sim.close()


def test_set_base_pose():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.create_box("my_box", [0.5, 0.5, 0.5], 1.0, [0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 1.0])
    sim.set_base_pose("my_box", [1.0, 1.0, 1.0], [0.707, -0.02, 0.02, 0.707])
    position = sim.get_base_position("my_box")
    orientation = sim.get_base_orientation("my_box")
    sim.close()
    # The getters must read back exactly the pose that was set.
    assert np.allclose(position, [1.0, 1.0, 1.0], atol=1e-3)
    assert np.allclose(orientation, [0.707, -0.02, 0.02, 0.707], atol=1e-3)


def test_set_joint_angle():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.loadURDF(
        body_name="panda",
        fileName="franka_panda/panda.urdf",
        basePosition=[0.0, 0.0, 0.0],
        useFixedBase=True,
    )
    sim.set_joint_angle("panda", 3, 0.4)
    joint_angle = sim.get_joint_angle("panda", 3)
    assert np.allclose(joint_angle, 0.4, atol=1e-3)
    sim.close()


def test_set_joint_angles():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.loadURDF(
        body_name="panda",
        fileName="franka_panda/panda.urdf",
        basePosition=[0.0, 0.0, 0.0],
        useFixedBase=True,
    )
    sim.set_joint_angles("panda", [3, 4], [0.4, 0.5])
    angle_of_joint_3 = sim.get_joint_angle("panda", 3)
    angle_of_joint_4 = sim.get_joint_angle("panda", 4)
    assert np.allclose(angle_of_joint_3, 0.4, atol=1e-3)
    assert np.allclose(angle_of_joint_4, 0.5, atol=1e-3)
    sim.close()


def test_inverse_kinematics():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.loadURDF(
        body_name="panda",
        fileName="franka_panda/panda.urdf",
        basePosition=[0.0, 0.0, 0.0],
        useFixedBase=True,
    )
    joint_angles = sim.inverse_kinematics("panda", 6, [0.4, 0.5, 0.6], [0.707, -0.02, 0.02, 0.707])
    assert np.allclose(joint_angles, [1.000, 1.223, -1.113, -0.021, -0.917, 0.666, -0.499, 0.0, 0.0], atol=1e-3)
    sim.close()
267 |
268 |
def test_place_visualizer():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.place_visualizer([0.1, 0.2, 0.3], 5.0, 0.3, 0.4)
    # Fix: the client was never closed here, leaking the physics connection
    # into subsequent tests. Close it like every other test in this file.
    sim.close()


def test_create_cylinder():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.create_cylinder("my_cylinder", 0.5, 1.0, 1.0, [0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 1.0])
    sim.close()


def test_create_sphere():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.create_sphere("my_sphere", 0.5, 1.0, [0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 1.0])
    sim.close()


def test_create_plane():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.create_plane(0.5)
    sim.close()


def test_create_table():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.create_table(0.5, 0.6, 0.4)
    sim.close()


def test_set_lateral_friction():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.create_box("my_box", [0.5, 0.5, 0.5], 1.0, [0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 1.0])
    sim.set_lateral_friction("my_box", 0, 0.5)
    sim.close()


def test_set_spinning_friction():
    from panda_gym.pybullet import PyBullet

    sim = PyBullet()
    sim.create_box("my_box", [0.5, 0.5, 0.5], 1.0, [0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 1.0])
    sim.set_spinning_friction("my_box", 0, 0.5)
    sim.close()
324 |
--------------------------------------------------------------------------------
/test/render_test.py:
--------------------------------------------------------------------------------
1 | import gymnasium as gym
2 |
3 | import panda_gym
4 |
5 |
def test_render():
    """Default rgb_array rendering yields 480x720 RGB frames."""
    env = gym.make("PandaReach-v3", render_mode="rgb_array")
    env.reset()

    for _ in range(100):
        _, _, terminated, truncated, _ = env.step(env.action_space.sample())
        frame = env.render()
        assert frame.shape == (480, 720, 3)
        if terminated or truncated:
            env.reset()

    env.close()


def test_new_render_shape():
    """Custom render_height/render_width must be honored by render()."""
    env = gym.make("PandaReach-v3", render_mode="rgb_array", render_height=48, render_width=84)
    env.reset()

    for _ in range(100):
        _, _, terminated, truncated, _ = env.step(env.action_space.sample())
        frame = env.render()
        assert frame.shape == (48, 84, 3)
        if terminated or truncated:
            env.reset()

    env.close()
32 |
--------------------------------------------------------------------------------
/test/save_and_restore_test.py:
--------------------------------------------------------------------------------
1 | import gymnasium as gym
2 | import numpy as np
3 | import pybullet
4 | import pytest
5 |
6 | import panda_gym
7 |
8 |
def test_save_and_restore_state():
    env = gym.make("PandaReach-v3")
    env.reset()
    state_id = env.unwrapped.save_state()

    # Take one action from the saved state.
    action = env.action_space.sample()
    observation_first, _, _, _, _ = env.step(action)

    # Go back to the saved state and replay the exact same action.
    env.reset()
    env.unwrapped.restore_state(state_id)
    observation_second, _, _, _, _ = env.step(action)

    # Replaying from the restored state must reproduce the observation.
    for key in ("achieved_goal", "observation", "desired_goal"):
        assert np.all(observation_first[key] == observation_second[key])


def test_remove_state():
    env = gym.make("PandaReach-v3")
    env.reset()
    state_id = env.unwrapped.save_state()
    env.unwrapped.remove_state(state_id)
    # Restoring a removed state must raise a pybullet error.
    with pytest.raises(pybullet.error):
        env.unwrapped.restore_state(state_id)
37 |
--------------------------------------------------------------------------------
/test/seed_test.py:
--------------------------------------------------------------------------------
1 | import gymnasium as gym
2 | import numpy as np
3 |
4 | import panda_gym
5 |
6 |
def _assert_deterministic_rollout(env_id, seed, actions):
    """Run the same seeded action sequence twice on one env and assert that the
    final observations (observation, achieved_goal, desired_goal) match.

    Factors out the rollout loop that was copy-pasted across all five seed tests.
    """
    env = gym.make(env_id)
    final_observations = []
    for _ in range(2):
        env.reset(seed=seed)
        for action in actions:
            observation, _, terminated, truncated, _ = env.step(action)
            if terminated or truncated:
                observation, _ = env.reset()
        final_observations.append(observation)

    for key in ("observation", "achieved_goal", "desired_goal"):
        assert np.allclose(final_observations[0][key], final_observations[1][key])


def test_seed_reach():
    actions = [
        np.array([-0.931, 0.979, -0.385]),
        np.array([-0.562, 0.391, -0.532]),
        np.array([0.042, 0.254, -0.624]),
        np.array([0.465, 0.745, 0.284]),
        np.array([-0.237, 0.995, -0.425]),
        np.array([0.67, 0.472, 0.972]),
    ]
    _assert_deterministic_rollout("PandaReach-v3", 12345, actions)


def test_seed_push():
    actions = [
        np.array([0.925, 0.352, -0.014]),
        np.array([0.400, -0.018, -0.042]),
        np.array([0.308, 0.189, -0.943]),
        np.array([-0.556, 0.209, 0.907]),
        np.array([-0.862, -0.243, 0.835]),
        np.array([-0.552, -0.262, 0.317]),
    ]
    _assert_deterministic_rollout("PandaPush-v3", 6789, actions)


def test_seed_slide():
    actions = [
        np.array([0.245, 0.786, 0.329]),
        np.array([-0.414, 0.343, -0.839]),
        np.array([0.549, 0.047, -0.857]),
        np.array([0.744, -0.507, 0.092]),
        np.array([-0.202, -0.939, -0.945]),
        np.array([-0.97, -0.616, 0.472]),
    ]
    _assert_deterministic_rollout("PandaSlide-v3", 13795, actions)


def test_seed_pick_and_place():
    actions = [
        np.array([0.429, -0.287, 0.804, -0.592]),
        np.array([0.351, -0.136, 0.296, -0.223]),
        np.array([-0.187, 0.706, -0.988, 0.972]),
        np.array([-0.389, -0.249, 0.374, -0.389]),
        np.array([-0.191, -0.297, -0.739, 0.633]),
        np.array([0.093, 0.242, -0.11, -0.949]),
    ]
    _assert_deterministic_rollout("PandaPickAndPlace-v3", 794512, actions)


def test_seed_stack():
    actions = [
        np.array([-0.609, 0.73, -0.433, 0.76]),
        np.array([0.414, 0.327, 0.275, -0.196]),
        np.array([-0.3, 0.589, -0.712, 0.683]),
        np.array([0.772, 0.333, -0.537, -0.253]),
        np.array([0.784, -0.014, -0.997, -0.118]),
        np.array([-0.12, -0.958, -0.744, -0.98]),
    ]
    _assert_deterministic_rollout("PandaStack-v3", 657894, actions)
123 |
--------------------------------------------------------------------------------