├── .flake8 ├── .github └── workflows │ ├── build_pr.yml │ ├── doc_test.yml │ ├── docs.yml │ ├── flake8-problem-matcher.json │ ├── git.yml │ ├── lint.yml │ ├── mypy-problem-matcher.json │ ├── pr_todo_checks.yml │ └── test.yml ├── .gitignore ├── .readthedocs.yml ├── CHANGELOG.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── demos ├── demo_cameras.py ├── demo_control.py ├── demo_inverse_kinematics.py ├── demo_load_gym_env.py ├── demo_plain_torque_control.py ├── demo_random_policy.py └── demo_trifinger_platform.py ├── docs ├── Makefile ├── _static │ ├── .keep │ └── custom.css ├── _templates │ └── .keep ├── api │ ├── camera.rst │ ├── collision_objects.rst │ ├── finger_types_data.rst │ ├── pinocchio_utils.rst │ ├── sim_finger.rst │ ├── trifingerplatform.rst │ └── visual_objects.rst ├── conf.py ├── getting_started │ ├── finger_types.rst │ ├── first_steps.rst │ └── installation.rst ├── images │ ├── all_finger_types.jpg │ ├── applied_action_dependency.png │ ├── com_inertias.png │ ├── fingeredu.jpg │ ├── fingerone.jpg │ ├── fingerpro.jpg │ ├── hand.JPG │ ├── multi_object.JPG │ ├── sim_trifingerpro.jpg │ ├── trifingeredu.jpg │ ├── trifingerone.jpg │ ├── trifingerpro.jpg │ └── workspace.png ├── index.rst ├── requirements.txt ├── sim_finger_screenshots.py └── simreal │ ├── simvsreal.rst │ ├── simwithreal.rst │ ├── src_trifinger_reach-py.rst │ └── timesteplogic.rst ├── package.xml ├── pyproject.toml ├── resource └── trifinger_simulation ├── scripts ├── check_position_control_accuracy.py ├── playback_data.py ├── plot_position_error.py ├── plot_tip_position_trajectories.py ├── profiling.py ├── rrc_evaluate.py ├── screenshot_mode.py ├── trifingerpro_model_test.py └── view_camera_log_real_and_sim.py ├── setup.cfg ├── setup.py ├── tests ├── test_camera.py ├── test_camera │ ├── camera180_full.yml │ ├── camera300_full.yml │ └── camera60_full.yml ├── test_cube_env.py ├── test_determinism.py ├── test_finger_types_data.py ├── test_loading_urdfs.py ├── test_parallel_instances.py ├── test_reset_joints.py ├── test_robot_equivalent_interface.py ├── test_sample.py ├── test_sim_finger.py ├── test_tasks_move_cube.py ├── test_tasks_move_cube_on_trajectory.py ├── test_tasks_move_cube_on_trajectory │ ├── bad_goal.json │ ├── good_goal.json │ └── no_goal.json ├── test_tasks_move_cuboid.py ├── test_tasks_rearrange_dice.py ├── test_tasks_rearrange_dice │ ├── bad_goal.json │ ├── good_goal.json │ └── no_goal.json └── test_trifinger_platform.py └── trifinger_simulation ├── __init__.py ├── action.py ├── camera.py ├── collision_objects.py ├── data ├── camera_params │ ├── camera180_cropped.yml │ ├── camera180_cropped_and_downsampled.yml │ ├── camera180_full.yml │ ├── camera300_cropped.yml │ ├── camera300_cropped_and_downsampled.yml │ ├── camera300_full.yml │ ├── camera60_cropped.yml │ ├── camera60_cropped_and_downsampled.yml │ └── camera60_full.yml └── cube_v2 │ ├── cube_v2.blend │ ├── cube_v2.mtl │ ├── cube_v2.obj │ ├── cube_v2.urdf │ └── cube_v2_texture.png ├── finger_types_data.py ├── gym_wrapper ├── __init__.py ├── data_logger.py ├── envs │ ├── __init__.py │ ├── cube_env.py │ ├── cube_trajectory_env.py │ ├── trifinger_push.py │ └── trifinger_reach.py ├── finger_spaces.py └── utils.py ├── observation.py ├── pinocchio_utils.py ├── py.typed ├── real_finger.py ├── sample.py ├── sim_finger.py ├── tasks ├── __init__.py ├── move_cube │ ├── __init__.py │ ├── __main__.py │ ├── evaluate_policy.py │ ├── replay_action_log.py │ ├── run_evaluate_policy.py │ └── run_replay.py ├── move_cube_on_trajectory │ ├── __init__.py │ ├── __main__.py 
│ ├── evaluate_policy.py │ ├── replay_action_log.py │ ├── run_evaluate_policy.py │ └── run_replay.py ├── move_cuboid.py └── rearrange_dice │ ├── __init__.py │ ├── __main__.py │ ├── test_reward.py │ └── utils.py ├── trifinger_platform.py ├── trifingerpro_limits.py └── visual_objects.py /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length=79 3 | ignore=E203,W503,E501 4 | -------------------------------------------------------------------------------- /.github/workflows/build_pr.yml: -------------------------------------------------------------------------------- 1 | # Build the package and run tests, using the latest trifinger_user Apptainer 2 | # image. 3 | # The Apptainer image has a full ROBOT_FINGERS workspace installed, so all 4 | # dependencies are there and only the checked-out package needs to be built, 5 | # not the whole workspace. 6 | # 7 | # Note: The Apptainer image only gets automatically rebuilt if something in the 8 | # image definition changes, not when other packages change. To keep the 9 | # workspace in the container from becoming outdated, manual builds need to be 10 | # triggered regularly. 11 | 12 | name: Build and Test (colcon) 13 | 14 | on: pull_request 15 | 16 | jobs: 17 | build_and_test: 18 | runs-on: ubuntu-latest 19 | steps: 20 | - name: Check out code 21 | uses: actions/checkout@v4 22 | with: 23 | # checkout to subdirectory, otherwise the tests fail for some reason 24 | path: trifinger_simulation 25 | 26 | - name: Setup Apptainer 27 | uses: open-dynamic-robot-initiative/trifinger-build-action/setup_apptainer@v2 28 | 29 | - name: Build 30 | uses: open-dynamic-robot-initiative/trifinger-build-action/build@v2 31 | 32 | - name: Tests 33 | uses: open-dynamic-robot-initiative/trifinger-build-action/run_tests@v2 34 | -------------------------------------------------------------------------------- /.github/workflows/doc_test.yml: -------------------------------------------------------------------------------- 1 | # test-build docs using sphinx without publishing them 2 | name: Documentation (Test) 3 | on: 4 | workflow_dispatch: 5 | 6 | jobs: 7 | build_only: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Build documentation 11 | uses: sphinx-notes/pages@v3 12 | with: 13 | documentation_path: docs 14 | requirements_path: docs/requirements.txt 15 | publish: false 16 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | # build docs using sphinx and deploy to branch gh-pages 2 | name: Documentation 3 | on: 4 | push: 5 | branches: 6 | - master 7 | workflow_dispatch: 8 | inputs: 9 | publish: 10 | description: "Publish to GitHub Pages (disable to only test the build)" 11 | type: boolean 12 | default: false 13 | 14 | jobs: 15 | build_and_deploy: 16 | runs-on: ubuntu-latest 17 | 18 | # Grant GITHUB_TOKEN the permissions required to make a Pages deployment 19 | permissions: 20 | pages: write # to deploy to Pages 21 | id-token: write # to verify the deployment originates from an appropriate source 22 | 23 | # Deploy to the github-pages environment 24 | environment: 25 | name: github-pages 26 | url: ${{ steps.deployment.outputs.page_url }} 27 | 28 | steps: 29 | - name: Build and deploy documentation 30 | uses: sphinx-notes/pages@v3 31 | with: 32 | documentation_path: docs 33 | requirements_path: docs/requirements.txt 34 | publish: ${{ github.event_name
== 'push' || github.event.inputs.publish == 'true' }} 35 | -------------------------------------------------------------------------------- /.github/workflows/flake8-problem-matcher.json: -------------------------------------------------------------------------------- 1 | { 2 | "problemMatcher": [ 3 | { 4 | "owner": "flake8", 5 | "pattern": [ 6 | { 7 | "regexp": "^(.*?):(\\d+):(\\d+):\\s*(.*?)$", 8 | "file": 1, 9 | "line": 2, 10 | "column": 3, 11 | "message": 4 12 | } 13 | ] 14 | } 15 | ] 16 | } 17 | -------------------------------------------------------------------------------- /.github/workflows/git.yml: -------------------------------------------------------------------------------- 1 | name: Git Checks 2 | 3 | on: [pull_request] 4 | 5 | jobs: 6 | block-fixup: 7 | runs-on: ubuntu-latest 8 | 9 | steps: 10 | - uses: actions/checkout@v4 11 | - name: Block Fixup Commit Merge 12 | uses: 13rac1/block-fixup-merge-action@v2.0.0 13 | -------------------------------------------------------------------------------- /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Linters 2 | 3 | on: [pull_request] 4 | 5 | jobs: 6 | mypy: 7 | runs-on: ubuntu-latest 8 | steps: 9 | - uses: actions/checkout@v4 10 | - uses: actions/setup-python@v5 11 | with: 12 | python-version: '3.10' 13 | - name: Add matcher 14 | run: | 15 | echo "::add-matcher::.github/workflows/mypy-problem-matcher.json" 16 | - name: Install package 17 | run: | 18 | pip install ".[mypy]" 19 | - name: Run mypy 20 | run: | 21 | python3 -m mypy --exclude=build . 22 | 23 | flake8: 24 | runs-on: ubuntu-latest 25 | steps: 26 | - uses: actions/checkout@v4 27 | - uses: actions/setup-python@v5 28 | with: 29 | python-version: '3.10' 30 | - name: Install 31 | run: | 32 | python3 -m pip install flake8 33 | - name: Add matcher 34 | run: | 35 | echo "::add-matcher::.github/workflows/flake8-problem-matcher.json" 36 | - name: Lint with flake8 37 | run: | 38 | python3 -m flake8 . 
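          # Note on the "Add matcher" steps above: they register the JSON
          # problem matchers from .github/workflows/, which let GitHub parse
          # the "file:line:column: message" output of the linters and show it
          # as inline annotations on the pull request.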
39 | 40 | black: 41 | runs-on: ubuntu-latest 42 | steps: 43 | - uses: actions/checkout@v4 44 | - uses: actions/setup-python@v5 45 | - uses: psf/black@stable 46 | -------------------------------------------------------------------------------- /.github/workflows/mypy-problem-matcher.json: -------------------------------------------------------------------------------- 1 | { 2 | "problemMatcher": [ 3 | { 4 | "owner": "mypy", 5 | "pattern": [ 6 | { 7 | "regexp": "^(.*?):(\\d+):\\s*(.*?)$", 8 | "file": 1, 9 | "line": 2, 10 | "message": 3 11 | } 12 | ] 13 | } 14 | ] 15 | } 16 | 17 | -------------------------------------------------------------------------------- /.github/workflows/pr_todo_checks.yml: -------------------------------------------------------------------------------- 1 | name: PR TODO checks 2 | 3 | on: pull_request 4 | 5 | jobs: 6 | fixmes: 7 | name: New FIXMEs 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v4 11 | - name: Check for FIXMEs 12 | uses: luator/github_action_check_new_todos@v2 13 | with: 14 | label: FIXME 15 | base_ref: origin/${{ github.base_ref }} 16 | 17 | todos: 18 | name: New TODOs 19 | runs-on: ubuntu-latest 20 | steps: 21 | - uses: actions/checkout@v4 22 | - name: Check for TODOs 23 | uses: luator/github_action_check_new_todos@v2 24 | with: 25 | label: TODO 26 | base_ref: origin/${{ github.base_ref }} 27 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | # Install package via pip and run tests 2 | name: Tests (pip install) 3 | 4 | on: 5 | push: 6 | branches: 7 | - master 8 | pull_request: 9 | 10 | jobs: 11 | pytest: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v4 15 | - uses: actions/setup-python@v5 16 | with: 17 | python-version: '3.10' 18 | - name: Update pip 19 | run: | 20 | python -m pip install --upgrade pip 21 | - name: Install package 22 | run: | 23 | python -m pip install ".[test]" 24 | - name: Run tests 25 | run: | 26 | python -m pytest tests/ 27 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.pyc 3 | __pycache__ 4 | .cache 5 | *.egg-info 6 | *.swp 7 | docs/_build 8 | build/ 9 | dist/ 10 | venv/ 11 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | sphinx: 4 | configuration: docs/conf.py 5 | 6 | conda: 7 | environment: environment.yml -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | trifinger_simulation Changelog 2 | ============================== 3 | 4 | All notable changes to this project will be documented in this file. 5 | 6 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 7 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html) 8 | (at least for versions > 1.4.1). 9 | 10 | 11 | ## [Unreleased] 12 | 13 | ## [2.0.0] - 2024-07-30 14 | ### Added 15 | - Add py.typed marker for mypy 16 | - screenshot_mode.py to run simulation without visualisations (for nicer screenshots). 
17 | 18 | ### Deprecated 19 | - The `pinocchio_utils` module is deprecated and will be removed in a future release. 20 | Use the one from robot_properties_fingers instead. 21 | 22 | ### Fixed 23 | - Fixed a rounding error that occasionally resulted in NaN values when evaluating states 24 | in the "move_cuboid" task. 25 | - Fixed included URDF files of (Tri)FingerPro (see [robot_properties_fingers #15 26 | ](https://github.com/open-dynamic-robot-initiative/robot_properties_fingers/pull/15)). 27 | 28 | ### Removed 29 | - trifinger_simulation no longer maintains a copy of the robot model files for the 30 | pure-Python installation. Instead, it now depends on robot_properties_fingers for both 31 | installation types (colcon and pure Python). 32 | 33 | 34 | ## [1.4.1] - 2022-06-24 35 | 36 | - Switch from setup.py to setup.cfg 37 | - Set dtypes of gym spaces to np.float64 38 | 39 | 40 | ## [1.4.0] - 2022-06-23 41 | 42 | - Nicer rendering of the boundary (#68) 43 | - Change initial pose of camera in PyBullet GUI (#68) 44 | - Set proper initial position based on finger type (#79) 45 | - Fix incompatibility with newer Gym versions (#81) 46 | - Add option to save images to `demo_cameras` (#82) 47 | - Simulate delay of camera observations (#84) 48 | - Optionally compute tip velocities in forward kinematics (#85) 49 | - Cleanup the code and add more unit tests 50 | 51 | ## [1.3.0] - 2021-08-04 52 | 53 | There is no changelog for this or earlier versions. 54 | 55 | --- 56 | 57 | [Unreleased]: https://github.com/open-dynamic-robot-initiative/trifinger_simulation/compare/v2.0.0...HEAD 58 | [2.0.0]: https://github.com/open-dynamic-robot-initiative/trifinger_simulation/compare/v1.4.1...v2.0.0 59 | [1.4.1]: https://github.com/open-dynamic-robot-initiative/trifinger_simulation/compare/v1.4.0...v1.4.1 60 | [1.4.0]: https://github.com/open-dynamic-robot-initiative/trifinger_simulation/compare/v1.3.0...v1.4.0 61 | [1.3.0]: https://github.com/open-dynamic-robot-initiative/trifinger_simulation/releases/tag/v1.3.0 62 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | How to Contribute 2 | ================= 3 | 4 | Bug reports are always welcome. Please open an issue on GitHub. 5 | 6 | If you want to contribute code to this package, please see the 7 | [TriFinger Contribution Guide](https://open-dynamic-robot-initiative.github.io/trifinger_docs/contribute.html). 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2020 Max Planck Gesellschaft 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission.
19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TriFinger Robot Simulation 2 | 3 | Welcome to the official simulation of the TriFinger robots! 4 | 5 | ![Screenshots of different (Tri)Finger robots in simulation](docs/images/all_finger_types.jpg) 6 | 7 | ## The TriFinger Project 8 | 9 | To learn more about the TriFinger robots, check out our official 10 | [project website](https://sites.google.com/view/trifinger) and the 11 | [preprint](https://arxiv.org/abs/2008.03596) of this work. 12 | 13 | ## Documentation 14 | 15 | For instructions on how to install and use this package, see the 16 | [documentation](https://open-dynamic-robot-initiative.github.io/trifinger_simulation/). 17 | 18 | 19 | ## Cite Us 20 | 21 | If you are using this package in your academic work, please cite this repository 22 | and also the corresponding paper: 23 | 24 | ``` 25 | @misc{trifinger-simulation, 26 | author = {Joshi, Shruti and Widmaier, Felix and Agrawal, Vaibhav and Wüthrich, Manuel}, 27 | year = {2020}, 28 | publisher = {GitHub}, 29 | journal = {GitHub repository}, 30 | howpublished = {\url{https://github.com/open-dynamic-robot-initiative/trifinger_simulation}}, 31 | } 32 | ``` 33 | 34 | ``` 35 | @conference{wuethrich2020trifinger, 36 | title = {TriFinger: An Open-Source Robot for Learning Dexterity}, 37 | author = {W{\"u}thrich, M. and Widmaier, F. and Grimminger, F. and Akpo, J. and Joshi, S. and Agrawal, V. and Hammoud, B. and Khadiv, M. and Bogdanovic, M. and Berenz, V. and Viereck, J. and Naveau, M. and Righetti, L. and Sch{\"o}lkopf, B. and Bauer, S.}, 38 | booktitle = {Proceedings of the 4th Conference on Robot Learning (CoRL)}, 39 | month = nov, 40 | year = {2020}, 41 | doi = {}, 42 | url = {https://corlconf.github.io/corl2020/paper_421/}, 43 | month_numeric = {11} 44 | } 45 | ``` 46 | -------------------------------------------------------------------------------- /demos/demo_cameras.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Demo showing how to add cameras in the TriFinger simulation. 3 | 4 | Press "q" to exit. 5 | Images can be saved by specifying an output directory with `--save-dir` and then 6 | pressing "s" to save the current images. Images will be named "camera{id}_##.png". 7 | Existing images that match this pattern will be overwritten!
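Example invocation (the output directory is just an illustration):

    python3 demo_cameras.py --robot trifingerpro --save-dir /tmp/camera_images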
8 | """ 9 | import argparse 10 | import pathlib 11 | import typing 12 | 13 | import numpy as np 14 | import cv2 15 | 16 | from trifinger_simulation import sim_finger, sample, camera 17 | 18 | 19 | def save_images( 20 | images: typing.List[np.ndarray], output_dir: pathlib.Path, counter: int 21 | ): 22 | """Save images to the given directory, appending ``counter`` to the filenames. 23 | 24 | Files will be saved to ``output_dir`` with the pattern 25 | ``camera{id}_{counter:02d}.png``. 26 | Existing files that match this name will be overwritten! 27 | """ 28 | for i, name in enumerate(("camera60", "camera180", "camera300")): 29 | filename = "{}_{:02d}.png".format(name, counter) 30 | outpath = output_dir / filename 31 | print("Save image {}".format(outpath)) 32 | cv2.imwrite(str(outpath), images[i]) 33 | 34 | 35 | def main(): 36 | parser = argparse.ArgumentParser(description=__doc__) 37 | parser.add_argument( 38 | "--robot", 39 | "-r", 40 | choices=["trifingerone", "trifingerpro"], 41 | default="trifingerpro", 42 | help="Which robot to use. Default: %(default)s", 43 | ) 44 | parser.add_argument( 45 | "--config-dir", 46 | "-c", 47 | type=pathlib.Path, 48 | help="""Path to the directory containing camera calibration files. This 49 | is optional; if not specified, some default values will be used. 50 | """, 51 | ) 52 | parser.add_argument( 53 | "--param-file-format", 54 | type=str, 55 | default="camera{id}.yml", 56 | help="""Format of the camera parameter files. '{id}' is replaced with 57 | the camera id. Default: %(default)s 58 | """, 59 | ) 60 | parser.add_argument( 61 | "--save-dir", 62 | type=pathlib.Path, 63 | help="Directory to which images are saved when pressing 's'.", 64 | ) 65 | args = parser.parse_args() 66 | 67 | time_step = 0.004 68 | finger = sim_finger.SimFinger( 69 | finger_type=args.robot, 70 | time_step=time_step, 71 | enable_visualization=True, 72 | ) 73 | 74 | # Important: The cameras need to be created _after_ the simulation is
75 | # initialized. 76 | if args.config_dir: 77 | cameras = camera.create_trifinger_camera_array_from_config( 78 | args.config_dir, calib_filename_pattern=args.param_file_format 79 | ) 80 | else: 81 | cameras = camera.TriFingerCameras() 82 | 83 | save_counter = 0 84 | 85 | # Move the fingers to random positions 86 | while True: 87 | goal = np.array( 88 | sample.random_joint_positions( 89 | number_of_fingers=3, 90 | lower_bounds=[-1, -1, -2], 91 | upper_bounds=[1, 1, 2], 92 | ) 93 | ) 94 | finger_action = finger.Action(position=goal) 95 | 96 | for _ in range(50): 97 | t = finger.append_desired_action(finger_action) 98 | finger.get_observation(t) 99 | 100 | images = cameras.get_images() 101 | # images are rgb --> convert to bgr for opencv 102 | images = [cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in images] 103 | cv2.imshow("camera60", images[0]) 104 | cv2.imshow("camera180", images[1]) 105 | cv2.imshow("camera300", images[2]) 106 | key = cv2.waitKey(int(time_step * 1000)) 107 | 108 | if key == ord("s"): 109 | if args.save_dir: 110 | save_images(images, args.save_dir, save_counter) 111 | save_counter += 1 112 | else: 113 | print("ERROR: --save-dir not set, cannot save images.") 114 | elif key == ord("q"): 115 | return 116 | 117 | 118 | if __name__ == "__main__": 119 | main() 120 | -------------------------------------------------------------------------------- /demos/demo_control.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Demonstrate reaching randomly sampled targets with the finger robots, using 5 | either position control or torque control. 6 | """ 7 | import argparse 8 | import numpy as np 9 | import random 10 | import time 11 | from trifinger_simulation import ( 12 | sim_finger, 13 | visual_objects, 14 | sample, 15 | finger_types_data, 16 | ) 17 | 18 | 19 | def main(): 20 | argparser = argparse.ArgumentParser(description=__doc__) 21 | argparser.add_argument( 22 | "--control-mode", 23 | default="position", 24 | choices=["position", "torque"], 25 | help="Specify position or torque as the control mode.", 26 | ) 27 | argparser.add_argument( 28 | "--finger-type", 29 | default="trifingerone", 30 | choices=finger_types_data.get_valid_finger_types(), 31 | help="Specify a valid finger type", 32 | ) 33 | args = argparser.parse_args() 34 | time_step = 0.001 35 | 36 | finger = sim_finger.SimFinger( 37 | finger_type=args.finger_type, 38 | time_step=time_step, 39 | enable_visualization=True, 40 | ) 41 | num_fingers = finger.number_of_fingers 42 | 43 | if args.control_mode == "position": 44 | position_goals = visual_objects.Marker(number_of_goals=num_fingers) 45 | 46 | while True: 47 | if args.control_mode == "position": 48 | desired_joint_positions = np.array( 49 | sample.random_joint_positions(number_of_fingers=num_fingers) 50 | ) 51 | finger_action = finger.Action(position=desired_joint_positions) 52 | # visualize the goal position of the finger tip 53 | position_goals.set_state( 54 | finger.kinematics.forward_kinematics(desired_joint_positions) 55 | ) 56 | if args.control_mode == "torque": 57 | desired_joint_torques = [random.random()] * 3 * num_fingers 58 | finger_action = finger.Action(torque=desired_joint_torques) 59 | 60 | # pursue this goal for one second 61 | for _ in range(int(1 / time_step)): 62 | t = finger.append_desired_action(finger_action) 63 | finger.get_observation(t) 64 | time.sleep(time_step) 65 | 66 | 67 | if __name__ == "__main__": 68 | main() 69 |
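# For reference, the fingertip positions that are actually reached can be
# computed from an observation with the same kinematics helper that is used
# above for the goal markers, e.g. (sketch):
#
#     obs = finger.get_observation(t)
#     tip_positions = finger.kinematics.forward_kinematics(obs.position)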
-------------------------------------------------------------------------------- /demos/demo_inverse_kinematics.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Simple inverse kinematics demo.""" 3 | import numpy as np 4 | import time 5 | 6 | import trifinger_simulation 7 | 8 | 9 | def main(): 10 | time_step = 0.001 11 | finger = trifinger_simulation.SimFinger( 12 | finger_type="trifingerpro", 13 | time_step=time_step, 14 | enable_visualization=True, 15 | ) 16 | 17 | init_pos = np.array([0, 0.9, -1.7] * finger.number_of_fingers) 18 | finger.reset_finger_positions_and_velocities(init_pos) 19 | 20 | tip_positions = finger.kinematics.forward_kinematics(init_pos) 21 | 22 | # move the tips back and forth on the y-axis 23 | joint_pos = init_pos 24 | sign = +1 25 | while True: 26 | n_steps = int(1 / time_step) 27 | sign *= -1 28 | for _ in range(n_steps): 29 | finger_action = finger.Action(position=joint_pos) 30 | t = finger.append_desired_action(finger_action) 31 | obs = finger.get_observation(t) 32 | time.sleep(time_step) 33 | 34 | for i in range(len(tip_positions)): 35 | tip_positions[i][1] += sign * 0.05 / n_steps 36 | joint_pos, err = finger.kinematics.inverse_kinematics( 37 | tip_positions, obs.position 38 | ) 39 | print("error:", [round(np.linalg.norm(e), 4) for e in err]) 40 | 41 | 42 | if __name__ == "__main__": 43 | main() 44 | -------------------------------------------------------------------------------- /demos/demo_load_gym_env.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Minimal example showing how to create the available gym environments.""" 3 | import gym 4 | import argparse 5 | 6 | 7 | def main(): 8 | argparser = argparse.ArgumentParser(description=__doc__) 9 | argparser.add_argument( 10 | "--env", 11 | default="push", 12 | choices=["reach", "push"], 13 | help="Specify which gym env to load: push or reach", 14 | ) 15 | args = argparser.parse_args() 16 | 17 | if args.env == "push": 18 | env = gym.make( 19 | "trifinger_simulation.gym_wrapper:push-v0", 20 | control_rate_s=0.02, 21 | finger_type="trifingerone", 22 | enable_visualization=True, 23 | ) 24 | 25 | elif args.env == "reach": 26 | smoothing_params = { 27 | "num_episodes": 700, 28 | "start_after": 3.0 / 7.0, 29 | "final_alpha": 0.975, 30 | "stop_after": 5.0 / 7.0, 31 | } 32 | env = gym.make( 33 | "trifinger_simulation.gym_wrapper:reach-v0", 34 | control_rate_s=0.02, 35 | finger_type="trifingerone", 36 | smoothing_params=smoothing_params, 37 | enable_visualization=True, 38 | ) 39 | 40 | for episode in range(700): 41 | env.reset() 42 | for step in range(100): 43 | action = env.action_space.sample() 44 | observation, reward, done, info = env.step(action) 45 | 46 | 47 | if __name__ == "__main__": 48 | main() 49 | -------------------------------------------------------------------------------- /demos/demo_plain_torque_control.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Demonstrate how to run the simulated finger with torque control.""" 3 | import time 4 | import numpy as np 5 | 6 | from trifinger_simulation import SimFinger 7 | 8 | 9 | if __name__ == "__main__": 10 | finger = SimFinger( 11 | finger_type="fingerone", 12 | enable_visualization=True, 13 | ) 14 | 15 | # Send a constant torque to the joints, switching direction periodically.
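# Note: torque commands are subject to the safety checks and may be reduced
# before being applied; finger.get_applied_action(t) returns the command that
# was actually executed (see "Desired vs Applied Action" in the docs).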
16 | torque = np.array([0.0, 0.3, 0.3]) 17 | while True: 18 | time.sleep(finger.time_step_s) 19 | 20 | action = finger.Action(torque=torque) 21 | t = finger.append_desired_action(action) 22 | observation = finger.get_observation(t) 23 | 24 | # invert the direction of the command every 100 steps 25 | if t % 100 == 0: 26 | torque *= -1 27 | -------------------------------------------------------------------------------- /demos/demo_random_policy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Demo on how to run the simulation using the Gym environment. 3 | 4 | This demo creates a CubeEnv environment and runs one episode with random 5 | initialization using a dummy policy which uses random actions. 6 | """ 7 | import gym 8 | 9 | from trifinger_simulation.gym_wrapper.envs import cube_env 10 | 11 | 12 | class RandomPolicy: 13 | """Dummy policy which uses random actions.""" 14 | 15 | def __init__(self, action_space): 16 | self.action_space = action_space 17 | 18 | def predict(self, observation): 19 | return self.action_space.sample() 20 | 21 | 22 | def main(): 23 | # Use a random initializer with difficulty 1 24 | initializer = cube_env.RandomInitializer(difficulty=1) 25 | 26 | env = gym.make( 27 | "trifinger_simulation.gym_wrapper:real_robot_challenge_phase_1-v1", 28 | initializer=initializer, 29 | action_type=cube_env.ActionType.POSITION, 30 | frameskip=100, 31 | visualization=True, 32 | ) 33 | 34 | policy = RandomPolicy(env.action_space) 35 | 36 | observation = env.reset() 37 | is_done = False 38 | while not is_done: 39 | action = policy.predict(observation) 40 | observation, reward, is_done, info = env.step(action) 41 | 42 | print("Reward at final step: {:.3f}".format(reward)) 43 | 44 | 45 | if __name__ == "__main__": 46 | main() 47 | -------------------------------------------------------------------------------- /demos/demo_trifinger_platform.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Simple demo on how to use the TriFingerPlatform interface.""" 3 | import argparse 4 | import time 5 | 6 | import cv2 7 | import numpy as np 8 | 9 | from trifinger_simulation import trifinger_platform, sample 10 | 11 | 12 | def main(): 13 | parser = argparse.ArgumentParser(description=__doc__) 14 | parser.add_argument( 15 | "--enable-cameras", 16 | "-c", 17 | action="store_true", 18 | help="Enable camera observations.", 19 | ) 20 | parser.add_argument( 21 | "--iterations", 22 | type=int, 23 | default=100, 24 | help="Number of motions that are performed.", 25 | ) 26 | parser.add_argument( 27 | "--save-action-log", 28 | type=str, 29 | metavar="FILENAME", 30 | help="If set, save the action log to the specified file.", 31 | ) 32 | parser.add_argument( 33 | "--object", 34 | type=str, 35 | choices=["cube", "dice", "none"], 36 | default="cube", 37 | metavar="OBJECT_TYPE", 38 | help="Which type of object to use (if any).", 39 | ) 40 | args = parser.parse_args() 41 | 42 | object_type = trifinger_platform.ObjectType.NONE 43 | if args.object == "cube": 44 | object_type = trifinger_platform.ObjectType.COLORED_CUBE 45 | elif args.object == "dice": 46 | object_type = trifinger_platform.ObjectType.DICE 47 | 48 | platform = trifinger_platform.TriFingerPlatform( 49 | visualization=True, 50 | enable_cameras=args.enable_cameras, 51 | object_type=object_type, 52 | ) 53 | 54 | # Move the fingers to random positions so that the cube is kicked around 55 | # (and thus its position
changes). 56 | for _ in range(args.iterations): 57 | goal = np.array( 58 | sample.random_joint_positions( 59 | number_of_fingers=3, 60 | lower_bounds=[-1, -1, -2], 61 | upper_bounds=[1, 1, 2], 62 | ) 63 | ) 64 | finger_action = platform.Action(position=goal) 65 | 66 | # apply action for a few steps, so the fingers can move to the target 67 | # position and stay there for a while 68 | for _ in range(250): 69 | t = platform.append_desired_action(finger_action) 70 | time.sleep(platform.get_time_step()) 71 | 72 | # show the latest observations 73 | robot_observation = platform.get_robot_observation(t) 74 | print("Finger0 Position: %s" % robot_observation.position[:3]) 75 | 76 | if args.object == "cube": 77 | camera_observation = platform.get_camera_observation(t) 78 | print( 79 | "Cube Position: %s" 80 | % camera_observation.filtered_object_pose.position 81 | ) 82 | 83 | if platform.enable_cameras: 84 | for i, name in enumerate(("camera60", "camera180", "camera300")): 85 | # simulation provides images in RGB but OpenCV expects BGR 86 | img = cv2.cvtColor( 87 | camera_observation.cameras[i].image, cv2.COLOR_RGB2BGR 88 | ) 89 | cv2.imshow(name, img) 90 | cv2.waitKey(1) 91 | 92 | print() 93 | 94 | if args.save_action_log: 95 | platform.store_action_log(args.save_action_log) 96 | 97 | 98 | if __name__ == "__main__": 99 | main() 100 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SPHINXPROJ = pyBulletSimulationforFingerRobots 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/_static/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/docs/_static/.keep -------------------------------------------------------------------------------- /docs/_static/custom.css: -------------------------------------------------------------------------------- 1 | /* Workaround for https://github.com/readthedocs/sphinx_rtd_theme/issues/1247 */ 2 | dl.py.property { 3 | display: block !important; 4 | } 5 | -------------------------------------------------------------------------------- /docs/_templates/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/docs/_templates/.keep -------------------------------------------------------------------------------- /docs/api/camera.rst: -------------------------------------------------------------------------------- 1 | ************* 2 | Module camera 3 | ************* 4 | 5 | .. 
automodule:: trifinger_simulation.camera 6 | :members: 7 | -------------------------------------------------------------------------------- /docs/api/collision_objects.rst: -------------------------------------------------------------------------------- 1 | ************************ 2 | Module collision_objects 3 | ************************ 4 | 5 | 6 | .. automodule:: trifinger_simulation.collision_objects 7 | :members: 8 | :exclude-members: Block, BaseCollisionObject 9 | :inherited-members: 10 | -------------------------------------------------------------------------------- /docs/api/finger_types_data.rst: -------------------------------------------------------------------------------- 1 | ************************ 2 | Module finger_types_data 3 | ************************ 4 | 5 | .. automodule:: trifinger_simulation.finger_types_data 6 | :members: 7 | :member-order: bysource 8 | 9 | 10 | The simulation supports :ref:`different types of (Tri-)Finger robots 11 | <finger_types>` which all follow the same basic idea but have different 12 | models. The functions of this module help with retrieving the type-specific 13 | information. 14 | -------------------------------------------------------------------------------- /docs/api/pinocchio_utils.rst: -------------------------------------------------------------------------------- 1 | ********************** 2 | Module pinocchio_utils 3 | ********************** 4 | 5 | 6 | .. important:: 7 | 8 | **The pinocchio_utils module is deprecated!** 9 | 10 | It has been moved to the :doc:`robot_properties_fingers 11 | ` package. Please update your code to use 12 | the version from that package. The one here will be removed soon. 13 | 14 | 15 | .. automodule:: trifinger_simulation.pinocchio_utils 16 | :members: 17 | 18 | -------------------------------------------------------------------------------- /docs/api/sim_finger.rst: -------------------------------------------------------------------------------- 1 | ******************* 2 | The SimFinger Class 3 | ******************* 4 | 5 | The :class:`~trifinger_simulation.SimFinger` class provides a simulation 6 | environment for the different finger robots: the TriFingerEdu (our 7 | reproducible open-source robot platform), the TriFingerPro, and the TriFingerOne. 8 | See :func:`trifinger_simulation.finger_types_data.get_valid_finger_types()` for 9 | a complete list of supported robots. 10 | 11 | The interface of :class:`~trifinger_simulation.SimFinger` mimics that of the 12 | real robot frontend (see :doc:`../simreal/simwithreal`), 13 | which makes it easy to transfer code between simulation and real robot. 14 | 15 | When using the TriFinger platform, see also 16 | :class:`~trifinger_simulation.TriFingerPlatform`, which is a wrapper around 17 | :class:`~trifinger_simulation.SimFinger`. 18 | 19 | .. _`Desired vs Applied Action`: 20 | 21 | Desired vs Applied Action 22 | ========================= 23 | 24 | The action given by the user is called the *desired* action. This action may be 25 | altered before it is actually applied on the robot, e.g. by some safety checks 26 | limiting torque and velocity. This altered action is called the *applied* 27 | action. You may use 28 | :meth:`~trifinger_simulation.SimFinger.get_applied_action` to see what action 29 | actually got applied on the robot. 30 | 31 | 32 | API Documentation 33 | ================= 34 | 35 | 36 | .. autoclass:: trifinger_simulation.SimFinger 37 | :members: 38 | 39 | ------------------------------------------------------------------------------ 40 | 41 | ..
autoclass:: trifinger_simulation.Action 42 | :members: 43 | :member-order: bysource 44 | 45 | ------------------------------------------------------------------------------ 46 | 47 | .. autoclass:: trifinger_simulation.Observation 48 | :members: 49 | :member-order: bysource 50 | -------------------------------------------------------------------------------- /docs/api/trifingerplatform.rst: -------------------------------------------------------------------------------- 1 | *************************** 2 | The TriFingerPlatform Class 3 | *************************** 4 | 5 | 6 | The :class:`~trifinger_simulation.TriFingerPlatform` class provides access to the 7 | simulation of the TriFinger platform using the same interface as for the real 8 | robot. 9 | 10 | For a detailed explanation of the software interface on the real robot, and the 11 | logic behind it, please check the `paper 12 | `_. 13 | 14 | 15 | API Documentation 16 | ================= 17 | 18 | 19 | .. autoclass:: trifinger_simulation.TriFingerPlatform 20 | :special-members: 21 | 22 | .. automethod:: __init__ 23 | 24 | .. method:: Action(torque=None, position=None) 25 | 26 | See :meth:`trifinger_simulation.SimFinger.Action`. 27 | 28 | .. method:: append_desired_action(action) 29 | 30 | See :meth:`trifinger_simulation.SimFinger.append_desired_action`. 31 | 32 | .. method:: get_current_timeindex 33 | 34 | See :meth:`trifinger_simulation.SimFinger.get_current_timeindex`. 35 | 36 | .. method:: get_timestamp_ms(t) 37 | 38 | See :meth:`trifinger_simulation.SimFinger.get_timestamp_ms`. 39 | 40 | .. method:: get_desired_action(t) 41 | 42 | See :meth:`trifinger_simulation.SimFinger.get_desired_action`. 43 | 44 | .. method:: get_applied_action(t) 45 | 46 | See :meth:`trifinger_simulation.SimFinger.get_applied_action`. 47 | 48 | .. method:: get_robot_observation(t) 49 | 50 | Get observation of the robot state (joint angles, torques, etc.). 51 | See :meth:`trifinger_simulation.SimFinger.get_observation`. 52 | 53 | .. automethod:: get_camera_observation 54 | 55 | .. automethod:: store_action_log 56 | 57 | ------------------------------------------------------------------------------ 58 | 59 | .. autoclass:: trifinger_simulation.ObjectPose 60 | :members: 61 | 62 | ------------------------------------------------------------------------------ 63 | 64 | .. autoclass:: trifinger_simulation.CameraObservation 65 | :members: 66 | 67 | ------------------------------------------------------------------------------ 68 | 69 | .. autoclass:: trifinger_simulation.TriCameraObservation 70 | :members: 71 | 72 | ------------------------------------------------------------------------------ 73 | 74 | .. autoclass:: trifinger_simulation.TriCameraObjectObservation 75 | :members: 76 | -------------------------------------------------------------------------------- /docs/api/visual_objects.rst: -------------------------------------------------------------------------------- 1 | ********************* 2 | Module visual_objects 3 | ********************* 4 | 5 | 6 | .. automodule:: trifinger_simulation.visual_objects 7 | :members: 8 | :inherited-members: 9 | 10 | -------------------------------------------------------------------------------- /docs/getting_started/finger_types.rst: -------------------------------------------------------------------------------- 1 | .. _finger_types: 2 | 3 | ************ 4 | Finger Types 5 | ************ 6 | 7 | The simulation supports different types of (Tri-)Finger robots. 
The type has 8 | to be passed to the :class:`~trifinger_simulation.sim_finger.SimFinger` class 9 | upon initialisation. The different types that are currently supported by the 10 | simulation are described below. Note that the moving direction of some joints 11 | differs between the types. 12 | 13 | The "TriFinger" is basically just an arrangement of three individual fingers, 14 | so it is also possible to use a single finger. Thus, for each type there is 15 | also a single-finger version implemented; simply drop the "tri" from the name. 16 | 17 | 18 | TriFingerPro 19 | ============ 20 | 21 | .. list-table:: 22 | 23 | * - .. figure:: ../images/trifingerpro.jpg 24 | 25 | ``finger_type="trifingerpro"`` 26 | 27 | - .. figure:: ../images/fingerpro.jpg 28 | 29 | ``finger_type="fingerpro"`` 30 | 31 | 32 | This is the robot that is used in the `Real Robot Challenge`_. The visuals of 33 | the robot model are close to what the real robot looks like, so it can be used 34 | for rendering reasonably realistic images (within the limitations of 35 | PyBullet's rendering options). 36 | 37 | 38 | TriFingerEdu 39 | ============ 40 | 41 | .. list-table:: 42 | 43 | * - .. figure:: ../images/trifingeredu.jpg 44 | 45 | ``finger_type="trifingeredu"`` 46 | 47 | - .. figure:: ../images/fingeredu.jpg 48 | 49 | ``finger_type="fingeredu"`` 50 | 51 | 52 | The "Edu" version is a lighter, cheaper and easier-to-build version of 53 | the robot. The hardware documentation is available through the `Open Dynamic 54 | Robot Initiative`_. 55 | 56 | The robot model is not optimised for realistic rendering; the different fingers 57 | are rendered in different colours to make them easier to distinguish. 58 | 59 | 60 | TriFingerOne 61 | ============ 62 | 63 | .. list-table:: 64 | 65 | * - .. figure:: ../images/trifingerone.jpg 66 | 67 | ``finger_type="trifingerone"`` 68 | 69 | - .. figure:: ../images/fingerone.jpg 70 | 71 | ``finger_type="fingerone"`` 72 | 73 | 74 | (Tri)FingerOne is an early prototype (essentially "version 1") of the robot. 75 | 76 | The robot model is not optimised for realistic rendering; the different fingers 77 | are rendered in different colours to make them easier to distinguish. 78 | 79 | 80 | 81 | .. _Real Robot Challenge: https://real-robot-challenge.com 82 | .. _Open Dynamic Robot Initiative: https://github.com/open-dynamic-robot-initiative/open_robot_actuator_hardware 83 | -------------------------------------------------------------------------------- /docs/getting_started/first_steps.rst: -------------------------------------------------------------------------------- 1 | ********** 2 | Quickstart 3 | ********** 4 | 5 | The heart of this simulation is the 6 | :class:`~trifinger_simulation.sim_finger.SimFinger` class. It takes care of 7 | initialising PyBullet and loading the robot into the environment. 8 | 9 | .. code-block:: Python 10 | 11 | from trifinger_simulation import SimFinger 12 | 13 | sim = SimFinger( 14 | finger_type="fingerpro", 15 | enable_visualization=True, 16 | ) 17 | 18 | The above example populates the simulation with a "FingerPro" robot; see 19 | :doc:`finger_types` for the different robots that are supported. 20 | 21 | By default, the fingers initially point straight down, which usually 22 | means that they intersect with the ground. It is thus important to 23 | first set them to a feasible position before starting to run the simulation: 24 | 25 | ..
code-block:: Python 26 | 27 | # set feasible initial position for FingerPro 28 | sim.reset_finger_positions_and_velocities([0, -0.7, -1.5]) 29 | 30 | 31 | Now you can start to run the simulation by sending actions to the robot. Below 32 | is a very basic example that uses position commands to hold the robot at the 33 | initial position and prints the actual observed position: 34 | 35 | .. code-block:: Python 36 | 37 | import time 38 | 39 | while True: 40 | # to have it run in real-time 41 | time.sleep(sim.time_step_s) 42 | 43 | # create a position action 44 | action = sim.Action(position=[0, -0.7, -1.5]) 45 | 46 | # send it to the robot (this executes one simulation time step) 47 | t = sim.append_desired_action(action) 48 | 49 | # get the observation of the current time step 50 | observation = sim.get_observation(t) 51 | 52 | print(observation.position) 53 | 54 | When using position commands, a PD controller is executed internally to compute 55 | the desired torques. You can also directly use torque commands: 56 | 57 | .. code-block:: Python 58 | 59 | action = sim.Action(torque=[0., -0.2, -0.2]) 60 | 61 | 62 | Regarding the meaning of the *time index* ``t`` in the above example, please 63 | see :doc:`../simreal/timesteplogic`. 64 | 65 | 66 | Adding Objects to the Simulation 67 | ================================ 68 | 69 | We have a few objects defined in the 70 | :mod:`~trifinger_simulation.collision_objects` module, which can easily be 71 | added to the simulation. For example, to add a coloured cube, add the following 72 | code **after** initialising ``SimFinger``: 73 | 74 | .. code-block:: Python 75 | 76 | from trifinger_simulation import collision_objects 77 | 78 | cube = collision_objects.ColoredCubeV2(position=(0, 0, 0.4)) 79 | 80 | 81 | Modifying the Simulation Environment 82 | ==================================== 83 | 84 | If you want to modify any property of the simulation (add objects, change 85 | parameters, etc.) beyond the interface that is provided by the classes of this 86 | package, you can simply access pybullet directly. For example, to change the 87 | default camera pose in the PyBullet GUI: 88 | 89 | .. code-block:: Python 90 | 91 | import pybullet 92 | 93 | pybullet.resetDebugVisualizerCamera( 94 | cameraDistance=2.0, 95 | cameraYaw=0.0, 96 | cameraPitch=-30.0, 97 | cameraTargetPosition=(0, 0, 0.2), 98 | physicsClientId=sim._pybullet_client_id, 99 | ) 100 | 101 | Note, however, that all these modifications should be done **after** 102 | initialising ``SimFinger``. Ideally also pass the "physics client id" to all 103 | PyBullet calls, as shown above. 104 | 105 | 106 | More Examples 107 | ============= 108 | 109 | For more examples, see the scripts in the `demos/` folder of the 110 | trifinger_simulation package. Good starting points are: 111 | 112 | - ``demo_plain_torque_control.py``: A very minimalistic demo, similar to the 113 | example above. 114 | - ``demo_control.py``: A slightly more complicated demo. 115 | - ``demo_inverse_kinematics.py``: Uses inverse kinematics to control the 116 | finger tip position. 117 | - ``demo_cameras.py``: Shows how to use the cameras in simulation (note that 118 | image rendering is pretty slow, though). 119 | -------------------------------------------------------------------------------- /docs/getting_started/installation.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | There are two ways to build/install this package: 5 | 6 | 1.
Stand-alone installation using pip 7 | 2. As part of a colcon workspace 8 | 9 | 10 | Stand-alone installation using pip 11 | ---------------------------------- 12 | 13 | .. note:: 14 | 15 | We are developing for Python 3.10. Other versions may work as well but are 16 | not officially tested. 17 | 18 | 19 | To install the latest release: 20 | 21 | .. code-block:: bash 22 | 23 | $ pip install trifinger-simulation 24 | 25 | If you run into errors during installation, you may need to update pip first: 26 | 27 | .. code-block:: bash 28 | 29 | $ pip install --upgrade pip 30 | 31 | 32 | .. note:: 33 | 34 | To avoid version conflicts with other packages on your system, it is 35 | recommended to install the package in an isolated environment, e.g. using venv: 36 | 37 | .. code-block:: bash 38 | 39 | $ python3 -m venv ~/venv_trifinger_simulation 40 | $ . ~/venv_trifinger_simulation/bin/activate 41 | 42 | $ pip install --upgrade pip # make sure the latest version of pip is used 43 | $ pip install trifinger-simulation 44 | 45 | 46 | Installation from source 47 | ~~~~~~~~~~~~~~~~~~~~~~~~ 48 | 49 | To install the latest version from source, follow the same instructions as above 50 | but replace ``pip install trifinger-simulation`` with 51 | 52 | .. code-block:: bash 53 | 54 | $ git clone https://github.com/open-dynamic-robot-initiative/trifinger_simulation 55 | $ cd trifinger_simulation 56 | $ pip install . 57 | 58 | 59 | Test Installation 60 | ~~~~~~~~~~~~~~~~~ 61 | 62 | You can test the installation by running the unit tests:: 63 | 64 | $ python3 -m pytest tests/ 65 | 66 | or by running one of the demos:: 67 | 68 | $ python3 demos/demo_trifinger_platform.py 69 | 70 | 71 | 72 | .. _`colcon`: 73 | 74 | Using colcon 75 | ------------ 76 | 77 | The ``trifinger_simulation`` package is also part of the TriFinger software 78 | bundle. See :ref:`trifinger_docs:install_software` in the TriFinger 79 | documentation.
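As an additional quick check that the expected copy of the package is picked up
(useful when switching between venvs or workspaces), the following one-liner
should print the path of the installed package:

.. code-block:: bash

   $ python3 -c "import trifinger_simulation; print(trifinger_simulation.__file__)"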
80 | -------------------------------------------------------------------------------- /docs/images/all_finger_types.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/docs/images/all_finger_types.jpg -------------------------------------------------------------------------------- /docs/images/applied_action_dependency.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/docs/images/applied_action_dependency.png -------------------------------------------------------------------------------- /docs/images/com_inertias.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/docs/images/com_inertias.png -------------------------------------------------------------------------------- /docs/images/fingeredu.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/docs/images/fingeredu.jpg -------------------------------------------------------------------------------- /docs/images/fingerone.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/docs/images/fingerone.jpg -------------------------------------------------------------------------------- /docs/images/fingerpro.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/docs/images/fingerpro.jpg -------------------------------------------------------------------------------- /docs/images/hand.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/docs/images/hand.JPG -------------------------------------------------------------------------------- /docs/images/multi_object.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/docs/images/multi_object.JPG -------------------------------------------------------------------------------- /docs/images/sim_trifingerpro.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/docs/images/sim_trifingerpro.jpg -------------------------------------------------------------------------------- /docs/images/trifingeredu.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/docs/images/trifingeredu.jpg 
-------------------------------------------------------------------------------- /docs/images/trifingerone.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/docs/images/trifingerone.jpg -------------------------------------------------------------------------------- /docs/images/trifingerpro.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/docs/images/trifingerpro.jpg -------------------------------------------------------------------------------- /docs/images/workspace.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/docs/images/workspace.png -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | ************************************************* 2 | Welcome to the TriFinger Robot Simulation docs! 3 | ************************************************* 4 | 5 | .. image:: images/all_finger_types.jpg 6 | :alt: Screenshots of different (Tri)Finger robots in simulation 7 | 8 | The trifinger_simulation_ package contains a PyBullet_ simulation environment 9 | for the different :doc:`TriFinger robots <getting_started/finger_types>` as well as helper 10 | functions for some tasks (for sampling goals, computing rewards, etc.). 11 | 12 | To learn more about the TriFinger robots, check out our paper_ (and the corresponding 13 | `webpage <https://sites.google.com/view/trifinger>`_) and the overarching 14 | :doc:`TriFinger documentation `. 15 | 16 | 17 | .. toctree:: 18 | :maxdepth: 1 19 | :caption: Getting Started 20 | 21 | getting_started/installation 22 | getting_started/first_steps 23 | getting_started/finger_types 24 | 25 | .. toctree:: 26 | :maxdepth: 1 27 | :caption: Guides 28 | 29 | simreal/simwithreal 30 | simreal/simvsreal 31 | simreal/timesteplogic 32 | 33 | .. toctree:: 34 | :maxdepth: 1 35 | :caption: API Documentation 36 | 37 | api/sim_finger 38 | api/trifingerplatform 39 | api/collision_objects 40 | api/visual_objects 41 | api/camera 42 | api/pinocchio_utils 43 | api/finger_types_data 44 | 45 | 46 | Indices and tables 47 | ================== 48 | 49 | * :ref:`genindex` 50 | * :ref:`modindex` 51 | * :ref:`search` 52 | 53 | 54 | Cite Us 55 | ======= 56 | 57 | If you are using this package in your academic work, 58 | please cite this repository and also the corresponding paper: 59 | 60 | .. code-block:: bibtex 61 | 62 | @misc{trifinger-simulation, 63 | author = {Joshi, Shruti and Widmaier, Felix and Agrawal, Vaibhav and Wüthrich, Manuel}, 64 | year = {2020}, 65 | publisher = {GitHub}, 66 | journal = {GitHub repository}, 67 | howpublished = {\url{https://github.com/open-dynamic-robot-initiative/trifinger_simulation}}, 68 | } 69 | 70 | .. code-block:: bibtex 71 | 72 | @conference{wuethrich2020trifinger, 73 | title = {TriFinger: An Open-Source Robot for Learning Dexterity}, 74 | author = {W{\"u}thrich, M. and Widmaier, F. and Grimminger, F. and Akpo, J. and Joshi, S. and Agrawal, V. and Hammoud, B. and Khadiv, M. and Bogdanovic, M. and Berenz, V. and Viereck, J. and Naveau, M. and Righetti, L. and Sch{\"o}lkopf, B.
and Bauer, S.}, 75 | booktitle = {Proceedings of the 4th Conference on Robot Learning (CoRL)}, 76 | month = nov, 77 | year = {2020}, 78 | doi = {}, 79 | url = {https://corlconf.github.io/corl2020/paper_421/}, 80 | month_numeric = {11} 81 | } 82 | 83 | .. _trifinger_simulation: https://github.com/open-dynamic-robot-initiative/trifinger_simulation 84 | .. _paper: https://arxiv.org/abs/2008.03596 85 | .. _PyBullet: https://pybullet.org 86 | 87 | 88 | ---- 89 | 90 | Last rebuild of this documentation: |today| 91 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | sphinx_rtd_theme 3 | -------------------------------------------------------------------------------- /docs/sim_finger_screenshots.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Set up PyBullet for making screenshots with the different finger types.""" 3 | import argparse 4 | import time 5 | import typing 6 | import pybullet 7 | 8 | from trifinger_simulation import ( 9 | collision_objects, 10 | finger_types_data, 11 | sim_finger, 12 | ) 13 | 14 | 15 | if __name__ == "__main__": 16 | parser = argparse.ArgumentParser() 17 | parser.add_argument( 18 | "finger_type", 19 | choices=finger_types_data.get_valid_finger_types(), 20 | ) 21 | args = parser.parse_args() 22 | 23 | time_step = 0.001 24 | 25 | finger = sim_finger.SimFinger( 26 | finger_type=args.finger_type, 27 | time_step=time_step, 28 | enable_visualization=True, 29 | ) 30 | 31 | _config: typing.Dict[str, typing.Dict[str, typing.Sequence[float]]] = { 32 | "fingerone": { 33 | "joint_pos": (0, -0.7, -1.5), 34 | "cube_pos": (0.05, -0.08, 0.04), 35 | }, 36 | "trifingerone": { 37 | "joint_pos": (0, -0.7, -1.5) * 3, 38 | "cube_pos": (0.05, 0.08, 0.04), 39 | }, 40 | "fingeredu": { 41 | "joint_pos": (0, 0.7, -1.5), 42 | "cube_pos": (0.08, 0.05, 0.04), 43 | }, 44 | "trifingeredu": { 45 | "joint_pos": (0, 0.7, -1.5) * 3, 46 | "cube_pos": (0.05, 0.08, 0.04), 47 | }, 48 | "fingerpro": { 49 | "joint_pos": (0, 0.9, -1.7), 50 | "cube_pos": (0.05, -0.08, 0.04), 51 | }, 52 | "trifingerpro": { 53 | "joint_pos": (0, 0.9, -1.7) * 3, 54 | "cube_pos": (0.05, -0.02, 0.04), 55 | }, 56 | } 57 | config = _config[args.finger_type] 58 | 59 | camera_yaw = 100.0 60 | if args.finger_type == "fingeredu": 61 | # for FingerEdu rotate the camera by 90 deg as the finger is oriented 62 | # differently 63 | camera_yaw += 90.0 64 | 65 | # disable all gui elements and specify camera pose (so we have the same 66 | # fixed view in all screenshots) 67 | pybullet.configureDebugVisualizer(pybullet.COV_ENABLE_GUI, 0) 68 | pybullet.resetDebugVisualizerCamera( 69 | cameraDistance=1.0, 70 | cameraYaw=camera_yaw, 71 | cameraPitch=-30.0, 72 | cameraTargetPosition=(0, 0, 0.2), 73 | ) 74 | from scipy.spatial.transform import Rotation 75 | 76 | cube = collision_objects.ColoredCubeV2( 77 | position=config["cube_pos"], 78 | # rotate the cube a bit, so it looks nicer 79 | orientation=Rotation.from_euler( 80 | "xz", (90, -40), degrees=True 81 | ).as_quat(), 82 | ) 83 | 84 | # set the finger position 85 | finger.reset_finger_positions_and_velocities(config["joint_pos"]) 86 | 87 | while True: 88 | time.sleep(1) 89 | -------------------------------------------------------------------------------- /docs/simreal/simvsreal.rst: -------------------------------------------------------------------------------- 1 | .. 
_sec-simulation-vs-real-robot: 2 | 3 | ************************ 4 | Simulation vs Real Robot 5 | ************************ 6 | 7 | The :class:`~trifinger_simulation.sim_finger.SimFinger` class provides an API to control 8 | the TriFinger robots in simulation. Similarly, the 9 | :cpp:class:`robot_interfaces::RobotFrontend` class provides an API to interact with 10 | these robots in real life. 11 | 12 | It is easy to switch between these two (see :doc:`simwithreal`). However, there are some 13 | subtle differences one should keep in mind, which arise due to the accompanying switch 14 | between non-real-time behaviour (in simulation), and real-time behaviour (with real 15 | robots). 16 | 17 | 18 | Simulation is Stepped in ``append_desired_action()`` 19 | ==================================================== 20 | 21 | The simulation is explicitly stepped in the ``append_desired_action()`` 22 | method: this is because the simulation doesn't exhibit real-time 23 | behaviour. So, every time ``append_desired_action()`` is called, 24 | the simulation is stepped, and the next time step in the simulation is computed. 25 | This means that the state of the simulation does not change as long as this 26 | method is not called. This is different from the real robot, which will physically 27 | continue to move and will repeat the last action if no new action is provided in time. 28 | 29 | .. _`No waiting for future time steps`: 30 | 31 | No Waiting for Future Time Steps 32 | ================================ 33 | 34 | On the real robot, it is possible to pass a time index that lies in the future 35 | and most methods of the :cpp:class:`robot_interfaces::RobotFrontend` will simply block 36 | and wait until this time step is reached. The simulation, however, is not running 37 | asynchronously but is actively stepped every time ``append_desired_action()`` is called. 38 | Therefore the behaviour of waiting for a time step is not supported. Instead, passing a 39 | time index that lies in the future will result in an error. 40 | 41 | 42 | No Time Series in SimFinger 43 | =========================== 44 | 45 | The :doc:`robot_interfaces ` package makes use of time series 46 | for observations, actions, etc. This means all data of the last few time steps is 47 | available. One could, for example, do the following to determine how the state of the 48 | robot changed: 49 | 50 | .. code-block:: python 51 | 52 | previous_observation = frontend.get_observation(t - 1) 53 | current_observation = frontend.get_observation(t) 54 | 55 | The :class:`~trifinger_simulation.sim_finger.SimFinger` class does not 56 | implement a time series, so it only provides the observation of the current time step 57 | ``t``. Passing any other value for ``t`` will result in an error. 58 | 59 | 60 | API Differences with robot_interfaces::RobotFrontend 61 | ==================================================== 62 | 63 | Our goal is to provide the same API in 64 | :class:`~trifinger_simulation.sim_finger.SimFinger` as in ``RobotFrontend`` to make 65 | the transition between simulation and real robot easy. There are a few differences, though.
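For illustration, a minimal sketch of the stepping and time-index behaviour
described above (the zero-torque action and the finger type are arbitrary
examples):

.. code-block:: python

    from trifinger_simulation import sim_finger

    robot = sim_finger.SimFinger(finger_type="trifingerpro")
    zero_torque = robot.Action(torque=[0.0] * 9)

    # each call steps the simulation by exactly one time step
    t = robot.append_desired_action(zero_torque)

    # okay: observation of the current time step
    observation = robot.get_observation(t)

    # error: the simulation keeps no time series of past steps
    # observation = robot.get_observation(t - 1)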
66 | 67 | Currently :class:`~trifinger_simulation.sim_finger.SimFinger` supports the following 68 | methods: 69 | 70 | - ``append_desired_action()`` 71 | - ``get_observation()`` 72 | - ``get_desired_action()`` 73 | - ``get_applied_action()`` 74 | - ``get_timestamp_ms()`` 75 | - ``get_current_timeindex()`` 76 | 77 | The following methods are not supported: 78 | 79 | - ``get_status()``: There are no meaningful values for the status message in 80 | simulation, so this method is omitted to avoid confusion. 81 | - ``wait_until_timeindex()``: In general the process of waiting for a specific 82 | time step is not supported, see :ref:`No waiting for future time steps`. 83 | -------------------------------------------------------------------------------- /docs/simreal/simwithreal.rst: -------------------------------------------------------------------------------- 1 | ******************** 2 | Real Robot Interface 3 | ******************** 4 | 5 | The simulation API is designed with the aim of making it easy to transition to the 6 | interface of the real robots. 7 | Below, we describe two different ways of switching between the simulated and the real robot: 8 | 9 | 1. Switching between :class:`~trifinger_simulation.sim_finger.SimFinger` and 10 | :class:`~trifinger_simulation.real_finger.RealFinger` 11 | 2. Using the real-robot interface and replacing the hardware back end with the 12 | simulation. 13 | 14 | Note also that the interface of :class:`~trifinger_simulation.sim_finger.SimFinger` is 15 | designed to be as similar as possible to the real-robot front end, so even switching 16 | between the two should be relatively easy. 17 | 18 | 19 | RealRobot Wrapper 20 | ================= 21 | 22 | The :class:`~trifinger_simulation.real_finger.RealFinger` class provides a wrapper 23 | around the real-robot interface which can be used as a drop-in replacement for 24 | :class:`~trifinger_simulation.sim_finger.SimFinger`. 25 | 26 | Simple example, sending control commands to the TriFinger: 27 | 28 | .. code-block:: python 29 | 30 | from trifinger_simulation import sim_finger, real_finger 31 | 32 | if use_real: 33 | robot = real_finger.RealFinger(finger_type) 34 | else: 35 | robot = sim_finger.SimFinger(finger_type) 36 | 37 | def step(action): 38 | for _ in range(steps_per_control): 39 | t = robot.append_desired_action(action) 40 | observation = robot.get_observation(t) 41 | 42 | You can see an example of this usage in our :doc:`TriFingerReach environment 43 | <src_trifinger_reach-py>`. 44 | 45 | 46 | .. note:: 47 | 48 | In order to use the ``RealFinger`` class, you need additional packages from 49 | the TriFinger software bundle. See :ref:`colcon`. 50 | 51 | 52 | 53 | Real Robot Front End with Simulation Back End 54 | ============================================= 55 | 56 | The :doc:`robot_fingers ` package provides an 57 | :doc:`robot_interfaces `-based interface to the hardware of the 58 | real TriFinger robots. 59 | 60 | It is possible to use the front end of the real-robot interface but with the simulation 61 | plugged in as back end. This allows using the exact same code for simulation and real 62 | robot. For more information on this, see :ref:`robot_fingers:simulation_backend` in the 63 | ``robot_fingers`` documentation.
64 | -------------------------------------------------------------------------------- /docs/simreal/src_trifinger_reach-py.rst: -------------------------------------------------------------------------------- 1 | :orphan: 2 | 3 | *************************************** 4 | Example: TriFingerReach Gym Environment 5 | *************************************** 6 | 7 | .. literalinclude:: ../../trifinger_simulation/gym_wrapper/envs/trifinger_reach.py 8 | -------------------------------------------------------------------------------- /docs/simreal/timesteplogic.rst: -------------------------------------------------------------------------------- 1 | ***************************************** 2 | Time Relation of Actions and Observations 3 | ***************************************** 4 | 5 | On the real robot, we are using *time series* for all robot data like actions 6 | and observations. The user does not send actions directly to the robot but 7 | appends them to the "desired actions" time series, which serves as a queue. 8 | At each time step, identified by a *time index t*, the action at position *t* is 9 | taken from the "desired actions" time series and processed. At the same time, an 10 | observation is acquired from the robot and added to the "observation" time 11 | series. This means that the effect of the desired action ``a_t`` is not yet 12 | visible in the observation ``y_t``, as is illustrated below. (``a'_t`` 13 | corresponds to the *applied action*, see :ref:`Desired vs Applied Action`) 14 | 15 | .. image:: ../images/applied_action_dependency.png 16 | :width: 80% 17 | :align: center 18 | 19 | 20 | In the `robot_interfaces`_ software, the method 21 | ``append_desired_action()`` is used to 22 | append actions to the time series. It returns the time index *t* at which the 23 | appended action will be executed. Methods like 24 | ``get_observation`` expect a time index as 25 | input and will return the data corresponding to this time step. If the given 26 | time index refers to a point in the future, these methods will block and wait 27 | until that point is reached. 28 | 29 | As the simulation is not real-time critical, the behaviour is a bit different 30 | here: 31 | 32 | - :meth:`~trifinger_simulation.SimFinger.append_desired_action` will directly 33 | apply the action and step the simulation. 34 | - There is no actual time series. The API in the simulation 35 | follows the same principle to make the transition to the real robot easier. 36 | However, it is implemented with a buffer size of 1, so the getter methods only 37 | provide data for the current time step. 38 | - It is possible to access information from *t + 1*. In a typical gym 39 | environment, it is expected that the observation returned by ``step(action)`` 40 | belongs to the moment *after* the given action is executed (this corresponds 41 | to the time index *t + 1*). To make it easier to get started, we therefore 42 | allow accessing the observation of this time index in the simulation (see the 43 | sketch below). Note, however, that this is **not possible** on the real robot! 44 | 45 | For more information on the API of the real robot, see our publication `TriFinger: An Open-Source 46 | Robot for Learning Dexterity <https://arxiv.org/abs/2008.03596>`_. 47 |
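A minimal sketch of this access pattern (assuming ``sim`` is a
:class:`~trifinger_simulation.SimFinger` instance and ``action`` an arbitrary
action):

.. code-block:: python

    t = sim.append_desired_action(action)

    # observation of the moment at which the action is applied
    # (its effect is not yet visible here)
    observation_t = sim.get_observation(t)

    # observation after the action was applied, as expected from a gym
    # environment's step() -- works in simulation but not on the real robot!
    observation_t1 = sim.get_observation(t + 1)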
48 | .. _`robot_interfaces`: https://github.com/open-dynamic-robot-initiative/robot_interfaces 49 | -------------------------------------------------------------------------------- /package.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0"?> 2 | <package format="3"> 3 | <name>trifinger_simulation</name> 4 | <version>2.0.0</version> 5 | <description> 6 | TriFinger Robot Simulation 7 | </description> 8 | 9 | <maintainer email="felix.widmaier@tue.mpg.de">Felix Widmaier</maintainer> 10 | <license>BSD 3-Clause</license> 11 | 12 | <buildtool_depend>ament_python</buildtool_depend> 13 | 14 | <exec_depend>ament_index_python</exec_depend> 15 | <exec_depend>robot_properties_fingers</exec_depend> 16 | 17 | <test_depend>python3-pytest</test_depend> 18 | 19 | <export> 20 | <build_type>ament_python</build_type> 21 | </export> 22 | </package> 23 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=42", "wheel"] 3 | 4 | [tool.black] 5 | line-length = 79 6 | 7 | [tool.pylint.messages_control] 8 | disable = "C0330, C0326" 9 | 10 | [tool.mypy] 11 | ignore_missing_imports = true 12 | no_implicit_optional = true 13 | -------------------------------------------------------------------------------- /resource/trifinger_simulation: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/resource/trifinger_simulation -------------------------------------------------------------------------------- /scripts/check_position_control_accuracy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Move to random points with position control and print position error. 3 | 4 | This can be used for testing different PD gains. 5 | """ 6 | import argparse 7 | import numpy as np 8 | import matplotlib.pyplot as plt 9 | 10 | from trifinger_simulation import sim_finger, sample 11 | 12 | 13 | if __name__ == "__main__": 14 | parser = argparse.ArgumentParser() 15 | parser.add_argument("--time-step", "-t", type=float, required=True) 16 | parser.add_argument("--plot", type=int) 17 | args = parser.parse_args() 18 | 19 | time_step = args.time_step 20 | 21 | finger = sim_finger.SimFinger( 22 | time_step=time_step, 23 | enable_visualization=True, 24 | finger_type="fingerpro", 25 | # spawn robot higher up to avoid collisions with the table 26 | robot_position_offset=(0, 0, 0.5), 27 | ) 28 | 29 | errors = [] 30 | for _ in range(100): 31 | target_position = np.array( 32 | sample.random_joint_positions( 33 | number_of_fingers=1, 34 | lower_bounds=[-0.33, 0.0, -2.7], 35 | upper_bounds=[1.0, 1.57, 0.0], 36 | ) 37 | ) 38 | 39 | action = finger.Action(position=target_position) 40 | positions = [] 41 | steps = 1000 42 | for _ in range(steps): 43 | t = finger.append_desired_action(action) 44 | observation = finger.get_observation(t) 45 | positions.append(observation.position) 46 | 47 | observation = finger.get_observation(t) 48 | error = np.abs(observation.position - target_position) 49 | errors.append(error) 50 | # print("Target position: {}".format(target_position)) 51 | print("Position error: {}".format(error)) 52 | 53 | if args.plot is not None: 54 | target_line = target_position[args.plot] 55 | position_array = np.vstack(positions) 56 | plt.plot(position_array[:, args.plot], "b") 57 | plt.hlines(target_line, 0, steps) 58 | plt.axis((None, None, target_line - 0.1, target_line + 0.1)) 59 | plt.show() 60 | 61 | print("==============================================================") 62 | print("Mean error: {}".format(np.mean(errors, axis=0))) 63 | print("\n\n") 64
| -------------------------------------------------------------------------------- /scripts/plot_position_error.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Plot the position error over all episodes. 3 | 4 | Plots for each episode the position error between desired and actual finger tip 5 | position at the last step of the episode. 6 | 7 | Also prints the mean error over all episodes. This is only useful when using a 8 | log file from testing a final policy. 9 | """ 10 | import argparse 11 | import pickle 12 | import numpy as np 13 | import matplotlib.pyplot as plt 14 | 15 | 16 | def main(): 17 | parser = argparse.ArgumentParser() 18 | parser.add_argument("data_file") 19 | args = parser.parse_args() 20 | 21 | with open(args.data_file, "rb") as file_handle: 22 | episodes = pickle.load(file_handle) 23 | 24 | dist = [] 25 | for data in episodes[1:]: 26 | tip_goal = data["tip_goal"][0] 27 | trajectory = np.vstack(data["tip_positions"]) 28 | dist.append(np.linalg.norm(trajectory[-1] - tip_goal)) 29 | 30 | print("Mean position error over all episodes: {}".format(np.mean(dist))) 31 | 32 | fig, ax = plt.subplots(1, 1, dpi=300) 33 | ax.plot(dist, linewidth=2) 34 | 35 | ax.set_xlabel("Episode") 36 | ax.set_ylabel("Distance to goal at end of episode") 37 | 38 | plt.show() 39 | 40 | 41 | if __name__ == "__main__": 42 | main() 43 | -------------------------------------------------------------------------------- /scripts/plot_tip_position_trajectories.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Script for plotting tip position data from a log file.""" 3 | import argparse 4 | import pickle 5 | import numpy as np 6 | import matplotlib.pyplot as plt 7 | import matplotlib.patches as patches 8 | 9 | 10 | def main(): 11 | parser = argparse.ArgumentParser(description=__doc__) 12 | parser.add_argument( 13 | "data_file", 14 | help="""Path to the pickle file containing the logged 15 | data""", 16 | ) 17 | parser.add_argument( 18 | "--episode", 19 | type=int, 20 | help="""Number of the episode 21 | When this is set, only the given episode will be 22 | plotted, --start-episode and --end-episode will be 23 | ignored.""", 24 | ) 25 | parser.add_argument( 26 | "-s", 27 | "--start-episode", 28 | type=int, 29 | help="""Number of the first episode when plotting a 30 | range of episodes.""", 31 | ) 32 | parser.add_argument( 33 | "-e", 34 | "--end-episode", 35 | type=int, 36 | help="""Number of the last episode when plotting a 37 | range of episodes.""", 38 | ) 39 | parser.add_argument( 40 | "--plain", 41 | choices=["xy", "xz"], 42 | help="""Specifies on which plane the data is 43 | plotted.""", 44 | ) 45 | parser.add_argument( 46 | "--output", 47 | type=str, 48 | help="""Path to a file to store the plot. 
49 | If not set, the plot will be displayed instead.""", 50 | ) 51 | args = parser.parse_args() 52 | 53 | with open(args.data_file, "rb") as file_handle: 54 | episodes = pickle.load(file_handle) 55 | 56 | x = 0 57 | 58 | plt.rcParams.update( 59 | { 60 | "font.size": 20, 61 | "font.weight": "bold", 62 | "axes.linewidth": 3, 63 | } 64 | ) 65 | 66 | if args.episode: 67 | if args.plain == "xy": 68 | y = 1 69 | else: 70 | y = 2 71 | data = episodes[args.episode] 72 | 73 | tip_goal = data["tip_goal"][0] 74 | trajectory = np.vstack(data["tip_positions"]) 75 | 76 | plt.plot(tip_goal[x], tip_goal[y], "rx") 77 | plt.plot(trajectory[:, x], trajectory[:, y], "b-o") 78 | else: 79 | if args.start_episode: 80 | start = args.start_episode 81 | end = args.end_episode + 1 82 | else: 83 | start = 1 84 | end = len(episodes) 85 | 86 | # if args.plain is not set, plot both, otherwise only one 87 | plains = [] 88 | num_subplots = 0 89 | if args.plain in ("xy", None): 90 | plains.append(("xy-plane", "y", num_subplots, 1)) 91 | num_subplots += 1 92 | if args.plain in ("xz", None): 93 | plains.append(("xz-plane", "z", num_subplots, 2)) 94 | num_subplots += 1 95 | 96 | fig, ax = plt.subplots(1, num_subplots, figsize=(10, 10), dpi=300) 97 | if num_subplots == 1: 98 | ax = [ax] 99 | 100 | for title, ylabel, a, y in plains: 101 | for i in range(start, end): 102 | data = episodes[i] 103 | 104 | tip_goal = np.asarray(data["tip_goal"][0]) 105 | trajectory = np.vstack(data["tip_positions"]) 106 | dist = trajectory - tip_goal 107 | 108 | ax[a].plot( 109 | dist[:, x], dist[:, y], color=(0.3, 0.3, 0.3), linewidth=2 110 | ) 111 | ax[a].plot(dist[0, x], dist[0, y], "bo", zorder=40) 112 | ax[a].plot(dist[-1, x], dist[-1, y], "g^", zorder=40) 113 | 114 | circ = patches.Circle( 115 | (0, 0), 116 | 0.02, 117 | edgecolor="orange", 118 | facecolor="none", 119 | linewidth=4, 120 | zorder=50, 121 | ) 122 | 123 | ax[a].add_patch(circ) 124 | 125 | ax[a].set_xlabel("x") 126 | ax[a].set_ylabel(ylabel + " ", rotation="horizontal") 127 | 128 | ax[a].axis("equal") 129 | 130 | # Move left y-axis and bottom x-axis to centre, passing 131 | # through (0,0) 132 | ax[a].spines["left"].set_position("zero") 133 | ax[a].spines["bottom"].set_position("zero") 134 | 135 | ax[a].spines["left"].set_color("grey") 136 | ax[a].spines["bottom"].set_color("grey") 137 | ax[a].tick_params(axis="x", colors="grey") 138 | ax[a].tick_params(axis="y", colors="grey") 139 | 140 | # Eliminate upper and right axes 141 | ax[a].spines["right"].set_color("none") 142 | ax[a].spines["top"].set_color("none") 143 | 144 | # Show ticks in the left and lower axes only 145 | ax[a].xaxis.set_ticks_position("bottom") 146 | ax[a].yaxis.set_ticks_position("left") 147 | 148 | ax[a].xaxis.set_label_coords(1, 0.5) 149 | ax[a].yaxis.set_label_coords(0.5, 1) 150 | 151 | if args.output: 152 | plt.savefig(args.output, bbox_inches="tight") 153 | else: 154 | plt.show() 155 | 156 | 157 | if __name__ == "__main__": 158 | main() 159 | -------------------------------------------------------------------------------- /scripts/profiling.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import cProfile 3 | import pstats 4 | 5 | import numpy as np 6 | 7 | from trifinger_simulation.sim_finger import SimFinger 8 | from trifinger_simulation import collision_objects 9 | 10 | 11 | finger_names = ["trifingerone", "trifingeredu", "trifingerpro"] 12 | 13 | 14 | def execute_random_motion(*, finger_name, nb_timesteps, enable_visualization): 15 | 
finger = SimFinger( 16 | finger_type=finger_name, 17 | enable_visualization=enable_visualization, 18 | ) 19 | cube = collision_objects.Block() 20 | 21 | for t in range(nb_timesteps): 22 | if t % 50 == 0: 23 | torque = np.random.uniform(low=-0.36, high=0.36, size=(9)) 24 | finger.append_desired_action(finger.Action(torque=torque)) 25 | del finger 26 | del cube 27 | 28 | 29 | if __name__ == "__main__": 30 | 31 | def get_filename(*, finger_name, enable_visualization): 32 | return "stats_" + finger_name + "_vis-" + str(enable_visualization) 33 | 34 | for enable_visualization in [False, True]: 35 | for finger_name in finger_names: 36 | 37 | def execute_random_motion_finger(): 38 | execute_random_motion( 39 | finger_name=finger_name, 40 | nb_timesteps=1000, 41 | enable_visualization=enable_visualization, 42 | ) 43 | 44 | filename = get_filename( 45 | finger_name=finger_name, 46 | enable_visualization=enable_visualization, 47 | ) 48 | cProfile.run("execute_random_motion_finger()", filename=filename) 49 | 50 | for enable_visualization in [False, True]: 51 | for finger_name in finger_names: 52 | filename = get_filename( 53 | finger_name=finger_name, 54 | enable_visualization=enable_visualization, 55 | ) 56 | p = pstats.Stats(filename) 57 | print(filename, "==============================================") 58 | p.strip_dirs().sort_stats("tottime").print_stats(3) 59 | -------------------------------------------------------------------------------- /scripts/rrc_evaluate.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Run RRC Simulation Phase Evaluation in a Singularity container. 3 | 4 | Copies the specified package into the workspace created in the output 5 | directory, builds it using ``colcon build`` and then runs policy evaluation and 6 | replay assuming that a proper ``evaluate_policy.py`` exists in the root of the 7 | given package. 8 | 9 | Build and evaluation is executed inside the specified Singularity image which 10 | is run with "--containall --cleanenv" to minimise any influence of the host 11 | system. The only directories that are mounted are the package directory 12 | (read-only) and the output directory. 13 | """ 14 | import argparse 15 | import pathlib 16 | import subprocess 17 | import sys 18 | 19 | 20 | #: The task that is evaluated 21 | TASK = "move_cube_on_trajectory" 22 | 23 | 24 | def main(): 25 | parser = argparse.ArgumentParser(description=__doc__) 26 | parser.add_argument( 27 | "--singularity-image", 28 | "-s", 29 | type=pathlib.Path, 30 | help="Path to the Singularity image.", 31 | ) 32 | parser.add_argument( 33 | "--package", 34 | "-p", 35 | type=pathlib.Path, 36 | help="""Path to the package containing the policy. This package is 37 | expected to have a file `evaluate_policy.py` in its root directory. 38 | """, 39 | ) 40 | parser.add_argument( 41 | "--output-dir", 42 | "-o", 43 | type=pathlib.Path, 44 | help="""Output directory in which log files are saved. CAUTION: 45 | existing files in this directory may be deleted! 
46 | """, 47 | ) 48 | args = parser.parse_args() 49 | 50 | # some basic checks of the input 51 | if not args.singularity_image.exists(): 52 | print( 53 | "The singularity image {} does not exist.".format( 54 | args.singularity_image 55 | ), 56 | file=sys.stderr, 57 | ) 58 | return 1 59 | if not args.package.is_dir(): 60 | print("{} is not a directory.".format(args.package), file=sys.stderr) 61 | return 1 62 | if not args.output_dir.is_dir(): 63 | print("{} is not a directory".format(args.output_dir), file=sys.stderr) 64 | return 1 65 | 66 | eval_script = ( 67 | "rm -rf /ws/{install,build,log,src} && " # wipe existing workspace 68 | "mkdir -p /ws/src && " 69 | "mkdir -p /ws/output && " 70 | "cp -r /input /ws/src/pkg && " 71 | "cd /ws && " 72 | "colcon build && " 73 | ". install/local_setup.bash && " 74 | f"python3 -m trifinger_simulation.tasks.{TASK}" 75 | " evaluate_and_check --exec /ws/src/pkg/evaluate_policy.py /ws/output" 76 | ) 77 | 78 | singularity_cmd = [ 79 | "singularity", 80 | "run", 81 | "--containall", 82 | "--cleanenv", 83 | "-B", 84 | "{}:/input:ro,{}:/ws".format(args.package, args.output_dir), 85 | args.singularity_image, 86 | eval_script, 87 | ] 88 | 89 | subprocess.run(singularity_cmd, check=True) 90 | 91 | return 0 92 | 93 | 94 | if __name__ == "__main__": 95 | sys.exit(main()) 96 | -------------------------------------------------------------------------------- /scripts/screenshot_mode.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Visualise the robot at the specified position with axes, etc. disabled. 3 | 4 | This can be used to more easily create screenshots of the robot in specific poses. 5 | """ 6 | import argparse 7 | import time 8 | from trifinger_simulation import ( 9 | sim_finger, 10 | finger_types_data, 11 | ) 12 | 13 | import pybullet 14 | 15 | 16 | def main(): 17 | valid_finger_types = finger_types_data.get_valid_finger_types() 18 | 19 | parser = argparse.ArgumentParser(description=__doc__) 20 | parser.add_argument( 21 | "finger_type", 22 | choices=valid_finger_types, 23 | help="Specify a valid finger type. One of {}".format( 24 | valid_finger_types 25 | ), 26 | ) 27 | parser.add_argument( 28 | "--position", "-p", type=float, nargs="+", help="Joint positions." 
29 | ) 30 | parser.add_argument( 31 | "--robot-height-offset", 32 | type=float, 33 | default=0, 34 | help="Offset to the default robot height.", 35 | ) 36 | args = parser.parse_args() 37 | 38 | finger = sim_finger.SimFinger( 39 | finger_type=args.finger_type, 40 | enable_visualization=True, 41 | robot_position_offset=[0, 0, args.robot_height_offset], 42 | ) 43 | 44 | # disable all GUI elements 45 | pybullet.configureDebugVisualizer(pybullet.COV_ENABLE_GUI, 0) 46 | 47 | # change initial camera pose 48 | # pybullet.resetDebugVisualizerCamera( 49 | # cameraDistance=1.0, 50 | # cameraYaw=100.0, 51 | # cameraPitch=-30.0, 52 | # cameraTargetPosition=(0, 0, 0.2), 53 | # physicsClientId=self._pybullet_client_id, 54 | # ) 55 | 56 | if args.position: 57 | print("Joint positions: {}".format(args.position)) 58 | finger.reset_finger_positions_and_velocities(args.position) 59 | 60 | while True: 61 | time.sleep(100) 62 | 63 | 64 | if __name__ == "__main__": 65 | main() 66 | -------------------------------------------------------------------------------- /scripts/trifingerpro_model_test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Script for testing the TriFingerPro model.""" 3 | import time 4 | import pybullet 5 | from trifinger_simulation import ( 6 | sim_finger, 7 | visual_objects, 8 | ) 9 | 10 | 11 | def visualize_collisions(sim): 12 | contact_points = pybullet.getContactPoints( 13 | bodyA=sim.finger_id, 14 | physicsClientId=sim._pybullet_client_id, 15 | ) 16 | 17 | markers = [] 18 | 19 | for cp in contact_points: 20 | contact_distance = cp[8] 21 | if contact_distance < 0: 22 | position = cp[5] 23 | marker = visual_objects.CubeMarker( 24 | width=0.01, 25 | position=position, 26 | orientation=(0, 0, 0, 1), 27 | color=(1, 0, 0, 1), 28 | ) 29 | markers.append(marker) 30 | 31 | return markers 32 | 33 | 34 | def main(): 35 | # argparser = argparse.ArgumentParser(description=__doc__) 36 | # args = argparser.parse_args() 37 | 38 | time_step = 0.001 39 | 40 | finger = sim_finger.SimFinger( 41 | finger_type="trifingerpro", 42 | time_step=time_step, 43 | enable_visualization=True, 44 | ) 45 | 46 | markers = [] 47 | while True: 48 | # action = finger.Action(torque=[0.3, 0.3, -0.2] * 3) 49 | action = finger.Action(position=[0.0, 0.9, -1.7] * 3) 50 | t = finger.append_desired_action(action) 51 | finger.get_observation(t) 52 | 53 | # delete old markers 54 | for m in markers: 55 | del m 56 | markers = visualize_collisions(finger) 57 | 58 | time.sleep(time_step) 59 | 60 | 61 | if __name__ == "__main__": 62 | main() 63 | -------------------------------------------------------------------------------- /scripts/view_camera_log_real_and_sim.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """view rendered and real images in parallel. 3 | 4 | Loads robot and camera log files and plays back the images from the log while 5 | at the same time rendering images in the simulation, using the robot and object 6 | state from the log file. 7 | """ 8 | import argparse 9 | import pathlib 10 | 11 | import numpy as np 12 | import cv2 13 | import pybullet 14 | 15 | import robot_fingers 16 | from trifinger_cameras import utils 17 | import trifinger_simulation 18 | from trifinger_simulation import sim_finger, camera 19 | 20 | 21 | def main(): 22 | parser = argparse.ArgumentParser(description=__doc__) 23 | parser.add_argument( 24 | "log_dir", type=pathlib.Path, help="Path to the log files." 
25 | ) 26 | parser.add_argument( 27 | "--rate", type=int, default=1, help="Time in ms per step." 28 | ) 29 | args = parser.parse_args() 30 | 31 | robot_log_file = str(args.log_dir / "robot_data.dat") 32 | camera_log_file = str(args.log_dir / "camera_data.dat") 33 | 34 | log = robot_fingers.TriFingerPlatformLog(robot_log_file, camera_log_file) 35 | 36 | simulation = sim_finger.SimFinger( 37 | finger_type="trifingerpro", 38 | enable_visualization=True, 39 | ) 40 | cameras = camera.create_trifinger_camera_array_from_config(args.log_dir) 41 | 42 | cube_urdf_file = ( 43 | pathlib.Path(trifinger_simulation.__file__).parent 44 | / "data/cube_v2/cube_v2.urdf" 45 | ) 46 | cube = pybullet.loadURDF( 47 | fileName=str(cube_urdf_file), 48 | ) 49 | assert cube >= 0, "Failed to load cube model." 50 | 51 | pybullet.configureDebugVisualizer(lightPosition=(0, 0, 1.0)) 52 | pybullet.configureDebugVisualizer(pybullet.COV_ENABLE_SHADOWS, 1) 53 | pybullet.configureDebugVisualizer( 54 | pybullet.COV_ENABLE_RGB_BUFFER_PREVIEW, 0 55 | ) 56 | pybullet.configureDebugVisualizer( 57 | pybullet.COV_ENABLE_DEPTH_BUFFER_PREVIEW, 0 58 | ) 59 | # yes, it is really a segmentation maRk... 60 | pybullet.configureDebugVisualizer( 61 | pybullet.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, 0 62 | ) 63 | 64 | for t in range( 65 | log.get_first_timeindex(), log.get_last_timeindex() + 1, 100 66 | ): 67 | robot_observation = log.get_robot_observation(t) 68 | simulation.reset_finger_positions_and_velocities( 69 | robot_observation.position 70 | ) 71 | 72 | # get rendered images from simulation 73 | sim_images = cameras.get_images() 74 | # images are rgb --> convert to bgr for opencv 75 | sim_images = [ 76 | cv2.cvtColor(img, cv2.COLOR_RGB2BGR) for img in sim_images 77 | ] 78 | sim_images = np.hstack(sim_images) 79 | 80 | # get real images from cameras 81 | try: 82 | camera_observation = log.get_camera_observation(t) 83 | # set cube pose 84 | pybullet.resetBasePositionAndOrientation( 85 | cube, 86 | camera_observation.filtered_object_pose.position, 87 | camera_observation.filtered_object_pose.orientation, 88 | ) 89 | 90 | real_images = [ 91 | utils.convert_image(cam.image) 92 | for cam in camera_observation.cameras 93 | ] 94 | real_images = np.hstack(real_images) 95 | except Exception as e: 96 | print(e) 97 | real_images = np.zeros_like(sim_images) 98 | 99 | # display images 100 | image = np.vstack((sim_images, real_images)) 101 | cv2.imshow("cameras", image) 102 | key = cv2.waitKey(args.rate) 103 | # exit on "q" 104 | if key == ord("q"): 105 | return 106 | 107 | 108 | if __name__ == "__main__": 109 | main() 110 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [develop] 2 | script_dir=$base/lib/trifinger_simulation 3 | [install] 4 | install_scripts=$base/lib/trifinger_simulation 5 | 6 | [metadata] 7 | name = trifinger_simulation 8 | version = attr: trifinger_simulation.__version__ 9 | description = TriFinger Robot Simulation 10 | long_description = file: README.md 11 | long_description_content_type = text/markdown 12 | author = Felix Widmaier 13 | author_email = felix.widmaier@tue.mpg.de 14 | license = BSD 3-Clause License 15 | license_files = LICENSE 16 | url = https://open-dynamic-robot-initiative.github.io/trifinger_simulation 17 | project_urls = 18 | Documentation = https://open-dynamic-robot-initiative.github.io/trifinger_simulation 19 | Source Code = 
https://github.com/open-dynamic-robot-initiative/trifinger_simulation 20 | Bug Tracker = https://github.com/open-dynamic-robot-initiative/trifinger_simulation/issues 21 | keywords = TriFinger, Simulation, Robotics, Robot 22 | classifiers = 23 | Development Status :: 5 - Production/Stable 24 | License :: OSI Approved :: BSD License 25 | Programming Language :: Python :: 3 26 | Topic :: Scientific/Engineering 27 | 28 | [options] 29 | packages = find: 30 | install_requires = 31 | numpy >=1.19.1,<2 32 | scipy >=1.5.4 33 | # pinocchio 34 | pin >=2.4.7 35 | pybullet >=3.0.8 36 | gym >=0.23.1 37 | opencv-python >=4.2.0.34 38 | pyyaml >=5.3.1 39 | robot_properties_fingers>=2.0.2 40 | 41 | scripts = 42 | demos/demo_cameras.py 43 | demos/demo_control.py 44 | demos/demo_inverse_kinematics.py 45 | demos/demo_load_gym_env.py 46 | demos/demo_plain_torque_control.py 47 | demos/demo_random_policy.py 48 | demos/demo_trifinger_platform.py 49 | scripts/check_position_control_accuracy.py 50 | scripts/profiling.py 51 | 52 | [options.extras_require] 53 | test = pytest 54 | mypy = 55 | mypy 56 | types-PyYAML 57 | types-setuptools 58 | doc = 59 | sphinx 60 | sphinx_rtd_theme 61 | 62 | [options.package_data] 63 | trifinger_simulation = 64 | py.typed 65 | data/camera_params/*.yml 66 | data/cube_v2/*.blend 67 | data/cube_v2/*.urdf 68 | data/cube_v2/*.png 69 | data/cube_v2/*.mtl 70 | data/cube_v2/*.obj 71 | 72 | [options.data_files] 73 | share/ament_index/resource_index/packages = resource/trifinger_simulation 74 | share/trifinger_simulation = package.xml 75 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Dummy setup.py needed for colcon 2 | import setuptools 3 | 4 | if __name__ == "__main__": 5 | setuptools.setup() 6 | -------------------------------------------------------------------------------- /tests/test_camera.py: -------------------------------------------------------------------------------- 1 | """Unit tests for the camera module.""" 2 | 3 | import os 4 | import pathlib 5 | import numpy as np 6 | import pybullet 7 | 8 | from trifinger_simulation import camera as sim_camera 9 | 10 | 11 | TEST_DATA_DIR, _ = os.path.splitext(__file__) 12 | 13 | 14 | def test_calib_data_to_matrix_1x1(): 15 | data = { 16 | "rows": 1, 17 | "cols": 1, 18 | "data": [42], 19 | } 20 | expected = np.array([[42]]) 21 | 22 | np.testing.assert_array_equal( 23 | sim_camera.calib_data_to_matrix(data), expected 24 | ) 25 | 26 | 27 | def test_calib_data_to_matrix_3x3(): 28 | data = { 29 | "rows": 3, 30 | "cols": 3, 31 | "data": [1, 2, 3, 4, 5, 6, 7, 8, 9], 32 | } 33 | expected = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) 34 | 35 | np.testing.assert_array_equal( 36 | sim_camera.calib_data_to_matrix(data), expected 37 | ) 38 | 39 | 40 | def test_calib_data_to_matrix_2x4(): 41 | data = { 42 | "rows": 2, 43 | "cols": 4, 44 | "data": [1, 2, 3, 4, 5, 6, 7, 8], 45 | } 46 | expected = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]) 47 | 48 | np.testing.assert_array_equal( 49 | sim_camera.calib_data_to_matrix(data), expected 50 | ) 51 | 52 | 53 | def test_camera_parameters_load(): 54 | config_file = os.path.join(TEST_DATA_DIR, "camera180_full.yml") 55 | 56 | expected_camera_matrix = np.array( 57 | [ 58 | [585.06, 0.0, 369.13], 59 | [0.0, 587.96, 278.41], 60 | [0.0, 0.0, 1.0], 61 | ] 62 | ) 63 | 64 | expected_dist_coeffs = np.array( 65 | [-0.2373, 0.1466, -0.0033, 0.0002, -0.1356] 66 | ) 67 | 68 | expected_tf_mat = np.array( 69
| [ 70 | [0.9999, 0.0011, 0.0009, 0.0015], 71 | [0.0015, -0.8603, -0.5096, 0.0059], 72 | [0.0001, 0.5096, -0.8603, 0.5336], 73 | [0.0, 0.0, 0.0, 1.0], 74 | ] 75 | ) 76 | 77 | with open(config_file, "r") as f: 78 | params = sim_camera.CameraParameters.load(f) 79 | 80 | assert type(params) is sim_camera.CameraParameters 81 | assert params.name == "camera180" 82 | assert params.width == 720 83 | assert params.height == 540 84 | np.testing.assert_array_almost_equal( 85 | params.camera_matrix, expected_camera_matrix 86 | ) 87 | np.testing.assert_array_almost_equal( 88 | params.distortion_coefficients, expected_dist_coeffs 89 | ) 90 | np.testing.assert_array_almost_equal( 91 | params.tf_world_to_camera, expected_tf_mat 92 | ) 93 | 94 | 95 | def test_camera_parameters_dump(tmpdir): 96 | # load the test data 97 | config_file = os.path.join(TEST_DATA_DIR, "camera180_full.yml") 98 | with open(config_file, "r") as f: 99 | params = sim_camera.CameraParameters.load(f) 100 | 101 | # dump to a temporary file, load that again and verify that the values are 102 | # the same 103 | tmpfile = tmpdir.join("dump.yml") 104 | with open(tmpfile, "w") as f: 105 | params.dump(f) 106 | 107 | with open(tmpfile, "r") as f: 108 | params2 = sim_camera.CameraParameters.load(f) 109 | 110 | assert params.name == params2.name 111 | assert params.width == params2.width 112 | assert params.height == params2.height 113 | np.testing.assert_array_almost_equal( 114 | params.camera_matrix, params2.camera_matrix 115 | ) 116 | np.testing.assert_array_almost_equal( 117 | params.distortion_coefficients, params2.distortion_coefficients 118 | ) 119 | np.testing.assert_array_almost_equal( 120 | params.tf_world_to_camera, params2.tf_world_to_camera 121 | ) 122 | 123 | 124 | def test_load_camera_parameters(): 125 | params = sim_camera.load_camera_parameters( 126 | pathlib.Path(TEST_DATA_DIR), "camera{id}_full.yml" 127 | ) 128 | 129 | # loading in general is already covered above, so only verify correct order 130 | # here 131 | assert len(params) == 3 132 | assert params[0].name == "camera60" 133 | assert params[1].name == "camera180" 134 | assert params[2].name == "camera300" 135 | 136 | 137 | def test_camera(): 138 | pybullet.connect(pybullet.DIRECT) 139 | 140 | width = 150 141 | height = 100 142 | 143 | camera = sim_camera.Camera( 144 | (0, 0, 0.3), 145 | (0, 0, 0, 1), 146 | image_size=(width, height), 147 | ) 148 | 149 | # only some very basic test, verifying that we get an image of the correct 150 | # shape 151 | image = camera.get_image() 152 | assert image.shape == (height, width, 3) 153 | 154 | pybullet.disconnect() 155 | 156 | 157 | def test_calibrated_camera(): 158 | config_file = os.path.join(TEST_DATA_DIR, "camera180_full.yml") 159 | 160 | pybullet.connect(pybullet.DIRECT) 161 | 162 | with open(config_file) as f: 163 | params = sim_camera.CameraParameters.load(f) 164 | camera = sim_camera.CalibratedCamera( 165 | params.camera_matrix, 166 | params.distortion_coefficients, 167 | params.tf_world_to_camera, 168 | (params.width, params.height), 169 | near_plane_distance=0.02, 170 | far_plane_distance=1.0, 171 | ) 172 | 173 | # only some very basic test, verifying that we get an image of the correct 174 | # shape 175 | image = camera.get_image() 176 | assert image.shape == (params.height, params.width, 3) 177 | 178 | pybullet.disconnect() 179 | 180 | 181 | def test_trifingercameras(): 182 | pybullet.connect(pybullet.DIRECT) 183 | 184 | width = 150 185 | height = 100 186 | expected_shape = (height, width, 3) 187 | 188 | tricamera = 
sim_camera.TriFingerCameras( 189 | image_size=(width, height), 190 | ) 191 | 192 | images = tricamera.get_images() 193 | assert len(images) == 3 194 | assert images[0].shape == expected_shape 195 | assert images[1].shape == expected_shape 196 | assert images[2].shape == expected_shape 197 | 198 | pybullet.disconnect() 199 | 200 | 201 | def test_create_trifinger_camera_array_from_config(): 202 | pybullet.connect(pybullet.DIRECT) 203 | 204 | expected_shape = (540, 720, 3) 205 | 206 | tricamera = sim_camera.create_trifinger_camera_array_from_config( 207 | pathlib.Path(TEST_DATA_DIR), 208 | "camera{id}_full.yml", 209 | ) 210 | 211 | images = tricamera.get_images() 212 | assert len(images) == 3 213 | assert images[0].shape == expected_shape 214 | assert images[1].shape == expected_shape 215 | assert images[2].shape == expected_shape 216 | 217 | pybullet.disconnect() 218 | -------------------------------------------------------------------------------- /tests/test_camera/camera180_full.yml: -------------------------------------------------------------------------------- 1 | camera_matrix: 2 | cols: 3 3 | data: 4 | - 585.06 5 | - 0.0 6 | - 369.13 7 | - 0.0 8 | - 587.96 9 | - 278.41 10 | - 0.0 11 | - 0.0 12 | - 1.0 13 | rows: 3 14 | camera_name: camera180 15 | distortion_coefficients: 16 | cols: 5 17 | data: 18 | - -0.2373 19 | - 0.1466 20 | - -0.0033 21 | - 0.0002 22 | - -0.1356 23 | rows: 1 24 | image_height: 540 25 | image_width: 720 26 | tf_world_to_camera: 27 | cols: 4 28 | data: 29 | - 0.9999 30 | - 0.0011 31 | - 0.0009 32 | - 0.0015 33 | - 0.0015 34 | - -0.8603 35 | - -0.5096 36 | - 0.0059 37 | - 0.0001 38 | - 0.5096 39 | - -0.8603 40 | - 0.5336 41 | - 0.0 42 | - 0.0 43 | - 0.0 44 | - 1.0 45 | rows: 4 46 | tf_world_to_camera_std: 47 | cols: 4 48 | data: 49 | - 7.8563e-06 50 | - 0.0029 51 | - 0.0030 52 | - 0.0004 53 | - 0.0025 54 | - 0.0013 55 | - 0.0023 56 | - 0.0005 57 | - 0.0034 58 | - 0.0023 59 | - 0.0013 60 | - 0.0002 61 | - 0.0 62 | - 0.0 63 | - 0.0 64 | - 0.0 65 | rows: 4 66 | -------------------------------------------------------------------------------- /tests/test_camera/camera300_full.yml: -------------------------------------------------------------------------------- 1 | camera_matrix: 2 | cols: 3 3 | data: 4 | - 586.4371656440662 5 | - 0.0 6 | - 376.0799627035372 7 | - 0.0 8 | - 590.8073307672728 9 | - 284.1827367615578 10 | - 0.0 11 | - 0.0 12 | - 1.0 13 | rows: 3 14 | camera_name: camera300 15 | distortion_coefficients: 16 | cols: 5 17 | data: 18 | - -0.23391341982227962 19 | - 0.09394432535010212 20 | - -0.002787865901636738 21 | - -7.252943212413023e-05 22 | - -0.017420413881185703 23 | rows: 1 24 | image_height: 540 25 | image_width: 720 26 | tf_world_to_camera: 27 | cols: 4 28 | data: 29 | - -0.7133771728609642 30 | - -0.7006191670792009 31 | - -0.01456422308851129 32 | - 0.000684373876406266 33 | - -0.5196240895754424 34 | - 0.5428037431338101 35 | - -0.6598003871028696 36 | - -0.007554524367671664 37 | - 0.4701789145919274 38 | - -0.463122803047164 39 | - -0.751291777787446 40 | - 0.5244100800677238 41 | - 0.0 42 | - 0.0 43 | - 0.0 44 | - 1.0 45 | rows: 4 46 | tf_world_to_camera_std: 47 | cols: 4 48 | data: 49 | - 0.0020759651842660117 50 | - 0.0021444024623455534 51 | - 0.0021834089415969857 52 | - 0.0005068714917011395 53 | - 0.0026703132755999 54 | - 0.002673806193417258 55 | - 0.0020177559741561607 56 | - 0.0003790199429267026 57 | - 0.0020403045144949472 58 | - 0.001549437778265158 59 | - 0.0017772450402005176 60 | - 0.0003322102351354774 61 | - 0.0 62 | - 0.0 63 
| - 0.0 64 | - 0.0 65 | rows: 4 66 | -------------------------------------------------------------------------------- /tests/test_camera/camera60_full.yml: -------------------------------------------------------------------------------- 1 | camera_matrix: 2 | cols: 3 3 | data: 4 | - 587.2983721486902 5 | - 0.0 6 | - 379.2857905622954 7 | - 0.0 8 | - 591.5395266821638 9 | - 277.45556498689365 10 | - 0.0 11 | - 0.0 12 | - 1.0 13 | rows: 3 14 | camera_name: camera60 15 | distortion_coefficients: 16 | cols: 5 17 | data: 18 | - -0.23804660749779824 19 | - 0.10825771776560758 20 | - -0.0024551017209611128 21 | - 4.8257038130254314e-05 22 | - -0.02722946246691342 23 | rows: 1 24 | image_height: 540 25 | image_width: 720 26 | tf_world_to_camera: 27 | cols: 4 28 | data: 29 | - -0.7019134208048192 30 | - 0.7122353932236418 31 | - -0.004805374269147043 32 | - 0.002252555442149284 33 | - 0.5344638657119378 34 | - 0.5222316464208596 35 | - -0.6645386970157846 36 | - -0.0032747731308788697 37 | - -0.47079826418692317 38 | - -0.46902151242952833 39 | - -0.7472317485480533 40 | - 0.5238875639497548 41 | - 0.0 42 | - 0.0 43 | - 0.0 44 | - 1.0 45 | rows: 4 46 | tf_world_to_camera_std: 47 | cols: 4 48 | data: 49 | - 0.0021790353060527645 50 | - 0.0021481513684787814 51 | - 0.0024166014581399066 52 | - 0.0006146887379140977 53 | - 0.0025393011564917435 54 | - 0.001435794780360425 55 | - 0.00151470669061762 56 | - 0.0003712483847988384 57 | - 0.0019298019794210424 58 | - 0.0026451717311933042 59 | - 0.0013446968243762806 60 | - 0.00034964603848654035 61 | - 0.0 62 | - 0.0 63 | - 0.0 64 | - 0.0 65 | rows: 4 66 | -------------------------------------------------------------------------------- /tests/test_cube_env.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import unittest 3 | 4 | from trifinger_simulation.gym_wrapper.envs import cube_env 5 | 6 | 7 | class TestSample(unittest.TestCase): 8 | """Test the CubeEnv gym environment.""" 9 | 10 | def test_if_observations_are_valid(self): 11 | # Observations need to be contained in the observation space. If this 12 | # is not the case, there is either an issue with the generation of the 13 | # observations or the observation space is not defined properly. 14 | env = cube_env.CubeEnv(cube_env.RandomInitializer(4)) 15 | 16 | observation = env.reset() 17 | self.assertTrue( 18 | env.observation_space.contains(observation), 19 | msg="observation: {}".format(observation), 20 | ) 21 | 22 | for i in range(3000): 23 | action = env.action_space.sample() 24 | observation, _, _, _ = env.step(action) 25 | 26 | self.assertTrue( 27 | env.observation_space.contains(observation), 28 | msg="Invalid observation: {}".format(observation), 29 | ) 30 | 31 | 32 | if __name__ == "__main__": 33 | unittest.main() 34 | -------------------------------------------------------------------------------- /tests/test_determinism.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import unittest 3 | import numpy as np 4 | 5 | import gym 6 | 7 | import trifinger_simulation.gym_wrapper # noqa 8 | from trifinger_simulation.sim_finger import SimFinger 9 | 10 | 11 | class TestSimulationDeterminisim(unittest.TestCase): 12 | """This test verifies that the simulation always behaves deterministically. 13 | 14 | When starting from the same position and sending the same commands, the 15 | result should be the same. 
16 | """ 17 | 18 | def test_constant_torque(self): 19 | """Compare two runs sending a constant torque. 20 | 21 | In each run the finger is reset to a fixed position and a constant 22 | torque is applied for a number of steps. The final observation of each 23 | run is compared. If the simulation behaves deterministically, the 24 | observations should be equal. 25 | """ 26 | finger = SimFinger( 27 | finger_type="fingerone", 28 | ) 29 | 30 | start_position = [0.5, -0.7, -1.5] 31 | action = finger.Action(torque=[0.3, 0.3, 0.3]) 32 | 33 | def run(): 34 | finger.reset_finger_positions_and_velocities(start_position) 35 | for _ in range(30): 36 | finger._set_desired_action(action) 37 | finger._step_simulation() 38 | return finger._get_latest_observation() 39 | 40 | first_run = run() 41 | second_run = run() 42 | 43 | np.testing.assert_array_equal(first_run.torque, second_run.torque) 44 | np.testing.assert_array_equal(first_run.position, second_run.position) 45 | np.testing.assert_array_equal(first_run.velocity, second_run.velocity) 46 | 47 | def test_reach_rollouts(self): 48 | smoothing_params = { 49 | "num_episodes": 10, 50 | # ratio of total training time after which smoothing starts 51 | "start_after": 3.0 / 7.0, 52 | # smoothing coeff. that shall be reached at end of training 53 | "final_alpha": 0.975, 54 | "stop_after": 5.0 / 7.0, 55 | } 56 | 57 | num_samples = 10 58 | horizon = 100 59 | 60 | env = gym.make( 61 | "reach-v0", 62 | control_rate_s=0.02, 63 | enable_visualization=False, 64 | finger_type="fingerone", 65 | smoothing_params=smoothing_params, 66 | ) 67 | 68 | start_position = [0.5, -0.7, -1.5] 69 | 70 | plans = -1.0 + 2.0 * np.random.rand(horizon, num_samples, 3) 71 | 72 | def run(): 73 | states = [] 74 | rewards = [] 75 | 76 | for i in range(num_samples): 77 | env.reset() 78 | env.finger.reset_finger_positions_and_velocities( 79 | start_position 80 | ) 81 | for t in range(horizon): 82 | state, reward, _, _ = env.step(plans[t, i]) 83 | states.append(state) 84 | rewards.append(reward) 85 | states = np.array(states) 86 | rewards = np.array(rewards) 87 | return states, rewards 88 | 89 | first_states, first_rewards = run() 90 | second_states, second_rewards = run() 91 | 92 | np.testing.assert_array_equal(first_states.all(), second_states.all()) 93 | np.testing.assert_array_equal( 94 | first_rewards.all(), second_rewards.all() 95 | ) 96 | 97 | 98 | if __name__ == "__main__": 99 | unittest.main() 100 | -------------------------------------------------------------------------------- /tests/test_finger_types_data.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import trifinger_simulation.finger_types_data as ftd 4 | 5 | 6 | def test_get_valid_finger_types(): 7 | expected_types = [ 8 | "fingerone", 9 | "trifingerone", 10 | "fingeredu", 11 | "trifingeredu", 12 | "fingerpro", 13 | "trifingerpro", 14 | ] 15 | actual_types = ftd.get_valid_finger_types() 16 | 17 | assert sorted(actual_types) == sorted(expected_types) 18 | 19 | 20 | def test_check_finger_type(): 21 | for name in ftd.finger_types_data.keys(): 22 | assert ftd.check_finger_type(name) == name 23 | 24 | with pytest.raises(ValueError): 25 | ftd.check_finger_type("invalid") 26 | 27 | 28 | def test_get_finger_urdf(): 29 | for name in ftd.finger_types_data.keys(): 30 | assert ( 31 | ftd.get_finger_urdf(name) == ftd.finger_types_data[name].urdf_file 32 | ) 33 | 34 | with pytest.raises(ValueError): 35 | ftd.check_finger_type("invalid") 36 | 37 | 38 | def 
test_get_number_of_fingers(): 39 | for name in ftd.finger_types_data.keys(): 40 | assert ( 41 | ftd.get_number_of_fingers(name) 42 | == ftd.finger_types_data[name].number_of_fingers 43 | ) 44 | 45 | with pytest.raises(ValueError): 46 | ftd.get_number_of_fingers("invalid") 47 | -------------------------------------------------------------------------------- /tests/test_loading_urdfs.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import os 3 | 4 | import pybullet 5 | from trifinger_simulation.sim_finger import SimFinger 6 | from trifinger_simulation import finger_types_data 7 | 8 | 9 | class TestLoadingURDFs(unittest.TestCase): 10 | """ 11 | This test verifies that the URDFs corresponding to 12 | all the valid finger types can be imported successfully. 13 | """ 14 | 15 | def test_loading_urdfs(self): 16 | """ 17 | Get the keys corresponding to the valid finger types 18 | from finger_types_data and try importing their corresponding 19 | URDFs. 20 | """ 21 | finger_data = finger_types_data.finger_types_data 22 | for key in finger_data.keys(): 23 | try: 24 | SimFinger( 25 | finger_type=key, 26 | ) 27 | 28 | except pybullet.error as e: 29 | self.fail( 30 | "Failed to create SimFinger(finger_type={}): {}".format( 31 | key, e 32 | ) 33 | ) 34 | 35 | def test_loading_urdfs_locally(self): 36 | """ 37 | Try importing the local copies of the URDFs corresponding 38 | to all the valid finger types (with ROS_PACKAGE_PATH 39 | cleared). 40 | """ 41 | finger_data = finger_types_data.finger_types_data 42 | for key in finger_data.keys(): 43 | try: 44 | os.environ["ROS_PACKAGE_PATH"] = " " 45 | SimFinger( 46 | finger_type=key, 47 | ) 48 | except pybullet.error as e: 49 | self.fail( 50 | "Failed to import the local copies of the urdf for " 51 | "SimFinger(finger_type={}): {}".format(key, e) 52 | ) 53 | 54 | 55 | if __name__ == "__main__": 56 | unittest.main() 57 | -------------------------------------------------------------------------------- /tests/test_parallel_instances.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Test two parallel simulation instances.""" 3 | import unittest 4 | import numpy as np 5 | 6 | from trifinger_simulation.sim_finger import SimFinger 7 | 8 | 9 | class TestParallelInstances(unittest.TestCase): 10 | def test_step_one(self): 11 | # Create two instances, send actions and step only one. Verify that 12 | # the other one does not move. 13 | robot1 = SimFinger(finger_type="trifingerpro") 14 | robot2 = SimFinger(finger_type="trifingerpro") 15 | 16 | start_position = np.array([0.0, 0.7, -1.5] * 3) 17 | 18 | robot1.reset_finger_positions_and_velocities(start_position) 19 | robot2.reset_finger_positions_and_velocities(start_position) 20 | 21 | action = robot1.Action(torque=[0.3, 0.3, 0.3] * 3) 22 | 23 | for i in range(30): 24 | robot1._set_desired_action(action) 25 | robot1._step_simulation() 26 | obs1 = robot1._get_latest_observation() 27 | obs2 = robot2._get_latest_observation() 28 | 29 | self.assertTrue((start_position != obs1.position).any()) 30 | np.testing.assert_array_equal(start_position, obs2.position) 31 | 32 | def test_step_both(self): 33 | # Create two instances and send different actions to them. 
34 | # Verify that both go towards their target. 35 | robot1 = SimFinger(finger_type="trifingerpro") 36 | robot2 = SimFinger(finger_type="trifingerpro") 37 | 38 | start_position = np.array([0.0, 0.7, -1.5] * 3) 39 | 40 | robot1.reset_finger_positions_and_velocities(start_position) 41 | robot2.reset_finger_positions_and_velocities(start_position) 42 | 43 | action1 = robot1.Action(position=[0.5, 0.7, -1.5] * 3) 44 | action2 = robot2.Action(position=[-0.1, 0.7, -1.5] * 3) 45 | 46 | for i in range(1000): 47 | t1 = robot1.append_desired_action(action1) 48 | t2 = robot2.append_desired_action(action2) 49 | obs1 = robot1.get_observation(t1) 50 | obs2 = robot2.get_observation(t2) 51 | 52 | if i > 1: 53 | self.assertTrue((obs2.position != obs1.position).any()) 54 | 55 | self.assertLess(np.linalg.norm(action1.position - obs1.position), 0.1) 56 | self.assertLess(np.linalg.norm(action2.position - obs2.position), 0.1) 57 | 58 | 59 | if __name__ == "__main__": 60 | unittest.main() 61 | -------------------------------------------------------------------------------- /tests/test_reset_joints.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import unittest 3 | import numpy as np 4 | 5 | from trifinger_simulation.sim_finger import SimFinger 6 | from trifinger_simulation import sample 7 | 8 | 9 | class TestResetJoints(unittest.TestCase): 10 | """ 11 | This test verifies that the state of the finger(s) gets reset correctly. 12 | 13 | That is, after the reset all the 1-DOF joints of the finger(s) should be 14 | at *exactly* the positions and have *exactly* the velocities that were 15 | requested. 16 | """ 17 | 18 | def test_reproduce_reset_state(self): 19 | """ 20 | Send a hundred states (positions + velocities) to all the 1-DOF joints 21 | of the finger and assert they exactly reach these states. 22 | """ 23 | finger = SimFinger( 24 | finger_type="fingerone", 25 | ) 26 | 27 | for _ in range(100): 28 | state_positions = sample.random_joint_positions( 29 | finger.number_of_fingers 30 | ) 31 | state_velocities = [pos * 10 for pos in state_positions] 32 | 33 | reset_state = finger.reset_finger_positions_and_velocities( 34 | state_positions, state_velocities 35 | ) 36 | 37 | reset_positions = reset_state.position 38 | reset_velocities = reset_state.velocity 39 | 40 | np.testing.assert_array_equal( 41 | reset_positions, state_positions, verbose=True 42 | ) 43 | np.testing.assert_array_equal( 44 | reset_velocities, state_velocities, verbose=True 45 | ) 46 | 47 | 48 | if __name__ == "__main__": 49 | unittest.main() 50 | -------------------------------------------------------------------------------- /tests/test_sample.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import unittest 3 | from numpy.testing import assert_array_compare 4 | import operator 5 | 6 | from trifinger_simulation import sample 7 | 8 | 9 | def assert_array_less_equal(x, y, err_msg="", verbose=True): 10 | """Assert array x <= y. 
Based on numpy.testing.assert_array_less.""" 11 | assert_array_compare( 12 | operator.le, 13 | x, 14 | y, 15 | err_msg=err_msg, 16 | verbose=verbose, 17 | header="Arrays are not less-ordered", 18 | equal_inf=False, 19 | ) 20 | 21 | 22 | class TestSample(unittest.TestCase): 23 | """Test the functions of the sample module.""" 24 | 25 | def test_random_joint_positions(self): 26 | # choose bounds such that the ranges of the three joints are 27 | # non-overlapping 28 | lower_bounds = [-2, 0, 3] 29 | upper_bounds = [-1, 2, 5] 30 | 31 | # one finger 32 | for i in range(100): 33 | result = sample.random_joint_positions( 34 | 1, lower_bounds, upper_bounds 35 | ) 36 | assert_array_less_equal(lower_bounds, result) 37 | assert_array_less_equal(result, upper_bounds) 38 | 39 | # three fingers 40 | for i in range(100): 41 | result = sample.random_joint_positions( 42 | 3, lower_bounds, upper_bounds 43 | ) 44 | assert_array_less_equal(lower_bounds * 3, result) 45 | assert_array_less_equal(result, upper_bounds * 3) 46 | 47 | 48 | if __name__ == "__main__": 49 | unittest.main() 50 | -------------------------------------------------------------------------------- /tests/test_sim_finger.py: -------------------------------------------------------------------------------- 1 | from trifinger_simulation.sim_finger import int_to_rgba, SimFinger 2 | 3 | import numpy as np 4 | 5 | 6 | def test_int_to_rgba(): 7 | assert int_to_rgba(0x000000) == (0.0, 0.0, 0.0, 1.0) 8 | assert int_to_rgba(0xFFFFFF) == (1.0, 1.0, 1.0, 1.0) 9 | assert int_to_rgba(0x006C66) == (0, 108 / 255, 102 / 255, 1.0) 10 | 11 | assert int_to_rgba(0x006C66, alpha=42) == ( 12 | 0, 13 | 108 / 255, 14 | 102 / 255, 15 | 42 / 255, 16 | ) 17 | 18 | 19 | def test_SimFinger_sanitise_torques(): 20 | max_torque = 0.5 21 | 22 | # without velocity damping 23 | t = SimFinger._sanitise_torques( 24 | [-0.2, 0.4], max_torque, 0, np.array([0, 0]) 25 | ) 26 | np.testing.assert_array_equal(t, [-0.2, 0.4]) 27 | 28 | t = SimFinger._sanitise_torques( 29 | [-1.2, 0.4], max_torque, 0, np.array([0, 0]) 30 | ) 31 | np.testing.assert_array_equal(t, [-0.5, 0.4]) 32 | 33 | t = SimFinger._sanitise_torques( 34 | [-0.2, 0.6], max_torque, 0, np.array([0, 0]) 35 | ) 36 | np.testing.assert_array_equal(t, [-0.2, 0.5]) 37 | 38 | t = SimFinger._sanitise_torques( 39 | [-0.8, 0.6], max_torque, 0, np.array([0, 0]) 40 | ) 41 | np.testing.assert_array_equal(t, [-0.5, 0.5]) 42 | 43 | # with velocity damping 44 | t = SimFinger._sanitise_torques( 45 | [0, 0], max_torque, 0.2, np.array([1, -0.5]) 46 | ) 47 | np.testing.assert_array_almost_equal(t, [-0.2, 0.1]) 48 | 49 | t = SimFinger._sanitise_torques([0, 0], max_torque, 1, np.array([1, -0.6])) 50 | np.testing.assert_array_almost_equal(t, [-0.5, 0.5]) 51 | 52 | t = SimFinger._sanitise_torques( 53 | [0.1, 0.2], max_torque, 0.2, np.array([1, -0.5]) 54 | ) 55 | np.testing.assert_array_almost_equal(t, [-0.1, 0.3]) 56 | 57 | t = SimFinger._sanitise_torques( 58 | [1, -1], max_torque, 0.2, np.array([1, -0.5]) 59 | ) 60 | np.testing.assert_array_almost_equal(t, [0.3, -0.4]) 61 | -------------------------------------------------------------------------------- /tests/test_tasks_move_cube_on_trajectory/bad_goal.json: -------------------------------------------------------------------------------- 1 | { 2 | "the_goal_below_has_bad_format": 42, 3 | "goal": [ 4 | [0, 0, 0.05], 5 | [0.1, 0, 0.1] 6 | ] 7 | } 8 | -------------------------------------------------------------------------------- /tests/test_tasks_move_cube_on_trajectory/good_goal.json:
-------------------------------------------------------------------------------- 1 | { 2 | "goal": [ 3 | [0, [0, 0, 0.05]], 4 | [100, [0.1, 0, 0.1]], 5 | [200, [0.02, 0.02, 0.05]], 6 | [300, [0.04, 0.01, 0.07]] 7 | ] 8 | } 9 | -------------------------------------------------------------------------------- /tests/test_tasks_move_cube_on_trajectory/no_goal.json: -------------------------------------------------------------------------------- 1 | { 2 | "no_goal_here": 42 3 | } 4 | -------------------------------------------------------------------------------- /tests/test_tasks_rearrange_dice/bad_goal.json: -------------------------------------------------------------------------------- 1 | { 2 | "goal_has_not_enough_positions": 42, 3 | "goal": [ 4 | [0.132, 0.065, 0.011], 5 | [0.0, -0.088, 0.011], 6 | [0.022, 0.0, 0.011], 7 | [0.065, 0.132, 0.011], 8 | [-0.066, 0.154, 0.011], 9 | [0.065, 0.109, 0.011], 10 | [-0.066, 0.044, 0.011] 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /tests/test_tasks_rearrange_dice/good_goal.json: -------------------------------------------------------------------------------- 1 | { 2 | "goal": [ 3 | [0.132, 0.065, 0.011], 4 | [0.0, -0.088, 0.011], 5 | [0.022, 0.0, 0.011], 6 | [0.065, 0.132, 0.011], 7 | [-0.066, 0.154, 0.011], 8 | [-0.11, -0.11, 0.011], 9 | [0.109, -0.131, 0.011], 10 | [-0.11, -0.044, 0.011], 11 | [0.022, -0.066, 0.011], 12 | [0.044, -0.131, 0.011], 13 | [-0.066, 0.132, 0.011], 14 | [0.022, -0.131, 0.011], 15 | [0.065, 0.088, 0.011], 16 | [0.176, -0.0222, 0.011], 17 | [0.065, 0.0, 0.011], 18 | [-0.131, -0.0220, 0.011], 19 | [-0.153, 0.044, 0.011], 20 | [-0.044, 0.022, 0.011], 21 | [0.044, 0.0, 0.011], 22 | [0.109, 0.0, 0.011], 23 | [-0.153, -0.0222, 0.011], 24 | [-0.11, 0.044, 0.011], 25 | [0.132, -0.11, 0.011], 26 | [0.065, 0.109, 0.011], 27 | [-0.066, 0.044, 0.011] 28 | ] 29 | } 30 | -------------------------------------------------------------------------------- /tests/test_tasks_rearrange_dice/no_goal.json: -------------------------------------------------------------------------------- 1 | { 2 | "there_is_now_goal": 42 3 | } 4 | -------------------------------------------------------------------------------- /trifinger_simulation/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "2.0.0" 2 | 3 | import pathlib 4 | 5 | # import some important classes to the main module 6 | from .action import Action # noqa 7 | from .observation import Observation # noqa 8 | from .sim_finger import SimFinger # noqa 9 | from .trifinger_platform import ( # noqa 10 | TriFingerPlatform, 11 | ObjectPose, 12 | CameraObservation, 13 | TriCameraObservation, 14 | TriCameraObjectObservation, 15 | ) 16 | 17 | 18 | def get_data_dir() -> pathlib.Path: 19 | """Get path to the data directory of this package.""" 20 | p = pathlib.Path(__file__) 21 | return p.parent / "data" 22 | -------------------------------------------------------------------------------- /trifinger_simulation/action.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import typing 3 | 4 | import numpy as np 5 | import numpy.typing as npt 6 | 7 | 8 | class Action: 9 | """Robot action.""" 10 | 11 | #: Torque commands for the joints. 12 | torque: np.ndarray 13 | #: Position commands for the joints. Set to NaN to disable position control 14 | #: for the corresponding joint. 
15 | position: np.ndarray 16 | #: P-gain for position controller. Set to NaN to use default gain for the 17 | #: corresponding joint. 18 | position_kp: np.ndarray 19 | #: D-gain for position controller. Set to NaN to use default gain for the 20 | #: corresponding joint. 21 | position_kd: np.ndarray 22 | 23 | def __init__( 24 | self, 25 | torque: npt.ArrayLike, 26 | position: npt.ArrayLike, 27 | kp: typing.Optional[npt.ArrayLike] = None, 28 | kd: typing.Optional[npt.ArrayLike] = None, 29 | ): 30 | """ 31 | All parameters are expected to be of same length *N*, where *N* is the 32 | number of joints of the robot (e.g. 9 for TriFinger robots). 33 | 34 | Args: 35 | torque: See :attr:`torque`. 36 | position: See :attr:`position`. 37 | kp: See :attr:`position_kp`. If not set, default gains are used 38 | for all joints. 39 | kd: See :attr:`position_kd`. If not set, default gains are used 40 | for all joints. 41 | """ 42 | self.torque = np.asarray(torque) 43 | self.position = np.asarray(position) 44 | 45 | if kp is None: 46 | self.position_kp = np.full_like(position, np.nan, dtype=float) 47 | else: 48 | self.position_kp = np.asarray(kp) 49 | 50 | if kd is None: 51 | self.position_kd = np.full_like(position, np.nan, dtype=float) 52 | else: 53 | self.position_kd = np.asarray(kd) 54 | -------------------------------------------------------------------------------- /trifinger_simulation/data/camera_params/camera180_cropped.yml: -------------------------------------------------------------------------------- 1 | camera_matrix: 2 | cols: 3 3 | data: 4 | - 585.0643587477704 5 | - 0.0 6 | - 281.13509021059457 7 | - 0.0 8 | - 587.965384914249 9 | - 278.41510917005746 10 | - 0.0 11 | - 0.0 12 | - 1.0 13 | rows: 3 14 | camera_name: camera180 15 | distortion_coefficients: 16 | cols: 5 17 | data: 18 | - -0.23738566123860577 19 | - 0.14668759366047324 20 | - -0.0033595184065924786 21 | - 0.0002152185045991212 22 | - -0.13564647032376007 23 | rows: 1 24 | image_height: 540 25 | image_width: 540 26 | tf_world_to_camera: 27 | cols: 4 28 | data: 29 | - 0.9999898148320592 30 | - 0.0011975322534177515 31 | - 0.0009296176718924966 32 | - 0.0015825167118240365 33 | - 0.0015046826000956175 34 | - -0.8603974609595656 35 | - -0.5096079454902745 36 | - 0.005907939842208572 37 | - 0.00019394945000544882 38 | - 0.5096062743319824 39 | - -0.8603966378529374 40 | - 0.5336501221993086 41 | - 0.0 42 | - 0.0 43 | - 0.0 44 | - 1.0 45 | rows: 4 46 | tf_world_to_camera_std: 47 | cols: 4 48 | data: 49 | - 7.85638794234545e-06 50 | - 0.002983154382001687 51 | - 0.003028644555038531 52 | - 0.00043401200407849655 53 | - 0.0025220714628423576 54 | - 0.0013792788872261296 55 | - 0.0023288931006661792 56 | - 0.0005050858117703131 57 | - 0.0034216428571110864 58 | - 0.0023277955226372977 59 | - 0.001380885444825941 60 | - 0.000272740413680309 61 | - 0.0 62 | - 0.0 63 | - 0.0 64 | - 0.0 65 | rows: 4 66 | -------------------------------------------------------------------------------- /trifinger_simulation/data/camera_params/camera180_cropped_and_downsampled.yml: -------------------------------------------------------------------------------- 1 | camera_matrix: 2 | cols: 3 3 | data: 4 | - 292.5321793738852 5 | - 0.0 6 | - 140.56754510529728 7 | - 0.0 8 | - 293.9826924571245 9 | - 139.20755458502873 10 | - 0.0 11 | - 0.0 12 | - 1.0 13 | rows: 3 14 | camera_name: camera180 15 | distortion_coefficients: 16 | cols: 5 17 | data: 18 | - -0.23738566123860577 19 | - 0.14668759366047324 20 | - -0.0033595184065924786 21 | - 0.0002152185045991212 22 | - 
-0.13564647032376007 23 | rows: 1 24 | image_height: 270 25 | image_width: 270 26 | tf_world_to_camera: 27 | cols: 4 28 | data: 29 | - 0.9999898148320592 30 | - 0.0011975322534177515 31 | - 0.0009296176718924966 32 | - 0.0015825167118240365 33 | - 0.0015046826000956175 34 | - -0.8603974609595656 35 | - -0.5096079454902745 36 | - 0.005907939842208572 37 | - 0.00019394945000544882 38 | - 0.5096062743319824 39 | - -0.8603966378529374 40 | - 0.5336501221993086 41 | - 0.0 42 | - 0.0 43 | - 0.0 44 | - 1.0 45 | rows: 4 46 | tf_world_to_camera_std: 47 | cols: 4 48 | data: 49 | - 7.85638794234545e-06 50 | - 0.002983154382001687 51 | - 0.003028644555038531 52 | - 0.00043401200407849655 53 | - 0.0025220714628423576 54 | - 0.0013792788872261296 55 | - 0.0023288931006661792 56 | - 0.0005050858117703131 57 | - 0.0034216428571110864 58 | - 0.0023277955226372977 59 | - 0.001380885444825941 60 | - 0.000272740413680309 61 | - 0.0 62 | - 0.0 63 | - 0.0 64 | - 0.0 65 | rows: 4 66 | -------------------------------------------------------------------------------- /trifinger_simulation/data/camera_params/camera180_full.yml: -------------------------------------------------------------------------------- 1 | camera_matrix: 2 | cols: 3 3 | data: 4 | - 585.0643587477704 5 | - 0.0 6 | - 369.13509021059457 7 | - 0.0 8 | - 587.965384914249 9 | - 278.41510917005746 10 | - 0.0 11 | - 0.0 12 | - 1.0 13 | rows: 3 14 | camera_name: camera180 15 | distortion_coefficients: 16 | cols: 5 17 | data: 18 | - -0.23738566123860577 19 | - 0.14668759366047324 20 | - -0.0033595184065924786 21 | - 0.0002152185045991212 22 | - -0.13564647032376007 23 | rows: 1 24 | image_height: 540 25 | image_width: 720 26 | tf_world_to_camera: 27 | cols: 4 28 | data: 29 | - 0.9999898148320592 30 | - 0.0011975322534177515 31 | - 0.0009296176718924966 32 | - 0.0015825167118240365 33 | - 0.0015046826000956175 34 | - -0.8603974609595656 35 | - -0.5096079454902745 36 | - 0.005907939842208572 37 | - 0.00019394945000544882 38 | - 0.5096062743319824 39 | - -0.8603966378529374 40 | - 0.5336501221993086 41 | - 0.0 42 | - 0.0 43 | - 0.0 44 | - 1.0 45 | rows: 4 46 | tf_world_to_camera_std: 47 | cols: 4 48 | data: 49 | - 7.85638794234545e-06 50 | - 0.002983154382001687 51 | - 0.003028644555038531 52 | - 0.00043401200407849655 53 | - 0.0025220714628423576 54 | - 0.0013792788872261296 55 | - 0.0023288931006661792 56 | - 0.0005050858117703131 57 | - 0.0034216428571110864 58 | - 0.0023277955226372977 59 | - 0.001380885444825941 60 | - 0.000272740413680309 61 | - 0.0 62 | - 0.0 63 | - 0.0 64 | - 0.0 65 | rows: 4 66 | -------------------------------------------------------------------------------- /trifinger_simulation/data/camera_params/camera300_cropped.yml: -------------------------------------------------------------------------------- 1 | camera_matrix: 2 | cols: 3 3 | data: 4 | - 586.4371656440662 5 | - 0.0 6 | - 288.0799627035372 7 | - 0.0 8 | - 590.8073307672728 9 | - 284.1827367615578 10 | - 0.0 11 | - 0.0 12 | - 1.0 13 | rows: 3 14 | camera_name: camera300 15 | distortion_coefficients: 16 | cols: 5 17 | data: 18 | - -0.23391341982227962 19 | - 0.09394432535010212 20 | - -0.002787865901636738 21 | - -7.252943212413023e-05 22 | - -0.017420413881185703 23 | rows: 1 24 | image_height: 540 25 | image_width: 540 26 | tf_world_to_camera: 27 | cols: 4 28 | data: 29 | - -0.7133771728609642 30 | - -0.7006191670792009 31 | - -0.01456422308851129 32 | - 0.000684373876406266 33 | - -0.5196240895754424 34 | - 0.5428037431338101 35 | - -0.6598003871028696 36 | - 
-0.007554524367671664 37 | - 0.4701789145919274 38 | - -0.463122803047164 39 | - -0.751291777787446 40 | - 0.5244100800677238 41 | - 0.0 42 | - 0.0 43 | - 0.0 44 | - 1.0 45 | rows: 4 46 | tf_world_to_camera_std: 47 | cols: 4 48 | data: 49 | - 0.0020759651842660117 50 | - 0.0021444024623455534 51 | - 0.0021834089415969857 52 | - 0.0005068714917011395 53 | - 0.0026703132755999 54 | - 0.002673806193417258 55 | - 0.0020177559741561607 56 | - 0.0003790199429267026 57 | - 0.0020403045144949472 58 | - 0.001549437778265158 59 | - 0.0017772450402005176 60 | - 0.0003322102351354774 61 | - 0.0 62 | - 0.0 63 | - 0.0 64 | - 0.0 65 | rows: 4 66 | -------------------------------------------------------------------------------- /trifinger_simulation/data/camera_params/camera300_cropped_and_downsampled.yml: -------------------------------------------------------------------------------- 1 | camera_matrix: 2 | cols: 3 3 | data: 4 | - 293.2185828220331 5 | - 0.0 6 | - 144.0399813517686 7 | - 0.0 8 | - 295.4036653836364 9 | - 142.0913683807789 10 | - 0.0 11 | - 0.0 12 | - 1.0 13 | rows: 3 14 | camera_name: camera300 15 | distortion_coefficients: 16 | cols: 5 17 | data: 18 | - -0.23391341982227962 19 | - 0.09394432535010212 20 | - -0.002787865901636738 21 | - -7.252943212413023e-05 22 | - -0.017420413881185703 23 | rows: 1 24 | image_height: 270 25 | image_width: 270 26 | tf_world_to_camera: 27 | cols: 4 28 | data: 29 | - -0.7133771728609642 30 | - -0.7006191670792009 31 | - -0.01456422308851129 32 | - 0.000684373876406266 33 | - -0.5196240895754424 34 | - 0.5428037431338101 35 | - -0.6598003871028696 36 | - -0.007554524367671664 37 | - 0.4701789145919274 38 | - -0.463122803047164 39 | - -0.751291777787446 40 | - 0.5244100800677238 41 | - 0.0 42 | - 0.0 43 | - 0.0 44 | - 1.0 45 | rows: 4 46 | tf_world_to_camera_std: 47 | cols: 4 48 | data: 49 | - 0.0020759651842660117 50 | - 0.0021444024623455534 51 | - 0.0021834089415969857 52 | - 0.0005068714917011395 53 | - 0.0026703132755999 54 | - 0.002673806193417258 55 | - 0.0020177559741561607 56 | - 0.0003790199429267026 57 | - 0.0020403045144949472 58 | - 0.001549437778265158 59 | - 0.0017772450402005176 60 | - 0.0003322102351354774 61 | - 0.0 62 | - 0.0 63 | - 0.0 64 | - 0.0 65 | rows: 4 66 | -------------------------------------------------------------------------------- /trifinger_simulation/data/camera_params/camera300_full.yml: -------------------------------------------------------------------------------- 1 | camera_matrix: 2 | cols: 3 3 | data: 4 | - 586.4371656440662 5 | - 0.0 6 | - 376.0799627035372 7 | - 0.0 8 | - 590.8073307672728 9 | - 284.1827367615578 10 | - 0.0 11 | - 0.0 12 | - 1.0 13 | rows: 3 14 | camera_name: camera300 15 | distortion_coefficients: 16 | cols: 5 17 | data: 18 | - -0.23391341982227962 19 | - 0.09394432535010212 20 | - -0.002787865901636738 21 | - -7.252943212413023e-05 22 | - -0.017420413881185703 23 | rows: 1 24 | image_height: 540 25 | image_width: 720 26 | tf_world_to_camera: 27 | cols: 4 28 | data: 29 | - -0.7133771728609642 30 | - -0.7006191670792009 31 | - -0.01456422308851129 32 | - 0.000684373876406266 33 | - -0.5196240895754424 34 | - 0.5428037431338101 35 | - -0.6598003871028696 36 | - -0.007554524367671664 37 | - 0.4701789145919274 38 | - -0.463122803047164 39 | - -0.751291777787446 40 | - 0.5244100800677238 41 | - 0.0 42 | - 0.0 43 | - 0.0 44 | - 1.0 45 | rows: 4 46 | tf_world_to_camera_std: 47 | cols: 4 48 | data: 49 | - 0.0020759651842660117 50 | - 0.0021444024623455534 51 | - 0.0021834089415969857 52 | - 
0.0005068714917011395 53 | - 0.0026703132755999 54 | - 0.002673806193417258 55 | - 0.0020177559741561607 56 | - 0.0003790199429267026 57 | - 0.0020403045144949472 58 | - 0.001549437778265158 59 | - 0.0017772450402005176 60 | - 0.0003322102351354774 61 | - 0.0 62 | - 0.0 63 | - 0.0 64 | - 0.0 65 | rows: 4 66 | -------------------------------------------------------------------------------- /trifinger_simulation/data/camera_params/camera60_cropped.yml: -------------------------------------------------------------------------------- 1 | camera_matrix: 2 | cols: 3 3 | data: 4 | - 587.2983721486902 5 | - 0.0 6 | - 291.2857905622954 7 | - 0.0 8 | - 591.5395266821638 9 | - 277.45556498689365 10 | - 0.0 11 | - 0.0 12 | - 1.0 13 | rows: 3 14 | camera_name: camera60 15 | distortion_coefficients: 16 | cols: 5 17 | data: 18 | - -0.23804660749779824 19 | - 0.10825771776560758 20 | - -0.0024551017209611128 21 | - 4.8257038130254314e-05 22 | - -0.02722946246691342 23 | rows: 1 24 | image_height: 540 25 | image_width: 540 26 | tf_world_to_camera: 27 | cols: 4 28 | data: 29 | - -0.7019134208048192 30 | - 0.7122353932236418 31 | - -0.004805374269147043 32 | - 0.002252555442149284 33 | - 0.5344638657119378 34 | - 0.5222316464208596 35 | - -0.6645386970157846 36 | - -0.0032747731308788697 37 | - -0.47079826418692317 38 | - -0.46902151242952833 39 | - -0.7472317485480533 40 | - 0.5238875639497548 41 | - 0.0 42 | - 0.0 43 | - 0.0 44 | - 1.0 45 | rows: 4 46 | tf_world_to_camera_std: 47 | cols: 4 48 | data: 49 | - 0.0021790353060527645 50 | - 0.0021481513684787814 51 | - 0.0024166014581399066 52 | - 0.0006146887379140977 53 | - 0.0025393011564917435 54 | - 0.001435794780360425 55 | - 0.00151470669061762 56 | - 0.0003712483847988384 57 | - 0.0019298019794210424 58 | - 0.0026451717311933042 59 | - 0.0013446968243762806 60 | - 0.00034964603848654035 61 | - 0.0 62 | - 0.0 63 | - 0.0 64 | - 0.0 65 | rows: 4 66 | -------------------------------------------------------------------------------- /trifinger_simulation/data/camera_params/camera60_cropped_and_downsampled.yml: -------------------------------------------------------------------------------- 1 | camera_matrix: 2 | cols: 3 3 | data: 4 | - 293.6491860743451 5 | - 0.0 6 | - 145.6428952811477 7 | - 0.0 8 | - 295.7697633410819 9 | - 138.72778249344682 10 | - 0.0 11 | - 0.0 12 | - 1.0 13 | rows: 3 14 | camera_name: camera60 15 | distortion_coefficients: 16 | cols: 5 17 | data: 18 | - -0.23804660749779824 19 | - 0.10825771776560758 20 | - -0.0024551017209611128 21 | - 4.8257038130254314e-05 22 | - -0.02722946246691342 23 | rows: 1 24 | image_height: 270 25 | image_width: 270 26 | tf_world_to_camera: 27 | cols: 4 28 | data: 29 | - -0.7019134208048192 30 | - 0.7122353932236418 31 | - -0.004805374269147043 32 | - 0.002252555442149284 33 | - 0.5344638657119378 34 | - 0.5222316464208596 35 | - -0.6645386970157846 36 | - -0.0032747731308788697 37 | - -0.47079826418692317 38 | - -0.46902151242952833 39 | - -0.7472317485480533 40 | - 0.5238875639497548 41 | - 0.0 42 | - 0.0 43 | - 0.0 44 | - 1.0 45 | rows: 4 46 | tf_world_to_camera_std: 47 | cols: 4 48 | data: 49 | - 0.0021790353060527645 50 | - 0.0021481513684787814 51 | - 0.0024166014581399066 52 | - 0.0006146887379140977 53 | - 0.0025393011564917435 54 | - 0.001435794780360425 55 | - 0.00151470669061762 56 | - 0.0003712483847988384 57 | - 0.0019298019794210424 58 | - 0.0026451717311933042 59 | - 0.0013446968243762806 60 | - 0.00034964603848654035 61 | - 0.0 62 | - 0.0 63 | - 0.0 64 | - 0.0 65 | rows: 4 66 | 
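All of these camera parameter files share the same layout: each matrix is stored as a flat, row-major "data" list with explicit "rows"/"cols" fields, alongside the image size and the camera name. For illustration only, a minimal sketch of parsing such a file into NumPy arrays (assuming PyYAML and NumPy are available; the helper name load_camera_params is hypothetical and not part of the package API):

import numpy as np
import yaml


def load_camera_params(path):
    """Hypothetical helper: parse one of the camera*.yml files above."""
    with open(path) as f:
        data = yaml.safe_load(f)

    def as_matrix(entry):
        # matrix data is stored flat in row-major order
        return np.asarray(entry["data"]).reshape(entry["rows"], entry["cols"])

    return {
        "camera_matrix": as_matrix(data["camera_matrix"]),  # 3x3 intrinsics
        "dist_coeffs": as_matrix(data["distortion_coefficients"]),  # 1x5
        "tf_world_to_camera": as_matrix(data["tf_world_to_camera"]),  # 4x4
        "image_size": (data["image_width"], data["image_height"]),
    }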
-------------------------------------------------------------------------------- /trifinger_simulation/data/camera_params/camera60_full.yml: -------------------------------------------------------------------------------- 1 | camera_matrix: 2 | cols: 3 3 | data: 4 | - 587.2983721486902 5 | - 0.0 6 | - 379.2857905622954 7 | - 0.0 8 | - 591.5395266821638 9 | - 277.45556498689365 10 | - 0.0 11 | - 0.0 12 | - 1.0 13 | rows: 3 14 | camera_name: camera60 15 | distortion_coefficients: 16 | cols: 5 17 | data: 18 | - -0.23804660749779824 19 | - 0.10825771776560758 20 | - -0.0024551017209611128 21 | - 4.8257038130254314e-05 22 | - -0.02722946246691342 23 | rows: 1 24 | image_height: 540 25 | image_width: 720 26 | tf_world_to_camera: 27 | cols: 4 28 | data: 29 | - -0.7019134208048192 30 | - 0.7122353932236418 31 | - -0.004805374269147043 32 | - 0.002252555442149284 33 | - 0.5344638657119378 34 | - 0.5222316464208596 35 | - -0.6645386970157846 36 | - -0.0032747731308788697 37 | - -0.47079826418692317 38 | - -0.46902151242952833 39 | - -0.7472317485480533 40 | - 0.5238875639497548 41 | - 0.0 42 | - 0.0 43 | - 0.0 44 | - 1.0 45 | rows: 4 46 | tf_world_to_camera_std: 47 | cols: 4 48 | data: 49 | - 0.0021790353060527645 50 | - 0.0021481513684787814 51 | - 0.0024166014581399066 52 | - 0.0006146887379140977 53 | - 0.0025393011564917435 54 | - 0.001435794780360425 55 | - 0.00151470669061762 56 | - 0.0003712483847988384 57 | - 0.0019298019794210424 58 | - 0.0026451717311933042 59 | - 0.0013446968243762806 60 | - 0.00034964603848654035 61 | - 0.0 62 | - 0.0 63 | - 0.0 64 | - 0.0 65 | rows: 4 66 | -------------------------------------------------------------------------------- /trifinger_simulation/data/cube_v2/cube_v2.blend: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/trifinger_simulation/data/cube_v2/cube_v2.blend -------------------------------------------------------------------------------- /trifinger_simulation/data/cube_v2/cube_v2.mtl: -------------------------------------------------------------------------------- 1 | # Blender MTL File: 'cube_v2.blend' 2 | # Material Count: 1 3 | 4 | newmtl Material 5 | Ns 323.999994 6 | Ka 1.000000 1.000000 1.000000 7 | Kd 0.800000 0.800000 0.800000 8 | Ks 0.500000 0.500000 0.500000 9 | Ke 0.000000 0.000000 0.000000 10 | Ni 1.450000 11 | d 1.000000 12 | illum 2 13 | map_Kd cube_v2_texture.png 14 | -------------------------------------------------------------------------------- /trifinger_simulation/data/cube_v2/cube_v2.obj: -------------------------------------------------------------------------------- 1 | # Blender v2.92.0 OBJ File: 'cube_v2.blend' 2 | # www.blender.org 3 | mtllib cube_v2.mtl 4 | o Cube 5 | v 0.032500 0.032500 0.032500 6 | v 0.032500 0.032500 -0.032500 7 | v 0.032500 -0.032500 0.032500 8 | v 0.032500 -0.032500 -0.032500 9 | v -0.032500 0.032500 0.032500 10 | v -0.032500 0.032500 -0.032500 11 | v -0.032500 -0.032500 0.032500 12 | v -0.032500 -0.032500 -0.032500 13 | vt 0.666633 0.749950 14 | vt 0.666633 0.999900 15 | vt 0.333366 0.999900 16 | vt 0.333366 0.749950 17 | vt 0.333366 0.500000 18 | vt 0.000100 0.749950 19 | vt 0.000100 0.500000 20 | vt 0.333366 0.250050 21 | vt 0.333366 0.000100 22 | vt 0.666633 0.000100 23 | vt 0.666633 0.250050 24 | vt 0.666633 0.500000 25 | vt 0.999900 0.500000 26 | vt 0.999900 0.749950 27 | vn 0.0000 0.0000 1.0000 28 | vn 0.0000 -1.0000 0.0000 29 | vn 
-1.0000 0.0000 0.0000 30 | vn 0.0000 0.0000 -1.0000 31 | vn 1.0000 0.0000 0.0000 32 | vn 0.0000 1.0000 0.0000 33 | usemtl Material 34 | s off 35 | f 1/1/1 5/2/1 7/3/1 3/4/1 36 | f 4/5/2 3/4/2 7/6/2 8/7/2 37 | f 8/8/3 7/9/3 5/10/3 6/11/3 38 | f 6/11/4 2/12/4 4/5/4 8/8/4 39 | f 2/12/5 1/1/5 3/4/5 4/5/5 40 | f 6/13/6 5/14/6 1/1/6 2/12/6 41 | -------------------------------------------------------------------------------- /trifinger_simulation/data/cube_v2/cube_v2.urdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /trifinger_simulation/data/cube_v2/cube_v2_texture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/trifinger_simulation/data/cube_v2/cube_v2_texture.png -------------------------------------------------------------------------------- /trifinger_simulation/finger_types_data.py: -------------------------------------------------------------------------------- 1 | """Get model information for the different Finger types.""" 2 | 3 | import typing 4 | 5 | 6 | class FingerTypesDataFormat(typing.NamedTuple): 7 | """Describes the format for the finger type data, 8 | comprising of the corresponding urdf, and the 9 | number of fingers. 10 | """ 11 | 12 | #: Path to the URDF file (relative to the URDF base directory) 13 | urdf_file: str 14 | 15 | #: Number of fingers the robot has. 16 | number_of_fingers: int 17 | 18 | #: Initial joint positions. 19 | initial_joint_positions: typing.Sequence[float] 20 | 21 | 22 | finger_types_data = { 23 | "fingerone": FingerTypesDataFormat( 24 | urdf_file="finger.urdf", 25 | number_of_fingers=1, 26 | initial_joint_positions=[0, 0, 0], 27 | ), 28 | "trifingerone": FingerTypesDataFormat( 29 | urdf_file="trifinger.urdf", 30 | number_of_fingers=3, 31 | initial_joint_positions=[0, -0.9, -1.7] * 3, 32 | ), 33 | "fingeredu": FingerTypesDataFormat( 34 | urdf_file="edu/fingeredu.urdf", 35 | number_of_fingers=1, 36 | initial_joint_positions=[0, 0.9, -1.7], 37 | ), 38 | "trifingeredu": FingerTypesDataFormat( 39 | urdf_file="edu/trifingeredu.urdf", 40 | number_of_fingers=3, 41 | initial_joint_positions=[0, 0.9, -1.7] * 3, 42 | ), 43 | "fingerpro": FingerTypesDataFormat( 44 | urdf_file="pro/fingerpro.urdf", 45 | number_of_fingers=1, 46 | initial_joint_positions=[0, 0.9, -1.7], 47 | ), 48 | "trifingerpro": FingerTypesDataFormat( 49 | urdf_file="pro/trifingerpro.urdf", 50 | number_of_fingers=3, 51 | initial_joint_positions=[0, 0.9, -1.7] * 3, 52 | ), 53 | } 54 | 55 | 56 | def get_valid_finger_types() -> typing.KeysView[str]: 57 | """ 58 | Get list of supported finger types. 59 | 60 | Returns: 61 | List of supported finger types. 62 | """ 63 | return finger_types_data.keys() 64 | 65 | 66 | def check_finger_type(name: str) -> str: 67 | """ 68 | Check if *name* is a valid finger type. 69 | 70 | Args: 71 | name: Name of the finger type. 72 | 73 | Returns: 74 | The name if it is valid. 75 | 76 | Raises: 77 | ValueError: If *name* is not a valid finger type. 78 | """ 79 | if name not in finger_types_data.keys(): 80 | raise ValueError( 81 | "Invalid finger type '%s'. 
Valid types are %s" 82 | % (name, finger_types_data.keys()) 83 | ) 84 | else: 85 | return name 86 | 87 | 88 | def get_finger_urdf(name: str) -> str: 89 | """ 90 | Get the name of the URDF-file with the model of the specified finger type. 91 | 92 | Args: 93 | name: Name of the finger type. 94 | 95 | Returns: 96 | The name of the URDF file in the ``robot_properties_fingers`` package. 97 | 98 | Raises: 99 | ValueError: If *name* is not a valid finger type. 100 | """ 101 | check_finger_type(name) 102 | return finger_types_data[name].urdf_file 103 | 104 | 105 | def get_number_of_fingers(name: str) -> int: 106 | """ 107 | Get the number of fingers of the specified finger type 108 | 109 | Args: 110 | name: Name of the finger type. 111 | 112 | Returns: 113 | Number of fingers. 114 | 115 | Raises: 116 | ValueError: If *name* is not a valid finger type. 117 | """ 118 | check_finger_type(name) 119 | return finger_types_data[name].number_of_fingers 120 | 121 | 122 | def get_initial_joint_positions(name: str) -> typing.Sequence[float]: 123 | """ 124 | Get initial joint positions of the specified finger type 125 | 126 | Args: 127 | name: Name of the finger type. 128 | 129 | Returns: 130 | Angular joint positions. 131 | 132 | Raises: 133 | ValueError: If *name* is not a valid finger type. 134 | """ 135 | check_finger_type(name) 136 | return finger_types_data[name].initial_joint_positions 137 | -------------------------------------------------------------------------------- /trifinger_simulation/gym_wrapper/__init__.py: -------------------------------------------------------------------------------- 1 | from gym.envs.registration import register 2 | 3 | register( 4 | id="reach-v0", 5 | entry_point="trifinger_simulation.gym_wrapper.envs.trifinger_reach:TriFingerReach", 6 | ) 7 | register( 8 | id="push-v0", 9 | entry_point="trifinger_simulation.gym_wrapper.envs.trifinger_push:TriFingerPush", 10 | ) 11 | 12 | register( 13 | id="real_robot_challenge_phase_1-v1", 14 | entry_point="trifinger_simulation.gym_wrapper.envs.cube_env:CubeEnv", 15 | ) 16 | 17 | 18 | register( 19 | id="cube_trajectory-v1", 20 | entry_point="trifinger_simulation.gym_wrapper.envs.cube_trajectory_env:CubeTrajectoryEnv", 21 | ) 22 | -------------------------------------------------------------------------------- /trifinger_simulation/gym_wrapper/data_logger.py: -------------------------------------------------------------------------------- 1 | import pickle 2 | 3 | 4 | class EpisodeData: 5 | """ 6 | The structure in which the data from each episode 7 | will be logged. 
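Holds the joint and tip goals of the episode together with the joint positions, tip positions, and timestamps recorded at each step.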
8 | """ 9 | 10 | def __init__(self, joint_goal, tip_goal): 11 | self.joint_goal = joint_goal 12 | self.tip_goal = tip_goal 13 | self.joint_positions = [] 14 | self.tip_positions = [] 15 | self.timestamps = [] 16 | 17 | def append(self, joint_pos, tip_pos, timestamp): 18 | self.joint_positions.append(joint_pos) 19 | self.tip_positions.append(tip_pos) 20 | self.timestamps.append(timestamp) 21 | 22 | 23 | class DataLogger: 24 | """ 25 | Dumps the env episodic data to a pickle file 26 | """ 27 | 28 | def __init__(self): 29 | self.episodes = [] 30 | self._curr = None 31 | 32 | def new_episode(self, joint_goal, tip_goal): 33 | if self._curr: 34 | # convert to dict for saving so loading has no dependencies 35 | self.episodes.append(self._curr.__dict__) 36 | 37 | self._curr = EpisodeData(joint_goal, tip_goal) 38 | 39 | def append(self, joint_pos, tip_pos, timestamp): 40 | self._curr.append(joint_pos, tip_pos, timestamp) 41 | 42 | def store(self, filename): 43 | with open(filename, "wb") as file_handle: 44 | pickle.dump(self.episodes, file_handle) 45 | -------------------------------------------------------------------------------- /trifinger_simulation/gym_wrapper/envs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/trifinger_simulation/gym_wrapper/envs/__init__.py -------------------------------------------------------------------------------- /trifinger_simulation/gym_wrapper/finger_spaces.py: -------------------------------------------------------------------------------- 1 | import math 2 | import numpy as np 3 | 4 | from gym import spaces 5 | 6 | 7 | class FingerSpaces: 8 | """ 9 | Sets up the observation and action spaces for a finger env depending 10 | on the observations used. 11 | 12 | Args: 13 | num_fingers (int): The number of fingers, 1 or 3. 14 | observations_keys (list of strings): The keys corresponding to the 15 | observations used in the env 16 | observations_sizes (list of ints): The sizes of each of the keys 17 | in observations_keys in the same order. 18 | separate_goals (bool): Whether the 3 fingers get the same goal 19 | or separate goals. 20 | """ 21 | 22 | def __init__( 23 | self, 24 | num_fingers, 25 | observations_keys, 26 | observations_sizes, 27 | separate_goals, 28 | ): 29 | self.num_fingers = num_fingers 30 | 31 | self.lower_bounds = {} 32 | self.upper_bounds = {} 33 | 34 | self.observations_keys = observations_keys 35 | self.observations_sizes = observations_sizes 36 | self.key_to_index = {} 37 | 38 | self.separate_goals = separate_goals 39 | 40 | assert len(self.observations_keys) == len(self.observations_sizes), ( 41 | "Specify the size for each expected observation key." 42 | "And this is not being checked, but the sizes must be" 43 | "in the same order as the keys." 
44 | ) 45 | 46 | slice_start = 0 47 | for i in range(len(self.observations_keys)): 48 | self.key_to_index[self.observations_keys[i]] = slice( 49 | slice_start, slice_start + self.observations_sizes[i] 50 | ) 51 | slice_start += self.observations_sizes[i] 52 | 53 | self.action_bounds = { 54 | "low": np.array( 55 | [-math.radians(70), -math.radians(70), -math.radians(160)] 56 | * self.num_fingers 57 | ), 58 | "high": np.array( 59 | [math.radians(70), 0, math.radians(-2)] * self.num_fingers 60 | ), 61 | } 62 | 63 | self.lower_bounds["action_joint_positions"] = self.action_bounds["low"] 64 | self.upper_bounds["action_joint_positions"] = self.action_bounds[ 65 | "high" 66 | ] 67 | 68 | self.lower_bounds["end_effector_position"] = [ 69 | -0.5, 70 | -0.5, 71 | 0.0, 72 | ] * self.num_fingers 73 | self.upper_bounds["end_effector_position"] = [ 74 | 0.5, 75 | 0.5, 76 | 0.5, 77 | ] * self.num_fingers 78 | 79 | self.lower_bounds["joint_positions"] = [ 80 | -math.radians(90), 81 | -math.radians(90), 82 | -math.radians(172), 83 | ] * self.num_fingers 84 | self.upper_bounds["joint_positions"] = [ 85 | math.radians(90), 86 | math.radians(100), 87 | math.radians(-2), 88 | ] * self.num_fingers 89 | 90 | self.lower_bounds["joint_velocities"] = [-20] * 3 * self.num_fingers 91 | self.upper_bounds["joint_velocities"] = [20] * 3 * self.num_fingers 92 | 93 | self.lower_bounds["end_effector_to_goal"] = ( 94 | [-0.5] * 3 * self.num_fingers 95 | ) 96 | self.upper_bounds["end_effector_to_goal"] = ( 97 | [0.5] * 3 * self.num_fingers 98 | ) 99 | 100 | if self.separate_goals: 101 | self.lower_bounds["goal_position"] = [-0.5] * 3 * self.num_fingers 102 | self.upper_bounds["goal_position"] = [0.5] * 3 * self.num_fingers 103 | else: 104 | self.lower_bounds["goal_position"] = [-0.5] * 3 105 | self.upper_bounds["goal_position"] = [0.5] * 3 106 | 107 | self.lower_bounds["object_position"] = [-0.5, -0.5, 0.0] 108 | self.upper_bounds["object_position"] = [0.5, 0.5, 0.5] 109 | 110 | def get_unscaled_observation_space(self): 111 | """ 112 | Returns the unscaled observation space corresponding 113 | to the observation bounds 114 | """ 115 | observation_lower_bounds = [ 116 | value 117 | for key in self.observations_keys 118 | for value in self.lower_bounds[key] 119 | ] 120 | observation_higher_bounds = [ 121 | value 122 | for key in self.observations_keys 123 | for value in self.upper_bounds[key] 124 | ] 125 | return spaces.Box( 126 | low=np.array(observation_lower_bounds), 127 | high=np.array(observation_higher_bounds), 128 | dtype=np.float64, 129 | ) 130 | 131 | def get_unscaled_action_space(self): 132 | """ 133 | Returns the unscaled action space according to the action bounds. 134 | """ 135 | return spaces.Box( 136 | low=self.action_bounds["low"], 137 | high=self.action_bounds["high"], 138 | dtype=np.float64, 139 | ) 140 | 141 | def get_scaled_observation_space(self): 142 | """ 143 | Returns an observation space with the same size as the unscaled 144 | but bounded by -1s and 1s. 145 | """ 146 | unscaled_observation_space = self.get_unscaled_observation_space() 147 | return spaces.Box( 148 | low=-np.ones_like(unscaled_observation_space.low), 149 | high=np.ones_like(unscaled_observation_space.high), 150 | dtype=np.float64, 151 | ) 152 | 153 | def get_scaled_action_space(self): 154 | """ 155 | Returns an action space with the same size as the unscaled 156 | but bounded by -1s and 1s. 
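Values in this space can be mapped to/from the actual action bounds with the scale and unscale helpers in gym_wrapper.utils.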
157 | """ 158 | return spaces.Box( 159 | low=-np.ones(3 * self.num_fingers), 160 | high=np.ones(3 * self.num_fingers), 161 | dtype=np.float64, 162 | ) 163 | -------------------------------------------------------------------------------- /trifinger_simulation/gym_wrapper/utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import time 3 | import datetime 4 | 5 | 6 | def scale(x, space): 7 | """ 8 | Scale some input to be between the range [-1;1] from the range 9 | of the space it belongs to 10 | """ 11 | return 2.0 * (x - space.low) / (space.high - space.low) - 1.0 12 | 13 | 14 | def unscale(y, space): 15 | """ 16 | Unscale some input from [-1;1] to the range of another space 17 | """ 18 | return space.low + (y + 1.0) / 2.0 * (space.high - space.low) 19 | 20 | 21 | def compute_distance(a, b): 22 | """ 23 | Returns the Euclidean distance between two 24 | vectors (lists/arrays) 25 | """ 26 | return np.linalg.norm(np.subtract(a, b)) 27 | 28 | 29 | def sleep_until(until, accuracy=0.01): 30 | """ 31 | Sleep until the given time. 32 | 33 | Args: 34 | until (datetime.datetime): Time until the function should sleep. 35 | accuracy (float): Accuracy with which it will check if the "until" time 36 | is reached. 37 | 38 | """ 39 | while until > datetime.datetime.now(): 40 | time.sleep(accuracy) 41 | -------------------------------------------------------------------------------- /trifinger_simulation/observation.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import numpy as np 3 | 4 | 5 | # TODO use named tuple for this? 6 | class Observation: 7 | """Robot state observation.""" 8 | 9 | #: Angular joint positions in radian. Shape = (n_joints,) 10 | position: np.ndarray 11 | #: Joint velocities in rad/s. Shape = (n_joints,) 12 | velocity: np.ndarray 13 | #: Joint torques in Nm. Shape = (n_joints,) 14 | torque: np.ndarray 15 | #: Measurement of the push sensors on the finger tips. 16 | #: Shape = (n_fingers,) 17 | tip_force: np.ndarray 18 | 19 | def __init__(self): 20 | self.position = [] 21 | self.velocity = [] 22 | self.torque = [] 23 | self.tip_force = [] 24 | -------------------------------------------------------------------------------- /trifinger_simulation/pinocchio_utils.py: -------------------------------------------------------------------------------- 1 | """Wrappers around Pinocchio for easy forward and inverse kinematics.""" 2 | 3 | import warnings 4 | 5 | from robot_properties_fingers import Kinematics 6 | 7 | 8 | warnings.warn( 9 | "The pinocchio_utils module has been moved to the robot_properties_fingers package." 10 | " Please update your code accordingly. 
This version in trifinger_simulation is" 11 | " deprecated and will be removed soon!", 12 | FutureWarning, 13 | stacklevel=1, 14 | ) 15 | 16 | __all__ = ["Kinematics"] 17 | -------------------------------------------------------------------------------- /trifinger_simulation/py.typed: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/trifinger_simulation/py.typed -------------------------------------------------------------------------------- /trifinger_simulation/real_finger.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # ------------------------------------------------------------------------------------------------- 3 | # The documentation in this code is heavily derived from the official 4 | # documentation of PyBullet at 5 | # https://docs.google.com/document/d/10sXEhzFRSnvFcl3XxNGhnD4N2SedqwdAvK3dsihxVUA/edit# 6 | # among other scattered sources. 7 | # ------------------------------------------------------------------------------------------------- 8 | import os 9 | from ament_index_python.packages import get_package_share_directory 10 | 11 | import robot_interfaces 12 | import robot_fingers 13 | from trifinger_simulation import finger_types_data 14 | from trifinger_simulation.sim_finger import SimFinger 15 | 16 | 17 | class RealFinger: 18 | """ 19 | The RealFinger class provides an interface to the real robot. Any script 20 | that creates an instance of the :class:`.SimFinger` can create an instance 21 | of this class and use it in the same way. 22 | """ 23 | 24 | def __init__( 25 | self, 26 | finger_type, 27 | finger_config_suffix, 28 | enable_visualization=False, 29 | ): 30 | """ 31 | Constructor, initializes the physical world we will work in. 32 | 33 | Args: 34 | finger_type (string): Name of the finger type. In order to get 35 | a list of the valid finger types, call 36 | :meth:`.finger_types_data.get_valid_finger_types`. 37 | finger_config_suffix (int): ID of the finger that is used. Has to 38 | be one of [0, 120, 240]. This is only if a single finger is to 39 | be used on any of the robots, and is ignored otherwise. 40 | enable_visualization (bool, optional): Set to 'True' to run a GUI 41 | for visualization. This uses pyBullet but only for 42 | visualization, i.e. the state of the simulation is constantly 43 | set to match the one of the real robot. 
44 | """ 45 | # Simulation is only used for visualization, so only run it when needed 46 | self.simulator = None 47 | if enable_visualization: 48 | self.simulator = SimFinger( 49 | finger_type=finger_type, 50 | time_step=0.001, # todo: not sure if this is correct 51 | enable_visualization=True, 52 | ) 53 | 54 | number_of_fingers = finger_types_data.get_number_of_fingers( 55 | finger_type 56 | ) 57 | 58 | if number_of_fingers == 1: 59 | if finger_type == "fingerone": 60 | config_file_path = os.path.join( 61 | get_package_share_directory("robot_fingers"), 62 | "config", 63 | "finger_%s.yml" % finger_config_suffix, 64 | ) 65 | elif finger_type == "fingeredu": 66 | config_file_path = os.path.join( 67 | get_package_share_directory("robot_fingers"), 68 | "config", 69 | "fingeredu_%s.yml" % finger_config_suffix, 70 | ) 71 | finger_data = robot_interfaces.finger.SingleProcessData() 72 | self.real_finger_backend = ( 73 | robot_fingers.create_real_finger_backend( 74 | finger_data, config_file_path 75 | ) 76 | ) 77 | self.robot = robot_interfaces.finger.Frontend(finger_data) 78 | self.Action = robot_interfaces.finger.Action 79 | elif number_of_fingers == 3: 80 | if finger_type == "trifingerone": 81 | config_file_path = os.path.join( 82 | get_package_share_directory("robot_fingers"), 83 | "config", 84 | "trifinger.yml", 85 | ) 86 | elif finger_type == "trifingeredu": 87 | config_file_path = os.path.join( 88 | get_package_share_directory("robot_fingers"), 89 | "config", 90 | "trifingeredu.yml", 91 | ) 92 | finger_data = robot_interfaces.trifinger.SingleProcessData() 93 | self.real_finger_backend = robot_fingers.create_trifinger_backend( 94 | finger_data, config_file_path 95 | ) 96 | self.robot = robot_interfaces.trifinger.Frontend(finger_data) 97 | self.Action = robot_interfaces.trifinger.Action 98 | 99 | self.real_finger_backend.initialize() 100 | 101 | def append_desired_action(self, action): 102 | """ 103 | Append an action to the action timeseries, that should be 104 | applied to the robot. 105 | 106 | Args: 107 | action (self.Action): Joint positions or torques or both 108 | 109 | Returns: 110 | self.action_index (int): The current time-index at which the action 111 | was applied. 112 | """ 113 | return self.robot.append_desired_action(action) 114 | 115 | def get_observation(self, time_index): 116 | """ 117 | Get the observation from the robot at a specified time_index. 118 | 119 | Args: 120 | time_index (int): the time_index at which the observation is 121 | needed 122 | Returns: 123 | observation (robot.Observation): the corresponding observation 124 | """ 125 | observation = self.robot.get_observation(time_index) 126 | 127 | if self.simulator is not None: 128 | self.simulator.reset_finger_positions_and_velocities( 129 | joint_positions=observation.position 130 | ) 131 | 132 | return observation 133 | 134 | def reset_finger(self, joint_positions): 135 | """ 136 | Move the finger(s) to a random position (sampled in the joint space). 137 | The sampled random position is set as target and the robot is stepped 138 | for one second to give it time to reach there. 
139 | """ 140 | action = self.Action(position=joint_positions) 141 | for i in range(1000): 142 | t = self.append_desired_action(action) 143 | observation = self.get_observation(t) 144 | return observation 145 | -------------------------------------------------------------------------------- /trifinger_simulation/tasks/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/open-dynamic-robot-initiative/trifinger_simulation/bee637ae19785d582c1edfdfa433553d82929cd8/trifinger_simulation/tasks/__init__.py -------------------------------------------------------------------------------- /trifinger_simulation/tasks/move_cube/__main__.py: -------------------------------------------------------------------------------- 1 | """Run evaluation of the move_cube task. 2 | 3 | For more information on the different commands run `` --help``. 4 | """ 5 | 6 | import argparse 7 | import sys 8 | 9 | from . import json_goal_from_config, sample_goal, goal_to_json 10 | from . import run_evaluate_policy 11 | from . import run_replay 12 | from . import replay_action_log 13 | 14 | 15 | def cmd_sample_goal(args): 16 | try: 17 | t = sample_goal(args.difficulty) 18 | print(goal_to_json(t)) 19 | except Exception as e: 20 | print(e, file=sys.stderr) 21 | sys.exit(1) 22 | 23 | 24 | def cmd_goal_from_config(args): 25 | try: 26 | t = json_goal_from_config(args.goal_config_file) 27 | print(t) 28 | except Exception as e: 29 | print(e, file=sys.stderr) 30 | sys.exit(1) 31 | 32 | 33 | def cmd_evaluate_policy(args): 34 | try: 35 | run_evaluate_policy.main(args.output_directory) 36 | except Exception as e: 37 | print(e, file=sys.stderr) 38 | sys.exit(1) 39 | 40 | 41 | def cmd_replay_all(args): 42 | try: 43 | run_replay.main(args.input_directory) 44 | except Exception as e: 45 | print(e, file=sys.stderr) 46 | sys.exit(1) 47 | 48 | 49 | def cmd_replay_one(args): 50 | try: 51 | replay_action_log.replay_action_log( 52 | args.logfile, args.difficulty, args.initial_pose, args.goal_pose 53 | ) 54 | except Exception as e: 55 | print(e, file=sys.stderr) 56 | sys.exit(1) 57 | 58 | 59 | def cmd_evaluate_and_replay(args): 60 | try: 61 | print() 62 | print("Run Policy on Random Goals") 63 | print("==========================") 64 | run_evaluate_policy.main(args.output_directory) 65 | 66 | print() 67 | print("Validate Logs and Compute Reward") 68 | print("================================") 69 | print() 70 | run_replay.main(args.output_directory) 71 | except Exception as e: 72 | print(e, file=sys.stderr) 73 | sys.exit(1) 74 | 75 | 76 | def main(): 77 | parser = argparse.ArgumentParser("move_cube", description=__doc__) 78 | subparsers = parser.add_subparsers(title="command", dest="command") 79 | subparsers.required = True 80 | 81 | sub = subparsers.add_parser( 82 | "sample_goal", 83 | description="""Sample a goal. The trajectory is printed to 84 | stdout as JSON string. 85 | """, 86 | ) 87 | sub.add_argument( 88 | "--difficulty", 89 | type=int, 90 | choices=[1, 2, 3, 4], 91 | default=4, 92 | help="Difficulty level of the goal. Default: %(default)s.", 93 | ) 94 | sub.set_defaults(func=cmd_sample_goal) 95 | 96 | sub = subparsers.add_parser( 97 | "goal_from_config", 98 | description="""Load or sample a goal trajectory based on the given 99 | config file. The trajectory is printed to stdout as JSON string. 100 | """, 101 | ) 102 | sub.add_argument( 103 | "goal_config_file", type=str, help="Path to a goal config JSON file." 
104 | ) 105 | sub.set_defaults(func=cmd_goal_from_config) 106 | 107 | sub = subparsers.add_parser( 108 | "evaluate_policy", 109 | description="Run an evaluate_policy script on a set of random goals.", 110 | ) 111 | run_evaluate_policy.add_arguments(sub) 112 | sub.set_defaults(func=cmd_evaluate_policy) 113 | 114 | sub = subparsers.add_parser( 115 | "replay_all", description="Replay all logs in the given directory." 116 | ) 117 | run_replay.add_arguments(sub) 118 | sub.set_defaults(func=cmd_replay_all) 119 | 120 | sub = subparsers.add_parser( 121 | "replay_one", 122 | description="""Replay an evaluation log file and compute reward based 123 | on the given goal. 124 | """, 125 | ) 126 | replay_action_log.add_arguments(sub) 127 | sub.set_defaults(func=cmd_replay_one) 128 | 129 | sub = subparsers.add_parser( 130 | "evaluate_and_check", 131 | description="Run 'evaluate_policy' followed by 'replay_all'.", 132 | ) 133 | run_evaluate_policy.add_arguments(sub) 134 | sub.set_defaults(func=cmd_evaluate_and_replay) 135 | 136 | args = parser.parse_args() 137 | args.func(args) 138 | 139 | 140 | if __name__ == "__main__": 141 | main() 142 | -------------------------------------------------------------------------------- /trifinger_simulation/tasks/move_cube/evaluate_policy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Example evaluation script to evaluate a policy. 3 | 4 | This is an example evaluation script for evaluating a "RandomPolicy". Use this 5 | as a base for your own script to evaluate your policy. All you need to do is 6 | to replace the `RandomPolicy` and potentially the Gym environment with your 7 | own (see the TODOs in the code below). 8 | 9 | This script will be executed in an automated procedure. For this to work, make 10 | sure you do not change the overall structure of the script! 11 | 12 | This script expects the following arguments in the given order: 13 | - Difficulty level (needed for reward computation) 14 | - initial pose of the cube (as JSON string) 15 | - goal pose of the cube (as JSON string) 16 | - file to which the action log is written 17 | 18 | It is then expected to initialize the environment with the given initial pose 19 | and execute exactly one episode with the policy that is to be evaluated. 20 | 21 | When finished, the action log, which is created by the TriFingerPlatform class, 22 | is written to the specified file. This log file is crucial as it is used to 23 | evaluate the actual performance of the policy.
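Example call (the pose arguments are JSON strings, e.g. as produced by
move_cube.Pose.to_json; the concrete values are placeholders):

    evaluate_policy.py 2 '<initial_pose>' '<goal_pose>' /tmp/action_log.p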
24 | """ 25 | import sys 26 | 27 | import gym 28 | 29 | from trifinger_simulation.gym_wrapper.envs import cube_env 30 | from trifinger_simulation.tasks import move_cube 31 | 32 | 33 | class RandomPolicy: 34 | """Dummy policy which uses random actions.""" 35 | 36 | def __init__(self, action_space): 37 | self.action_space = action_space 38 | 39 | def predict(self, observation): 40 | return self.action_space.sample() 41 | 42 | 43 | def main(): 44 | try: 45 | difficulty = int(sys.argv[1]) 46 | initial_pose_json = sys.argv[2] 47 | goal_pose_json = sys.argv[3] 48 | output_file = sys.argv[4] 49 | except IndexError: 50 | print("Incorrect number of arguments.") 51 | print( 52 | "Usage:\n" 53 | "\tevaluate_policy.py " 54 | " " 55 | ) 56 | sys.exit(1) 57 | 58 | # the poses are passes as JSON strings, so they need to be converted first 59 | initial_pose = move_cube.Pose.from_json(initial_pose_json) 60 | goal_pose = move_cube.Pose.from_json(goal_pose_json) 61 | 62 | # create a FixedInitializer with the given values 63 | initializer = cube_env.FixedInitializer( 64 | difficulty, initial_pose, goal_pose 65 | ) 66 | 67 | # TODO: Replace with your environment if you used a custom one. 68 | env = gym.make( 69 | "trifinger_simulation.gym_wrapper:real_robot_challenge_phase_1-v1", 70 | initializer=initializer, 71 | action_type=cube_env.ActionType.POSITION, 72 | visualization=False, 73 | ) 74 | 75 | # TODO: Replace this with your model 76 | # Note: You may also use a different policy for each difficulty level (difficulty) 77 | policy = RandomPolicy(env.action_space) 78 | 79 | # Execute one episode. Make sure that the number of simulation steps 80 | # matches with the episode length of the task. When using the default Gym 81 | # environment, this is the case when looping until is_done == True. Make 82 | # sure to adjust this in case your custom environment behaves differently! 83 | is_done = False 84 | observation = env.reset() 85 | accumulated_reward = 0 86 | while not is_done: 87 | action = policy.predict(observation) 88 | observation, reward, is_done, info = env.step(action) 89 | accumulated_reward += reward 90 | 91 | print("Accumulated reward: {}".format(accumulated_reward)) 92 | 93 | # store the log for evaluation 94 | env.platform.store_action_log(output_file) 95 | 96 | 97 | if __name__ == "__main__": 98 | main() 99 | -------------------------------------------------------------------------------- /trifinger_simulation/tasks/move_cube/run_evaluate_policy.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Run `evaluate_policy.py` with random goals for all difficulty levels 3 | 4 | Creates a dataset of multiple pairs of random initial state and goal for the 5 | cube for each difficulty level. Then executes `evaluate_policy.py` on each of 6 | these samples and collects the log files in the specified output directory. 7 | 8 | The `evaluate_policy.py` script is expected to be located in the current 9 | working directory. Replace the dummy policy there with your actual policy to 10 | evaluate it. 11 | 12 | This is used for the evaluation of the submissions of this phase of the first 13 | phase of the real robot challenge. Make sure this script runs with your 14 | `evaluate_policy.py` script without any errors before submitting your code. 15 | """ 16 | 17 | # IMPORTANT: DO NOT MODIFY THIS FILE! 18 | # Submissions will be evaluate on our side with a similar script but not 19 | # exactly this one. 
To make sure that your code is compatible with our 20 | # evaluation script, make sure it runs with this one without any modifications. 21 | 22 | import argparse 23 | import os 24 | import pickle 25 | import subprocess 26 | import sys 27 | import typing 28 | 29 | from trifinger_simulation.tasks import move_cube 30 | 31 | 32 | class TestSample(typing.NamedTuple): 33 | difficulty: int 34 | iteration: int 35 | init_pose_json: str 36 | goal_pose_json: str 37 | logfile: str 38 | 39 | 40 | def generate_test_set( 41 | levels: typing.Sequence[int], samples_per_level: int, logfile_tmpl: str 42 | ) -> typing.List[TestSample]: 43 | """Generate random test set for policy evaluation. 44 | 45 | For each difficulty level a list of samples, consisting of randomized 46 | initial pose and goal pose, is generated. 47 | 48 | Args: 49 | levels: List of difficulty levels. 50 | samples_per_level: How many samples are generated per level. 51 | logfile_tmpl: Format string for the log file associated with this 52 | sample. Needs to contain placeholders "{level}" and "{iteration}". 53 | 54 | Returns: 55 | List of ``len(levels) * samples_per_level`` test samples. 56 | """ 57 | samples = [] 58 | for level in levels: 59 | for i in range(samples_per_level): 60 | init = move_cube.sample_goal(-1) 61 | goal = move_cube.sample_goal(level) 62 | logfile = logfile_tmpl.format(level=level, iteration=i) 63 | 64 | samples.append( 65 | TestSample(level, i, init.to_json(), goal.to_json(), logfile) 66 | ) 67 | 68 | return samples 69 | 70 | 71 | def run_evaluate_policy(sample: TestSample): 72 | """Run evaluate_policy.py with the given sample. 73 | 74 | Args: 75 | sample (TestSample): Contains all required information to run the 76 | evaluation script. 77 | """ 78 | cmd = [ 79 | "python3", 80 | "./evaluate_policy.py", # TODO: make path configurable? 
81 | str(sample.difficulty), 82 | sample.init_pose_json, 83 | sample.goal_pose_json, 84 | sample.logfile, 85 | ] 86 | subprocess.run(cmd, check=True) 87 | 88 | 89 | def main(output_directory: str): 90 | if not os.path.isdir(output_directory): 91 | print( 92 | "'{}' does not exist or is not a directory.".format( 93 | output_directory 94 | ) 95 | ) 96 | sys.exit(1) 97 | 98 | levels = (1, 2, 3, 4) 99 | runs_per_level = 10 100 | 101 | logfile_tmpl = os.path.join( 102 | output_directory, "action_log_l{level}_i{iteration}.p" 103 | ) 104 | 105 | # generate n samples for each level 106 | test_data = generate_test_set(levels, runs_per_level, logfile_tmpl) 107 | 108 | # store samples 109 | sample_file = os.path.join(output_directory, "test_data.p") 110 | with open(sample_file, "wb") as fh: 111 | pickle.dump(test_data, fh, pickle.HIGHEST_PROTOCOL) 112 | 113 | # run "evaluate_policy.py" for each sample 114 | for sample in test_data: 115 | print( 116 | "\n___Evaluate level {} sample {}___".format( 117 | sample.difficulty, sample.iteration 118 | ) 119 | ) 120 | run_evaluate_policy(sample) 121 | 122 | 123 | def add_arguments(parser): 124 | parser.add_argument( 125 | "output_directory", 126 | type=str, 127 | help="Directory in which generated files are stored.", 128 | ) 129 | 130 | 131 | if __name__ == "__main__": 132 | parser = argparse.ArgumentParser( 133 | description=__doc__, 134 | formatter_class=argparse.RawDescriptionHelpFormatter, 135 | ) 136 | add_arguments(parser) 137 | args = parser.parse_args() 138 | main(args.output_directory) 139 | -------------------------------------------------------------------------------- /trifinger_simulation/tasks/move_cube/run_replay.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Replay results of "run_evaluate_policy_all_levels.py" and compute reward. 3 | 4 | Reads the files generated by "run_evaluate_policy_all_levels.py" and replays 5 | the action logs to verify the result and to compute the total reward over all 6 | runs. 7 | """ 8 | 9 | # IMPORTANT: DO NOT MODIFY THIS FILE! 10 | # Submissions will be evaluated on our side with our own version of this script. 11 | # To make sure that your code is compatible with our evaluation script, make 12 | # sure it runs with this one without any modifications. 13 | 14 | import argparse 15 | import os 16 | import pickle 17 | import sys 18 | import typing 19 | 20 | import numpy as np 21 | 22 | from . 
import replay_action_log 23 | 24 | 25 | class TestSample(typing.NamedTuple): 26 | difficulty: int 27 | iteration: int 28 | init_pose_json: str 29 | goal_pose_json: str 30 | logfile: str 31 | 32 | 33 | def main(input_directory: str): 34 | try: 35 | if not os.path.isdir(input_directory): 36 | print( 37 | "'{}' does not exist or is not a directory.".format( 38 | input_directory 39 | ) 40 | ) 41 | sys.exit(1) 42 | 43 | levels = (1, 2, 3, 4) 44 | 45 | # load samples 46 | sample_file = os.path.join(input_directory, "test_data.p") 47 | with open(sample_file, "rb") as fb: 48 | test_data = pickle.load(fb) 49 | 50 | # run "replay_action_log.py" for each sample 51 | level_rewards: dict = {level: [] for level in levels} 52 | for sample in test_data: 53 | print( 54 | "Replay level {} sample {}".format( 55 | sample.difficulty, sample.iteration 56 | ) 57 | ) 58 | reward = replay_action_log.replay_action_log( 59 | sample.logfile, 60 | sample.difficulty, 61 | sample.init_pose_json, 62 | sample.goal_pose_json, 63 | ) 64 | level_rewards[sample.difficulty].append(reward) 65 | 66 | # report 67 | print("\n=======================================================\n") 68 | 69 | report = "" 70 | total_reward = 0 71 | for level, rewards in level_rewards.items(): 72 | rewards = np.asarray(rewards) 73 | mean = rewards.mean() 74 | report += "Level {} mean reward:\t{:.3f},\tstd: {:.3f}\n".format( 75 | level, mean, rewards.std() 76 | ) 77 | total_reward += level * mean 78 | 79 | report += "-------------------------------------------------------\n" 80 | report += "Total Weighted Reward: {:.3f}\n".format(total_reward) 81 | 82 | print(report) 83 | 84 | # save report to file 85 | report_file = os.path.join(input_directory, "reward.txt") 86 | with open(report_file, "w") as f: 87 | f.write(report) 88 | 89 | except Exception as e: 90 | print(e, file=sys.stderr) 91 | sys.exit(1) 92 | 93 | 94 | def add_arguments(parser): 95 | parser.add_argument( 96 | "input_directory", 97 | type=str, 98 | help="Directory containing the generated log files.", 99 | ) 100 | 101 | 102 | if __name__ == "__main__": 103 | parser = argparse.ArgumentParser( 104 | description=__doc__, 105 | formatter_class=argparse.RawDescriptionHelpFormatter, 106 | ) 107 | add_arguments(parser) 108 | args = parser.parse_args() 109 | 110 | main(args.input_directory) 111 | -------------------------------------------------------------------------------- /trifinger_simulation/tasks/move_cube_on_trajectory/__main__.py: -------------------------------------------------------------------------------- 1 | """Run evaluation of the move_cube_on_trajectory task. 2 | 3 | For more information on the different commands run `` --help``. 4 | """ 5 | 6 | import argparse 7 | import sys 8 | 9 | from . import json_goal_from_config, sample_goal, trajectory_to_json 10 | from . import run_evaluate_policy 11 | from . import run_replay 12 | from . 
import replay_action_log
13 |
14 |
15 | def cmd_sample_goal(args):
16 | try:
17 | t = sample_goal()
18 | print(trajectory_to_json(t))
19 | except Exception as e:
20 | print(e, file=sys.stderr)
21 | sys.exit(1)
22 |
23 |
24 | def cmd_goal_from_config(args):
25 | try:
26 | t = json_goal_from_config(args.goal_config_file)
27 | print(t)
28 | except Exception as e:
29 | print(e, file=sys.stderr)
30 | sys.exit(1)
31 |
32 |
33 | def cmd_evaluate_policy(args):
34 | try:
35 | run_evaluate_policy.main(args.output_directory, args.exec)
36 | except Exception as e:
37 | print(e, file=sys.stderr)
38 | sys.exit(1)
39 |
40 |
41 | def cmd_replay_all(args):
42 | try:
43 | run_replay.main(args.log_directory)
44 | except Exception as e:
45 | print(e, file=sys.stderr)
46 | sys.exit(1)
47 |
48 |
49 | def cmd_replay_one(args):
50 | try:
51 | replay_action_log.replay_action_log(args.logfile, args.trajectory)
52 | except Exception as e:
53 | print(e, file=sys.stderr)
54 | sys.exit(1)
55 |
56 |
57 | def cmd_evaluate_and_replay(args):
58 | try:
59 | print()
60 | print("Run Policy on Random Trajectories")
61 | print("=================================")
62 | run_evaluate_policy.main(args.output_directory, args.exec)
63 |
64 | print()
65 | print("Validate Logs and Compute Reward")
66 | print("================================")
67 | print()
68 | run_replay.main(args.output_directory)
69 | except Exception as e:
70 | print(e, file=sys.stderr)
71 | sys.exit(1)
72 |
73 |
74 | def main():
75 | parser = argparse.ArgumentParser(
76 | "move_cube_on_trajectory", description=__doc__
77 | )
78 | subparsers = parser.add_subparsers(title="command", dest="command")
79 | subparsers.required = True
80 |
81 | sub = subparsers.add_parser(
82 | "sample_goal",
83 | description="""Sample a goal trajectory. The trajectory is printed to
84 | stdout as JSON string.
85 | """,
86 | )
87 | sub.set_defaults(func=cmd_sample_goal)
88 |
89 | sub = subparsers.add_parser(
90 | "goal_from_config",
91 | description="""Load or sample a goal trajectory based on the given
92 | config file. The trajectory is printed to stdout as JSON string.
93 | """,
94 | )
95 | sub.add_argument(
96 | "goal_config_file", type=str, help="Path to a goal config JSON file."
97 | )
98 | sub.set_defaults(func=cmd_goal_from_config)
99 |
100 | sub = subparsers.add_parser(
101 | "evaluate_policy",
102 | description="""Run an evaluate_policy script on a set of random
103 | trajectories.
104 | """,
105 | )
106 | run_evaluate_policy.add_arguments(sub)
107 | sub.set_defaults(func=cmd_evaluate_policy)
108 |
109 | sub = subparsers.add_parser(
110 | "replay_all", description="Replay all logs in the given directory."
111 | )
112 | run_replay.add_arguments(sub)
113 | sub.set_defaults(func=cmd_replay_all)
114 |
115 | sub = subparsers.add_parser(
116 | "replay_one",
117 | description="""Replay an evaluation log file and compute reward based
118 | on the given trajectory.
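The log file and the goal trajectory are passed via the '--logfile' and '--trajectory' options.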
119 | """,
120 | )
121 | replay_action_log.add_arguments(sub)
122 | sub.set_defaults(func=cmd_replay_one)
123 |
124 | sub = subparsers.add_parser(
125 | "evaluate_and_check",
126 | description="Run 'evaluate_policy' followed by 'replay_all'.",
127 | )
128 | run_evaluate_policy.add_arguments(sub)
129 | sub.set_defaults(func=cmd_evaluate_and_replay)
130 |
131 | args = parser.parse_args()
132 | args.func(args)
133 |
134 |
135 | if __name__ == "__main__":
136 | main()
137 |
--------------------------------------------------------------------------------
/trifinger_simulation/tasks/move_cube_on_trajectory/evaluate_policy.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """Example evaluation script to evaluate a policy.
3 |
4 | This is an example evaluation script for evaluating a "RandomPolicy". Use this
5 | as a base for your own script to evaluate your policy. All you need to do is
6 | to replace the `RandomPolicy` and potentially the Gym environment with your
7 | own (see the TODOs in the code below).
8 |
9 | This script will be executed in an automated procedure. For this to work, make
10 | sure you do not change the overall structure of the script!
11 |
12 | This script expects the following arguments in the given order:
13 | - The trajectory as a JSON string
14 | - Path to the file to which the action log is written
15 |
16 | It is then expected to initialize the environment with the given trajectory
17 | and execute exactly one episode with the policy that is to be evaluated.
18 |
19 | When finished, the action log, which is created by the TriFingerPlatform class,
20 | is written to the specified file. This log file is crucial as it is used to
21 | evaluate the actual performance of the policy.
22 | """
23 | import argparse
24 | import json
25 |
26 | import gym
27 |
28 | from trifinger_simulation.gym_wrapper.envs import cube_trajectory_env
29 |
30 |
31 | class RandomPolicy:
32 | """Dummy policy which uses random actions."""
33 |
34 | def __init__(self, action_space):
35 | self.action_space = action_space
36 |
37 | def predict(self, observation):
38 | return self.action_space.sample()
39 |
40 |
41 | def main():
42 | parser = argparse.ArgumentParser()
43 | parser.add_argument(
44 | "trajectory",
45 | type=json.loads,
46 | metavar="JSON",
47 | help="Goal trajectory as a JSON string.",
48 | )
49 | parser.add_argument(
50 | "action_log_file",
51 | type=str,
52 | help="File to which the action log is written.",
53 | )
54 | args = parser.parse_args()
55 |
56 | # create a FixedInitializer with the given trajectory
57 | initializer = cube_trajectory_env.FixedInitializer(args.trajectory)
58 |
59 | # TODO: Replace with your environment if you used a custom one.
60 | env = gym.make(
61 | "trifinger_simulation.gym_wrapper:cube_trajectory-v1",
62 | initializer=initializer,
63 | action_type=cube_trajectory_env.ActionType.POSITION,
64 | # IMPORTANT: Do not enable visualisation as this will result in invalid
65 | # log files (unfortunately the visualisation slightly influences the
66 | # behaviour of the physics in pyBullet...).
67 | visualization=False,
68 | )
69 |
70 | # TODO: Replace this with your model
71 | policy = RandomPolicy(env.action_space)
72 |
73 | # Execute one episode. Make sure that the number of simulation steps
74 | # matches with the episode length of the task. When using the default Gym
75 | # environment, this is the case when looping until is_done == True.
Make 76 | # sure to adjust this in case your custom environment behaves differently! 77 | is_done = False 78 | observation = env.reset() 79 | accumulated_reward = 0 80 | while not is_done: 81 | action = policy.predict(observation) 82 | observation, reward, is_done, info = env.step(action) 83 | accumulated_reward += reward 84 | 85 | print("Accumulated reward: {}".format(accumulated_reward)) 86 | 87 | # store the log for evaluation 88 | env.platform.store_action_log(args.action_log_file) 89 | 90 | 91 | if __name__ == "__main__": 92 | main() 93 | -------------------------------------------------------------------------------- /trifinger_simulation/tasks/move_cube_on_trajectory/replay_action_log.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """Replay actions for a given logfile, verify steps and compute reward. 3 | 4 | The log file is a pickle file as produced by 5 | :meth:`trifinger_simulation.TriFingerPlatform.store_action_log` which contains 6 | a list of applied actions and observations of all steps. 7 | 8 | The simulation is initialised to the same state as during evaluation. Then the 9 | actions are applied one by one, verifying the resulting robot and object state 10 | after each step. If the simulation was only accessed via the provided 11 | interface during evaluation, the replay should result in exactly the same 12 | states. 13 | 14 | For each step the reward is computed. The cumulative reward over all steps is 15 | printed in the end. 16 | """ 17 | import argparse 18 | import json 19 | import pickle 20 | import sys 21 | import numpy as np 22 | 23 | from trifinger_simulation import trifinger_platform 24 | from trifinger_simulation.tasks import move_cube_on_trajectory as mct 25 | 26 | 27 | def replay_action_log(logfile: str, trajectory: mct.Trajectory) -> float: 28 | with open(logfile, "rb") as fh: 29 | log = pickle.load(fh) 30 | 31 | # initialize cube at the centre 32 | initial_object_pose = mct.move_cube.Pose( 33 | position=mct.INITIAL_CUBE_POSITION 34 | ) 35 | 36 | platform = trifinger_platform.TriFingerPlatform( 37 | visualization=False, initial_object_pose=initial_object_pose 38 | ) 39 | 40 | # verify that the robot is initialized to the same position as in the log 41 | # file 42 | initial_robot_position = platform.get_robot_observation(0).position 43 | try: 44 | np.testing.assert_array_equal( 45 | initial_robot_position, 46 | log["initial_robot_position"], 47 | err_msg=("Initial robot position does not match with log file."), 48 | ) 49 | except AssertionError as e: 50 | print("Failed.", file=sys.stderr) 51 | print(e, file=sys.stderr) 52 | sys.exit(1) 53 | 54 | # verify that the number of logged actions matches with the episode length 55 | n_actions = len(log["actions"]) 56 | assert ( 57 | n_actions == mct.EPISODE_LENGTH 58 | ), "Number of actions in log does not match with expected episode length." 
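# Re-apply each logged action step by step and verify after every step that
# the replayed robot and object states are identical to the recorded ones;
# any mismatch indicates the log was not produced by an unmodified run.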
59 | 60 | accumulated_reward = 0 61 | for logged_action in log["actions"]: 62 | action = logged_action["action"] 63 | 64 | t = platform.append_desired_action(action) 65 | 66 | robot_obs = platform.get_robot_observation(t) 67 | camera_obs = platform.get_camera_observation(t) 68 | # ensure we got a camera observation with object pose (mostly for mypy) 69 | assert isinstance( 70 | camera_obs, trifinger_platform.TriCameraObjectObservation 71 | ) 72 | 73 | cube_pose = camera_obs.filtered_object_pose 74 | reward = -mct.evaluate_state(trajectory, t, cube_pose.position) 75 | accumulated_reward += reward 76 | 77 | assert logged_action["t"] == t 78 | 79 | np.testing.assert_array_equal( 80 | robot_obs.position, 81 | logged_action["robot_observation"].position, 82 | err_msg=( 83 | "Step %d: Recorded robot position does not match with" 84 | " the one achieved by the replay" % t 85 | ), 86 | ) 87 | np.testing.assert_array_equal( 88 | robot_obs.torque, 89 | logged_action["robot_observation"].torque, 90 | err_msg=( 91 | "Step %d: Recorded robot torque does not match with" 92 | " the one achieved by the replay" % t 93 | ), 94 | ) 95 | np.testing.assert_array_equal( 96 | robot_obs.velocity, 97 | logged_action["robot_observation"].velocity, 98 | err_msg=( 99 | "Step %d: Recorded robot velocity does not match with" 100 | " the one achieved by the replay" % t 101 | ), 102 | ) 103 | 104 | np.testing.assert_array_equal( 105 | cube_pose.position, 106 | logged_action["object_pose"].position, 107 | err_msg=( 108 | "Step %d: Recorded object position does not match with" 109 | " the one achieved by the replay" % t 110 | ), 111 | ) 112 | np.testing.assert_array_equal( 113 | cube_pose.orientation, 114 | logged_action["object_pose"].orientation, 115 | err_msg=( 116 | "Step %d: Recorded object orientation does not match with" 117 | " the one achieved by the replay" % t 118 | ), 119 | ) 120 | 121 | camera_obs = platform.get_camera_observation(t) 122 | assert isinstance( 123 | camera_obs, trifinger_platform.TriCameraObjectObservation 124 | ) 125 | cube_pose = camera_obs.object_pose 126 | final_pose = log["final_object_pose"]["pose"] 127 | 128 | print("Accumulated Reward:", accumulated_reward) 129 | 130 | # verify that actual and logged final object pose match 131 | try: 132 | np.testing.assert_array_equal( 133 | cube_pose.position, 134 | final_pose.position, 135 | err_msg=( 136 | "Recorded object position does not match with the one" 137 | " achieved by the replay" 138 | ), 139 | ) 140 | np.testing.assert_array_equal( 141 | cube_pose.orientation, 142 | final_pose.orientation, 143 | err_msg=( 144 | "Recorded object orientation does not match with the one" 145 | " achieved by the replay" 146 | ), 147 | ) 148 | except AssertionError as e: 149 | print("Failed.", file=sys.stderr) 150 | print(e, file=sys.stderr) 151 | sys.exit(1) 152 | 153 | print("Passed.") 154 | 155 | return accumulated_reward 156 | 157 | 158 | def add_arguments(parser): 159 | parser.add_argument( 160 | "--logfile", 161 | "-l", 162 | required=True, 163 | type=str, 164 | help="Path to the log file.", 165 | ) 166 | parser.add_argument( 167 | "--trajectory", 168 | "-t", 169 | required=True, 170 | type=json.loads, 171 | metavar="JSON", 172 | help="Goal trajectory of the cube as JSON string.", 173 | ) 174 | 175 | 176 | if __name__ == "__main__": 177 | parser = argparse.ArgumentParser( 178 | description=__doc__, 179 | formatter_class=argparse.RawDescriptionHelpFormatter, 180 | ) 181 | add_arguments(parser) 182 | args = parser.parse_args() 183 | 184 | 
replay_action_log(args.logfile, args.trajectory)
185 |
--------------------------------------------------------------------------------
/trifinger_simulation/tasks/move_cube_on_trajectory/run_evaluate_policy.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """Run ``evaluate_policy.py`` with random trajectories.
3 |
4 | Creates a dataset of multiple random trajectories. Then executes
5 | ``evaluate_policy.py`` on each of them and collects the log files in the
6 | specified output directory.
7 |
8 | To evaluate your policy, copy the ``evaluate_policy.py`` from this package and
9 | replace the dummy policy there with your actual policy.
10 |
11 | This is used for the evaluation of the submissions of the first phase of the
12 | Real Robot Challenge 2021. Make sure this script runs with your
13 | ``evaluate_policy.py`` script without any errors before submitting your code.
14 | """
15 |
16 | # IMPORTANT: DO NOT MODIFY THIS FILE!
17 | # Submissions will be evaluated on our side with a similar script but not
18 | # exactly this one. To make sure that your code is compatible with our
19 | # evaluation script, make sure it runs with this one without any modifications.
20 |
21 | import argparse
22 | import pathlib
23 | import pickle
24 | import subprocess
25 | import sys
26 | import typing
27 |
28 | from trifinger_simulation.tasks import move_cube_on_trajectory as mct
29 |
30 |
31 | def generate_test_set(num_trajectories: int) -> typing.List[str]:
32 | """Generate a test set of random trajectories.
33 |
34 | Args:
35 | num_trajectories: Number of trajectories in the test set.
36 |
37 | Returns:
38 | List of random trajectories encoded as JSON strings.
39 | """
40 | return [
41 | mct.trajectory_to_json(mct.sample_goal())
42 | for i in range(num_trajectories)
43 | ]
44 |
45 |
46 | def run_evaluate_policy(script_path: str, sample: str, action_log_file: str):
47 | """Run the evaluation script with the given sample.
48 |
49 | Args:
50 | script_path: Path to the evaluate_policy.py script.
51 | sample: Goal trajectory encoded as JSON string.
52 | action_log_file: Destination of the action log file.
53 | """
54 | cmd = [
55 | "python3",
56 | script_path,
57 | sample,
58 | action_log_file,
59 | ]
60 | subprocess.run(cmd, check=True)
61 |
62 |
63 | def add_arguments(parser):
64 | parser.add_argument(
65 | "output_directory",
66 | type=pathlib.Path,
67 | help="Directory in which generated files are stored.",
68 | )
69 | parser.add_argument(
70 | "--exec",
71 | type=str,
72 | default="./evaluate_policy.py",
73 | help="Path to the evaluate_policy.py script.
Default: '%(default)s'",
74 | )
75 |
76 |
77 | def main(output_directory: pathlib.Path, eval_executable: str):
78 | if not output_directory.is_dir():
79 | print(
80 | "'{}' does not exist or is not a directory.".format(
81 | output_directory
82 | )
83 | )
84 | sys.exit(1)
85 |
86 | # Number of trajectories that are sampled for the evaluation
87 | num_trajectories = 10
88 |
89 | logfile_tmpl = str(output_directory / "action_log_{:02d}.p")
90 |
91 | # generate the random test trajectories
92 | test_data = generate_test_set(num_trajectories)
93 |
94 | # store samples
95 | sample_file = output_directory / "test_data.p"
96 | with open(sample_file, "wb") as fh:
97 | pickle.dump(test_data, fh, pickle.HIGHEST_PROTOCOL)
98 |
99 | # run the evaluation script for each sample
100 | for i, sample in enumerate(test_data):
101 | print("\n___Evaluate trajectory {}___".format(i))
102 | run_evaluate_policy(eval_executable, sample, logfile_tmpl.format(i))
103 |
104 |
105 | if __name__ == "__main__":
106 | parser = argparse.ArgumentParser(
107 | description=__doc__,
108 | formatter_class=argparse.RawDescriptionHelpFormatter,
109 | )
110 | add_arguments(parser)
111 | args = parser.parse_args()
112 |
113 | main(args.output_directory, args.exec)
114 |
--------------------------------------------------------------------------------
/trifinger_simulation/tasks/move_cube_on_trajectory/run_replay.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """Replay results of ``run_evaluate_policy()`` and compute reward.
3 |
4 | Reads the files generated by :func:`run_evaluate_policy.run_evaluate_policy`
5 | and replays the action logs to verify the result and to compute the total
6 | reward over all runs.
7 | """
8 |
9 | # IMPORTANT: DO NOT MODIFY THIS FILE!
10 | # Submissions will be evaluated on our side with our own version of this script.
11 | # To make sure that your code is compatible with our evaluation script, make
12 | # sure it runs with this one without any modifications.
13 |
14 | import argparse
15 | import json
16 | import pathlib
17 | import pickle
18 | import sys
19 |
20 | import numpy as np
21 |
22 | from .
import replay_action_log 23 | 24 | 25 | def add_arguments(parser): 26 | parser.add_argument( 27 | "log_directory", 28 | type=pathlib.Path, 29 | help="Directory containing the generated log files.", 30 | ) 31 | 32 | 33 | def main(log_directory: pathlib.Path): 34 | try: 35 | if not log_directory.is_dir(): 36 | print( 37 | "'{}' does not exist or is not a directory.".format( 38 | log_directory 39 | ) 40 | ) 41 | sys.exit(1) 42 | 43 | logfile_tmpl = str(log_directory / "action_log_{:02d}.p") 44 | 45 | # load samples 46 | sample_file = log_directory / "test_data.p" 47 | with open(sample_file, "rb") as fh: 48 | test_data = pickle.load(fh) 49 | 50 | # run "replay_action_log.py" for each sample 51 | rewards = [] 52 | for i, sample_json in enumerate(test_data): 53 | print("\n___Replay trajectory {}___".format(i)) 54 | sample = json.loads(sample_json) 55 | reward = replay_action_log.replay_action_log( 56 | logfile_tmpl.format(i), sample 57 | ) 58 | rewards.append(reward) 59 | 60 | # report 61 | print("\n=======================================================\n") 62 | 63 | report = { 64 | "mean": float(np.mean(rewards)), 65 | "median": float(np.median(rewards)), 66 | "std": float(np.std(rewards)), 67 | } 68 | print( 69 | "reward median: {:.3f},\tmean: {:.3f},\tstd: {:.3f}\n".format( 70 | report["median"], report["mean"], report["std"] 71 | ) 72 | ) 73 | 74 | # save report to file 75 | report_file = log_directory / "reward.json" 76 | with open(report_file, "w") as f: 77 | json.dump(report, f) 78 | 79 | except Exception as e: 80 | print("Error: {}".format(e), file=sys.stderr) 81 | sys.exit(1) 82 | 83 | 84 | if __name__ == "__main__": 85 | parser = argparse.ArgumentParser( 86 | description=__doc__, 87 | formatter_class=argparse.RawDescriptionHelpFormatter, 88 | ) 89 | add_arguments(parser) 90 | args = parser.parse_args() 91 | 92 | main(args.log_directory) 93 | -------------------------------------------------------------------------------- /trifinger_simulation/tasks/rearrange_dice/__main__.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import sys 3 | 4 | import numpy as np 5 | import cv2 6 | import json 7 | 8 | import trifinger_simulation 9 | 10 | from . import json_goal_from_config, sample_goal, goal_to_json 11 | from . import visualize_2d, generate_goal_mask 12 | from . 
import utils
13 |
14 |
15 | def cmd_sample_goal(args):
16 | try:
17 | goal = sample_goal()
18 | print(goal_to_json(goal))
19 | if args.show:
20 | visualize_2d(goal)
21 | if args.show_masks:
22 | data_dir = trifinger_simulation.get_data_dir()
23 | camera_param_dir = data_dir / "camera_params"
24 | camera_params = trifinger_simulation.camera.load_camera_parameters(
25 | camera_param_dir, "camera{id}_cropped_and_downsampled.yml"
26 | )
27 | masks = generate_goal_mask(camera_params, goal)
28 | masks = np.hstack(masks)
29 | cv2.imshow("Goal Masks", masks)
30 | cv2.waitKey()
31 |
32 | except Exception as e:
33 | print(e, file=sys.stderr)
34 | sys.exit(1)
35 |
36 |
37 | def cmd_goal_from_config(args):
38 | try:
39 | goal = json_goal_from_config(args.goal_config_file)
40 | print(goal)
41 | except Exception as e:
42 | print(e, file=sys.stderr)
43 | sys.exit(1)
44 |
45 |
46 | def cmd_create_pattern_template(args):
47 | print(utils.create_empty_pattern())
48 |
49 |
50 | def cmd_parse_goal_pattern(args):
51 | try:
52 | goal = utils.parse_pattern_file(args.infile)
53 | print(json.dumps(goal))
54 | visualize_2d(goal)
55 |
56 | except Exception as e:
57 | print(e, file=sys.stderr)
58 | sys.exit(1)
59 |
60 |
61 | def main():
62 | parser = argparse.ArgumentParser("rearrange_dice", description=__doc__)
63 | subparsers = parser.add_subparsers(title="command", dest="command")
64 | subparsers.required = True
65 |
66 | sub = subparsers.add_parser(
67 | "sample_goal",
68 | description="""Sample a random goal. The goal is written to stdout as
69 | JSON string.
70 | """,
71 | )
72 | sub.add_argument(
73 | "--show", action="store_true", help="Visualize the goal positions."
74 | )
75 | sub.add_argument(
76 | "--show-masks",
77 | action="store_true",
78 | help="Show the goal masks (using some default camera parameters).",
79 | )
80 | sub.set_defaults(func=cmd_sample_goal)
81 |
82 | sub = subparsers.add_parser(
83 | "goal_from_config",
84 | description="""Load or sample a goal based on the given config file.
85 | The goal is written to stdout as JSON string.
86 | """,
87 | )
88 | sub.add_argument(
89 | "goal_config_file", type=str, help="Path to a goal config JSON file."
90 | )
91 | sub.set_defaults(func=cmd_goal_from_config)
92 |
93 | sub = subparsers.add_parser(
94 | "create_pattern_template",
95 | description="Output template for an ASCII goal pattern.",
96 | )
97 | sub.set_defaults(func=cmd_create_pattern_template)
98 |
99 | sub = subparsers.add_parser(
100 | "parse_goal_pattern", description="Parse ASCII goal pattern."
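# reads an ASCII pattern from a file or stdin and prints the parsed goal
# as a JSON string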
101 | )
102 | sub.add_argument(
103 | "infile",
104 | type=argparse.FileType("r"),
105 | nargs="?", default="-",
106 | help="File with the goal pattern (use '-' to read from stdin).",
107 | )
108 | sub.set_defaults(func=cmd_parse_goal_pattern)
109 |
110 | args = parser.parse_args()
111 | args.func(args)
112 |
113 |
114 | if __name__ == "__main__":
115 | main()
116 |
--------------------------------------------------------------------------------
/trifinger_simulation/tasks/rearrange_dice/test_reward.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | """Load a set of images and a goal pattern and compute the reward."""
3 | import argparse
4 | import json
5 | import pathlib
6 | import os
7 | import sys
8 |
9 | import cv2
10 | import numpy as np
11 |
12 | from trifinger_object_tracking.py_lightblue_segmenter import segment_image
13 |
14 | import trifinger_simulation.camera
15 | from trifinger_simulation.tasks import rearrange_dice
16 | from trifinger_simulation.tasks.rearrange_dice import utils
17 |
18 |
19 | def main():
20 | parser = argparse.ArgumentParser(description=__doc__)
21 | parser.add_argument(
22 | "--image-dir",
23 | type=pathlib.PurePath,
24 | required=True,
25 | help="Directory that contains images 'camera{id}.png'",
26 | )
27 | parser.add_argument(
28 | "--camera-calib-dir",
29 | type=pathlib.PurePath,
30 | help="""Directory containing camera calibration files. Defaults to
31 | '{image_dir}/..'.
32 | """,
33 | )
34 | parser.add_argument(
35 | "--camera-calib-files",
36 | type=str,
37 | default="camera{id}_cropped_and_downsampled.yml",
38 | help="""Name pattern of the camera calibration files. Use '{id}' as
39 | placeholder for the id. Default: %(default)s.
40 | """,
41 | )
42 | parser.add_argument(
43 | "--goal-file",
44 | type=pathlib.PurePath,
45 | help="""Either a JSON file containing the goal or a txt file containing
46 | an ASCII goal pattern (will be distinguished by the file
47 | extension). Defaults to '{image_dir}/goal.txt'.
48 | """,
49 | )
50 | args = parser.parse_args()
51 |
52 | camera_ids = (60, 180, 300)
53 |
54 | image_dir = args.image_dir
55 | camera_calib_dir = args.camera_calib_dir or image_dir / ".."
56 | camera_calib_file = args.camera_calib_files
57 | goal_file = args.goal_file or image_dir / "goal.txt"
58 |
59 | # load images
60 | images = [
61 | cv2.imread(os.fspath(image_dir / f"camera{id}.png"))
62 | for id in camera_ids
63 | ]
64 |
65 | # load camera parameters
66 | camera_info = trifinger_simulation.camera.load_camera_parameters(
67 | camera_calib_dir, camera_calib_file
68 | )
69 |
70 | # load goal
71 | with open(goal_file, "r") as f:
72 | if goal_file.suffix == ".json":
73 | goal = json.load(f)
74 | else:
75 | goal = utils.parse_pattern_file(f)
76 |
77 | goal_masks = rearrange_dice.generate_goal_mask(camera_info, goal)
78 | segmented_images = [segment_image(img) for img in images]
79 |
80 | reward = rearrange_dice.evaluate_state(goal_masks, segmented_images)
81 | print("Reward: -%f" % reward)
82 |
83 | # Overlay goal mask with segmentation and mark pixels red/green depending
84 | # on whether they are inside or outside the goal area.
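# (BGR colours: green = die pixel inside the goal mask, red = die pixel
# outside the goal mask, dark green = goal mask area not covered by a die)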
85 | labelled_segmentations = []
86 | for goalmask, segmask in zip(goal_masks, segmented_images):
87 | good = np.logical_and(goalmask, segmask)
88 | bad = np.logical_xor(good, segmask)
89 | good_but_empty = np.logical_and(goalmask, np.logical_not(segmask))
90 |
91 | labelled = np.zeros_like(images[0])
92 | labelled[good] = (0, 255, 0)
93 | labelled[bad] = (0, 0, 255)
94 | labelled[good_but_empty] = (0, 90, 0)
95 |
96 | labelled_segmentations.append(labelled)
97 |
98 | # show images
99 | images = np.hstack(images)
100 | labelled_segmentations = np.hstack(labelled_segmentations)
101 | debug_img = np.vstack([images, labelled_segmentations])
102 |
103 | cv2.imshow("image", debug_img)
104 | cv2.waitKey()
105 |
106 | return 0
107 |
108 |
109 | if __name__ == "__main__":
110 | sys.exit(main())
111 |
--------------------------------------------------------------------------------
/trifinger_simulation/tasks/rearrange_dice/utils.py:
--------------------------------------------------------------------------------
1 | """Utility functions for the rearrange_dice task."""
2 |
3 | import typing
4 | import os
5 |
6 | import numpy as np
7 |
8 | from . import (
9 | NUM_DICE,
10 | N_CELLS_PER_ROW,
11 | Goal,
12 | _cell_center_position,
13 | _get_grid_cells,
14 | _is_cell_inside_arena,
15 | )
16 |
17 |
18 | #: Character used to mark cells outside the circular arena.
19 | CHAR_OUTSIDE = "#"
20 | #: Character used to mark empty cells inside the arena.
21 | CHAR_EMPTY = "."
22 | #: Character used to mark goal positions.
23 | CHAR_FILLED = "X"
24 |
25 |
26 | def create_empty_pattern() -> str:
27 | """Get an ASCII template with an empty pattern."""
28 | cells = np.full((N_CELLS_PER_ROW, N_CELLS_PER_ROW), CHAR_OUTSIDE)
29 | for i, j in _get_grid_cells():
30 | cells[i, j] = CHAR_EMPTY
31 |
32 | output = np.apply_along_axis(
33 | "\n".join, axis=0, arr=np.apply_along_axis("".join, axis=1, arr=cells)
34 | )
35 |
36 | return str(output)
37 |
38 |
39 | def create_pattern_template_file(filename: typing.Union[str, os.PathLike]):
40 | """Create a goal pattern template file."""
41 | pattern = create_empty_pattern()
42 |
43 | with open(filename, "w") as f:
44 | f.write(pattern)
45 | f.write("\n")
46 | f.write("-- \n")
47 | f.write("Text after the empty line is ignored.\n")
48 |
49 |
50 | def parse_pattern(goal_pattern: typing.Sequence[str]) -> Goal:
51 | """Parse goal pattern."""
52 | goal = []
53 | for i, line in enumerate(goal_pattern):
54 | if not line or line == "\n":
55 | break
56 |
57 | if i >= N_CELLS_PER_ROW:
58 | raise RuntimeError("Invalid pattern: too many rows")
59 |
60 | if len(line.strip()) != N_CELLS_PER_ROW:
61 | raise RuntimeError("Invalid pattern: wrong row length")
62 |
63 | filled_mask = np.array(list(line)) == CHAR_FILLED
64 | filled_cells = np.nonzero(filled_mask)[0]
65 |
66 | for c in filled_cells:
67 | if not _is_cell_inside_arena((i, c)):
68 | raise RuntimeError("Invalid pattern: goal outside the arena")
69 |
70 | goal.append(_cell_center_position((i, c)))
71 |
72 | if len(goal) != NUM_DICE:
73 | raise RuntimeError(
74 | "Invalid number of goals.
Expected %d, got %d"
75 | % (NUM_DICE, len(goal))
76 | )
77 |
78 | return goal
79 |
80 |
81 | def parse_pattern_file(f: typing.TextIO) -> Goal:
82 | """Parse goal pattern from file."""
83 | return parse_pattern(f.readlines())
84 |
--------------------------------------------------------------------------------
/trifinger_simulation/trifingerpro_limits.py:
--------------------------------------------------------------------------------
1 | """Limits (torques, joint positions, etc.) of the TriFingerPro platform."""
2 |
3 | from types import SimpleNamespace
4 | import numpy as np
5 |
6 | from .tasks import move_cube
7 |
8 |
9 | n_joints = 9
10 | n_fingers = 3
11 | # Note: The actual max. torque is 0.396 but due to different rounding errors
12 | # between float32 and float64, setting the exact value can result in failures
13 | # when checking the limits (because `np.float64(0.396) > np.float32(0.396)`).
14 | # Therefore add a bit of safety margin here. If a user sets a slightly
15 | # too high torque due to this, nothing bad will happen, it will just get
16 | # capped to the actual max. value internally.
17 | max_torque_Nm = 0.397
18 | max_velocity_radps = 10
19 |
20 |
21 | #: Joint torque limits [Nm]
22 | robot_torque = SimpleNamespace(
23 | low=np.full(n_joints, -max_torque_Nm, dtype=np.float32),
24 | high=np.full(n_joints, max_torque_Nm, dtype=np.float32),
25 | default=np.zeros(n_joints, dtype=np.float32),
26 | )
27 | #: Joint position limits [rad]
28 | robot_position = SimpleNamespace(
29 | low=np.array([-0.33, 0.0, -2.7] * n_fingers, dtype=np.float32),
30 | high=np.array([1.0, 1.57, 0.0] * n_fingers, dtype=np.float32),
31 | default=np.array([0.0, 0.9, -1.7] * n_fingers, dtype=np.float32),
32 | )
33 | #: Joint velocity limits [rad/s]
34 | robot_velocity = SimpleNamespace(
35 | low=np.full(n_joints, -max_velocity_radps, dtype=np.float32),
36 | high=np.full(n_joints, max_velocity_radps, dtype=np.float32),
37 | default=np.zeros(n_joints, dtype=np.float32),
38 | )
39 |
40 | #: Object position limits [m]
41 | object_position = SimpleNamespace(
42 | low=np.array([-0.3, -0.3, 0], dtype=np.float32),
43 | high=np.array([0.3, 0.3, 0.3], dtype=np.float32),
44 | default=np.array([0, 0, move_cube._min_height], dtype=np.float32),
45 | )
46 | #: Object orientation limits
47 | object_orientation = SimpleNamespace(
48 | low=-np.ones(4, dtype=np.float32),
49 | high=np.ones(4, dtype=np.float32),
50 | default=move_cube.Pose().orientation,
51 | )
52 |
--------------------------------------------------------------------------------
/trifinger_simulation/visual_objects.py:
--------------------------------------------------------------------------------
1 | """Different types of visualisation markers for the simulation."""
2 |
3 | import numpy as np
4 | import pybullet
5 |
6 |
7 | class Marker:
8 | """Visualise multiple positions using spheres."""
9 |
10 | def __init__(
11 | self,
12 | number_of_goals,
13 | goal_size=0.015,
14 | initial_position=[0.18, 0.18, 0.08],
15 | **kwargs,
16 | ):
17 | """
18 | Create markers for visualization.
19 |
20 | Args:
21 | number_of_goals (int): the desired number of goals to display
22 | goal_size (float): radius of each goal marker
23 | initial_position (list of floats): initial position (x, y, z) at
24 | which the goal markers are displayed
25 | """
26 | self._kwargs = kwargs
27 | color_cycle = [[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]
28 |
29 | goal_shape_ids = [None] * number_of_goals
30 | self.goal_ids = [None] * number_of_goals
31 | self.goal_orientations = [None] * number_of_goals
32 |
33 | # 
Either a box or a sphere can be used: uncomment accordingly.
34 | for i in range(number_of_goals):
35 | color = color_cycle[i % len(color_cycle)]
36 | goal_shape_ids[i] = pybullet.createVisualShape(
37 | # shapeType=pybullet.GEOM_BOX,
38 | # halfExtents=[goal_size] * 3,
39 | shapeType=pybullet.GEOM_SPHERE,
40 | radius=goal_size,
41 | rgbaColor=color,
42 | **self._kwargs,
43 | )
44 | self.goal_ids[i] = pybullet.createMultiBody(
45 | baseVisualShapeIndex=goal_shape_ids[i],
46 | basePosition=initial_position,
47 | baseOrientation=[0, 0, 0, 1],
48 | **self._kwargs,
49 | )
50 | (
51 | _,
52 | self.goal_orientations[i],
53 | ) = pybullet.getBasePositionAndOrientation(
54 | self.goal_ids[i],
55 | **self._kwargs,
56 | )
57 |
58 | def set_state(self, positions):
59 | """
60 | Set new positions for the goal markers with the orientation being the
61 | same as when they were created.
62 |
63 | Args:
64 | positions (list of lists): List of lists with
65 | x,y,z positions of all goals.
66 | """
67 | for goal_id, orientation, position in zip(
68 | self.goal_ids, self.goal_orientations, positions
69 | ):
70 | pybullet.resetBasePositionAndOrientation(
71 | goal_id,
72 | position,
73 | orientation,
74 | **self._kwargs,
75 | )
76 |
77 |
78 | class ObjectMarker:
79 | def __init__(
80 | self,
81 | shape_type,
82 | position,
83 | orientation,
84 | color=(0, 1, 0, 0.5),
85 | pybullet_client_id=0,
86 | **kwargs,
87 | ):
88 | """
89 | Create an object marker for visualization.
90 |
91 | Args:
92 | shape_type: Shape type of the object (e.g. pybullet.GEOM_BOX).
93 | position: Position (x, y, z)
94 | orientation: Orientation as quaternion (x, y, z, w)
95 | kwargs: Keyword arguments that are passed to
96 | pybullet.createVisualShape. Use this to specify
97 | shape-type-specific parameters like the object size.
98 | """
99 | self._pybullet_client_id = pybullet_client_id
100 |
101 | self.shape_id = pybullet.createVisualShape(
102 | shapeType=shape_type,
103 | rgbaColor=color,
104 | physicsClientId=self._pybullet_client_id,
105 | **kwargs,
106 | )
107 | self.body_id = pybullet.createMultiBody(
108 | baseVisualShapeIndex=self.shape_id,
109 | basePosition=position,
110 | baseOrientation=orientation,
111 | physicsClientId=self._pybullet_client_id,
112 | )
113 |
114 | def __del__(self):
115 | """Removes the marker from the environment."""
116 | # At this point it may be that pybullet was already shut down. To avoid
117 | # an error, only remove the object if the simulation is still running.
118 | if pybullet.isConnected(self._pybullet_client_id):
119 | pybullet.removeBody(self.body_id, self._pybullet_client_id)
120 |
121 | def set_state(self, position, orientation):
122 | """Set pose of the marker.
123 |
124 | Args:
125 | position: Position (x, y, z)
126 | orientation: Orientation as quaternion (x, y, z, w)
127 | """
128 | pybullet.resetBasePositionAndOrientation(
129 | self.body_id,
130 | position,
131 | orientation,
132 | physicsClientId=self._pybullet_client_id,
133 | )
134 |
135 |
136 | class CuboidMarker(ObjectMarker):
137 | """Visualize a Cuboid."""
138 |
139 | def __init__(
140 | self,
141 | size,
142 | position,
143 | orientation,
144 | color=(0, 1, 0, 0.5),
145 | pybullet_client_id=0,
146 | ):
147 | """
148 | Create a cuboid marker for visualization.
149 |
150 | Args:
151 | size (list): Lengths of the cuboid sides.
152 | position: Position (x, y, z)
153 | orientation: Orientation as quaternion (x, y, z, w)
154 | color: Color of the cube as a tuple (r, g, b, a)
155 | """
156 | size = np.asarray(size)
157 | super().__init__(
158 | pybullet.GEOM_BOX,
159 | position,
160 | orientation,
161 | color,
162 | pybullet_client_id,
163 | halfExtents=size / 2,
164 | )
165 |
166 |
167 | class CubeMarker(CuboidMarker):
168 | """Visualize a cube."""
169 |
170 | def __init__(
171 | self,
172 | width,
173 | position,
174 | orientation,
175 | color=(0, 1, 0, 0.5),
176 | pybullet_client_id=0,
177 | ):
178 | """
179 | Create a cube marker for visualization.
180 |
181 | Args:
182 | width (float): Length of one side of the cube.
183 | position: Position (x, y, z)
184 | orientation: Orientation as quaternion (x, y, z, w)
185 | color: Color of the cube as a tuple (r, g, b, a)
186 | """
187 | super().__init__(
188 | [width] * 3,
189 | position,
190 | orientation,
191 | color,
192 | pybullet_client_id,
193 | )
194 |
--------------------------------------------------------------------------------
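Minimal usage sketch for the marker classes above (illustration only, not a file of the package; assumes pybullet and trifinger_simulation are installed and that a GUI connection is available — with pybullet.DIRECT the markers are created but not rendered):

import time

import pybullet

from trifinger_simulation.visual_objects import CubeMarker, Marker

pybullet.connect(pybullet.GUI)  # client id 0, matching the classes' default

# Three sphere markers, cycling through the red/green/blue colour cycle.
goals = Marker(number_of_goals=3)
goals.set_state([[0.1, 0.0, 0.05], [0.0, 0.1, 0.05], [-0.1, 0.0, 0.05]])

# Semi-transparent green cube with 6.5 cm side length, resting on the ground.
cube = CubeMarker(width=0.065, position=[0, 0, 0.0325], orientation=[0, 0, 0, 1])

time.sleep(5)  # keep the GUI open briefly to inspect the markers
pybullet.disconnect()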