├── .docker
├── build.bash
├── devel.bash
├── host
│ └── install_docker_with_nvidia.bash
├── internal
│ ├── .bash_aliases
│ ├── cyclonedds.xml
│ ├── entrypoint.bash
│ └── setup_symlinks.bash
└── run.bash
├── .dockerignore
├── .git_hooks
└── setup.bash
├── .gitignore
├── .gitmodules
├── .pre-commit-config.yaml
├── CHANGELOG.md
├── CMakeLists.txt
├── Dockerfile
├── LICENSE
├── README.md
├── drl_grasping.repos
├── drl_grasping
├── __init__.py
├── drl_octree
│ ├── __init__.py
│ ├── algorithms
│ │ ├── __init__.py
│ │ ├── off_policy_algorithm.py
│ │ ├── sac
│ │ │ ├── __init__.py
│ │ │ └── policies.py
│ │ ├── td3
│ │ │ ├── __init__.py
│ │ │ └── policies.py
│ │ └── tqc
│ │ │ ├── __init__.py
│ │ │ └── policies.py
│ ├── features_extractor
│ │ ├── __init__.py
│ │ ├── image_cnn.py
│ │ ├── modules.py
│ │ └── octree_cnn.py
│ └── replay_buffer
│ │ ├── __init__.py
│ │ ├── replay_buffer.py
│ │ └── utils.py
├── envs
│ ├── __init__.py
│ ├── control
│ │ └── __init__.py
│ ├── models
│ │ ├── __init__.py
│ │ ├── lights
│ │ │ ├── __init__.py
│ │ │ ├── random_sun.py
│ │ │ └── sun.py
│ │ ├── objects
│ │ │ ├── __init__.py
│ │ │ ├── primitives
│ │ │ │ ├── __init__.py
│ │ │ │ ├── box.py
│ │ │ │ ├── cylinder.py
│ │ │ │ ├── plane.py
│ │ │ │ └── sphere.py
│ │ │ ├── random_lunar_rock.py
│ │ │ ├── random_object.py
│ │ │ ├── random_primitive.py
│ │ │ └── rock.py
│ │ ├── robots
│ │ │ ├── __init__.py
│ │ │ ├── lunalab_summit_xl_gen.py
│ │ │ └── panda.py
│ │ ├── sensors
│ │ │ ├── __init__.py
│ │ │ └── camera.py
│ │ ├── terrains
│ │ │ ├── __init__.py
│ │ │ ├── ground.py
│ │ │ ├── lunar_heightmap.py
│ │ │ ├── lunar_surface.py
│ │ │ ├── random_ground.py
│ │ │ └── random_lunar_surface.py
│ │ └── utils
│ │ │ ├── __init__.py
│ │ │ ├── model_collection_randomizer.py
│ │ │ └── xacro2sdf.py
│ ├── perception
│ │ ├── __init__.py
│ │ ├── camera_subscriber.py
│ │ └── octree.py
│ ├── randomizers
│ │ ├── __init__.py
│ │ └── manipulation.py
│ ├── runtimes
│ │ ├── __init__.py
│ │ └── real_evaluation.py
│ ├── tasks
│ │ ├── __init__.py
│ │ ├── curriculums
│ │ │ ├── __init__.py
│ │ │ ├── common.py
│ │ │ └── grasp.py
│ │ ├── grasp
│ │ │ ├── __init__.py
│ │ │ ├── grasp.py
│ │ │ └── grasp_octree.py
│ │ ├── grasp_planetary
│ │ │ ├── __init__.py
│ │ │ ├── grasp_planetary.py
│ │ │ ├── grasp_planetary_color_image.py
│ │ │ ├── grasp_planetary_depth_image.py
│ │ │ └── grasp_planetary_octree.py
│ │ ├── manipulation.py
│ │ └── reach
│ │ │ ├── __init__.py
│ │ │ ├── reach.py
│ │ │ ├── reach_color_image.py
│ │ │ ├── reach_depth_image.py
│ │ │ └── reach_octree.py
│ ├── utils
│ │ ├── __init__.py
│ │ ├── conversions.py
│ │ ├── gazebo.py
│ │ ├── logging.py
│ │ ├── math.py
│ │ ├── tf2_broadcaster.py
│ │ └── tf2_listener.py
│ └── worlds
│ │ ├── default.sdf
│ │ └── lunar.sdf
└── utils
│ ├── README.md
│ ├── __init__.py
│ ├── callbacks.py
│ ├── exp_manager.py
│ ├── hyperparams_opt.py
│ ├── utils.py
│ └── wrappers.py
├── examples
├── ex_evaluate.bash
├── ex_evaluate_pretrained_agent.bash
├── ex_evaluate_real.bash
├── ex_optimize.bash
├── ex_random_agent.bash
├── ex_train.bash
└── ex_train_dreamerv2.bash
├── hyperparams
├── sac.yml
├── td3.yml
└── tqc.yml
├── launch
├── evaluate.launch.py
├── optimize.launch.py
├── random_agent.launch.py
├── real
│ └── real_lunalab_summit_xl_gen.launch.py
├── sim
│ └── sim.launch.py
├── train.launch.py
└── train_dreamerv2.launch.py
├── package.xml
├── python_requirements.txt
├── rviz
├── drl_grasping.rviz
└── drl_grasping_real_evaluation.rviz
└── scripts
├── evaluate.py
├── preload_replay_buffer.py
├── random_agent.py
├── train.py
├── train_dreamerv2.py
└── utils
├── dataset
├── dataset_download_test.bash
├── dataset_download_train.bash
├── dataset_set_test.bash
└── dataset_set_train.bash
└── process_collection.py
/.docker/build.bash:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" &>/dev/null && pwd)"
4 | PROJECT_DIR="$(dirname "${SCRIPT_DIR}")"
5 |
6 | TAG="andrejorsula/$(basename "${PROJECT_DIR}")"
7 |
8 | if [ "${#}" -gt "0" ]; then
9 | if [[ "${1}" != "-"* ]]; then
10 | TAG="${TAG}:${1}"
11 | BUILD_ARGS=${*:2}
12 | else
13 | BUILD_ARGS=${*:1}
14 | fi
15 | fi
16 |
17 | DOCKER_BUILD_CMD=(
18 | docker build
19 | "${PROJECT_DIR}"
20 | --tag "${TAG}"
21 | "${BUILD_ARGS}"
22 | )
23 |
24 | echo -e "\033[1;30m${DOCKER_BUILD_CMD[*]}\033[0m" | xargs
25 |
26 | # shellcheck disable=SC2048
27 | exec ${DOCKER_BUILD_CMD[*]}
28 |
--------------------------------------------------------------------------------
/.docker/devel.bash:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" &>/dev/null && pwd)"
4 | PROJECT_DIR="$(dirname "${SCRIPT_DIR}")"
5 |
6 | DOCKER_HOME="/root"
7 | DOCKER_WS_DIR="${DOCKER_HOME}/ws"
8 | DOCKER_WS_SRC_DIR="${DOCKER_WS_DIR}/src"
9 | DOCKER_TARGET_SRC_DIR="${DOCKER_WS_SRC_DIR}/$(basename "${PROJECT_DIR}")"
10 |
11 | echo -e "\033[2;37mDevelopment volume: ${PROJECT_DIR} -> ${DOCKER_TARGET_SRC_DIR}\033[0m" | xargs
12 |
13 | exec "${SCRIPT_DIR}/run.bash" -v "${PROJECT_DIR}:${DOCKER_TARGET_SRC_DIR}" "${@}"
14 |
--------------------------------------------------------------------------------
/.docker/host/install_docker_with_nvidia.bash:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | echo "Installing Docker with utilities for NVIDIA GPUs..."
4 |
5 | # Install Docker
6 | wget https://get.docker.com -O - -o /dev/null | sh &&
7 | sudo systemctl --now enable docker
8 |
9 | # Install support for NVIDIA (Container Toolkit or Docker depending on Docker version)
10 | wget https://nvidia.github.io/nvidia-docker/gpgkey -O - -o /dev/null | sudo apt-key add - && wget "https://nvidia.github.io/nvidia-docker/$(source /etc/os-release && echo "${ID}${VERSION_ID}")/nvidia-docker.list" -O - -o /dev/null | sed "s#deb https://#deb [arch=$(dpkg --print-architecture)] https://#g" | sudo tee /etc/apt/sources.list.d/nvidia-docker.list >/dev/null
11 | sudo apt-get update
12 | if dpkg --compare-versions "$(sudo docker version --format '{{.Server.Version}}')" gt "19.3"; then
13 | # With Docker 19.03, nvidia-docker2 is deprecated since NVIDIA GPUs are natively supported as devices in the Docker runtime
14 | echo "Installing NVIDIA Container Toolkit"
15 | sudo apt-get update && sudo apt-get install -y nvidia-container-toolkit
16 | else
17 | echo "Installing NVIDIA Docker (Docker 19.03 or older)"
18 | sudo apt-get update && sudo apt-get install -y nvidia-docker2
19 | fi
20 | sudo systemctl restart docker
21 |
22 | # (Optional) Add user to docker group
23 | [ -z "${PS1}" ] && read -erp "Do you want to add user ${USER} to the docker group? [Y/n]: " ADD_USER_TO_DOCKER_GROUP
24 | if [[ "${ADD_USER_TO_DOCKER_GROUP,,}" =~ (y|yes) && ! "${ADD_USER_TO_DOCKER_GROUP,,}" =~ (n|no) ]]; then
25 | sudo groupadd -f docker && sudo usermod -aG docker "${USER}" && echo -e "User ${USER} was added to the docker group.\nPlease relog or execute the following command for changes to take effect.\n\tnewgrp docker"
26 | fi
27 |
--------------------------------------------------------------------------------
/.docker/internal/.bash_aliases:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # ~/.bash_aliases: Custom aliases.
3 |
4 | alias _nano_envs='nano ${WS_SRC_DIR}/drl_grasping/drl_grasping/envs/__init__.py'
5 | alias _nano_ex_train='nano ${WS_SRC_DIR}/drl_grasping/examples/ex_train.bash'
6 | alias _nano_ex_train_dreamerv2='nano ${WS_SRC_DIR}/drl_grasping/examples/ex_train_dreamerv2.bash'
7 | alias _nano_sac='nano ${WS_SRC_DIR}/drl_grasping/hyperparams/sac.yml'
8 | alias _nano_td3='nano ${WS_SRC_DIR}/drl_grasping/hyperparams/td3.yml'
9 | alias _nano_tqc='nano ${WS_SRC_DIR}/drl_grasping/hyperparams/tqc.yml'
10 | alias _train='ros2 run drl_grasping ex_train.bash'
11 | alias _train_dreamerv2='ros2 run drl_grasping ex_train_dreamerv2.bash'
12 |
--------------------------------------------------------------------------------
/.docker/internal/cyclonedds.xml:
--------------------------------------------------------------------------------
1 |
2 |
4 |
5 |
6 | lo
7 | false
8 | 65507B
9 | 16384B
10 |
11 |
12 | auto
13 |
14 |
15 |
16 | 120
17 |
18 |
19 | info
20 | stdout
21 |
22 |
23 |
24 |
25 | lo
26 | true
27 | 65507B
28 | 16384B
29 |
30 |
31 | info
32 | stdout
33 |
34 |
35 |
36 |
37 | auto
38 | default
39 |
40 |
41 | info
42 | stdout
43 |
44 |
45 |
46 |
--------------------------------------------------------------------------------
/.docker/internal/entrypoint.bash:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" &>/dev/null && pwd)"
3 |
4 | ## Source ROS 2 installation and workspace
5 | source "/opt/ros/${ROS_DISTRO}/setup.bash" --
6 | source "${WS_INSTALL_DIR}/local_setup.bash" --
7 |
8 | ## Configure ROS 2 RMW
9 | export RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
10 | export CYCLONEDDS_URI=file://${SCRIPT_DIR}/cyclonedds.xml
11 |
12 | ## Default ROS_DOMAIN_ID to 0 if not specified (ROS 2 default)
13 | if [ -z "${ROS_DOMAIN_ID}" ]; then
14 | export ROS_DOMAIN_ID="0"
15 | fi
16 | ## Default IGN_PARTITION to ROS_DOMAIN_ID if not specified
17 | if [ -z "${IGN_PARTITION}" ]; then
18 | export IGN_PARTITION="${ROS_DOMAIN_ID}"
19 | fi
20 | ## Configure behaviour of ROS 2 and Gazebo Transport based on selected ROS_DOMAIN_ID
21 | if [ "${ROS_DOMAIN_ID}" == "69" ]; then
22 | ## ROS_DOMAIN_ID="69" - Default network interface and multicast configuration
23 | unset ROS_LOCALHOST_ONLY
24 | ## Gazebo Transport - Make sure the communication is not restricted
25 | unset IGN_IP
26 | unset IGN_RELAY
27 | else
28 | ## ROS_DOMAIN_ID!=69 - Restrict to localhost
29 | export ROS_LOCALHOST_ONLY=1
30 | ## Gazebo Transport - Restrict to localhost
31 | export IGN_IP=127.0.0.1
32 | export IGN_RELAY=127.0.0.1
33 | if [ "${ROS_DOMAIN_ID}" == "42" ]; then
34 | ## ROS_DOMAIN_ID==42 - Enable multicast
35 | ifconfig lo multicast
36 | route add -net 224.0.0.0 netmask 240.0.0.0 dev lo 2>/dev/null
37 | else
38 | ## ROS_DOMAIN_ID!=42 - Disable Gazebo Transport broadcasting and user commands
39 | export DRL_GRASPING_BROADCAST_INTERACTIVE_GUI=false
40 | fi
41 | fi
42 |
43 | ## Export paths to Gazebo plugins (ign_ros2_control)
44 | export IGN_GAZEBO_SYSTEM_PLUGIN_PATH=${WS_INSTALL_DIR}/lib${IGN_GAZEBO_SYSTEM_PLUGIN_PATH:+:${IGN_GAZEBO_SYSTEM_PLUGIN_PATH}}
45 |
46 | ## Export paths for O-CNN
47 | export PATH="${WS_SRC_DIR}/O-CNN/octree/build${PATH:+:${PATH}}"
48 | export PYTHONPATH="${WS_SRC_DIR}/O-CNN/octree/build/python${PYTHONPATH:+:${PYTHONPATH}}"
49 |
50 | ## Source textures and SDF models
51 | if [ -d "${ASSETS_DIR}/textures" ]; then
52 | source "${ASSETS_DIR}/textures/scripts/source.bash"
53 | fi
54 | if [ -d "${ASSETS_DIR}/sdf_models" ]; then
55 | source "${ASSETS_DIR}/sdf_models/scripts/source.bash"
56 | fi
57 |
58 | exec "$@"
59 |
--------------------------------------------------------------------------------
/.docker/internal/setup_symlinks.bash:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" &>/dev/null && pwd)"
3 |
4 | ## Setup symbolic link for entrypoint
5 | ln -s "${SCRIPT_DIR}/entrypoint.bash" "${HOME}/entrypoint.bash"
6 |
7 | ## Setup symbolic link for bash aliases
8 | ln -s "${SCRIPT_DIR}/.bash_aliases" "${HOME}/.bash_aliases"
9 |
10 | ## Setup symbolic links to important directories and files of `drl_grasping`
11 | ln -s "${WS_SRC_DIR}/drl_grasping/hyperparams" "${HOME}/hyperparams"
12 | ln -s "${WS_SRC_DIR}/drl_grasping/examples" "${HOME}/examples"
13 | ln -s "${WS_SRC_DIR}/drl_grasping/scripts" "${HOME}/scripts"
14 | ln -s "${WS_SRC_DIR}/drl_grasping/launch" "${HOME}/launch"
15 | ln -s "${WS_SRC_DIR}/drl_grasping/drl_grasping/envs/__init__.py" "${HOME}/envs.py"
16 |
--------------------------------------------------------------------------------
/.docker/run.bash:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | TAG="andrejorsula/drl_grasping"
4 |
5 | ## Forward custom volumes and environment variables
6 | CUSTOM_VOLUMES=()
7 | CUSTOM_ENVS=()
8 | while getopts ":v:e:" opt; do
9 | case "${opt}" in
10 | v) CUSTOM_VOLUMES+=("${OPTARG}") ;;
11 | e) CUSTOM_ENVS+=("${OPTARG}") ;;
12 | *)
13 | echo >&2 "Usage: ${0} [-v VOLUME] [-e ENV] [TAG] [CMD]"
14 | exit 2
15 | ;;
16 | esac
17 | done
18 | shift "$((OPTIND - 1))"
19 |
20 | ## Determine TAG and CMD positional arguments
21 | if [ "${#}" -gt "0" ]; then
22 | if [[ $(docker images --format "{{.Tag}}" "${TAG}") =~ (^|[[:space:]])${1}($|[[:space:]]) || $(wget -q https://registry.hub.docker.com/v2/repositories/${TAG}/tags -O - | grep -Poe '(?<=(\"name\":\")).*?(?=\")') =~ (^|[[:space:]])${1}($|[[:space:]]) ]]; then
23 | # Use the first argument as the tag if such a tag exists either locally or on the remote registry
24 | TAG="${TAG}:${1}"
25 | CMD=${*:2}
26 | else
27 | CMD=${*:1}
28 | fi
29 | fi
30 |
31 | ## GPU
32 | # Enable GPU either via NVIDIA Container Toolkit or NVIDIA Docker (depending on Docker version)
33 | if dpkg --compare-versions "$(docker version --format '{{.Server.Version}}')" gt "19.3"; then
34 | GPU_OPT="--gpus all"
35 | else
36 | GPU_OPT="--runtime nvidia"
37 | fi
38 |
39 | ## GUI
40 | # To enable GUI, make sure processes in the container can connect to the x server
41 | XAUTH=/tmp/.docker.xauth
42 | if [ ! -f ${XAUTH} ]; then
43 | touch ${XAUTH}
44 | chmod a+r ${XAUTH}
45 |
46 | XAUTH_LIST=$(xauth nlist "${DISPLAY}")
47 | if [ -n "${XAUTH_LIST}" ]; then
48 | # shellcheck disable=SC2001
49 | XAUTH_LIST=$(sed -e 's/^..../ffff/' <<<"${XAUTH_LIST}")
50 | echo "${XAUTH_LIST}" | xauth -f ${XAUTH} nmerge -
51 | fi
52 | fi
53 | # GUI-enabling volumes
54 | GUI_VOLUMES=(
55 | "${XAUTH}:${XAUTH}"
56 | "/tmp/.X11-unix:/tmp/.X11-unix"
57 | "/dev/input:/dev/input"
58 | )
59 | # GUI-enabling environment variables
60 | GUI_ENVS=(
61 | XAUTHORITY="${XAUTH}"
62 | QT_X11_NO_MITSHM=1
63 | DISPLAY="${DISPLAY}"
64 | )
65 |
66 | ## Additional volumes
67 | # Synchronize timezone with host
68 | CUSTOM_VOLUMES+=("/etc/localtime:/etc/localtime:ro")
69 | # Persistent storage of logs
70 | CUSTOM_VOLUMES+=("${PWD}/drl_grasping_training_docker:/root/drl_grasping_training")
71 |
72 | ## Additional environment variables
73 | # Synchronize ROS_DOMAIN_ID with host
74 | if [ -n "${ROS_DOMAIN_ID}" ]; then
75 | CUSTOM_ENVS+=("ROS_DOMAIN_ID=${ROS_DOMAIN_ID}")
76 | fi
77 | # Synchronize IGN_PARTITION with host
78 | if [ -n "${IGN_PARTITION}" ]; then
79 | CUSTOM_ENVS+=("IGN_PARTITION=${IGN_PARTITION}")
80 | fi
81 |
82 | DOCKER_RUN_CMD=(
83 | docker run
84 | --interactive
85 | --tty
86 | --rm
87 | --network host
88 | --ipc host
89 | --privileged
90 | --security-opt "seccomp=unconfined"
91 | "${GUI_VOLUMES[@]/#/"--volume "}"
92 | "${GUI_ENVS[@]/#/"--env "}"
93 | "${GPU_OPT}"
94 | "${CUSTOM_VOLUMES[@]/#/"--volume "}"
95 | "${CUSTOM_ENVS[@]/#/"--env "}"
96 | "${TAG}"
97 | "${CMD}"
98 | )
99 |
100 | echo -e "\033[1;30m${DOCKER_RUN_CMD[*]}\033[0m" | xargs
101 |
102 | # shellcheck disable=SC2048
103 | exec ${DOCKER_RUN_CMD[*]}
104 |
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
1 | # Docker
2 | **/Dockerfile
3 | **/.dockerignore
4 | **/.docker/host
5 | **/.docker/*.bash
6 |
7 | # Git
8 | **/.git
9 | **/.gitignore
10 |
11 | # Pre-commit
12 | **/.git_hooks
13 | **/.pre-commit-config.yaml
14 |
15 | # Colcon
16 | **/build
17 | **/install
18 | **/log
19 |
20 | # Python
21 | **/__pycache__
22 |
23 | # Markdown
24 | **/README.md
25 | **/CHANGELOG.md
26 |
--------------------------------------------------------------------------------
/.git_hooks/setup.bash:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | # This script setups git hooks for this repository.
3 |
4 | SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" &>/dev/null && pwd)"
5 | REPO_DIR="$(dirname "${SCRIPT_DIR}")"
6 |
7 | pip install --user pre-commit &&
8 | cd "${REPO_DIR}" &&
9 | pre-commit install &&
10 | cd - || exit
11 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Colcon
2 | **/build
3 | **/install
4 | **/log
5 |
6 | # Python
7 | **/__pycache__
8 |
--------------------------------------------------------------------------------
/.gitmodules:
--------------------------------------------------------------------------------
1 | # [submodule "pretrained_agents"]
2 | # path = pretrained_agents
3 | # url = https://github.com/AndrejOrsula/drl_grasping_pretrained_agents.git
4 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | # See https://pre-commit.com for more information
2 | # See https://pre-commit.com/hooks.html for more hooks
3 |
4 | ci:
5 | skip: [hadolint-docker]
6 |
7 | repos:
8 | - repo: https://github.com/pre-commit/pre-commit-hooks
9 | rev: v4.3.0
10 | hooks:
11 | - id: check-added-large-files
12 | - id: check-case-conflict
13 | - id: check-executables-have-shebangs
14 | - id: check-merge-conflict
15 | - id: check-shebang-scripts-are-executable
16 | - id: check-symlinks
17 | - id: check-xml
18 | - id: check-yaml
19 | - id: debug-statements
20 | - id: destroyed-symlinks
21 | - id: detect-private-key
22 | - id: end-of-file-fixer
23 | - id: mixed-line-ending
24 | - id: requirements-txt-fixer
25 | - id: trailing-whitespace
26 |
27 | - repo: https://github.com/pycqa/isort
28 | rev: 5.10.1
29 | hooks:
30 | - id: isort
31 | args: ["--profile", "black"]
32 |
33 | - repo: https://github.com/psf/black
34 | rev: 22.6.0
35 | hooks:
36 | - id: black
37 |
38 | - repo: https://github.com/lovesegfault/beautysh
39 | rev: v6.2.1
40 | hooks:
41 | - id: beautysh
42 |
43 | - repo: https://github.com/executablebooks/mdformat
44 | rev: 0.7.14
45 | hooks:
46 | - id: mdformat
47 |
48 | - repo: https://github.com/hadolint/hadolint
49 | rev: v2.10.0
50 | hooks:
51 | - id: hadolint-docker
52 | args:
53 | [
54 | --ignore,
55 | DL3003,
56 | --ignore,
57 | DL3008,
58 | --ignore,
59 | DL3013,
60 | --ignore,
61 | DL4006,
62 | --ignore,
63 | SC1091,
64 | ]
65 |
66 | - repo: https://github.com/codespell-project/codespell
67 | rev: v2.1.0
68 | hooks:
69 | - id: codespell
70 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # CHANGELOG
2 |
3 | ## [\[2.0.0\] - 2022-12-16](https://github.com/AndrejOrsula/drl_grasping/releases/tag/2.0.0)
4 |
5 | ### Added
6 |
7 | - Full integration with `ros2_control`.
8 | - Support for `moveit_servo` via `pymoveit`.
9 | - Support for mobile manipulators.
10 | - `lunalab_summit_xl_gen` is now added to the supported (mobile) robot models.
11 | - New `GraspPlanetary` environment with various observation variants.
12 | - Additional observation variants for `Grasp` environments.
13 | - Models for `Sun` and `RandomSun`, replacing the default light contained in SDF worlds.
14 | - Camera can now be mounted relative to robot frame (configurable) via DetachableJoint.
15 | - Added experimental setup for Dreamer V2 algorithm.
16 | - Configuration of pre-commit git hooks.
17 |
18 | ### Changed
19 |
20 | - Major refactoring of `drl_grasping` module
21 | - Refactored into two primary submodules that can be imported separately
22 | - `drl_octree` that contains octree CNN-based policy
23 | - `envs` that contain the environment itself
24 | - `utils` submodule still contains boilerplate for RL training and evaluation
25 | - Randomizer is now used to wrap task environment during gym environment registration.
26 | - This means that two environment variants of each task now exist, i.e. `*-v0` and `*-Gazebo-v0`. The default task can therefore be used with different run-times without requiring changes to the hyperparameters in order to make it functional.
27 | - Environments are now registered directly in `envs` module instead of `envs.tasks`
28 | - All important features are now exposed as ROS 2 launch scripts.
29 | - A single configurable launch script [sim.launch.py](./launch/sim.launch.py) now replaces all previous variations. Use its launch arguments to select robot model and enable/disable RViz2 GUI.
30 | - Instead of all fingers, more than half (`n//2 + 1`) are now needed to be in contact for a grasp to be successful
31 | - Changed PEP 8 python formatting for black to improve consistency.
32 | - Changed bash formatter to beautysh (minor changes).
33 | - Custom environment logging is now performed via ROS 2 loggers.
34 | - Local classes for interfacing with MoveIt 2.
35 | - Simplified usage and quality of life improvements.
36 |
37 | ### Fixed
38 |
39 | - Fix grasp checking for grippers with more than 2 fingers.
40 |
41 | ### Removed
42 |
43 | - `DRL_GRASPING_DEBUG_LEVEL` environment variable is now replaced by `log-level` ROS 2 argument.
44 | - `ur5_rg2` and `kinova_j2s7s300` from supported robot models due to shift to `ros2_control`.
45 |
46 | ## [\[1.1.0\] - 2021-10-13](https://github.com/AndrejOrsula/drl_grasping/releases/tag/1.1.0)
47 |
48 | ### Added
49 |
50 | - `kinova_j2s7s300` is now added to the supported robot models.
51 | - Custom SDF worlds are now used as the base for RL environments.
52 | - Support for `DRL_GRASPING_DEBUG_LEVEL` environment variable.
53 | - Ignition fortress is tested and fully functional.
54 | - ROS 2 rolling is tested and fully functional.
55 |
56 | ### Changed
57 |
58 | - Local implementation of conversions for quaternion sequence is now used.
59 | - Simplified installation instructions in README.md.
60 | - Simplified and improved Dockerfile.
61 |
62 | ### Fixed
63 |
64 | - Compatibility with Stable-Baselines3 v1.2.0
65 |
66 | ## [\[1.0.0\] - 2021-06-08](https://github.com/AndrejOrsula/drl_grasping/releases/tag/1.0.0)
67 |
68 | ### Added
69 |
70 | - Initial version of this project.
71 | - Supported environments
72 | - `Reach-Gazebo-v0`
73 | - `Reach-ColorImage-Gazebo-v0`
74 | - `Reach-Octree-Gazebo-v0`
75 | - `Reach-OctreeWithColor-Gazebo-v0`
76 | - `Grasp-Octree-Gazebo-v0`
77 | - `Grasp-OctreeWithColor-Gazebo-v0`
78 | - Supported robot models
79 | - `panda`
80 | - `ur5_rg2`
81 | - Custom feature extractor
82 | - `OctreeCnnFeaturesExtractor`
83 | - Tested RL algorithms
84 | - `td3`
85 | - `sac`
86 | - `tqc`
87 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.5)
2 | project(drl_grasping)
3 |
4 | # Default to C11
5 | if(NOT CMAKE_C_STANDARD)
6 | set(CMAKE_C_STANDARD 11)
7 | endif()
8 | # Default to C++17
9 | if(NOT CMAKE_CXX_STANDARD)
10 | set(CMAKE_CXX_STANDARD 17)
11 | endif()
12 |
13 | # Compiler options
14 | if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
15 | add_compile_options(-Wall -Wextra -Wpedantic)
16 | endif()
17 |
18 | # Testing and linting
19 | if(BUILD_TESTING)
20 | find_package(ament_lint_auto REQUIRED)
21 | ament_lint_auto_find_test_dependencies()
22 | endif()
23 |
24 | # Find dependencies
25 | find_package(ament_cmake REQUIRED)
26 | find_package(ament_cmake_python REQUIRED)
27 |
28 | # Install Python module
29 | ament_python_install_module(
30 | drl_grasping
31 | )
32 |
33 | # Install scripts
34 | set(SCRIPTS_DIR scripts)
35 | install(PROGRAMS
36 | ${SCRIPTS_DIR}/evaluate.py
37 | ${SCRIPTS_DIR}/preload_replay_buffer.py
38 | ${SCRIPTS_DIR}/random_agent.py
39 | ${SCRIPTS_DIR}/train.py
40 | ${SCRIPTS_DIR}/train_dreamerv2.py
41 | ${SCRIPTS_DIR}/utils/dataset/dataset_download_test.bash
42 | ${SCRIPTS_DIR}/utils/dataset/dataset_download_train.bash
43 | ${SCRIPTS_DIR}/utils/dataset/dataset_set_test.bash
44 | ${SCRIPTS_DIR}/utils/dataset/dataset_set_train.bash
45 | ${SCRIPTS_DIR}/utils/process_collection.py
46 | DESTINATION lib/${PROJECT_NAME}
47 | )
48 |
49 | # Install examples
50 | set(EXAMPLES_DIR examples)
51 | install(PROGRAMS
52 | ${EXAMPLES_DIR}/ex_evaluate.bash
53 | ${EXAMPLES_DIR}/ex_evaluate_pretrained_agent.bash
54 | ${EXAMPLES_DIR}/ex_evaluate_real.bash
55 | ${EXAMPLES_DIR}/ex_optimize.bash
56 | ${EXAMPLES_DIR}/ex_random_agent.bash
57 | ${EXAMPLES_DIR}/ex_train.bash
58 | ${EXAMPLES_DIR}/ex_train_dreamerv2.bash
59 | DESTINATION lib/${PROJECT_NAME}
60 | )
61 |
62 | # Install directories
63 | install(DIRECTORY drl_grasping/envs/worlds launch rviz DESTINATION share/${PROJECT_NAME})
64 |
65 | # Setup the project
66 | ament_package()
67 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | BSD 3-Clause License
2 |
3 | Copyright (c) 2021-2022 University of Luxembourg and Andrej Orsula
4 | All rights reserved.
5 |
6 | Redistribution and use in source and binary forms, with or without
7 | modification, are permitted provided that the following conditions are met:
8 |
9 | 1. Redistributions of source code must retain the above copyright notice, this
10 | list of conditions and the following disclaimer.
11 |
12 | 2. Redistributions in binary form must reproduce the above copyright notice,
13 | this list of conditions and the following disclaimer in the documentation
14 | and/or other materials provided with the distribution.
15 |
16 | 3. Neither the name of the copyright holder nor the names of its
17 | contributors may be used to endorse or promote products derived from
18 | this software without specific prior written permission.
19 |
20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 |
--------------------------------------------------------------------------------
/drl_grasping.repos:
--------------------------------------------------------------------------------
1 | repositories:
2 | pymoveit2:
3 | type: git
4 | url: https://github.com/AndrejOrsula/pymoveit2.git
5 | version: master
6 | panda_ign_moveit2:
7 | type: git
8 | url: https://github.com/AndrejOrsula/panda_ign_moveit2.git
9 | version: master
10 | lunalab_summit_xl_gen:
11 | type: git
12 | url: https://github.com/snt-spacer/lunalab_summit_xl_gen.git
13 | version: ros2
14 | ros_ign:
15 | type: git
16 | url: https://github.com/ignitionrobotics/ros_ign.git
17 | version: galactic
18 | ign_ros2_control:
19 | type: git
20 | url: https://github.com/AndrejOrsula/ign_ros2_control.git
21 | version: devel
22 | ros2_controllers:
23 | type: git
24 | url: https://github.com/AndrejOrsula/ros2_controllers.git
25 | version: jtc_effort
26 | # TODO: Use upstream moveit2 (e.g. binary installation) once https://github.com/ros-planning/moveit2/pull/899 is merged
27 | moveit2:
28 | type: git
29 | url: https://github.com/AndrejOrsula/moveit2.git
30 | version: ign_sim_time
31 |
--------------------------------------------------------------------------------
/drl_grasping/__init__.py:
--------------------------------------------------------------------------------
1 | # Submodules of `drl_grasping` module can be imported individually as needed.
2 |
3 | # `drl_octree` - Import via `from drl_grasping import drl_octree` in order to employ a policy with an octree-based CNN feature extractor (Stable-Baselines3 algorithms).
4 |
5 | # `drl_grasping_envs` - Import via `from drl_grasping import envs as drl_grasping_envs` to make use of the environments.
6 |
7 | # `drl_grasping_utils` - Import via `from drl_grasping import utils as drl_grasping_utils` for boilerplate scripts that simplify training and evaluation.
8 |
--------------------------------------------------------------------------------
/drl_grasping/drl_octree/__init__.py:
--------------------------------------------------------------------------------
1 | from . import algorithms, features_extractor, replay_buffer
2 |
--------------------------------------------------------------------------------
/drl_grasping/drl_octree/algorithms/__init__.py:
--------------------------------------------------------------------------------
1 | from . import sac, td3, tqc
2 |
--------------------------------------------------------------------------------
/drl_grasping/drl_octree/algorithms/off_policy_algorithm.py:
--------------------------------------------------------------------------------
1 | # This module adds a monkey patch to OffPolicyAlgorithm `_setup_model` function such that separate octree batches for stacks are supported
2 | # Note: needs to be included before `from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm` in the module that uses this
3 |
4 | # Note: Import monkey patch of ReplayBuffer before stable_baselines3 ReplayBuffer
5 | from drl_grasping.drl_octree.replay_buffer import replay_buffer # isort:skip
6 | from stable_baselines3.common.buffers import ReplayBuffer
7 | from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm
8 |
9 |
10 | def _setup_model_with_separate_octree_batches_for_stacks(self) -> None:
11 | self._setup_lr_schedule()
12 | self.set_random_seed(self.seed)
13 | if "separate_networks_for_stacks" in self.policy_kwargs:
14 | self.replay_buffer = ReplayBuffer(
15 | self.buffer_size,
16 | self.observation_space,
17 | self.action_space,
18 | self.device,
19 | optimize_memory_usage=self.optimize_memory_usage,
20 | separate_networks_for_stacks=self.policy_kwargs[
21 | "separate_networks_for_stacks"
22 | ],
23 | )
24 | else:
25 | self.replay_buffer = ReplayBuffer(
26 | self.buffer_size,
27 | self.observation_space,
28 | self.action_space,
29 | self.device,
30 | optimize_memory_usage=self.optimize_memory_usage,
31 | )
32 | self.policy = self.policy_class( # pytype:disable=not-instantiable
33 | self.observation_space,
34 | self.action_space,
35 | self.lr_schedule,
36 | **self.policy_kwargs, # pytype:disable=not-instantiable
37 | )
38 | self.policy = self.policy.to(self.device)
39 |
40 | # Convert train freq parameter to TrainFreq object
41 | self._convert_train_freq()
42 |
43 |
44 | OffPolicyAlgorithm._setup_model = _setup_model_with_separate_octree_batches_for_stacks
45 |
--------------------------------------------------------------------------------
/drl_grasping/drl_octree/algorithms/sac/__init__.py:
--------------------------------------------------------------------------------
1 | from .policies import OctreeCnnPolicy
2 |
--------------------------------------------------------------------------------
/drl_grasping/drl_octree/algorithms/td3/__init__.py:
--------------------------------------------------------------------------------
1 | from .policies import OctreeCnnPolicy
2 |
--------------------------------------------------------------------------------
/drl_grasping/drl_octree/algorithms/tqc/__init__.py:
--------------------------------------------------------------------------------
1 | from .policies import OctreeCnnPolicy
2 |
--------------------------------------------------------------------------------
/drl_grasping/drl_octree/features_extractor/__init__.py:
--------------------------------------------------------------------------------
1 | from .image_cnn import ImageCnnFeaturesExtractor
2 | from .octree_cnn import OctreeCnnFeaturesExtractor
3 |
--------------------------------------------------------------------------------
/drl_grasping/drl_octree/features_extractor/modules.py:
--------------------------------------------------------------------------------
1 | import ocnn
2 | import torch
3 |
4 |
class OctreeConvRelu(torch.nn.Module):
    """Octree convolution followed by an in-place ReLU activation."""

    def __init__(self, depth, channel_in, channel_out, kernel_size=[3], stride=1):
        super().__init__()
        self.conv = ocnn.OctreeConv(depth, channel_in, channel_out, kernel_size, stride)
        self.relu = torch.nn.ReLU(inplace=True)

    def forward(self, data_in, octree):
        # Convolve over the octree structure, then activate.
        return self.relu(self.conv(data_in, octree))
15 |
16 |
class OctreeConvBnRelu(torch.nn.Module):
    """Octree convolution -> batch normalization -> in-place ReLU."""

    def __init__(
        self,
        depth,
        channel_in,
        channel_out,
        kernel_size=[3],
        stride=1,
        bn_eps=0.00001,
        bn_momentum=0.01,
    ):
        super().__init__()
        self.conv = ocnn.OctreeConv(depth, channel_in, channel_out, kernel_size, stride)
        self.bn = torch.nn.BatchNorm2d(channel_out, bn_eps, bn_momentum)
        self.relu = torch.nn.ReLU(inplace=True)

    def forward(self, data_in, octree):
        # Conv over the octree, normalize per channel, then activate.
        return self.relu(self.bn(self.conv(data_in, octree)))
38 |
39 |
class OctreeConvFastRelu(torch.nn.Module):
    """Fast-variant octree convolution followed by an in-place ReLU."""

    def __init__(self, depth, channel_in, channel_out, kernel_size=[3], stride=1):
        super().__init__()
        self.conv = ocnn.OctreeConvFast(
            depth, channel_in, channel_out, kernel_size, stride
        )
        self.relu = torch.nn.ReLU(inplace=True)

    def forward(self, data_in, octree):
        return self.relu(self.conv(data_in, octree))
52 |
53 |
class OctreeConvFastBnRelu(torch.nn.Module):
    """Fast-variant octree convolution -> batch normalization -> in-place ReLU."""

    def __init__(
        self,
        depth,
        channel_in,
        channel_out,
        kernel_size=[3],
        stride=1,
        bn_eps=0.00001,
        bn_momentum=0.01,
    ):
        super().__init__()
        self.conv = ocnn.OctreeConvFast(
            depth, channel_in, channel_out, kernel_size, stride
        )
        self.bn = torch.nn.BatchNorm2d(channel_out, bn_eps, bn_momentum)
        self.relu = torch.nn.ReLU(inplace=True)

    def forward(self, data_in, octree):
        return self.relu(self.bn(self.conv(data_in, octree)))
77 |
78 |
class OctreeConv1x1Relu(torch.nn.Module):
    """1x1 octree convolution followed by an in-place ReLU."""

    def __init__(self, channel_in, channel_out, use_bias=True):
        super().__init__()
        self.conv1x1 = ocnn.OctreeConv1x1(channel_in, channel_out, use_bias)
        self.relu = torch.nn.ReLU(inplace=True)

    def forward(self, data_in):
        # 1x1 convs need no octree structure argument.
        return self.relu(self.conv1x1(data_in))
89 |
90 |
class OctreeConv1x1BnRelu(torch.nn.Module):
    """1x1 octree convolution -> batch normalization -> in-place ReLU."""

    def __init__(
        self, channel_in, channel_out, use_bias=True, bn_eps=0.00001, bn_momentum=0.01
    ):
        super().__init__()
        self.conv1x1 = ocnn.OctreeConv1x1(channel_in, channel_out, use_bias)
        self.bn = torch.nn.BatchNorm2d(channel_out, bn_eps, bn_momentum)
        self.relu = torch.nn.ReLU(inplace=True)

    def forward(self, data_in):
        return self.relu(self.bn(self.conv1x1(data_in)))
105 |
106 |
class LinearRelu(torch.nn.Module):
    """Fully-connected layer followed by an in-place ReLU."""

    def __init__(self, channel_in, channel_out, use_bias=True):
        super().__init__()
        self.fc = torch.nn.Linear(channel_in, channel_out, use_bias)
        self.relu = torch.nn.ReLU(inplace=True)

    def forward(self, data_in):
        return self.relu(self.fc(data_in))
117 |
118 |
class LinearBnRelu(torch.nn.Module):
    """Fully-connected layer -> 1D batch normalization -> in-place ReLU."""

    def __init__(
        self, channel_in, channel_out, use_bias=True, bn_eps=0.00001, bn_momentum=0.01
    ):
        super().__init__()
        self.fc = torch.nn.Linear(channel_in, channel_out, use_bias)
        self.bn = torch.nn.BatchNorm1d(channel_out, bn_eps, bn_momentum)
        self.relu = torch.nn.ReLU(inplace=True)

    def forward(self, data_in):
        return self.relu(self.bn(self.fc(data_in)))
133 |
134 |
class ImageConvRelu(torch.nn.Module):
    """2D image convolution followed by an in-place ReLU.

    Defaults (kernel 3, stride 1, padding 1) preserve spatial dimensions.
    """

    def __init__(self, channel_in, channel_out, kernel_size=3, stride=1, padding=1):
        super().__init__()
        self.conv = torch.nn.Conv2d(
            channel_in, channel_out, kernel_size, stride, padding
        )
        self.relu = torch.nn.ReLU(inplace=True)

    def forward(self, data_in):
        return self.relu(self.conv(data_in))
147 |
--------------------------------------------------------------------------------
/drl_grasping/drl_octree/replay_buffer/__init__.py:
--------------------------------------------------------------------------------
1 | from .utils import *
2 |
--------------------------------------------------------------------------------
/drl_grasping/drl_octree/replay_buffer/replay_buffer.py:
--------------------------------------------------------------------------------
1 | # This module adds a monkey patch to ReplayBuffer, such that octrees are
2 | # directly supported and there is no extra RAM -> VRAM -> RAM overhead
3 |
4 | # Note: needs to be included before `from stable_baselines3.common.buffers import ReplayBuffer` in the module that uses this
5 |
6 | from typing import Optional, Union
7 |
8 | import numpy as np
9 | import torch as th
10 | from gym import spaces
11 | from stable_baselines3.common.buffers import ReplayBuffer
12 | from stable_baselines3.common.type_aliases import ReplayBufferSamples
13 | from stable_baselines3.common.vec_env import VecNormalize
14 |
15 | from .utils import preprocess_stacked_depth_image_batch, preprocess_stacked_octree_batch
16 |
17 | __old__init__ = ReplayBuffer.__init__
18 | __old_get_samples__ = ReplayBuffer._get_samples
19 |
20 |
def __init___with_checking_for_stacked_images_and_octrees(
    self,
    buffer_size: int,
    observation_space: spaces.Space,
    action_space: spaces.Space,
    device: Union[th.device, str] = "cpu",
    n_envs: int = 1,
    optimize_memory_usage: bool = False,
    separate_networks_for_stacks: bool = True,
):
    """Monkey-patched ``ReplayBuffer.__init__``.

    Calls the original constructor, then heuristically flags whether the
    observation space holds stacked octrees (uint8 Box in [0, 255]) or
    stacked depth images (float32 Box in [-1, 1]); both heuristics require
    a 2D Box space.
    """
    __old__init__(
        self,
        buffer_size=buffer_size,
        observation_space=observation_space,
        action_space=action_space,
        device=device,
        n_envs=n_envs,
        optimize_memory_usage=optimize_memory_usage,
    )

    # Determine if octrees are used
    # Note: This is not 100% reliable as there could be other observations that do the same (outside of this repo)
    self.contains_octree_obs = False
    self.contains_stacked_image_obs = False
    # Always record the flag so that the attribute exists regardless of which
    # (if any) observation type was detected; previously it was only assigned
    # inside the matching branches, leaving it undefined otherwise.
    self._separate_networks_for_stacks = separate_networks_for_stacks
    if isinstance(observation_space, spaces.Box) and len(observation_space.shape) == 2:
        if (
            np.uint8 == observation_space.dtype
            and np.all(0 == observation_space.low)
            and np.all(255 == observation_space.high)
        ):
            self.contains_octree_obs = True
        elif (
            np.float32 == observation_space.dtype
            and np.all(-1.0 == observation_space.low)
            and np.all(1.0 == observation_space.high)
        ):
            self.contains_stacked_image_obs = True
60 |
61 |
def _get_samples_with_support_for_octree(
    self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None
) -> ReplayBufferSamples:
    """Monkey-patched ``ReplayBuffer._get_samples``.

    When the buffer holds stacked octrees or stacked depth images, the raw
    observations are converted on sampling (avoiding the RAM -> VRAM -> RAM
    round trip); otherwise the original implementation is used.
    """
    # Select the preprocessing function once; the two original branches were
    # identical apart from this choice.
    if self.contains_octree_obs:
        preprocess = preprocess_stacked_octree_batch
    elif self.contains_stacked_image_obs:
        preprocess = preprocess_stacked_depth_image_batch
    else:
        return __old_get_samples__(self, batch_inds=batch_inds, env=env)

    # Current observations
    obs = preprocess(
        self.observations[batch_inds, 0, :],
        self.device,
        separate_batches=self._separate_networks_for_stacks,
    )

    # Next observations (stored shifted by one index when memory usage is optimized)
    if self.optimize_memory_usage:
        next_obs = self.observations[(batch_inds + 1) % self.buffer_size, 0, :]
    else:
        next_obs = self.next_observations[batch_inds, 0, :]
    next_obs = preprocess(
        next_obs, self.device, separate_batches=self._separate_networks_for_stacks
    )

    return ReplayBufferSamples(
        observations=obs,
        actions=self.to_torch(self.actions[batch_inds, 0, :]),
        next_observations=next_obs,
        dones=self.to_torch(self.dones[batch_inds]),
        rewards=self.to_torch(self._normalize_reward(self.rewards[batch_inds], env)),
    )


# Apply the monkey patches to the upstream ReplayBuffer.
ReplayBuffer.__init__ = __init___with_checking_for_stacked_images_and_octrees
ReplayBuffer._get_samples = _get_samples_with_support_for_octree
122 |
--------------------------------------------------------------------------------
/drl_grasping/envs/control/__init__.py:
--------------------------------------------------------------------------------
1 | from pymoveit2 import GripperCommand, MoveIt2, MoveIt2Gripper, MoveIt2Servo
2 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .lights import *
2 | from .objects import *
3 | from .robots import *
4 | from .sensors import *
5 | from .terrains import *
6 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/lights/__init__.py:
--------------------------------------------------------------------------------
1 | from gym_ignition.scenario.model_wrapper import ModelWrapper
2 |
3 | from .random_sun import RandomSun
4 | from .sun import Sun
5 |
6 |
def get_light_model_class(light_type: str) -> ModelWrapper:
    # TODO: Refactor into enum
    # Dict dispatch; unknown types yield None, matching the original
    # fall-through behavior.
    return {
        "sun": Sun,
        "random_sun": RandomSun,
    }.get(light_type)
14 |
15 |
def is_light_type_randomizable(light_type: str) -> bool:
    """Return True if the given light type is generated with randomization."""
    return "random_sun" == light_type
21 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/lights/random_sun.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, Tuple
2 |
3 | import numpy as np
4 | from gym_ignition.scenario import model_wrapper
5 | from gym_ignition.utils.scenario import get_unique_model_name
6 | from numpy.random import RandomState
7 | from scenario import core as scenario
8 |
9 |
class RandomSun(model_wrapper.ModelWrapper):
    # Sun-like directional light whose direction, color, and attenuation are
    # sampled at construction time.
    # NOTE(review): the SDF template returned by `get_sdf` appears to have had
    # its XML tags stripped in this dump — confirm against the original file
    # before relying on the template's content.

    def __init__(
        self,
        world: scenario.World,
        name: str = "sun",
        minmax_elevation: Tuple[float, float] = (-0.15, -0.65),
        distance: float = 800.0,
        visual: bool = True,
        radius: float = 20.0,
        color_minmax_r: Tuple[float, float] = (1.0, 1.0),
        color_minmax_g: Tuple[float, float] = (1.0, 1.0),
        color_minmax_b: Tuple[float, float] = (1.0, 1.0),
        specular: float = 1.0,
        attenuation_minmax_range: Tuple[float, float] = (750.0, 15000.0),
        attenuation_minmax_constant: Tuple[float, float] = (0.5, 1.0),
        attenuation_minmax_linear: Tuple[float, float] = (0.001, 0.1),
        attenuation_minmax_quadratic: Tuple[float, float] = (0.0001, 0.01),
        np_random: Optional[RandomState] = None,
        **kwargs,
    ):
        """Sample a random sun configuration and insert it into ``world``.

        Raises:
            RuntimeError: If the model cannot be inserted into the world.
        """

        # Fall back to an unseeded generator when the caller provides none
        if np_random is None:
            np_random = np.random.default_rng()

        # Get a unique model name
        model_name = get_unique_model_name(world, name)

        # Get random yaw direction
        direction = np_random.uniform(-1.0, 1.0, (2,))
        # Normalize yaw direction
        direction = direction / np.linalg.norm(direction)

        # Get random elevation
        direction = np.append(
            direction,
            np_random.uniform(minmax_elevation[0], minmax_elevation[1]),
        )
        # Normalize again
        direction = direction / np.linalg.norm(direction)

        # Initial pose: place the light source opposite to its shining
        # direction, `distance` away from the origin
        initial_pose = scenario.Pose(
            (
                -direction[0] * distance,
                -direction[1] * distance,
                -direction[2] * distance,
            ),
            (1, 0, 0, 0),
        )

        # Create SDF string for the model
        sdf = self.get_sdf(
            model_name=model_name,
            direction=direction,
            visual=visual,
            radius=radius,
            color_minmax_r=color_minmax_r,
            color_minmax_g=color_minmax_g,
            color_minmax_b=color_minmax_b,
            attenuation_minmax_range=attenuation_minmax_range,
            attenuation_minmax_constant=attenuation_minmax_constant,
            attenuation_minmax_linear=attenuation_minmax_linear,
            attenuation_minmax_quadratic=attenuation_minmax_quadratic,
            specular=specular,
            np_random=np_random,
        )

        # Insert the model
        ok_model = world.to_gazebo().insert_model_from_string(
            sdf, initial_pose, model_name
        )
        if not ok_model:
            raise RuntimeError("Failed to insert " + model_name)

        # Get the model
        model = world.get_model(model_name)

        # Initialize base class
        model_wrapper.ModelWrapper.__init__(self, model=model)

    # NOTE(review): declared @classmethod but its first parameter is named
    # `self` — it receives the class object; consider renaming to `cls`.
    @classmethod
    def get_sdf(
        self,
        model_name: str,
        direction: Tuple[float, float, float],
        visual: bool,
        radius: float,
        color_minmax_r: Tuple[float, float],
        color_minmax_g: Tuple[float, float],
        color_minmax_b: Tuple[float, float],
        attenuation_minmax_range: Tuple[float, float],
        attenuation_minmax_constant: Tuple[float, float],
        attenuation_minmax_linear: Tuple[float, float],
        attenuation_minmax_quadratic: Tuple[float, float],
        specular: float,
        np_random: RandomState,
    ) -> str:
        """Sample random light parameters and return the SDF model string."""

        # Sample random values for parameters
        color_r = np_random.uniform(color_minmax_r[0], color_minmax_r[1])
        color_g = np_random.uniform(color_minmax_g[0], color_minmax_g[1])
        color_b = np_random.uniform(color_minmax_b[0], color_minmax_b[1])
        attenuation_range = np_random.uniform(
            attenuation_minmax_range[0], attenuation_minmax_range[1]
        )
        attenuation_constant = np_random.uniform(
            attenuation_minmax_constant[0], attenuation_minmax_constant[1]
        )
        attenuation_linear = np_random.uniform(
            attenuation_minmax_linear[0], attenuation_minmax_linear[1]
        )
        attenuation_quadratic = np_random.uniform(
            attenuation_minmax_quadratic[0], attenuation_minmax_quadratic[1]
        )

        return f'''

            true


                {direction[0]} {direction[1]} {direction[2]}

                    {attenuation_range}
                    {attenuation_constant}
                    {attenuation_linear}
                    {attenuation_quadratic}

                {color_r} {color_g} {color_b} 1
                {specular*color_r} {specular*color_g} {specular*color_b} 1
                true

            {
            f"""



                        {radius}



                    {color_r} {color_g} {color_b} 1

                false

            """ if visual else ""
            }


        '''
159 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/lights/sun.py:
--------------------------------------------------------------------------------
1 | from typing import List, Tuple
2 |
3 | import numpy as np
4 | from gym_ignition.scenario import model_wrapper
5 | from gym_ignition.utils.scenario import get_unique_model_name
6 | from scenario import core as scenario
7 |
8 |
class Sun(model_wrapper.ModelWrapper):
    # Sun-like directional light with fixed, caller-specified parameters.
    # NOTE(review): the SDF template returned by `get_sdf` appears to have had
    # its XML tags stripped in this dump — confirm against the original file.

    def __init__(
        self,
        world: scenario.World,
        name: str = "sun",
        direction: Tuple[float, float, float] = (0.5, -0.25, -0.75),
        color: List[float] = (1.0, 1.0, 1.0, 1.0),
        distance: float = 800.0,
        visual: bool = True,
        radius: float = 20.0,
        specular: float = 1.0,
        attenuation_range: float = 10000.0,
        attenuation_constant: float = 0.9,
        attenuation_linear: float = 0.01,
        attenuation_quadratic: float = 0.001,
        **kwargs,
    ):
        """Insert a sun model into ``world``.

        Raises:
            RuntimeError: If the model cannot be inserted into the world.
        """

        # Get a unique model name
        model_name = get_unique_model_name(world, name)

        # Normalize direction
        direction = np.array(direction)
        direction = direction / np.linalg.norm(direction)

        # Initial pose: place the light opposite to its shining direction,
        # `distance` away from the origin
        initial_pose = scenario.Pose(
            (
                -direction[0] * distance,
                -direction[1] * distance,
                -direction[2] * distance,
            ),
            (1, 0, 0, 0),
        )

        # Create SDF string for the model
        sdf = self.get_sdf(
            model_name=model_name,
            direction=direction,
            color=color,
            visual=visual,
            radius=radius,
            specular=specular,
            attenuation_range=attenuation_range,
            attenuation_constant=attenuation_constant,
            attenuation_linear=attenuation_linear,
            attenuation_quadratic=attenuation_quadratic,
        )

        # Insert the model
        ok_model = world.to_gazebo().insert_model_from_string(
            sdf, initial_pose, model_name
        )
        if not ok_model:
            raise RuntimeError("Failed to insert " + model_name)

        # Get the model
        model = world.get_model(model_name)

        # Initialize base class
        model_wrapper.ModelWrapper.__init__(self, model=model)

    # NOTE(review): declared @classmethod but its first parameter is named
    # `self` — it receives the class object; consider renaming to `cls`.
    @classmethod
    def get_sdf(
        self,
        model_name: str,
        direction: Tuple[float, float, float],
        color: Tuple[float, float, float, float],
        visual: bool,
        radius: float,
        specular: float,
        attenuation_range: float,
        attenuation_constant: float,
        attenuation_linear: float,
        attenuation_quadratic: float,
    ) -> str:
        """Return the SDF model string for the sun light."""

        return f'''

            true


                {direction[0]} {direction[1]} {direction[2]}

                    {attenuation_range}
                    {attenuation_constant}
                    {attenuation_linear}
                    {attenuation_quadratic}

                {color[0]} {color[1]} {color[2]} 1
                {specular*color[0]} {specular*color[1]} {specular*color[2]} 1
                true

            {
            f"""



                        {radius}



                    {color[0]} {color[1]} {color[2]} 1

                false

            """ if visual else ""
            }


        '''
120 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/objects/__init__.py:
--------------------------------------------------------------------------------
1 | from gym_ignition.scenario.model_wrapper import ModelWrapper
2 |
3 | from .primitives import Box, Cylinder, Plane, Sphere
4 | from .random_lunar_rock import RandomLunarRock
5 | from .random_object import RandomObject
6 | from .random_primitive import RandomPrimitive
7 | from .rock import Rock
8 |
9 |
def get_object_model_class(object_type: str) -> ModelWrapper:
    # TODO: Refactor into enum
    # Dict dispatch; unknown types yield None, matching the original
    # fall-through behavior.
    return {
        "box": Box,
        "sphere": Sphere,
        "cylinder": Cylinder,
        "random_primitive": RandomPrimitive,
        "random_mesh": RandomObject,
        "rock": Rock,
        "random_lunar_rock": RandomLunarRock,
    }.get(object_type)
27 |
28 |
def is_object_type_randomizable(object_type: str) -> bool:
    """Return True if the given object type is generated with randomization."""
    return object_type in ("random_primitive", "random_mesh", "random_lunar_rock")
36 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/objects/primitives/__init__.py:
--------------------------------------------------------------------------------
1 | from .box import Box
2 | from .cylinder import Cylinder
3 | from .plane import Plane
4 | from .sphere import Sphere
5 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/objects/primitives/box.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from gym_ignition.scenario import model_wrapper
4 | from gym_ignition.utils import misc
5 | from gym_ignition.utils.scenario import get_unique_model_name
6 | from scenario import core as scenario
7 |
8 |
class Box(model_wrapper.ModelWrapper):
    # Simple box primitive with optional collision, visual, and inertial
    # properties derived from `size` and `mass`.
    # NOTE(review): the SDF template returned by `get_sdf` appears to have had
    # its XML tags stripped in this dump — confirm against the original file.

    def __init__(
        self,
        world: scenario.World,
        name: str = "box",
        position: List[float] = (0, 0, 0),
        orientation: List[float] = (1, 0, 0, 0),
        size: List[float] = (0.05, 0.05, 0.05),
        mass: float = 0.1,
        static: bool = False,
        collision: bool = True,
        friction: float = 1.0,
        visual: bool = True,
        gui_only: bool = False,
        color: List[float] = (0.8, 0.8, 0.8, 1.0),
        **kwargs,
    ):
        """Insert a box model into ``world`` at the given pose.

        Raises:
            RuntimeError: If the model cannot be inserted into the world.
        """

        # Get a unique model name
        model_name = get_unique_model_name(world, name)

        # Initial pose
        initial_pose = scenario.Pose(position, orientation)

        # Create SDF string for the model
        sdf = self.get_sdf(
            model_name=model_name,
            size=size,
            mass=mass,
            static=static,
            collision=collision,
            friction=friction,
            visual=visual,
            gui_only=gui_only,
            color=color,
        )

        # Insert the model
        ok_model = world.to_gazebo().insert_model_from_string(
            sdf, initial_pose, model_name
        )
        if not ok_model:
            raise RuntimeError("Failed to insert " + model_name)

        # Get the model
        model = world.get_model(model_name)

        # Initialize base class
        model_wrapper.ModelWrapper.__init__(self, model=model)

    # NOTE(review): declared @classmethod but its first parameter is named
    # `self` — it receives the class object; consider renaming to `cls`.
    @classmethod
    def get_sdf(
        self,
        model_name: str,
        size: List[float],
        mass: float,
        static: bool,
        collision: bool,
        friction: float,
        visual: bool,
        gui_only: bool,
        color: List[float],
    ) -> str:
        """Return the SDF model string; inertia uses the solid-cuboid formula."""
        return f'''

            {"true" if static else "false"}

                {
                f"""



                            {size[0]} {size[1]} {size[2]}




                            {friction}
                            {friction}
                            0 0 0
                            0.0
                            0.0



                """ if collision else ""
                }
                {
                f"""



                            {size[0]} {size[1]} {size[2]}



                        {color[0]} {color[1]} {color[2]} {color[3]}
                        {color[0]} {color[1]} {color[2]} {color[3]}
                        {color[0]} {color[1]} {color[2]} {color[3]}

                    {1.0-color[3]}
                    {'1 false' if gui_only else ''}

                """ if visual else ""
                }

                    {mass}

                        {(size[1]**2 + size[2]**2)*mass/12}
                        {(size[0]**2 + size[2]**2)*mass/12}
                        {(size[0]**2 + size[1]**2)*mass/12}
                        0.0
                        0.0
                        0.0




        '''
130 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/objects/primitives/cylinder.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from gym_ignition.scenario import model_wrapper
4 | from gym_ignition.utils import misc
5 | from gym_ignition.utils.scenario import get_unique_model_name
6 | from scenario import core as scenario
7 |
8 |
class Cylinder(model_wrapper.ModelWrapper):
    # Cylinder primitive with optional collision, visual, and inertial
    # properties derived from `radius`, `length`, and `mass`.
    # NOTE(review): the SDF template returned by `get_sdf` appears to have had
    # its XML tags stripped in this dump — confirm against the original file.

    def __init__(
        self,
        world: scenario.World,
        name: str = "cylinder",
        position: List[float] = (0, 0, 0),
        orientation: List[float] = (1, 0, 0, 0),
        radius: float = 0.025,
        length: float = 0.1,
        mass: float = 0.1,
        static: bool = False,
        collision: bool = True,
        friction: float = 1.0,
        visual: bool = True,
        gui_only: bool = False,
        color: List[float] = (0.8, 0.8, 0.8, 1.0),
        **kwargs,
    ):
        """Insert a cylinder model into ``world`` at the given pose.

        Raises:
            RuntimeError: If the model cannot be inserted into the world.
        """

        # Get a unique model name
        model_name = get_unique_model_name(world, name)

        # Initial pose
        initial_pose = scenario.Pose(position, orientation)

        # Create SDF string for the model
        sdf = self.get_sdf(
            model_name=model_name,
            radius=radius,
            length=length,
            mass=mass,
            static=static,
            collision=collision,
            friction=friction,
            visual=visual,
            gui_only=gui_only,
            color=color,
        )

        # Insert the model
        ok_model = world.to_gazebo().insert_model_from_string(
            sdf, initial_pose, model_name
        )
        if not ok_model:
            raise RuntimeError("Failed to insert " + model_name)

        # Get the model
        model = world.get_model(model_name)

        # Initialize base class
        model_wrapper.ModelWrapper.__init__(self, model=model)

    # NOTE(review): declared @classmethod but its first parameter is named
    # `self` — it receives the class object; consider renaming to `cls`.
    @classmethod
    def get_sdf(
        self,
        model_name: str,
        radius: float,
        length: float,
        mass: float,
        static: bool,
        collision: bool,
        friction: float,
        visual: bool,
        gui_only: bool,
        color: List[float],
    ) -> str:
        """Return the SDF model string; inertia uses the solid-cylinder formula."""
        # Inertia is identical for xx and yy components, compute only once
        inertia_xx_yy = (3 * radius**2 + length**2) * mass / 12

        return f'''

            {"true" if static else "false"}

                {
                f"""



                            {radius}
                            {length}




                            {friction}
                            {friction}
                            0 0 0
                            0.0
                            0.0



                """ if collision else ""
                }
                {
                f"""



                            {radius}
                            {length}



                        {color[0]} {color[1]} {color[2]} {color[3]}
                        {color[0]} {color[1]} {color[2]} {color[3]}
                        {color[0]} {color[1]} {color[2]} {color[3]}

                    {1.0-color[3]}
                    {'1 false' if gui_only else ''}

                """ if visual else ""
                }

                    {mass}

                        {inertia_xx_yy}
                        {inertia_xx_yy}
                        {(mass*radius**2)/2}
                        0.0
                        0.0
                        0.0




        '''
138 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/objects/primitives/plane.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from gym_ignition.scenario import model_wrapper
4 | from gym_ignition.utils import misc
5 | from gym_ignition.utils.scenario import get_unique_model_name
6 | from scenario import core as scenario
7 |
8 |
class Plane(model_wrapper.ModelWrapper):
    # Static plane primitive with optional collision and visual geometry.
    # Unlike the other primitives, the SDF string is built inline and the
    # model is always static with no inertial block.
    # NOTE(review): the SDF template appears to have had its XML tags
    # stripped in this dump — confirm against the original file.

    def __init__(
        self,
        world: scenario.World,
        name: str = "plane",
        position: List[float] = (0, 0, 0),
        orientation: List[float] = (1, 0, 0, 0),
        size: List[float] = (1.0, 1.0),
        direction: List[float] = (0.0, 0.0, 1.0),
        collision: bool = True,
        friction: float = 1.0,
        visual: bool = True,
        **kwargs,
    ):
        """Insert a plane model into ``world`` at the given pose.

        Raises:
            RuntimeError: If the model cannot be inserted into the world.
        """

        # Get a unique model name
        model_name = get_unique_model_name(world, name)

        # Initial pose
        initial_pose = scenario.Pose(position, orientation)

        # Create SDF string for the model
        sdf = f'''

            true

                {
                f"""



                            {direction[0]} {direction[1]} {direction[2]}
                            {size[0]} {size[1]}




                            {friction}
                            {friction}
                            0 0 0
                            0.0
                            0.0



                """ if collision else ""
                }
                {
                f"""



                            {direction[0]} {direction[1]} {direction[2]}
                            {size[0]} {size[1]}



                        0.8 0.8 0.8 1
                        0.8 0.8 0.8 1
                        0.8 0.8 0.8 1


                """ if visual else ""
                }


        '''

        # Insert the model
        ok_model = world.to_gazebo().insert_model_from_string(
            sdf, initial_pose, model_name
        )
        if not ok_model:
            raise RuntimeError("Failed to insert " + model_name)

        # Get the model
        model = world.get_model(model_name)

        # Initialize base class
        model_wrapper.ModelWrapper.__init__(self, model=model)
91 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/objects/primitives/sphere.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from gym_ignition.scenario import model_wrapper
4 | from gym_ignition.utils import misc
5 | from gym_ignition.utils.scenario import get_unique_model_name
6 | from scenario import core as scenario
7 |
8 |
class Sphere(model_wrapper.ModelWrapper):
    # Sphere primitive with optional collision, visual, and inertial
    # properties derived from `radius` and `mass`.
    # NOTE(review): the SDF template returned by `get_sdf` appears to have had
    # its XML tags stripped in this dump — confirm against the original file.

    def __init__(
        self,
        world: scenario.World,
        name: str = "sphere",
        position: List[float] = (0, 0, 0),
        orientation: List[float] = (1, 0, 0, 0),
        radius: float = 0.025,
        mass: float = 0.1,
        static: bool = False,
        collision: bool = True,
        friction: float = 1.0,
        visual: bool = True,
        gui_only: bool = False,
        color: List[float] = (0.8, 0.8, 0.8, 1.0),
        **kwargs,
    ):
        """Insert a sphere model into ``world`` at the given pose.

        Raises:
            RuntimeError: If the model cannot be inserted into the world.
        """

        # Get a unique model name
        model_name = get_unique_model_name(world, name)

        # Initial pose
        initial_pose = scenario.Pose(position, orientation)

        # Create SDF string for the model
        sdf = self.get_sdf(
            model_name=model_name,
            radius=radius,
            mass=mass,
            static=static,
            collision=collision,
            friction=friction,
            visual=visual,
            gui_only=gui_only,
            color=color,
        )

        # Insert the model
        ok_model = world.to_gazebo().insert_model_from_string(
            sdf, initial_pose, model_name
        )
        if not ok_model:
            raise RuntimeError("Failed to insert " + model_name)

        # Get the model
        model = world.get_model(model_name)

        # Initialize base class
        model_wrapper.ModelWrapper.__init__(self, model=model)

    # NOTE(review): declared @classmethod but its first parameter is named
    # `self` — it receives the class object; consider renaming to `cls`.
    @classmethod
    def get_sdf(
        self,
        model_name: str,
        radius: float,
        mass: float,
        static: bool,
        collision: bool,
        friction: float,
        visual: bool,
        gui_only: bool,
        color: List[float],
    ) -> str:
        """Return the SDF model string; inertia uses the solid-sphere formula."""
        # Inertia is identical for all axes
        inertia_xx_yy_zz = (mass * radius**2) * 2 / 5

        return f'''

            {"true" if static else "false"}

                {
                f"""



                            {radius}




                            {friction}
                            {friction}
                            0 0 0
                            0.0
                            0.0



                """ if collision else ""
                }
                {
                f"""



                            {radius}



                        {color[0]} {color[1]} {color[2]} {color[3]}
                        {color[0]} {color[1]} {color[2]} {color[3]}
                        {color[0]} {color[1]} {color[2]} {color[3]}

                    {1.0-color[3]}
                    {'1 false' if gui_only else ''}

                """ if visual else ""
                }

                    {mass}

                        {inertia_xx_yy_zz}
                        {inertia_xx_yy_zz}
                        {inertia_xx_yy_zz}
                        0.0
                        0.0
                        0.0




        '''
133 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/objects/random_lunar_rock.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import List, Optional
3 |
4 | import numpy as np
5 | from gym_ignition.scenario import model_wrapper
6 | from gym_ignition.utils.scenario import get_unique_model_name
7 | from numpy.random import RandomState
8 | from scenario import core as scenario
9 |
10 |
class RandomLunarRock(model_wrapper.ModelWrapper):
    """Lunar rock model selected at random from a directory of SDF models."""

    def __init__(
        self,
        world: scenario.World,
        name: str = "rock",
        position: List[float] = (0, 0, 0),
        orientation: List[float] = (1, 0, 0, 0),
        models_dir: Optional[str] = None,
        np_random: Optional[RandomState] = None,
        **kwargs,
    ):
        """
        Insert a randomly chosen lunar rock model into the world.

        :param world: World to insert the model into.
        :param name: Base name of the model (made unique automatically).
        :param position: Initial position (x, y, z).
        :param orientation: Initial orientation quaternion (w, x, y, z).
        :param models_dir: Directory containing one subdirectory per rock model;
            falls back to the `SDF_PATH_LUNAR_ROCK` environment variable.
        :param np_random: Random number generator; a default one is created if None.
        :raises ValueError: If the models directory does not exist.
        :raises RuntimeError: If the model cannot be inserted into the world.
        """

        if np_random is None:
            np_random = np.random.default_rng()

        # Get a unique model name
        model_name = get_unique_model_name(world, name)

        # Initial pose
        initial_pose = scenario.Pose(position, orientation)

        # Get path to all lunar rock models
        if not models_dir:
            models_dir = os.environ.get("SDF_PATH_LUNAR_ROCK", default="")

        # Make sure the path exists
        if not os.path.exists(models_dir):
            raise ValueError(
                f"Invalid path '{models_dir}' pointed by 'SDF_PATH_LUNAR_ROCK' environment variable."
            )

        # Select a single model at random
        model_dir = np_random.choice(os.listdir(models_dir))
        # BUGFIX: `os.listdir()` returns bare entry names, so the path must be
        # anchored at `models_dir` — previously the join omitted it, producing
        # a path relative to the process CWD.
        sdf_filepath = os.path.join(models_dir, model_dir, "model.sdf")

        # Insert the model
        ok_model = world.to_gazebo().insert_model_from_file(
            sdf_filepath, initial_pose, model_name
        )
        if not ok_model:
            raise RuntimeError("Failed to insert " + model_name)

        # Get the model
        model = world.get_model(model_name)

        # Initialize base class
        model_wrapper.ModelWrapper.__init__(self, model=model)
58 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/objects/random_object.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional
2 |
3 | from gym_ignition.scenario import model_wrapper
4 | from gym_ignition.utils.scenario import get_unique_model_name
5 | from numpy.random import RandomState
6 | from scenario import core as scenario
7 |
8 | from drl_grasping.envs.models.utils import ModelCollectionRandomizer
9 |
10 |
class RandomObject(model_wrapper.ModelWrapper):
    """Random object sampled from an Ignition Fuel model collection."""

    def __init__(
        self,
        world: scenario.World,
        name: str = "object",
        position: List[float] = (0, 0, 0),
        orientation: List[float] = (1, 0, 0, 0),
        model_paths: Optional[str] = None,
        # NOTE: annotation corrected from `str = None` to `Optional[str]`;
        # the default and accepted values are unchanged.
        owner: str = "GoogleResearch",
        collection: str = "Google Scanned Objects",
        server: str = "https://fuel.ignitionrobotics.org",
        server_version: str = "1.0",
        unique_cache: bool = False,
        reset_collection: bool = False,
        np_random: Optional[RandomState] = None,
        **kwargs,
    ):
        """
        Insert a random object from a (remote) model collection into the world.

        :param world: World to insert the model into.
        :param name: Base name of the model (made unique automatically).
        :param position: Initial position (x, y, z).
        :param orientation: Initial orientation quaternion (w, x, y, z).
        :param model_paths: Optional local model paths; remote collection is used if None.
        :param owner: Fuel collection owner.
        :param collection: Fuel collection name.
        :param server: Fuel server URL.
        :param server_version: Fuel server API version.
        :param unique_cache: Whether the randomizer should use a unique cache.
        :param reset_collection: Whether to reset the cached collection.
        :param np_random: Random number generator forwarded to the randomizer.
        :raises RuntimeError: If the model cannot be inserted into the world.
        """

        # Get a unique model name
        model_name = get_unique_model_name(world, name)

        # Initial pose
        initial_pose = scenario.Pose(position, orientation)

        model_collection_randomizer = ModelCollectionRandomizer(
            model_paths=model_paths,
            owner=owner,
            collection=collection,
            server=server,
            server_version=server_version,
            unique_cache=unique_cache,
            reset_collection=reset_collection,
            np_random=np_random,
        )

        # Note: using default arguments here
        modified_sdf_file = model_collection_randomizer.random_model()

        # Insert the model
        ok_model = world.to_gazebo().insert_model(
            modified_sdf_file, initial_pose, model_name
        )
        if not ok_model:
            raise RuntimeError("Failed to insert " + model_name)

        # Get the model
        model = world.get_model(model_name)

        # Initialize base class
        model_wrapper.ModelWrapper.__init__(self, model=model)
61 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/objects/random_primitive.py:
--------------------------------------------------------------------------------
1 | from typing import List, Optional, Union
2 |
3 | import numpy as np
4 | from gym_ignition.scenario import model_wrapper
5 | from gym_ignition.utils import misc
6 | from gym_ignition.utils.scenario import get_unique_model_name
7 | from numpy.random import RandomState
8 | from scenario import core as scenario
9 |
10 | from . import Box, Cylinder, Sphere
11 |
12 |
class RandomPrimitive(model_wrapper.ModelWrapper):
    """Primitive model (box, cylinder or sphere) with randomized properties."""

    def __init__(
        self,
        world: scenario.World,
        name: str = "primitive",
        use_specific_primitive: Union[str, None] = None,
        position: List[float] = (0, 0, 0),
        orientation: List[float] = (1, 0, 0, 0),
        static: bool = False,
        collision: bool = True,
        visual: bool = True,
        gui_only: bool = False,
        np_random: Optional[RandomState] = None,
        **kwargs,
    ):
        """
        Insert a primitive with randomized type, size, mass, friction and color.

        :param world: World to insert the model into.
        :param name: Base name of the model (made unique automatically).
        :param use_specific_primitive: Force 'box', 'cylinder' or 'sphere'
            instead of choosing the primitive type at random.
        :param position: Initial position (x, y, z).
        :param orientation: Initial orientation quaternion (w, x, y, z).
        :param static: Whether the model is immovable in the simulation.
        :param collision: Whether to include collision geometry.
        :param visual: Whether to include visual geometry.
        :param gui_only: Whether the visual should be shown only in the GUI.
        :param np_random: Random number generator; a default one is created if None.
        :raises RuntimeError: If the model cannot be inserted into the world.
        """

        if np_random is None:
            np_random = np.random.default_rng()

        # Get a unique model name
        model_name = get_unique_model_name(world, name)

        # Initial pose
        initial_pose = scenario.Pose(position, orientation)

        # Create SDF string for the model
        sdf = self.get_sdf(
            model_name=model_name,
            use_specific_primitive=use_specific_primitive,
            static=static,
            collision=collision,
            visual=visual,
            gui_only=gui_only,
            np_random=np_random,
        )

        # Insert the model
        ok_model = world.to_gazebo().insert_model_from_string(
            sdf, initial_pose, model_name
        )
        if not ok_model:
            raise RuntimeError("Failed to insert " + model_name)

        # Get the model
        model = world.get_model(model_name)

        # Initialize base class
        model_wrapper.ModelWrapper.__init__(self, model=model)

    @classmethod
    def get_sdf(
        cls,
        model_name: str,
        use_specific_primitive: Union[str, None],
        static: bool,
        collision: bool,
        visual: bool,
        gui_only: bool,
        np_random: RandomState,
    ) -> str:
        """
        Generate an SDF string for one primitive with randomized physical
        properties (mass, friction, color, and primitive-specific dimensions).

        :raises TypeError: If `use_specific_primitive` is not one of
            'box', 'cylinder' or 'sphere'.
        """

        # Honor an explicitly requested primitive, otherwise pick one at random
        if use_specific_primitive is not None:
            primitive = use_specific_primitive
        else:
            primitive = np_random.choice(["box", "cylinder", "sphere"])

        # Randomize shared physical properties
        mass = np_random.uniform(0.05, 0.25)
        friction = np_random.uniform(0.75, 1.5)
        color = list(np_random.uniform(0.0, 1.0, (3,)))
        color.append(1.0)  # fully opaque

        if "box" == primitive:
            return Box.get_sdf(
                model_name=model_name,
                size=list(np_random.uniform(0.04, 0.06, (3,))),
                mass=mass,
                static=static,
                collision=collision,
                friction=friction,
                visual=visual,
                gui_only=gui_only,
                color=color,
            )
        elif "cylinder" == primitive:
            return Cylinder.get_sdf(
                model_name=model_name,
                radius=np_random.uniform(0.01, 0.0375),
                length=np_random.uniform(0.025, 0.05),
                mass=mass,
                static=static,
                collision=collision,
                friction=friction,
                visual=visual,
                gui_only=gui_only,
                color=color,
            )
        elif "sphere" == primitive:
            return Sphere.get_sdf(
                model_name=model_name,
                radius=np_random.uniform(0.01, 0.0375),
                mass=mass,
                static=static,
                collision=collision,
                friction=friction,
                visual=visual,
                gui_only=gui_only,
                color=color,
            )
        else:
            # Message typos fixed ("in not" -> "is not", "Pleasure" -> "Please",
            # missing closing quote around 'sphere')
            raise TypeError(
                f"Type '{use_specific_primitive}' is not a supported primitive. Please use 'box', 'cylinder' or 'sphere'."
            )
125 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/objects/rock.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from gym_ignition.scenario import model_with_file, model_wrapper
4 | from gym_ignition.utils.scenario import get_unique_model_name
5 | from scenario import core as scenario
6 | from scenario import gazebo as scenario_gazebo
7 |
8 |
class Rock(model_wrapper.ModelWrapper, model_with_file.ModelWithFile):
    """Rock model loaded either from Ignition Fuel or from a local model."""

    def __init__(
        self,
        world: scenario.World,
        name: str = "rock",
        position: List[float] = (0, 0, 0),
        orientation: List[float] = (1, 0, 0, 0),
        model_file: str = None,
        use_fuel: bool = True,
        variant: int = 6,
        **kwargs,
    ):
        """
        Insert a rock model into the world.

        :param world: World to insert the model into.
        :param name: Base name of the model (made unique automatically).
        :param position: Initial position (x, y, z).
        :param orientation: Initial orientation quaternion (w, x, y, z).
        :param model_file: Explicit model file; resolved automatically if None.
        :param use_fuel: Whether to fetch the model from Ignition Fuel.
        :param variant: Index of the "Falling Rock" Fuel model variant.
        :raises RuntimeError: If the model cannot be inserted into the world.
        """

        # Allow passing of custom model file as an argument
        if model_file is None:
            model_file = self.get_model_file(fuel=use_fuel, variant=variant)

        # Get a unique model name
        model_name = get_unique_model_name(world, name)

        # Setup initial pose
        initial_pose = scenario.Pose(position, orientation)

        # Insert the model
        ok_model = world.to_gazebo().insert_model_from_file(
            model_file, initial_pose, model_name
        )
        if not ok_model:
            raise RuntimeError("Failed to insert " + model_name)

        # Get the model
        model = world.get_model(model_name)

        # Initialize base class
        super().__init__(model=model)

    @classmethod
    def get_model_file(cls, fuel: bool = False, variant: int = 6) -> str:
        """
        Resolve the model file (Fuel download) or local model name for a rock.

        Note: first parameter renamed `self` -> `cls` to match @classmethod
        semantics; callers are unaffected.
        """
        if fuel:
            return scenario_gazebo.get_model_file_from_fuel(
                f"https://fuel.ignitionrobotics.org/1.0/OpenRobotics/models/Falling Rock {variant}"
            )
        else:
            # NOTE(review): returning "lunar_surface" from rock.py looks like a
            # copy-paste leftover from the terrain models — confirm the intended
            # local model name before relying on the non-Fuel path.
            return "lunar_surface"
53 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/robots/__init__.py:
--------------------------------------------------------------------------------
1 | from gym_ignition.scenario.model_wrapper import ModelWrapper
2 |
3 | from .lunalab_summit_xl_gen import LunalabSummitXlGen
4 | from .panda import Panda
5 |
6 | # TODO: When adding new a robot, create abstract classes to simplify such process
7 |
8 |
def get_robot_model_class(robot_model: str) -> ModelWrapper:
    """Map a robot model identifier to its model class.

    :param robot_model: One of 'panda' or 'lunalab_summit_xl_gen'.
    :raises ValueError: For unknown identifiers (previously the function
        silently returned ``None``, deferring the failure to the caller).
    """
    # TODO: Refactor into enum

    if "panda" == robot_model:
        return Panda
    elif "lunalab_summit_xl_gen" == robot_model:
        return LunalabSummitXlGen
    else:
        raise ValueError(f"Unsupported robot model: '{robot_model}'")
16 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/sensors/__init__.py:
--------------------------------------------------------------------------------
1 | from .camera import Camera
2 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/terrains/__init__.py:
--------------------------------------------------------------------------------
1 | from gym_ignition.scenario.model_wrapper import ModelWrapper
2 |
3 | from .ground import Ground
4 | from .lunar_heightmap import LunarHeightmap
5 | from .lunar_surface import LunarSurface
6 | from .random_ground import RandomGround
7 | from .random_lunar_surface import RandomLunarSurface
8 |
9 |
def get_terrain_model_class(terrain_type: str) -> ModelWrapper:
    """Map a terrain type identifier to its model class.

    :param terrain_type: One of 'flat', 'random_flat', 'lunar_heightmap',
        'lunar_surface' or 'random_lunar_surface'.
    :raises ValueError: For unknown identifiers (previously the function
        silently returned ``None``, deferring the failure to the caller).
    """
    # TODO: Refactor into enum

    if "flat" == terrain_type:
        return Ground
    elif "random_flat" == terrain_type:
        return RandomGround
    elif "lunar_heightmap" == terrain_type:
        return LunarHeightmap
    elif "lunar_surface" == terrain_type:
        return LunarSurface
    elif "random_lunar_surface" == terrain_type:
        return RandomLunarSurface
    else:
        raise ValueError(f"Unsupported terrain type: '{terrain_type}'")
23 |
24 |
def is_terrain_type_randomizable(terrain_type: str) -> bool:
    """Return True for terrain types that support domain randomization."""

    return terrain_type in ("random_flat", "random_lunar_surface")
28 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/terrains/ground.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from gym_ignition.scenario import model_wrapper
4 | from gym_ignition.utils import misc
5 | from gym_ignition.utils.scenario import get_unique_model_name
6 | from scenario import core as scenario
7 |
8 |
class Ground(model_wrapper.ModelWrapper):
    # Static flat ground plane with configurable size and friction.
    def __init__(
        self,
        world: scenario.World,
        name: str = "ground",
        position: List[float] = (0, 0, 0),
        orientation: List[float] = (1, 0, 0, 0),
        size: List[float] = (1.0, 1.0),
        collision_thickness: float = 0.05,
        friction: float = 5.0,
        **kwargs,
    ):
        """
        Insert a static flat ground plane into the world.

        :param world: World to insert the model into.
        :param name: Base name of the model (made unique automatically).
        :param position: Initial position (x, y, z).
        :param orientation: Initial orientation quaternion (w, x, y, z).
        :param size: Plane extent (x, y).
        :param collision_thickness: Collision thickness parameter.
            NOTE(review): not visibly referenced in the template below —
            possibly lost with the stripped SDF markup; confirm.
        :param friction: Friction coefficient for both friction directions.
        :raises RuntimeError: If the model cannot be inserted into the world.

        NOTE(review): the SDF template appears to have lost its XML tags in
        this copy of the source — verify against a rendered model.
        """

        # Get a unique model name
        model_name = get_unique_model_name(world, name)

        # Initial pose
        initial_pose = scenario.Pose(position, orientation)

        # Create SDF string for the model
        sdf = f"""

true




0 0 1
{size[0]} {size[1]}





{friction}
{friction}
0 0 0
0.0
0.0








0 0 1
{size[0]} {size[1]}




0.8 0.8 0.8 1
0.8 0.8 0.8 1
0.8 0.8 0.8 1




"""

        # Insert the model
        ok_model = world.to_gazebo().insert_model_from_string(
            sdf, initial_pose, model_name
        )
        if not ok_model:
            raise RuntimeError("Failed to insert " + model_name)

        # Get the model
        model = world.get_model(model_name)

        # Initialize base class
        model_wrapper.ModelWrapper.__init__(self, model=model)
81 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/terrains/lunar_heightmap.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from gym_ignition.scenario import model_with_file, model_wrapper
4 | from gym_ignition.utils.scenario import get_unique_model_name
5 | from scenario import core as scenario
6 | from scenario import gazebo as scenario_gazebo
7 |
8 |
class LunarHeightmap(model_wrapper.ModelWrapper, model_with_file.ModelWithFile):
    """Lunar heightmap terrain loaded from a local SDF model."""

    def __init__(
        self,
        world: scenario.World,
        name: str = "lunar_heightmap",
        position: List[float] = (0, 0, 0),
        orientation: List[float] = (1, 0, 0, 0),
        model_file: str = None,
        use_fuel: bool = False,
        **kwargs,
    ):
        """
        Insert a lunar heightmap terrain model into the world.

        :param world: World to insert the model into.
        :param name: Base name of the model (made unique automatically).
        :param position: Initial position (x, y, z).
        :param orientation: Initial orientation quaternion (w, x, y, z).
        :param model_file: Explicit model file; resolved automatically if None.
        :param use_fuel: Must be False; the Fuel variant is not available yet.
        :raises RuntimeError: If the model cannot be inserted into the world.
        """

        # Allow passing of custom model file as an argument
        if model_file is None:
            model_file = self.get_model_file(fuel=use_fuel)

        # Get a unique model name
        model_name = get_unique_model_name(world, name)

        # Setup initial pose
        initial_pose = scenario.Pose(position, orientation)

        # Insert the model
        ok_model = world.to_gazebo().insert_model_from_file(
            model_file, initial_pose, model_name
        )
        if not ok_model:
            raise RuntimeError("Failed to insert " + model_name)

        # Get the model
        model = world.get_model(model_name)

        # Initialize base class
        super().__init__(model=model)

    @classmethod
    def get_model_file(cls, fuel: bool = False) -> str:
        """
        Resolve the model name for the lunar heightmap.

        :raises NotImplementedError: When `fuel=True`; the Fuel-hosted model
            is not available yet. (The original body kept an unreachable
            `get_model_file_from_fuel(".../AndrejOrsula/models/lunar_heightmap")`
            call after the raise; the dead code was removed — behavior is unchanged.)
        """
        if fuel:
            raise NotImplementedError
        else:
            return "lunar_heightmap"
53 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/terrains/lunar_surface.py:
--------------------------------------------------------------------------------
1 | from typing import List
2 |
3 | from gym_ignition.scenario import model_with_file, model_wrapper
4 | from gym_ignition.utils.scenario import get_unique_model_name
5 | from scenario import core as scenario
6 | from scenario import gazebo as scenario_gazebo
7 |
8 |
class LunarSurface(model_wrapper.ModelWrapper, model_with_file.ModelWithFile):
    """Lunar surface terrain loaded from a local SDF model variant."""

    def __init__(
        self,
        world: scenario.World,
        name: str = "lunar_surface",
        position: List[float] = (0, 0, 0),
        orientation: List[float] = (1, 0, 0, 0),
        model_file: str = None,
        use_fuel: bool = False,
        variant: str = "tycho",
        **kwargs,
    ):
        """
        Insert a lunar surface terrain model into the world.

        :param world: World to insert the model into.
        :param name: Base name of the model (made unique automatically).
        :param position: Initial position (x, y, z).
        :param orientation: Initial orientation quaternion (w, x, y, z).
        :param model_file: Explicit model file; resolved automatically if None.
        :param use_fuel: Must be False; the Fuel variant is not available yet.
        :param variant: Surface variant suffix, e.g. "tycho".
        :raises RuntimeError: If the model cannot be inserted into the world.
        """

        # Allow passing of custom model file as an argument
        if model_file is None:
            model_file = self.get_model_file(fuel=use_fuel, variant=variant)

        # Get a unique model name
        model_name = get_unique_model_name(world, name)

        # Setup initial pose
        initial_pose = scenario.Pose(position, orientation)

        # Insert the model
        ok_model = world.to_gazebo().insert_model_from_file(
            model_file, initial_pose, model_name
        )
        if not ok_model:
            raise RuntimeError("Failed to insert " + model_name)

        # Get the model
        model = world.get_model(model_name)

        # Initialize base class
        super().__init__(model=model)

    @classmethod
    def get_model_file(cls, fuel: bool = False, variant: str = "tycho") -> str:
        """
        Resolve the model name for the given lunar surface variant.

        :raises NotImplementedError: When `fuel=True`; the Fuel-hosted model
            is not available yet. (The original body kept an unreachable
            `get_model_file_from_fuel(".../models/lunar_surface_{variant}")`
            call after the raise; the dead code was removed — behavior is unchanged.)
        """
        if fuel:
            raise NotImplementedError
        else:
            return f"lunar_surface_{variant}"
54 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/terrains/random_ground.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import List, Optional
3 |
4 | import numpy as np
5 | from gym_ignition.scenario import model_wrapper
6 | from gym_ignition.utils.scenario import get_unique_model_name
7 | from numpy.random import RandomState
8 | from scenario import core as scenario
9 |
10 |
class RandomGround(model_wrapper.ModelWrapper):
    """Static flat ground plane with a randomly selected PBR texture."""

    def __init__(
        self,
        world: scenario.World,
        name: str = "random_ground",
        position: List[float] = (0, 0, 0),
        orientation: List[float] = (1, 0, 0, 0),
        size: List[float] = (1.0, 1.0),
        collision_thickness: float = 0.05,
        friction: float = 5.0,
        texture_dir: Optional[str] = None,
        np_random: Optional[RandomState] = None,
        **kwargs,
    ):
        """
        Insert a flat ground plane textured with a random PBR material.

        :param world: World to insert the model into.
        :param name: Base name of the model (made unique automatically).
        :param position: Initial position (x, y, z).
        :param orientation: Initial orientation quaternion (w, x, y, z).
        :param size: Plane extent (x, y).
        :param collision_thickness: Collision thickness parameter.
        :param friction: Friction coefficient for both friction directions.
        :param texture_dir: Directory (or ":"-separated directories) of PBR
            textures; falls back to the `TEXTURE_DIRS` environment variable.
        :param np_random: Random number generator; a default one is created if None.
        :raises RuntimeError: If the model cannot be inserted into the world.
        """

        if np_random is None:
            np_random = np.random.default_rng()

        # Get a unique model name
        model_name = get_unique_model_name(world, name)

        # Initial pose
        initial_pose = scenario.Pose(position, orientation)

        # Get textures from ENV variable if not directly specified
        if not texture_dir:
            texture_dir = os.environ.get("TEXTURE_DIRS", default="")

        # Find random PBR texture
        albedo_map = None
        normal_map = None
        roughness_map = None
        metalness_map = None
        if texture_dir:
            if ":" in texture_dir:
                textures = []
                for d in texture_dir.split(":"):
                    textures.extend([os.path.join(d, f) for f in os.listdir(d)])
            else:
                # Get list of the available textures
                # BUGFIX: anchor entries at `texture_dir` — `os.listdir()` alone
                # returns bare names, which broke the `os.listdir()` call on the
                # chosen texture below (the ":" branch already yields full paths).
                textures = [
                    os.path.join(texture_dir, f) for f in os.listdir(texture_dir)
                ]

            # Choose a random texture from these
            random_texture_dir = str(np_random.choice(textures))

            # List all files
            texture_files = os.listdir(random_texture_dir)

            # Extract the appropriate files
            for texture in texture_files:
                texture_lower = texture.lower()
                if "color" in texture_lower or "albedo" in texture_lower:
                    albedo_map = os.path.join(random_texture_dir, texture)
                elif "normal" in texture_lower:
                    normal_map = os.path.join(random_texture_dir, texture)
                elif "roughness" in texture_lower:
                    roughness_map = os.path.join(random_texture_dir, texture)
                elif "specular" in texture_lower or "metalness" in texture_lower:
                    metalness_map = os.path.join(random_texture_dir, texture)

        # Create SDF string for the model
        # NOTE(review): the template appears to have lost its XML tags in this
        # copy of the source — verify against a rendered model.
        sdf = f"""

true




0 0 1
{size[0]} {size[1]}





{friction}
{friction}
0 0 0
0.0
0.0









0 0 1
{size[0]} {size[1]}




1 1 1 1
1 1 1 1
1 1 1 1


{"%s"
% albedo_map if albedo_map is not None else ""}
{"%s"
% normal_map if normal_map is not None else ""}
{"%s"
% roughness_map if roughness_map is not None else ""}
{"%s"
% metalness_map if metalness_map is not None else ""}






"""

        # Insert the model
        ok_model = world.to_gazebo().insert_model_from_string(
            sdf, initial_pose, model_name
        )
        if not ok_model:
            raise RuntimeError("Failed to insert " + model_name)

        # Get the model
        model = world.get_model(model_name)

        # Initialize base class
        model_wrapper.ModelWrapper.__init__(self, model=model)
136 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/terrains/random_lunar_surface.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import List, Optional, Tuple
3 |
4 | import numpy as np
5 | from gym_ignition.scenario import model_wrapper
6 | from gym_ignition.utils.scenario import get_unique_model_name
7 | from numpy.random import RandomState
8 | from scenario import core as scenario
9 |
10 |
class RandomLunarSurface(model_wrapper.ModelWrapper):
    """Lunar surface model selected at random from a directory of SDF models."""

    def __init__(
        self,
        world: scenario.World,
        name: str = "lunar_surface",
        position: List[float] = (0, 0, 0),
        orientation: List[float] = (1, 0, 0, 0),
        models_dir: Optional[str] = None,
        np_random: Optional[RandomState] = None,
        **kwargs,
    ):
        """
        Insert a randomly chosen lunar surface model into the world.

        :param world: World to insert the model into.
        :param name: Base name of the model (made unique automatically).
        :param position: Initial position (x, y, z).
        :param orientation: Initial orientation quaternion (w, x, y, z).
        :param models_dir: Directory containing one subdirectory per surface model;
            falls back to the `SDF_PATH_LUNAR_SURFACE` environment variable.
        :param np_random: Random number generator; a default one is created if None.
        :raises ValueError: If the models directory does not exist.
        :raises RuntimeError: If the model cannot be inserted into the world.
        """

        if np_random is None:
            np_random = np.random.default_rng()

        # Get a unique model name
        model_name = get_unique_model_name(world, name)

        # Setup initial pose
        initial_pose = scenario.Pose(position, orientation)

        # Get path to all lunar surface models
        if not models_dir:
            models_dir = os.environ.get("SDF_PATH_LUNAR_SURFACE", default="")

        # Make sure the path exists
        if not os.path.exists(models_dir):
            raise ValueError(
                f"Invalid path '{models_dir}' pointed by 'SDF_PATH_LUNAR_SURFACE' environment variable."
            )

        # Select a single model at random
        model_dir = np_random.choice(os.listdir(models_dir))
        # BUGFIX: `os.listdir()` returns bare entry names, so the path must be
        # anchored at `models_dir` — previously the join omitted it, producing
        # a path relative to the process CWD.
        sdf_filepath = os.path.join(models_dir, model_dir, "model.sdf")

        # Insert the model
        ok_model = world.to_gazebo().insert_model_from_file(
            sdf_filepath, initial_pose, model_name
        )
        if not ok_model:
            raise RuntimeError("Failed to insert " + model_name)

        # Get the model
        model = world.get_model(model_name)

        # Initialize base class
        model_wrapper.ModelWrapper.__init__(self, model=model)
58 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .model_collection_randomizer import ModelCollectionRandomizer
2 | from .xacro2sdf import xacro2sdf
3 |
--------------------------------------------------------------------------------
/drl_grasping/envs/models/utils/xacro2sdf.py:
--------------------------------------------------------------------------------
1 | import subprocess
2 | import tempfile
3 | from typing import Dict, Optional, Tuple
4 |
5 | import xacro
6 |
7 |
def xacro2sdf(
    input_file_path: str, mappings: Dict, model_path_remap: Optional[Tuple[str, str]]
) -> str:
    """Convert xacro (URDF variant) with given arguments to SDF and return as a string.

    :param input_file_path: Path to the input xacro file.
    :param mappings: xacro argument mappings; values are stringified internally.
    :param model_path_remap: Optional (old, new) pair substituted in the resulting
        SDF so that meshes can be located by Ignition.
    :raises subprocess.CalledProcessError: If `ign sdf -p` fails.
    """

    # Convert all values in mappings to strings.
    # Work on a copy so the caller's dict is not mutated as a side effect.
    mappings = {key: str(value) for key, value in mappings.items()}

    # Convert xacro to URDF
    urdf_xml = xacro.process(input_file_name=input_file_path, mappings=mappings)

    # Create temporary file for URDF (`ign sdf -p` accepts only files)
    with tempfile.NamedTemporaryFile() as tmp_urdf:
        with open(tmp_urdf.name, "w") as urdf_file:
            urdf_file.write(urdf_xml)

        # Convert to SDF
        # `check=True` surfaces conversion failures explicitly instead of
        # silently returning an empty string
        result = subprocess.run(
            ["ign", "sdf", "-p", tmp_urdf.name], stdout=subprocess.PIPE, check=True
        )
        sdf_xml = result.stdout.decode("utf-8")

        # Remap package name to model name, such that meshes can be located by Ignition
        if model_path_remap is not None:
            sdf_xml = sdf_xml.replace(model_path_remap[0], model_path_remap[1])

        # Return as string
        return sdf_xml
37 |
--------------------------------------------------------------------------------
/drl_grasping/envs/perception/__init__.py:
--------------------------------------------------------------------------------
1 | from .camera_subscriber import CameraSubscriber, CameraSubscriberStandalone
2 | from .octree import OctreeCreator
3 |
--------------------------------------------------------------------------------
/drl_grasping/envs/perception/camera_subscriber.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from threading import Lock, Thread
3 | from typing import Optional, Union
4 |
5 | import rclpy
6 | from rclpy.callback_groups import CallbackGroup
7 | from rclpy.executors import SingleThreadedExecutor
8 | from rclpy.node import Node
9 | from rclpy.parameter import Parameter
10 | from rclpy.qos import (
11 | QoSDurabilityPolicy,
12 | QoSHistoryPolicy,
13 | QoSProfile,
14 | QoSReliabilityPolicy,
15 | )
16 | from sensor_msgs.msg import Image, PointCloud2
17 |
18 |
class CameraSubscriber:
    """Subscribes to a camera topic and caches the most recent message.

    Depending on `is_point_cloud`, the subscription carries either
    `PointCloud2` or `Image` messages. Access to the cached message is
    guarded by a mutex because the callback runs on an executor thread.
    """

    def __init__(
        self,
        node: Node,
        topic: str,
        is_point_cloud: bool,
        callback_group: Optional[CallbackGroup] = None,
    ):

        self._node = node

        # Message type depends on whether the camera publishes point clouds or images
        camera_msg_type = PointCloud2 if is_point_cloud else Image

        # Cache starts out as an empty message of the appropriate type
        self.__observation = camera_msg_type()
        self._node.create_subscription(
            msg_type=camera_msg_type,
            topic=topic,
            callback=self.observation_callback,
            qos_profile=QoSProfile(
                reliability=QoSReliabilityPolicy.RELIABLE,
                durability=QoSDurabilityPolicy.VOLATILE,
                history=QoSHistoryPolicy.KEEP_LAST,
                depth=1,
            ),
            callback_group=callback_group,
        )
        self.__observation_mutex = Lock()
        self.__new_observation_available = False

    def observation_callback(self, msg):
        """
        Callback for getting observation.
        """

        with self.__observation_mutex:
            self.__observation = msg
            self.__new_observation_available = True
            self._node.get_logger().debug("New observation received.")

    def get_observation(self) -> Union[PointCloud2, Image]:
        """
        Get the last received observation.
        """

        with self.__observation_mutex:
            return self.__observation

    def reset_new_observation_checker(self):
        """
        Reset checker of new observations, i.e. `self.new_observation_available()`
        """

        with self.__observation_mutex:
            self.__new_observation_available = False

    @property
    def new_observation_available(self):
        """
        Check if new observation is available since `self.reset_new_observation_checker()` was called
        """

        return self.__new_observation_available
88 |
89 |
class CameraSubscriberStandalone(Node, CameraSubscriber):
    # Self-contained variant of CameraSubscriber: it is its own ROS 2 node and
    # spins itself on a background executor thread.
    def __init__(
        self,
        topic: str,
        is_point_cloud: bool,
        node_name: str = "drl_grasping_camera_sub",
        use_sim_time: bool = True,
    ):
        """
        Create a standalone camera-subscriber node.

        :param topic: Topic to subscribe to.
        :param is_point_cloud: True for PointCloud2 messages, False for Image.
        :param node_name: Name of the created ROS 2 node.
        :param use_sim_time: Whether the node uses simulation time.
        """

        # Initialise the ROS 2 context; tolerate `rclpy.init()` failing when a
        # context already exists, but abort if no usable context results.
        try:
            rclpy.init()
        except Exception as e:
            if not rclpy.ok():
                sys.exit(f"ROS 2 context could not be initialised: {e}")

        # Node must be initialised before parameters can be set on it
        Node.__init__(self, node_name)
        self.set_parameters(
            [Parameter("use_sim_time", type_=Parameter.Type.BOOL, value=use_sim_time)]
        )

        # Wire up the actual subscription (no callback group in standalone mode)
        CameraSubscriber.__init__(
            self, node=self, topic=topic, is_point_cloud=is_point_cloud
        )

        # Spin the node in a separate thread
        # (daemon thread so it does not block interpreter shutdown)
        self._executor = SingleThreadedExecutor()
        self._executor.add_node(self)
        self._executor_thread = Thread(target=self._executor.spin, daemon=True, args=())
        self._executor_thread.start()
119 |
--------------------------------------------------------------------------------
/drl_grasping/envs/randomizers/__init__.py:
--------------------------------------------------------------------------------
1 | from .manipulation import ManipulationGazeboEnvRandomizer
2 |
--------------------------------------------------------------------------------
/drl_grasping/envs/runtimes/__init__.py:
--------------------------------------------------------------------------------
1 | from .real_evaluation import RealEvaluationRuntime
2 |
--------------------------------------------------------------------------------
/drl_grasping/envs/tasks/__init__.py:
--------------------------------------------------------------------------------
1 | from .curriculums import *
2 | from .grasp import *
3 | from .grasp_planetary import *
4 | from .reach import *
5 |
--------------------------------------------------------------------------------
/drl_grasping/envs/tasks/curriculums/__init__.py:
--------------------------------------------------------------------------------
1 | from .grasp import GraspCurriculum
2 |
--------------------------------------------------------------------------------
/drl_grasping/envs/tasks/grasp/__init__.py:
--------------------------------------------------------------------------------
1 | from .grasp import Grasp
2 | from .grasp_octree import GraspOctree
3 |
--------------------------------------------------------------------------------
/drl_grasping/envs/tasks/grasp/grasp_octree.py:
--------------------------------------------------------------------------------
1 | import abc
2 | from collections import deque
3 | from typing import Tuple
4 |
5 | import gym
6 | import numpy as np
7 | from gym_ignition.utils.typing import Observation, ObservationSpace
8 |
9 | from drl_grasping.envs.models.sensors import Camera
10 | from drl_grasping.envs.perception import CameraSubscriber, OctreeCreator
11 | from drl_grasping.envs.tasks.grasp import Grasp
12 | from drl_grasping.envs.utils.conversions import orientation_quat_to_6d
13 |
14 |
15 | class GraspOctree(Grasp, abc.ABC):
    def __init__(
        self,
        octree_reference_frame_id: str,
        octree_min_bound: Tuple[float, float, float],
        octree_max_bound: Tuple[float, float, float],
        octree_depth: int,
        octree_full_depth: int,
        octree_include_color: bool,
        octree_include_intensity: bool,
        octree_n_stacked: int,
        octree_max_size: int,
        proprioceptive_observations: bool,
        camera_type: str = "rgbd_camera",
        **kwargs,
    ):
        """
        Grasp task variant whose observations are octrees built from point clouds.

        :param octree_reference_frame_id: Frame the octree is expressed in
            (special frame names are substituted via `substitute_special_frame`).
        :param octree_min_bound: Lower (x, y, z) corner of the octree volume;
            z is offset by the robot's base link height below.
        :param octree_max_bound: Upper (x, y, z) corner of the octree volume;
            z is offset the same way.
        :param octree_depth: Depth of the created octrees.
        :param octree_full_depth: Depth up to which the octree is kept dense.
        :param octree_include_color: Whether octree features include color.
        :param octree_include_intensity: Whether octree features include intensity.
        :param octree_n_stacked: Number of consecutive octrees stacked per observation.
        :param octree_max_size: Fixed byte size each octree is padded to.
        :param proprioceptive_observations: Whether proprioceptive data is
            embedded inside the octree observation.
        :param camera_type: Camera model whose point-cloud topic is subscribed to.
        """

        # Initialize the Task base class
        Grasp.__init__(
            self,
            **kwargs,
        )

        # Perception (depth/RGB-D camera - point cloud)
        self.camera_sub = CameraSubscriber(
            node=self,
            topic=Camera.get_points_topic(camera_type),
            is_point_cloud=True,
            callback_group=self._callback_group,
        )

        # Offset octree bounds by the robot base offset
        # (bounds are given relative to the robot base; world frame needs the z offset)
        octree_min_bound = (
            octree_min_bound[0],
            octree_min_bound[1],
            octree_min_bound[2] + self.robot_model_class.BASE_LINK_Z_OFFSET,
        )
        octree_max_bound = (
            octree_max_bound[0],
            octree_max_bound[1],
            octree_max_bound[2] + self.robot_model_class.BASE_LINK_Z_OFFSET,
        )

        # Octree creator
        self.octree_creator = OctreeCreator(
            node=self,
            tf2_listener=self.tf2_listener,
            reference_frame_id=self.substitute_special_frame(octree_reference_frame_id),
            min_bound=octree_min_bound,
            max_bound=octree_max_bound,
            include_color=octree_include_color,
            include_intensity=octree_include_intensity,
            depth=octree_depth,
            full_depth=octree_full_depth,
        )

        # Additional parameters
        self._octree_n_stacked = octree_n_stacked
        self._octree_max_size = octree_max_size
        self._proprioceptive_observations = proprioceptive_observations

        # List of all octrees
        # (deque keeps only the most recent `octree_n_stacked` octrees)
        self.__stacked_octrees = deque([], maxlen=self._octree_n_stacked)
78 |
79 | def create_observation_space(self) -> ObservationSpace:
80 |
81 | # 0:n - octree
82 | # Note: octree is currently padded with zeros to have constant size
83 | # TODO: Customize replay buffer to support variable sized observations
84 | # If enabled, proprieceptive observations will be embedded inside octree in a hacky way
85 | # (replace with Dict once https://github.com/DLR-RM/stable-baselines3/pull/243 is merged)
86 | # 0 - (gripper) Gripper state
87 | # - 1.0: opened
88 | # - -1.0: closed
89 | # 1:4 - (x, y, z) displacement
90 | # - metric units, unbound
91 | # 4:10 - (v1_x, v1_y, v1_z, v2_x, v2_y, v2_z) 3D orientation in "6D representation"
92 | # - normalised
93 | return gym.spaces.Box(
94 | low=0,
95 | high=255,
96 | shape=(self._octree_n_stacked, self._octree_max_size),
97 | dtype=np.uint8,
98 | )
99 |
100 | def get_observation(self) -> Observation:
101 |
102 | # Get the latest point cloud
103 | point_cloud = self.camera_sub.get_observation()
104 |
105 | # Contrust octree from this point cloud
106 | octree = self.octree_creator(point_cloud).numpy()
107 |
108 | # Pad octree with zeros to have a consistent length
109 | # TODO: Customize replay buffer to support variable sized observations
110 | octree_size = octree.shape[0]
111 | if octree_size > self._octree_max_size:
112 | self.get_logger().error(
113 | f"Octree is larger than the maximum allowed size of {self._octree_max_size} (exceeded with {octree_size})"
114 | )
115 | octree = np.pad(
116 | octree,
117 | (0, self._octree_max_size - octree_size),
118 | "constant",
119 | constant_values=0,
120 | )
121 |
122 | # Write the original length into the padded octree for reference
123 | octree[-4:] = np.ndarray(
124 | buffer=np.array([octree_size], dtype=np.uint32).tobytes(),
125 | shape=(4,),
126 | dtype=np.uint8,
127 | )
128 | # To get it back:
129 | # octree_size = np.frombuffer(buffer=octree[-4:], dtype=np.uint32, count=1)
130 |
131 | if self._proprioceptive_observations:
132 | # Add number of auxiliary observations to octree structure
133 | octree[-8:-4] = np.ndarray(
134 | buffer=np.array([10], dtype=np.uint32).tobytes(),
135 | shape=(4,),
136 | dtype=np.uint8,
137 | )
138 |
139 | # Gather proprioceptive observations
140 | ee_position, ee_orientation = self.get_ee_pose()
141 | ee_orientation = orientation_quat_to_6d(quat_xyzw=ee_orientation)
142 | aux_obs = (
143 | (1.0 if self.gripper.is_open else -1.0,)
144 | + ee_position
145 | + ee_orientation[0]
146 | + ee_orientation[1]
147 | )
148 |
149 | # Add auxiliary observations into the octree structure
150 | octree[-48:-8] = np.ndarray(
151 | buffer=np.array(aux_obs, dtype=np.float32).tobytes(),
152 | shape=(40,),
153 | dtype=np.uint8,
154 | )
155 |
156 | self.__stacked_octrees.append(octree)
157 | # For the first buffer after reset, fill with identical observations until deque is full
158 | while not self._octree_n_stacked == len(self.__stacked_octrees):
159 | self.__stacked_octrees.append(octree)
160 |
161 | # Create the observation
162 | observation = Observation(np.array(self.__stacked_octrees, dtype=np.uint8))
163 |
164 | self.get_logger().debug(f"\nobservation: {observation}")
165 |
166 | # Return the observation
167 | return observation
168 |
169 | def reset_task(self):
170 |
171 | self.__stacked_octrees.clear()
172 | Grasp.reset_task(self)
173 |
--------------------------------------------------------------------------------
/drl_grasping/envs/tasks/grasp_planetary/__init__.py:
--------------------------------------------------------------------------------
1 | from .grasp_planetary import GraspPlanetary
2 | from .grasp_planetary_color_image import GraspPlanetaryColorImage
3 | from .grasp_planetary_depth_image import GraspPlanetaryDepthImage
4 | from .grasp_planetary_octree import GraspPlanetaryOctree
5 |
--------------------------------------------------------------------------------
/drl_grasping/envs/tasks/grasp_planetary/grasp_planetary.py:
--------------------------------------------------------------------------------
1 | import abc
2 |
3 | from drl_grasping.envs.models.robots import LunalabSummitXlGen
4 | from drl_grasping.envs.tasks.grasp import Grasp
5 |
6 |
class GraspPlanetary(Grasp, abc.ABC):
    """Base class for grasping tasks that take place in a planetary environment."""

    def __init__(
        self,
        **kwargs,
    ):
        """Forward all arguments to the `Grasp` base and adjust robot defaults."""

        # Initialize the Task base class
        Grasp.__init__(
            self,
            **kwargs,
        )

        # Certain robots use a different initial arm configuration in this environment
        if self.robot_model_class == LunalabSummitXlGen:
            # Alternative configuration (first joint at pi/3 instead of 0.0):
            # [1.0471975511965976, 2.356194490192345, 0.0,
            #  4.71238898038469, 0.0, 2.356194490192345, 0.0]
            self.initial_arm_joint_positions = [
                0.0,
                2.356194490192345,
                0.0,
                4.71238898038469,
                0.0,
                2.356194490192345,
                0.0,
            ]
39 |
--------------------------------------------------------------------------------
/drl_grasping/envs/tasks/grasp_planetary/grasp_planetary_color_image.py:
--------------------------------------------------------------------------------
1 | import abc
2 |
3 | import gym
4 | import numpy as np
5 | from gym_ignition.utils.typing import Observation, ObservationSpace
6 |
7 | from drl_grasping.envs.models.sensors import Camera
8 | from drl_grasping.envs.perception import CameraSubscriber
9 | from drl_grasping.envs.tasks.grasp_planetary import GraspPlanetary
10 |
11 |
class GraspPlanetaryColorImage(GraspPlanetary, abc.ABC):
    """Planetary grasping task that observes the scene as an RGB (or intensity) image."""

    def __init__(
        self,
        camera_width: int,
        camera_height: int,
        camera_type: str = "camera",
        monochromatic: bool = False,
        **kwargs,
    ):
        """
        Args:
            camera_width: Expected width of the camera image (pixels).
            camera_height: Expected height of the camera image (pixels).
            camera_type: Camera model type, used to resolve the ROS topic name.
            monochromatic: If True, only the first channel is used as observation.
        """

        # Initialize the Task base class
        GraspPlanetary.__init__(
            self,
            **kwargs,
        )

        # Store parameters for later use
        self._camera_width = camera_width
        self._camera_height = camera_height
        self._monochromatic = monochromatic

        # Perception (RGB camera)
        self.camera_sub = CameraSubscriber(
            node=self,
            topic=Camera.get_color_topic(camera_type),
            is_point_cloud=False,
            callback_group=self._callback_group,
        )

    def create_observation_space(self) -> ObservationSpace:

        # 0:3*height*width - rgb image
        # 0:1*height*width - monochromatic (intensity) image
        return gym.spaces.Box(
            low=0,
            high=255,
            shape=(
                self._camera_height,
                self._camera_width,
                1 if self._monochromatic else 3,
            ),
            dtype=np.uint8,
        )

    def get_observation(self) -> Observation:
        """Return the latest camera image, reshaped to match the observation space."""

        # Get the latest image
        image = self.camera_sub.get_observation()

        assert (
            image.width == self._camera_width and image.height == self._camera_height
        ), f"Error: Resolution of the input image does not match the configured observation space. ({image.width}x{image.height} instead of {self._camera_width}x{self._camera_height})"

        # Reshape and create the observation
        color_image = np.array(image.data, dtype=np.uint8).reshape(
            self._camera_height, self._camera_width, 3
        )

        if self._monochromatic:
            # Keep the trailing channel axis so the observation matches the
            # (height, width, 1) shape declared in create_observation_space()
            # (fix: `[:, :, 0]` dropped the axis, yielding (height, width))
            observation = Observation(color_image[:, :, 0:1])
        else:
            observation = Observation(color_image)

        self.get_logger().debug(f"\nobservation: {observation}")

        # Return the observation
        return observation
84 |
--------------------------------------------------------------------------------
/drl_grasping/envs/tasks/grasp_planetary/grasp_planetary_octree.py:
--------------------------------------------------------------------------------
1 | import abc
2 | from collections import deque
3 | from typing import Tuple
4 |
5 | import gym
6 | import numpy as np
7 | from gym_ignition.utils.typing import Observation, ObservationSpace
8 |
9 | from drl_grasping.envs.models.sensors import Camera
10 | from drl_grasping.envs.perception import CameraSubscriber, OctreeCreator
11 | from drl_grasping.envs.tasks.grasp_planetary import GraspPlanetary
12 | from drl_grasping.envs.utils.conversions import orientation_quat_to_6d
13 |
14 |
class GraspPlanetaryOctree(GraspPlanetary, abc.ABC):
    """Planetary grasping task that observes the scene as a stack of octrees.

    Each observation is a fixed-size byte stream: the serialized octree padded
    with zeros, with the original octree length embedded in the last 4 bytes
    and, when enabled, proprioceptive observations embedded just before that.
    """

    def __init__(
        self,
        octree_reference_frame_id: str,
        octree_min_bound: Tuple[float, float, float],
        octree_max_bound: Tuple[float, float, float],
        octree_depth: int,
        octree_full_depth: int,
        octree_include_color: bool,
        octree_include_intensity: bool,
        octree_n_stacked: int,
        octree_max_size: int,
        proprioceptive_observations: bool,
        camera_type: str = "rgbd_camera",
        **kwargs,
    ):

        # Initialize the Task base class
        GraspPlanetary.__init__(
            self,
            **kwargs,
        )

        # Perception (depth/RGB-D camera - point cloud)
        self.camera_sub = CameraSubscriber(
            node=self,
            topic=Camera.get_points_topic(camera_type),
            is_point_cloud=True,
            callback_group=self._callback_group,
        )

        # Offset octree bounds (z-axis only) by the robot base offset
        octree_min_bound = (
            octree_min_bound[0],
            octree_min_bound[1],
            octree_min_bound[2] + self.robot_model_class.BASE_LINK_Z_OFFSET,
        )
        octree_max_bound = (
            octree_max_bound[0],
            octree_max_bound[1],
            octree_max_bound[2] + self.robot_model_class.BASE_LINK_Z_OFFSET,
        )

        # Octree creator (converts point clouds into serialized octrees)
        self.octree_creator = OctreeCreator(
            node=self,
            tf2_listener=self.tf2_listener,
            reference_frame_id=self.substitute_special_frame(octree_reference_frame_id),
            min_bound=octree_min_bound,
            max_bound=octree_max_bound,
            include_color=octree_include_color,
            include_intensity=octree_include_intensity,
            depth=octree_depth,
            full_depth=octree_full_depth,
        )

        # Additional parameters
        self._octree_n_stacked = octree_n_stacked
        self._octree_max_size = octree_max_size
        self._proprioceptive_observations = proprioceptive_observations

        # Buffer of the most recent octrees (oldest is dropped automatically)
        self.__stacked_octrees = deque([], maxlen=self._octree_n_stacked)

    def create_observation_space(self) -> ObservationSpace:

        # 0:n - octree
        # Note: octree is currently padded with zeros to have constant size
        # TODO: Customize replay buffer to support variable sized observations
        # If enabled, proprioceptive observations will be embedded inside octree in a hacky way
        # (replace with Dict once https://github.com/DLR-RM/stable-baselines3/pull/243 is merged)
        # 0 - (gripper) Gripper state
        #   - 1.0: opened
        #   - -1.0: closed
        # 1:4 - (x, y, z) displacement
        #   - metric units, unbound
        # 4:10 - (v1_x, v1_y, v1_z, v2_x, v2_y, v2_z) 3D orientation in "6D representation"
        #   - normalised
        return gym.spaces.Box(
            low=0,
            high=255,
            shape=(self._octree_n_stacked, self._octree_max_size),
            dtype=np.uint8,
        )

    def get_observation(self) -> Observation:
        """Assemble the stacked-octree observation from the latest point cloud."""

        # Get the latest point cloud
        point_cloud = self.camera_sub.get_observation()

        # Construct octree from this point cloud
        octree = self.octree_creator(point_cloud).numpy()

        # Pad octree with zeros to have a consistent length
        # TODO: Customize replay buffer to support variable sized observations
        octree_size = octree.shape[0]
        if octree_size > self._octree_max_size:
            self.get_logger().error(
                f"Octree is larger than the maximum allowed size of {self._octree_max_size} (exceeded with {octree_size})"
            )
            # Truncate to the maximum size instead of proceeding into np.pad
            # with a negative pad width (fix: previously raised ValueError here)
            octree = octree[: self._octree_max_size]
            octree_size = self._octree_max_size
        octree = np.pad(
            octree,
            (0, self._octree_max_size - octree_size),
            "constant",
            constant_values=0,
        )

        # Write the original length into the padded octree for reference
        octree[-4:] = np.ndarray(
            buffer=np.array([octree_size], dtype=np.uint32).tobytes(),
            shape=(4,),
            dtype=np.uint8,
        )
        # To get it back:
        # octree_size = np.frombuffer(buffer=octree[-4:], dtype=np.uint32, count=1)

        if self._proprioceptive_observations:
            # Add number of auxiliary observations to octree structure
            octree[-8:-4] = np.ndarray(
                buffer=np.array([10], dtype=np.uint32).tobytes(),
                shape=(4,),
                dtype=np.uint8,
            )

            # Gather proprioceptive observations
            # (1 gripper state + 3 position + 6 orientation = 10 floats)
            ee_position, ee_orientation = self.get_ee_pose()
            ee_orientation = orientation_quat_to_6d(quat_xyzw=ee_orientation)
            aux_obs = (
                (1.0 if self.gripper.is_open else -1.0,)
                + ee_position
                + ee_orientation[0]
                + ee_orientation[1]
            )

            # Add auxiliary observations into the octree structure
            # (10 float32 values == 40 bytes)
            octree[-48:-8] = np.ndarray(
                buffer=np.array(aux_obs, dtype=np.float32).tobytes(),
                shape=(40,),
                dtype=np.uint8,
            )

        self.__stacked_octrees.append(octree)
        # For the first buffer after reset, fill with identical observations until deque is full
        while len(self.__stacked_octrees) < self._octree_n_stacked:
            self.__stacked_octrees.append(octree)

        # Create the observation
        observation = Observation(np.array(self.__stacked_octrees, dtype=np.uint8))

        self.get_logger().debug(f"\nobservation: {observation}")

        # Return the observation
        return observation

    def reset_task(self):
        """Drop octrees stacked during the previous episode, then reset the base task."""

        self.__stacked_octrees.clear()
        GraspPlanetary.reset_task(self)
173 |
--------------------------------------------------------------------------------
/drl_grasping/envs/tasks/reach/__init__.py:
--------------------------------------------------------------------------------
1 | from .reach import Reach
2 | from .reach_color_image import ReachColorImage
3 | from .reach_depth_image import ReachDepthImage
4 | from .reach_octree import ReachOctree
5 |
--------------------------------------------------------------------------------
/drl_grasping/envs/tasks/reach/reach.py:
--------------------------------------------------------------------------------
1 | import abc
2 | from typing import Tuple
3 |
4 | import gym
5 | import numpy as np
6 | from gym_ignition.utils.typing import (
7 | Action,
8 | ActionSpace,
9 | Observation,
10 | ObservationSpace,
11 | Reward,
12 | )
13 |
14 | from drl_grasping.envs.tasks.manipulation import Manipulation
15 | from drl_grasping.envs.utils.math import distance_to_nearest_point
16 |
17 |
class Reach(Manipulation, abc.ABC):
    """Task in which the end effector must be moved within reach of a target object."""

    def __init__(
        self,
        sparse_reward: bool,
        act_quick_reward: float,
        required_accuracy: float,
        **kwargs,
    ):
        """
        Args:
            sparse_reward: If True, grant +1.0 only upon reaching the target;
                otherwise give dense rewards based on progress towards it.
            act_quick_reward: Magnitude of the per-step penalty that provides
                incentive to act quickly (absolute value is used).
            required_accuracy: Distance under which the target counts as reached.
        """

        # Initialize the Task base class
        Manipulation.__init__(
            self,
            **kwargs,
        )

        # Additional parameters
        self._sparse_reward: bool = sparse_reward
        # Use the magnitude so that either sign of the argument means a penalty
        self._act_quick_reward = (
            act_quick_reward if act_quick_reward >= 0.0 else -act_quick_reward
        )
        self._required_accuracy: float = required_accuracy

        # Flag indicating if the task is done (performance - get_reward + is_done)
        self._is_done: bool = False

        # Distance to target in the previous step (or after reset);
        # None until the first reset, only tracked for dense rewards
        self._previous_distance = None

        # The gripper is not used in this task, so keep it closed
        self.initial_gripper_joint_positions = (
            self.robot_model_class.CLOSED_GRIPPER_JOINT_POSITIONS
        )

    def create_action_space(self) -> ActionSpace:

        # 0:3 - (x, y, z) displacement
        #     - rescaled to metric units before use
        return gym.spaces.Box(low=-1.0, high=1.0, shape=(3,), dtype=np.float32)

    def create_observation_space(self) -> ObservationSpace:

        # 0:3 - (x, y, z) end effector position
        # 3:6 - (x, y, z) target position
        # Note: These could theoretically be restricted to the workspace and object spawn area instead of inf
        return gym.spaces.Box(low=-np.inf, high=np.inf, shape=(6,), dtype=np.float32)

    def set_action(self, action: Action):
        """Apply the action either via servoing or via a planned motion."""

        self.get_logger().debug(f"action: {action}")

        if self._use_servo:
            linear = action[0:3]
            self.servo(linear=linear)
        else:
            position = self.get_relative_ee_position(action[0:3])
            # Fixed orientation: 180 deg rotation about the x-axis (xyzw)
            quat_xyzw = (1.0, 0.0, 0.0, 0.0)
            self.moveit2.move_to_pose(position=position, quat_xyzw=quat_xyzw)

    def get_observation(self) -> Observation:
        """Return the concatenated end-effector and target positions."""

        # Get current end-effector and target positions
        ee_position = self.get_ee_position()
        target_position = self.get_object_position(object_model=self.object_names[0])

        # Create the observation
        observation = Observation(
            np.concatenate([ee_position, target_position], dtype=np.float32)
        )

        self.get_logger().debug(f"\nobservation: {observation}")

        # Return the observation
        return observation

    def get_reward(self) -> Reward:
        """Compute the per-step reward and update the done flag."""

        reward = 0.0

        # Compute the current distance to the target
        current_distance = self.get_distance_to_target()

        # Mark the episode done if target is reached
        if current_distance < self._required_accuracy:
            self._is_done = True
            if self._sparse_reward:
                reward += 1.0

        # Give reward based on how much closer robot got relative to the target for dense reward
        if not self._sparse_reward:
            reward += self._previous_distance - current_distance
            self._previous_distance = current_distance

        # Subtract a small reward each step to provide incentive to act quickly (if enabled)
        reward -= self._act_quick_reward

        self.get_logger().debug(f"reward: {reward}")

        return Reward(reward)

    def is_done(self) -> bool:
        """Return True once the target has been reached in this episode."""

        done = self._is_done

        self.get_logger().debug(f"done: {done}")

        return done

    def reset_task(self):
        """Reset the task-specific state for a new episode."""

        Manipulation.reset_task(self)

        self._is_done = False

        # Compute and store the distance after reset if using dense reward
        if not self._sparse_reward:
            self._previous_distance = self.get_distance_to_target()

        self.get_logger().debug("\ntask reset")

    def get_distance_to_target(self) -> float:
        # Fix: return annotation was Tuple[float, float, float], but a single
        # scalar distance is returned (it is compared and subtracted above)

        ee_position = self.get_ee_position()
        object_position = self.get_object_position(object_model=self.object_names[0])

        return distance_to_nearest_point(origin=ee_position, points=[object_position])
142 |
--------------------------------------------------------------------------------
/drl_grasping/envs/tasks/reach/reach_color_image.py:
--------------------------------------------------------------------------------
1 | import abc
2 |
3 | import gym
4 | import numpy as np
5 | from gym_ignition.utils.typing import Observation, ObservationSpace
6 |
7 | from drl_grasping.envs.models.sensors import Camera
8 | from drl_grasping.envs.perception import CameraSubscriber
9 | from drl_grasping.envs.tasks.reach import Reach
10 |
11 |
class ReachColorImage(Reach, abc.ABC):
    """Reach task that observes the scene as an RGB (or intensity) image."""

    def __init__(
        self,
        camera_width: int,
        camera_height: int,
        camera_type: str = "camera",
        monochromatic: bool = False,
        **kwargs,
    ):
        """
        Args:
            camera_width: Expected width of the camera image (pixels).
            camera_height: Expected height of the camera image (pixels).
            camera_type: Camera model type, used to resolve the ROS topic name.
            monochromatic: If True, only the first channel is used as observation.
        """

        # Initialize the Task base class
        Reach.__init__(
            self,
            **kwargs,
        )

        # Store parameters for later use
        self._camera_width = camera_width
        self._camera_height = camera_height
        self._monochromatic = monochromatic

        # Perception (RGB camera)
        self.camera_sub = CameraSubscriber(
            node=self,
            topic=Camera.get_color_topic(camera_type),
            is_point_cloud=False,
            callback_group=self._callback_group,
        )

    def create_observation_space(self) -> ObservationSpace:

        # 0:3*height*width - rgb image
        # 0:1*height*width - monochromatic (intensity) image
        return gym.spaces.Box(
            low=0,
            high=255,
            shape=(
                self._camera_height,
                self._camera_width,
                1 if self._monochromatic else 3,
            ),
            dtype=np.uint8,
        )

    def get_observation(self) -> Observation:
        """Return the latest camera image, reshaped to match the observation space."""

        # Get the latest image
        image = self.camera_sub.get_observation()

        assert (
            image.width == self._camera_width and image.height == self._camera_height
        ), f"Error: Resolution of the input image does not match the configured observation space. ({image.width}x{image.height} instead of {self._camera_width}x{self._camera_height})"

        # Reshape and create the observation
        color_image = np.array(image.data, dtype=np.uint8).reshape(
            self._camera_height, self._camera_width, 3
        )

        if self._monochromatic:
            # Keep the trailing channel axis so the observation matches the
            # (height, width, 1) shape declared in create_observation_space()
            # (fix: `[:, :, 0]` dropped the axis, yielding (height, width))
            observation = Observation(color_image[:, :, 0:1])
        else:
            observation = Observation(color_image)

        self.get_logger().debug(f"\nobservation: {observation}")

        # Return the observation
        return observation
84 |
--------------------------------------------------------------------------------
/drl_grasping/envs/tasks/reach/reach_depth_image.py:
--------------------------------------------------------------------------------
1 | import abc
2 |
3 | import gym
4 | import numpy as np
5 | from gym_ignition.utils.typing import Observation, ObservationSpace
6 |
7 | from drl_grasping.envs.models.sensors import Camera
8 | from drl_grasping.envs.perception import CameraSubscriber
9 | from drl_grasping.envs.tasks.reach import Reach
10 |
11 |
class ReachDepthImage(Reach, abc.ABC):
    """Reach task that observes the scene as a depth image."""

    def __init__(
        self,
        camera_width: int,
        camera_height: int,
        camera_type: str = "depth_camera",
        **kwargs,
    ):
        """
        Args:
            camera_width: Expected width of the depth image (pixels).
            camera_height: Expected height of the depth image (pixels).
            camera_type: Camera model type, used to resolve the ROS topic name.
        """

        # Initialize the Task base class
        Reach.__init__(
            self,
            **kwargs,
        )

        # Store parameters for later use
        self._camera_width = camera_width
        self._camera_height = camera_height

        # Perception (depth camera)
        self.camera_sub = CameraSubscriber(
            node=self,
            topic=Camera.get_depth_topic(camera_type),
            is_point_cloud=False,
            callback_group=self._callback_group,
        )

    def create_observation_space(self) -> ObservationSpace:

        # 0:height*width - depth image (single float32 channel)
        return gym.spaces.Box(
            low=0,
            high=np.inf,
            shape=(self._camera_height, self._camera_width, 1),
            dtype=np.float32,
        )

    def get_observation(self) -> Observation:
        """Return the latest depth image with infinite readings replaced by zeros."""

        # Get the latest image
        image = self.camera_sub.get_observation()

        # Construct from buffer and reshape
        # A copy is taken because np.frombuffer returns a read-only view when
        # the underlying buffer is immutable (e.g. bytes); the in-place
        # replacement below would then raise ValueError
        depth_image = (
            np.frombuffer(image.data, dtype=np.float32)
            .reshape(self._camera_height, self._camera_width, 1)
            .copy()
        )
        # Replace all instances of infinity with 0
        depth_image[depth_image == np.inf] = 0.0

        # Create the observation
        observation = Observation(depth_image)

        self.get_logger().debug(f"\nobservation: {observation}")

        # Return the observation
        return observation
68 |
--------------------------------------------------------------------------------
/drl_grasping/envs/tasks/reach/reach_octree.py:
--------------------------------------------------------------------------------
1 | import abc
2 | from collections import deque
3 | from typing import Tuple
4 |
5 | import gym
6 | import numpy as np
7 | from gym_ignition.utils.typing import Observation, ObservationSpace
8 |
9 | from drl_grasping.envs.models.sensors import Camera
10 | from drl_grasping.envs.perception import CameraSubscriber, OctreeCreator
11 | from drl_grasping.envs.tasks.reach import Reach
12 |
13 |
class ReachOctree(Reach, abc.ABC):
    """Reach task that observes the scene as a stack of zero-padded octrees."""

    # NOTE(review): these class-level defaults are not referenced by the code
    # below (the constructor arguments are used instead) — confirm before removal
    _octree_min_bound: Tuple[float, float, float] = (0.15, -0.3, 0.0)
    _octree_max_bound: Tuple[float, float, float] = (0.75, 0.3, 0.6)

    def __init__(
        self,
        octree_reference_frame_id: str,
        octree_min_bound: Tuple[float, float, float],
        octree_max_bound: Tuple[float, float, float],
        octree_depth: int,
        octree_full_depth: int,
        octree_include_color: bool,
        octree_include_intensity: bool,
        octree_n_stacked: int,
        octree_max_size: int,
        camera_type: str = "rgbd_camera",
        **kwargs,
    ):

        # Initialize the Task base class
        Reach.__init__(
            self,
            **kwargs,
        )

        # Store parameters for later use
        self._octree_n_stacked = octree_n_stacked
        self._octree_max_size = octree_max_size

        # Perception (RGB-D camera - point cloud)
        self.camera_sub = CameraSubscriber(
            node=self,
            topic=Camera.get_points_topic(camera_type),
            is_point_cloud=True,
            callback_group=self._callback_group,
        )

        # Offset octree bounds (z-axis only) by the robot base offset
        octree_min_bound = (
            octree_min_bound[0],
            octree_min_bound[1],
            octree_min_bound[2] + self.robot_model_class.BASE_LINK_Z_OFFSET,
        )
        octree_max_bound = (
            octree_max_bound[0],
            octree_max_bound[1],
            octree_max_bound[2] + self.robot_model_class.BASE_LINK_Z_OFFSET,
        )

        # Octree creator (converts point clouds into serialized octrees)
        self.octree_creator = OctreeCreator(
            node=self,
            tf2_listener=self.tf2_listener,
            reference_frame_id=self.substitute_special_frame(octree_reference_frame_id),
            min_bound=octree_min_bound,
            max_bound=octree_max_bound,
            include_color=octree_include_color,
            include_intensity=octree_include_intensity,
            depth=octree_depth,
            full_depth=octree_full_depth,
        )

        # Buffer of the most recent octrees (oldest is dropped automatically)
        self.__stacked_octrees = deque([], maxlen=self._octree_n_stacked)

    def create_observation_space(self) -> ObservationSpace:

        # 0:n - octree
        # Note: octree is currently padded with zeros to have constant size
        # TODO: Customize replay buffer to support variable sized observations
        return gym.spaces.Box(
            low=0,
            high=255,
            shape=(self._octree_n_stacked, self._octree_max_size),
            dtype=np.uint8,
        )

    def get_observation(self) -> Observation:
        """Construct the stacked-octree observation from the latest point cloud."""

        # Get the latest point cloud
        point_cloud = self.camera_sub.get_observation()

        # Construct octree from this point cloud
        octree = self.octree_creator(point_cloud).numpy()

        # Pad octree with zeros to have a consistent length
        # TODO: Customize replay buffer to support variable sized observations
        octree_size = octree.shape[0]
        if octree_size > self._octree_max_size:
            self.get_logger().error(
                f"Octree is larger than the maximum allowed size of {self._octree_max_size} (exceeded with {octree_size})"
            )
            # Truncate to the maximum size instead of proceeding into np.pad
            # with a negative pad width (fix: previously raised ValueError here)
            octree = octree[: self._octree_max_size]
            octree_size = self._octree_max_size
        octree = np.pad(
            octree,
            (0, self._octree_max_size - octree_size),
            "constant",
            constant_values=0,
        )

        # Write the original length into the padded octree for reference
        octree[-4:] = np.ndarray(
            buffer=np.array([octree_size], dtype=np.uint32).tobytes(),
            shape=(4,),
            dtype=np.uint8,
        )
        # To get it back:
        # octree_size = np.frombuffer(buffer=octree[-4:], dtype=np.uint32, count=1)

        self.__stacked_octrees.append(octree)
        # For the first buffer after reset, fill with identical observations until deque is full
        while len(self.__stacked_octrees) < self._octree_n_stacked:
            self.__stacked_octrees.append(octree)

        # Create the observation
        observation = Observation(np.array(self.__stacked_octrees, dtype=np.uint8))

        self.get_logger().debug(f"\nobservation: {observation}")

        # Return the observation
        return observation

    def reset_task(self):
        """Drop octrees stacked during the previous episode, then reset the base task."""

        self.__stacked_octrees.clear()
        Reach.reset_task(self)
140 |
--------------------------------------------------------------------------------
/drl_grasping/envs/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from . import conversions, gazebo, logging, math
2 | from .tf2_broadcaster import Tf2Broadcaster, Tf2BroadcasterStandalone
3 | from .tf2_listener import Tf2Listener, Tf2ListenerStandalone
4 |
--------------------------------------------------------------------------------
/drl_grasping/envs/utils/conversions.py:
--------------------------------------------------------------------------------
1 | from typing import Tuple, Union
2 |
3 | import geometry_msgs
4 | import numpy
5 | import open3d
6 | import pyoctree
7 | import sensor_msgs
8 | from scipy.spatial.transform import Rotation
9 |
10 |
def pointcloud2_to_open3d(
    ros_point_cloud2: sensor_msgs.msg.PointCloud2,
    include_color: bool = False,
    include_intensity: bool = False,
    # Note: Order does not matter for DL, that's why channel swapping is disabled by default
    fix_rgb_channel_order: bool = False,
) -> open3d.geometry.PointCloud:
    # Convert a ROS 2 PointCloud2 message into an Open3D PointCloud,
    # optionally copying per-point color (or intensity) into `colors`.

    # Create output Open3D PointCloud
    open3d_pc = open3d.geometry.PointCloud()

    size = ros_point_cloud2.width * ros_point_cloud2.height
    # NOTE(review): the source appears corrupted here — the original lines
    # between the dtype selection and the color branch (presumably XYZ
    # extraction and the `valid_points` mask used below) are missing, and the
    # next statement is fused into this line. Restore from upstream before use.
    xyz_dtype = ">f4" if ros_point_cloud2.is_bigendian else " 3:
        # Interpret the BGR bytes of each point in place (strided view over the
        # raw message buffer, no copy)
        bgr = numpy.ndarray(
            shape=(size, 3),
            dtype=numpy.uint8,
            buffer=ros_point_cloud2.data,
            offset=ros_point_cloud2.fields[3].offset,
            strides=(ros_point_cloud2.point_step, 1),
        )
        if fix_rgb_channel_order:
            # Swap channels to gain rgb (faster than `bgr[:, [2, 1, 0]]`)
            bgr[:, 0], bgr[:, 2] = bgr[:, 2], bgr[:, 0].copy()
        # Normalise 0-255 bytes into 0.0-1.0 floats expected by Open3D
        open3d_pc.colors = open3d.utility.Vector3dVector(
            (bgr[valid_points] / 255).astype(numpy.float64)
        )
    else:
        # No color requested/available: fill with zeros so downstream consumers
        # can rely on `colors` being populated
        open3d_pc.colors = open3d.utility.Vector3dVector(
            numpy.zeros((len(valid_points), 3), dtype=numpy.float64)
        )
    # TODO: Update octree creator once L8 image format is supported in Ignition Gazebo
    # elif include_intensity:
    #     # Faster approach, but only the first channel gets the intensity value (rest is 0)
    #     intensities = numpy.zeros((len(valid_points), 3), dtype=numpy.float64)
    #     intensities[:, [0]] = (
    #         numpy.ndarray(
    #             shape=(size, 1),
    #             dtype=numpy.uint8,
    #             buffer=ros_point_cloud2.data,
    #             offset=ros_point_cloud2.fields[3].offset,
    #             strides=(ros_point_cloud2.point_step, 1),
    #         )[valid_points]
    #         / 255
    #     ).astype(numpy.float64)
    #     open3d_pc.colors = open3d.utility.Vector3dVector(intensities)
    #     # # Slower approach, but all channels get the intensity value
    #     # intensities = numpy.ndarray(
    #     #     shape=(size, 1),
    #     #     dtype=numpy.uint8,
    #     #     buffer=ros_point_cloud2.data,
    #     #     offset=ros_point_cloud2.fields[3].offset,
    #     #     strides=(ros_point_cloud2.point_step, 1),
    #     # )
    #     # open3d_pc.colors = open3d.utility.Vector3dVector(
    #     #     numpy.tile(intensities[valid_points] / 255, (1, 3)).astype(numpy.float64)
    #     # )

    # Return the converted Open3D PointCloud
    return open3d_pc
85 |
86 |
def transform_to_matrix(transform: "geometry_msgs.msg.Transform") -> numpy.ndarray:
    """Convert a geometry_msgs Transform into a 4x4 homogeneous transformation matrix.

    Args:
        transform: Transform with `rotation` (xyzw quaternion) and `translation`.

    Returns:
        A 4x4 numpy array combining the rotation and the translation.
    """

    transform_matrix = numpy.zeros((4, 4))
    transform_matrix[3, 3] = 1.0

    # Use scipy for the quaternion -> matrix conversion, consistent with the
    # other orientation helpers in this module (scipy expects xyzw order)
    transform_matrix[0:3, 0:3] = Rotation.from_quat(
        [
            transform.rotation.x,
            transform.rotation.y,
            transform.rotation.z,
            transform.rotation.w,
        ]
    ).as_matrix()
    transform_matrix[0, 3] = transform.translation.x
    transform_matrix[1, 3] = transform.translation.y
    transform_matrix[2, 3] = transform.translation.z

    return transform_matrix
105 |
106 |
107 | def open3d_point_cloud_to_octree_points(
108 | open3d_point_cloud: open3d.geometry.PointCloud,
109 | include_color: bool = False,
110 | include_intensity: bool = False,
111 | ) -> pyoctree.Points:
112 |
113 | octree_points = pyoctree.Points()
114 |
115 | if include_color:
116 | features = numpy.reshape(numpy.asarray(open3d_point_cloud.colors), -1)
117 | elif include_intensity:
118 | features = numpy.asarray(open3d_point_cloud.colors)[:, 0]
119 | else:
120 | features = []
121 |
122 | octree_points.set_points(
123 | # XYZ points
124 | numpy.reshape(numpy.asarray(open3d_point_cloud.points), -1),
125 | # Normals
126 | numpy.reshape(numpy.asarray(open3d_point_cloud.normals), -1),
127 | # Other features, e.g. color
128 | features,
129 | # Labels - not used
130 | [],
131 | )
132 |
133 | return octree_points
134 |
135 |
def orientation_6d_to_quat(
    v1: Tuple[float, float, float], v2: Tuple[float, float, float]
) -> Tuple[float, float, float, float]:
    """Convert a "6D representation" of orientation into an xyzw quaternion.

    The two vectors are interpreted as the first two columns of a rotation
    matrix. A Gram-Schmidt step makes the second column orthogonal to the
    first (fix: previously both vectors were only normalised, so
    non-orthogonal inputs produced an invalid rotation matrix), and the third
    column is obtained as their cross product.
    """

    # Normalize the first vector
    col1 = v1 / numpy.linalg.norm(v1)

    # Make the second vector orthogonal to the first, then normalize
    # (Gram-Schmidt; a no-op when the inputs are already orthogonal)
    col2 = v2 - numpy.dot(col1, v2) * col1
    col2 = col2 / numpy.linalg.norm(col2)

    # Find their orthogonal vector via cross product
    col3 = numpy.cross(col1, col2)

    # Stack into rotation matrix as columns, convert to quaternion and return
    quat_xyzw = Rotation.from_matrix(numpy.array([col1, col2, col3]).T).as_quat()
    return quat_xyzw
150 |
151 |
def orientation_quat_to_6d(
    quat_xyzw: Tuple[float, float, float, float]
) -> Tuple[Tuple[float, float, float], Tuple[float, float, float]]:
    """
    Convert an xyzw quaternion into a 6D orientation representation,
    i.e. the first two (already normalised) columns of the rotation matrix.
    """

    rotation_matrix = Rotation.from_quat(quat_xyzw).as_matrix()
    first_column = rotation_matrix[:, 0]
    second_column = rotation_matrix[:, 1]
    return (tuple(first_column), tuple(second_column))
161 |
162 |
def quat_to_wxyz(
    xyzw: Union[numpy.ndarray, Tuple[float, float, float, float]]
) -> Union[numpy.ndarray, Tuple[float, float, float, float]]:
    """
    Reorder a quaternion from xyzw to wxyz component order.

    Returns a tuple when given a tuple; otherwise the result of fancy
    indexing, so the input must support `[[3, 0, 1, 2]]` (e.g. `numpy.ndarray`).
    (Fixed return annotation: tuples are returned as tuples, not arrays.)
    """

    if isinstance(xyzw, tuple):
        return (xyzw[3], xyzw[0], xyzw[1], xyzw[2])

    return xyzw[[3, 0, 1, 2]]
171 |
172 |
def quat_to_xyzw(
    wxyz: Union[numpy.ndarray, Tuple[float, float, float, float]]
) -> Union[numpy.ndarray, Tuple[float, float, float, float]]:
    """
    Reorder a quaternion from wxyz to xyzw component order.

    Returns a tuple when given a tuple; otherwise the result of fancy
    indexing, so the input must support `[[1, 2, 3, 0]]` (e.g. `numpy.ndarray`).
    (Fixed return annotation: tuples are returned as tuples, not arrays.)
    """

    if isinstance(wxyz, tuple):
        return (wxyz[1], wxyz[2], wxyz[3], wxyz[0])

    return wxyz[[1, 2, 3, 0]]
181 |
--------------------------------------------------------------------------------
/drl_grasping/envs/utils/logging.py:
--------------------------------------------------------------------------------
1 | from typing import Union
2 |
3 | from gym import logger as gym_logger
4 | from gym_ignition.utils import logger as gym_ign_logger
5 |
6 |
def set_log_level(log_level: Union[int, str]):
    """
    Set log level for (Gym) Ignition.

    Accepts either a numeric gym logger level (used as-is) or a level name
    such as "debug"/"warning"; unrecognised names fall back to "DISABLED".

    Bug fix: integer levels previously fell through the string-membership
    check and were silently replaced with the DISABLED level — the name
    normalisation now applies only to non-integer input.
    """

    if not isinstance(log_level, int):
        # Normalise the name and map it onto the numeric gym logger level
        log_level = str(log_level).upper()
        if "WARNING" == log_level:
            # Gym uses "WARN" instead of the logging-style "WARNING"
            log_level = "WARN"
        elif log_level not in ["DEBUG", "INFO", "WARN", "ERROR", "DISABLED"]:
            # Unknown level names disable logging altogether
            log_level = "DISABLED"
        log_level = getattr(gym_logger, log_level)

    gym_ign_logger.set_level(
        level=log_level,
        scenario_level=log_level,
    )
26 |
--------------------------------------------------------------------------------
/drl_grasping/envs/utils/math.py:
--------------------------------------------------------------------------------
1 | from typing import List, Tuple
2 |
3 | import numpy as np
4 |
5 |
def quat_mul(
    quat_0: Tuple[float, float, float, float],
    quat_1: Tuple[float, float, float, float],
    xyzw: bool = True,
) -> Tuple[float, float, float, float]:
    """
    Hamilton product of two quaternions, in either xyzw or wxyz component order.
    """
    # Unpack into named scalar components according to the requested order
    if xyzw:
        x0, y0, z0, w0 = quat_0
        x1, y1, z1, w1 = quat_1
    else:
        w0, x0, y0, z0 = quat_0
        w1, x1, y1, z1 = quat_1

    # Components of the product (same operand order as the original formulas)
    product_x = x1 * w0 + y1 * z0 - z1 * y0 + w1 * x0
    product_y = -x1 * z0 + y1 * w0 + z1 * x0 + w1 * y0
    product_z = x1 * y0 - y1 * x0 + z1 * w0 + w1 * z0
    product_w = -x1 * x0 - y1 * y0 - z1 * z0 + w1 * w0

    # Return in the same component order as the inputs
    if xyzw:
        return (product_x, product_y, product_z, product_w)
    return (product_w, product_x, product_y, product_z)
32 |
33 |
def distance_to_nearest_point(
    origin: Tuple[float, float, float], points: List[Tuple[float, float, float]]
) -> float:
    """Return the Euclidean distance from `origin` to the closest of `points`."""

    offsets = np.asarray(points) - np.asarray(origin)
    return np.linalg.norm(offsets, axis=1).min()
39 |
40 |
def get_nearest_point(
    origin: Tuple[float, float, float], points: List[Tuple[float, float, float]]
) -> Tuple[float, float, float]:
    """Return the element of `points` closest to `origin` (Euclidean distance)."""

    distances = np.linalg.norm(np.asarray(points) - np.asarray(origin), axis=1)
    nearest_index = int(distances.argmin())
    return points[nearest_index]
47 |
--------------------------------------------------------------------------------
/drl_grasping/envs/utils/tf2_broadcaster.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from typing import Tuple
3 |
4 | import rclpy
5 | from geometry_msgs.msg import TransformStamped
6 | from rclpy.node import Node
7 | from rclpy.parameter import Parameter
8 | from tf2_ros import StaticTransformBroadcaster
9 |
10 |
class Tf2Broadcaster:
    """
    Helper that publishes static tf2 transforms through a given ROS 2 node.
    """

    def __init__(
        self,
        node: Node,
    ):

        self._node = node
        self.__tf2_broadcaster = StaticTransformBroadcaster(node=self._node)
        # Message instance is reused across broadcasts
        self._transform_stamped = TransformStamped()

    def broadcast_tf(
        self,
        parent_frame_id: str,
        child_frame_id: str,
        translation: Tuple[float, float, float],
        rotation: Tuple[float, float, float, float],
        xyzw: bool = True,
    ):
        """
        Broadcast transformation of the camera
        """

        msg = self._transform_stamped
        msg.header.frame_id = parent_frame_id
        msg.child_frame_id = child_frame_id
        msg.header.stamp = self._node.get_clock().now().to_msg()

        msg.transform.translation.x = float(translation[0])
        msg.transform.translation.y = float(translation[1])
        msg.transform.translation.z = float(translation[2])

        # Unpack the rotation according to the requested component order
        if xyzw:
            x, y, z, w = rotation
        else:
            w, x, y, z = rotation
        msg.transform.rotation.x = float(x)
        msg.transform.rotation.y = float(y)
        msg.transform.rotation.z = float(z)
        msg.transform.rotation.w = float(w)

        self.__tf2_broadcaster.sendTransform(msg)
54 |
55 |
class Tf2BroadcasterStandalone(Node, Tf2Broadcaster):
    """
    Standalone variant of `Tf2Broadcaster` that owns its own ROS 2 node.
    """

    def __init__(
        self,
        node_name: str = "drl_grasping_tf_broadcaster",
        use_sim_time: bool = True,
    ):

        # Initialise the ROS 2 context; tolerate it already being up
        try:
            rclpy.init()
        except Exception as e:
            if not rclpy.ok():
                sys.exit(f"ROS 2 context could not be initialised: {e}")

        Node.__init__(self, node_name)
        sim_time_parameter = Parameter(
            "use_sim_time", type_=Parameter.Type.BOOL, value=use_sim_time
        )
        self.set_parameters([sim_time_parameter])

        Tf2Broadcaster.__init__(self, node=self)
75 |
--------------------------------------------------------------------------------
/drl_grasping/envs/utils/tf2_listener.py:
--------------------------------------------------------------------------------
1 | import sys
2 | from typing import Optional
3 |
4 | import rclpy
5 | from geometry_msgs.msg import Transform
6 | from rclpy.node import Node
7 | from rclpy.parameter import Parameter
8 | from tf2_ros import Buffer, TransformListener
9 |
10 |
class Tf2Listener:
    """
    Helper that listens for tf2 transforms on behalf of a given ROS 2 node.
    """

    def __init__(
        self,
        node: Node,
    ):

        self._node = node

        # Create tf2 buffer and listener for transform lookup
        self.__tf2_buffer = Buffer()
        TransformListener(buffer=self.__tf2_buffer, node=node)

    def lookup_transform_sync(
        self, target_frame: str, source_frame: str, retry: bool = True
    ) -> Optional[Transform]:
        """
        Look up the latest transform from `source_frame` to `target_frame`.

        If the initial lookup fails and `retry` is enabled, block and poll
        until the transform becomes available or the ROS context shuts down;
        otherwise return None.
        """

        try:
            return self.__tf2_buffer.lookup_transform(
                target_frame=target_frame,
                source_frame=source_frame,
                time=rclpy.time.Time(),
            ).transform
        # Narrowed from a bare `except:` so that KeyboardInterrupt/SystemExit
        # are not swallowed while waiting for a transform
        except Exception:
            if not retry:
                return None

            while rclpy.ok():
                if self.__tf2_buffer.can_transform(
                    target_frame=target_frame,
                    source_frame=source_frame,
                    time=rclpy.time.Time(),
                    timeout=rclpy.time.Duration(seconds=1, nanoseconds=0),
                ):
                    return self.__tf2_buffer.lookup_transform(
                        target_frame=target_frame,
                        source_frame=source_frame,
                        time=rclpy.time.Time(),
                    ).transform

                self._node.get_logger().warn(
                    f'Lookup of transform from "{source_frame}"'
                    f' to "{target_frame}" failed, retrying...'
                )

            # ROS context shut down before the transform became available
            return None
54 |
55 |
class Tf2ListenerStandalone(Node, Tf2Listener):
    """
    Standalone variant of `Tf2Listener` that owns its own ROS 2 node.
    """

    def __init__(
        self,
        node_name: str = "drl_grasping_tf_listener",
        use_sim_time: bool = True,
    ):

        # Initialise the ROS 2 context; tolerate it already being up
        try:
            rclpy.init()
        except Exception as e:
            if not rclpy.ok():
                sys.exit(f"ROS 2 context could not be initialised: {e}")

        Node.__init__(self, node_name)
        sim_time_parameter = Parameter(
            "use_sim_time", type_=Parameter.Type.BOOL, value=use_sim_time
        )
        self.set_parameters([sim_time_parameter])

        Tf2Listener.__init__(self, node=self)
75 |
--------------------------------------------------------------------------------
/drl_grasping/envs/worlds/default.sdf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | 1.0 1.0 1.0
10 | false
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/drl_grasping/envs/worlds/lunar.sdf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | 0.0 0.0 0.0
10 | false
11 |
12 |
13 |
14 |
15 |
--------------------------------------------------------------------------------
/drl_grasping/utils/README.md:
--------------------------------------------------------------------------------
1 | # Disclaimer
2 |
3 | This submodule is adapted from [rl-baselines3-zoo](https://github.com/DLR-RM/rl-baselines3-zoo) and modified for use with `drl_grasping`.
4 |
--------------------------------------------------------------------------------
/drl_grasping/utils/__init__.py:
--------------------------------------------------------------------------------
1 | from .utils import (
2 | ALGOS,
3 | create_test_env,
4 | get_latest_run_id,
5 | get_saved_hyperparams,
6 | get_trained_models,
7 | get_wrapper_class,
8 | linear_schedule,
9 | )
10 |
--------------------------------------------------------------------------------
/examples/ex_evaluate.bash:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#### This script serves as an example of utilising `ros2 launch drl_grasping evaluate.launch.py` and configuring some of its most common arguments.
#### When this script is called, the corresponding launch string is printed to STDOUT. Therefore, feel free to modify and use such command directly.
#### To view all arguments, run `ros2 launch drl_grasping evaluate.launch.py --show-args`.

## Enable testing dataset
ros2 run drl_grasping dataset_set_test.bash 2> /dev/null

### Arguments
## Random seed to use for both the environment and agent (-1 for random)
SEED="77"

## Robot to use during training
ROBOT_MODEL="panda"
# ROBOT_MODEL="lunalab_summit_xl_gen"

## ID of the environment
## Reach
# ENV="Reach-Gazebo-v0"
# ENV="Reach-ColorImage-Gazebo-v0"
# ENV="Reach-DepthImage-Gazebo-v0"
# ENV="Reach-Octree-Gazebo-v0"
# ENV="Reach-OctreeWithIntensity-Gazebo-v0"
# ENV="Reach-OctreeWithColor-Gazebo-v0"
## Grasp
ENV="Grasp-Gazebo-v0"
# ENV="Grasp-Octree-Gazebo-v0"
# ENV="Grasp-OctreeWithIntensity-Gazebo-v0"
# ENV="Grasp-OctreeWithColor-Gazebo-v0"
## GraspPlanetary
# ENV="GraspPlanetary-Gazebo-v0"
# ENV="GraspPlanetary-MonoImage-Gazebo-v0"
# ENV="GraspPlanetary-ColorImage-Gazebo-v0"
# ENV="GraspPlanetary-DepthImage-Gazebo-v0"
# ENV="GraspPlanetary-DepthImageWithIntensity-Gazebo-v0"
# ENV="GraspPlanetary-DepthImageWithColor-Gazebo-v0"
# ENV="GraspPlanetary-Octree-Gazebo-v0"
# ENV="GraspPlanetary-OctreeWithIntensity-Gazebo-v0"
# ENV="GraspPlanetary-OctreeWithColor-Gazebo-v0"

## Selection of RL algorithm
ALGO="sac"
# ALGO="td3"
# ALGO="tqc"

## Path to logs directory
LOG_FOLDER="${PWD}/drl_grasping_training/train/${ENV}/logs"

## Path to reward log directory
REWARD_LOG="${PWD}/drl_grasping_training/evaluate/${ENV}"

## Load checkpoint instead of last model (# steps)
# LOAD_CHECKPOINT="0"

### Arguments
LAUNCH_ARGS=(
    "seed:=${SEED}"
    "robot_model:=${ROBOT_MODEL}"
    "env:=${ENV}"
    "algo:=${ALGO}"
    "log_folder:=${LOG_FOLDER}"
    "reward_log:=${REWARD_LOG}"
    "stochastic:=false"
    "n_episodes:=200"
    "load_best:=false"
    "enable_rviz:=true"
    "log_level:=error"
)
# Default to empty so the check also works when the variable is left commented out
if [[ -n "${LOAD_CHECKPOINT:-}" ]]; then
    LAUNCH_ARGS+=("load_checkpoint:=${LOAD_CHECKPOINT}")
fi

### Launch script
## Keep every argument as a separate array element so that values containing
## whitespace (e.g. a ${PWD} with spaces) survive intact
LAUNCH_CMD=(
    ros2 launch -a
    drl_grasping evaluate.launch.py
    "${LAUNCH_ARGS[@]}"
)

echo -e "\033[1;30m${LAUNCH_CMD[*]}\033[0m"

exec "${LAUNCH_CMD[@]}"
84 |
--------------------------------------------------------------------------------
/examples/ex_evaluate_pretrained_agent.bash:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#### This script serves as an example of utilising `ros2 launch drl_grasping evaluate.launch.py` on some of the already pretrained agents.
#### When this script is called, the corresponding launch string is printed to STDOUT. Therefore, feel free to modify and use such command directly.

## Enable testing dataset
ros2 run drl_grasping dataset_set_test.bash 2> /dev/null

### Arguments
## Random seed to use for both the environment and agent (-1 for random)
SEED="77"

## Robot to use during training
ROBOT_MODEL="panda"
# ROBOT_MODEL="lunalab_summit_xl_gen"

## ID of the environment
## Reach
# ENV="Reach-Gazebo-v0"
# ENV="Reach-ColorImage-Gazebo-v0"
# ENV="Reach-DepthImage-Gazebo-v0"
# ENV="Reach-Octree-Gazebo-v0"
# ENV="Reach-OctreeWithIntensity-Gazebo-v0"
# ENV="Reach-OctreeWithColor-Gazebo-v0"
## Grasp
ENV="Grasp-Gazebo-v0"
# ENV="Grasp-Octree-Gazebo-v0"
# ENV="Grasp-OctreeWithIntensity-Gazebo-v0"
# ENV="Grasp-OctreeWithColor-Gazebo-v0"
## GraspPlanetary
# ENV="GraspPlanetary-Gazebo-v0"
# ENV="GraspPlanetary-MonoImage-Gazebo-v0"
# ENV="GraspPlanetary-ColorImage-Gazebo-v0"
# ENV="GraspPlanetary-DepthImage-Gazebo-v0"
# ENV="GraspPlanetary-DepthImageWithIntensity-Gazebo-v0"
# ENV="GraspPlanetary-DepthImageWithColor-Gazebo-v0"
# ENV="GraspPlanetary-Octree-Gazebo-v0"
# ENV="GraspPlanetary-OctreeWithIntensity-Gazebo-v0"
# ENV="GraspPlanetary-OctreeWithColor-Gazebo-v0"

## Selection of RL algorithm
ALGO="sac"
# ALGO="td3"
# ALGO="tqc"

## Path to logs directory
LOG_FOLDER="$(ros2 pkg prefix --share drl_grasping)/pretrained_agents"

## Path to reward log directory
REWARD_LOG="${PWD}/drl_grasping_training/evaluate/${ENV}"

## Load checkpoint instead of last model (# steps)
# LOAD_CHECKPOINT="0"

### Arguments
LAUNCH_ARGS=(
    "seed:=${SEED}"
    "robot_model:=${ROBOT_MODEL}"
    "env:=${ENV}"
    "algo:=${ALGO}"
    "log_folder:=${LOG_FOLDER}"
    "reward_log:=${REWARD_LOG}"
    "stochastic:=false"
    "n_episodes:=200"
    "load_best:=false"
    "enable_rviz:=true"
    "log_level:=error"
)
# Default to empty so the check also works when the variable is left commented out
if [[ -n "${LOAD_CHECKPOINT:-}" ]]; then
    LAUNCH_ARGS+=("load_checkpoint:=${LOAD_CHECKPOINT}")
fi

### Launch script
## Keep every argument as a separate array element so that values containing
## whitespace (e.g. a share path with spaces) survive intact
LAUNCH_CMD=(
    ros2 launch -a
    drl_grasping evaluate.launch.py
    "${LAUNCH_ARGS[@]}"
)

echo -e "\033[1;30m${LAUNCH_CMD[*]}\033[0m"

exec "${LAUNCH_CMD[@]}"
83 |
--------------------------------------------------------------------------------
/examples/ex_evaluate_real.bash:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#### This script serves as an example of utilising `ros2 launch drl_grasping evaluate.launch.py` on a real robot and configuring some of its most common arguments.
#### When this script is called, the corresponding launch string is printed to STDOUT. Therefore, feel free to modify and use such command directly.
#### To view all arguments, run `ros2 launch drl_grasping evaluate.launch.py --show-args`.

## Use the correct runtime
export DRL_GRASPING_REAL_EVALUATION=True

### Arguments
## Random seed to use for both the environment and agent (-1 for random)
SEED="77"

## Robot to use during training
ROBOT_MODEL="panda"
# ROBOT_MODEL="lunalab_summit_xl_gen"

## ID of the environment
## Reach
# ENV="Reach-v0"
# ENV="Reach-ColorImage-v0"
# ENV="Reach-DepthImage-v0"
# ENV="Reach-Octree-v0"
# ENV="Reach-OctreeWithIntensity-v0"
# ENV="Reach-OctreeWithColor-v0"
## Grasp
# ENV="Grasp-v0"
# ENV="Grasp-Octree-v0"
# ENV="Grasp-OctreeWithIntensity-v0"
ENV="Grasp-OctreeWithColor-v0"
## GraspPlanetary
# ENV="GraspPlanetary-v0"
# ENV="GraspPlanetary-MonoImage-v0"
# ENV="GraspPlanetary-ColorImage-v0"
# ENV="GraspPlanetary-DepthImage-v0"
# ENV="GraspPlanetary-DepthImageWithIntensity-v0"
# ENV="GraspPlanetary-DepthImageWithColor-v0"
# ENV="GraspPlanetary-Octree-v0"
# ENV="GraspPlanetary-OctreeWithIntensity-v0"
# ENV="GraspPlanetary-OctreeWithColor-v0"

## Selection of RL algorithm
ALGO="sac"
# ALGO="td3"
# ALGO="tqc"

## Path to logs directory
LOG_FOLDER="${PWD}/drl_grasping_training/train/${ENV}/logs"

## Path to reward log directory
REWARD_LOG="${PWD}/drl_grasping_training/evaluate/${ENV}"

## Load checkpoint instead of last model (# steps)
# LOAD_CHECKPOINT="0"

#### Launch script ####
### Arguments
LAUNCH_ARGS=(
    "enable_rviz:=true"
    "log_level:=warn"
)

### Launch script
## Keep every argument as a separate array element so that values containing
## whitespace survive intact
LAUNCH_CMD=(
    ros2 launch -a
    drl_grasping "real_${ROBOT_MODEL}.launch.py"
    "${LAUNCH_ARGS[@]}"
)

echo -e "\033[1;30m${LAUNCH_CMD[*]}\033[0m"

terminate_child_processes() {
    echo "Signal received. Terminating all child processes..."
    for job in $(jobs -p); do
        kill -TERM "$job" 2>/dev/null || echo -e "\033[31m$job could not be terminated...\033[0m" >&2
    done
}
## Install the trap before spawning the background launch so that a signal
## arriving early cannot leave the child orphaned
trap terminate_child_processes SIGINT SIGTERM SIGQUIT

## Run the launch script in the background
## (`exec cmd &` was a no-op: a backgrounded command runs in a subshell anyway)
"${LAUNCH_CMD[@]}" &

#### Evaluation node ####
# Note: Evaluation node is started separately in order to enable user input
### Arguments
NODE_ARGS=(
    "--env" "${ENV}"
    "--env-kwargs" "robot_model:\"${ROBOT_MODEL}\""
    "--algo" "${ALGO}"
    "--seed" "${SEED}"
    "--num-threads" "-1"
    "--n-episodes" "200"
    "--stochastic" "false"
    "--log-folder" "${LOG_FOLDER}"
    "--reward-log" "${REWARD_LOG}"
    "--exp-id" "0"
    "--load-best" "false"
    "--norm-reward" "false"
    "--no-render" "true"
    "--verbose" "1"
)
# Default to empty so the check also works when the variable is left commented out
if [[ -n "${LOAD_CHECKPOINT:-}" ]]; then
    NODE_ARGS+=("--load-checkpoint" "${LOAD_CHECKPOINT}")
fi

### ROS arguments
NODE_ARGS+=(
    "--ros-args"
    "--log-level" "warn"
    "--param" "use_sim_time:=false"
    "--remap" "/rgbd_camera/points:=/lunalab_summit_xl_gen_d435/depth/color/points"
    "--remap" "/rgbd_camera/image:=/lunalab_summit_xl_gen_d435/color/image_raw"
    "--remap" "/rgbd_camera/depth_image:=/lunalab_summit_xl_gen_d435/depth/image_rect_raw"
)

### Run the node
NODE_CMD=(
    ros2 run
    drl_grasping evaluate.py
    "${NODE_ARGS[@]}"
)

echo -e "\033[1;30m${NODE_CMD[*]}\033[0m"

## Run in the foreground (not via `exec`) so that the signal trap above stays
## active and can clean up the background launch process
"${NODE_CMD[@]}"
126 |
--------------------------------------------------------------------------------
/examples/ex_optimize.bash:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#### This script serves as an example of utilising `ros2 launch drl_grasping optimize.launch.py` and configuring some of its most common arguments.
#### When this script is called, the corresponding launch string is printed to STDOUT. Therefore, feel free to modify and use such command directly.
#### To view all arguments, run `ros2 launch drl_grasping optimize.launch.py --show-args`.

## Enable training dataset
ros2 run drl_grasping dataset_set_train.bash 2> /dev/null

### Arguments
## Random seed to use for both the environment and agent (-1 for random)
SEED="69"

## Robot to use during training
ROBOT_MODEL="panda"
# ROBOT_MODEL="lunalab_summit_xl_gen"

## ID of the environment
## Reach
# ENV="Reach-Gazebo-v0"
# ENV="Reach-ColorImage-Gazebo-v0"
# ENV="Reach-DepthImage-Gazebo-v0"
# ENV="Reach-Octree-Gazebo-v0"
# ENV="Reach-OctreeWithIntensity-Gazebo-v0"
# ENV="Reach-OctreeWithColor-Gazebo-v0"
## Grasp
ENV="Grasp-Gazebo-v0"
# ENV="Grasp-Octree-Gazebo-v0"
# ENV="Grasp-OctreeWithIntensity-Gazebo-v0"
# ENV="Grasp-OctreeWithColor-Gazebo-v0"
## GraspPlanetary
# ENV="GraspPlanetary-Gazebo-v0"
# ENV="GraspPlanetary-MonoImage-Gazebo-v0"
# ENV="GraspPlanetary-ColorImage-Gazebo-v0"
# ENV="GraspPlanetary-DepthImage-Gazebo-v0"
# ENV="GraspPlanetary-DepthImageWithIntensity-Gazebo-v0"
# ENV="GraspPlanetary-DepthImageWithColor-Gazebo-v0"
# ENV="GraspPlanetary-Octree-Gazebo-v0"
# ENV="GraspPlanetary-OctreeWithIntensity-Gazebo-v0"
# ENV="GraspPlanetary-OctreeWithColor-Gazebo-v0"

## Selection of RL algorithm
ALGO="sac"
# ALGO="td3"
# ALGO="tqc"

## Path to logs directory
LOG_FOLDER="${PWD}/drl_grasping_training/optimize/${ENV}/logs"

## Path to tensorboard logs directory
TENSORBOARD_LOG="${PWD}/drl_grasping_training/optimize/${ENV}/tensorboard_logs"

## Path to a replay buffer that should be loaded before each trial begins (`**/*.pkl`)
# PRELOAD_REPLAY_BUFFER=""

### Arguments
LAUNCH_ARGS=(
    "seed:=${SEED}"
    "robot_model:=${ROBOT_MODEL}"
    "env:=${ENV}"
    "algo:=${ALGO}"
    "log_folder:=${LOG_FOLDER}"
    "tensorboard_log:=${TENSORBOARD_LOG}"
    "n_timesteps:=1000000"
    "sampler:=tpe"
    "pruner:=median"
    "n_trials:=20"
    "n_startup_trials:=5"
    "n_evaluations:=4"
    "eval_episodes:=20"
    "log_interval:=-1"
    "enable_rviz:=true"
    "log_level:=fatal"
)
# Default to empty so the check also works when the variable is left commented out
if [[ -n "${PRELOAD_REPLAY_BUFFER:-}" ]]; then
    LAUNCH_ARGS+=("preload_replay_buffer:=${PRELOAD_REPLAY_BUFFER}")
fi

### Launch script
## Keep every argument as a separate array element so that values containing
## whitespace (e.g. a ${PWD} with spaces) survive intact
LAUNCH_CMD=(
    ros2 launch -a
    drl_grasping optimize.launch.py
    "${LAUNCH_ARGS[@]}"
)

echo -e "\033[1;30m${LAUNCH_CMD[*]}\033[0m"

exec "${LAUNCH_CMD[@]}"
89 |
--------------------------------------------------------------------------------
/examples/ex_random_agent.bash:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#### This script serves as an example of utilising `ros2 launch drl_grasping random_agent.launch.py` and configuring some of its most common arguments.
#### When this script is called, the corresponding launch string is printed to STDOUT. Therefore, feel free to modify and use such command directly.
#### To view all arguments, run `ros2 launch drl_grasping random_agent.launch.py --show-args`.

### Arguments
## Random seed to use for both the environment and agent (-1 for random)
SEED="42"

## Robot to use during training
# ROBOT_MODEL="panda"
ROBOT_MODEL="lunalab_summit_xl_gen"

## ID of the environment
## Reach
# ENV="Reach-Gazebo-v0"
# ENV="Reach-ColorImage-Gazebo-v0"
# ENV="Reach-DepthImage-Gazebo-v0"
# ENV="Reach-Octree-Gazebo-v0"
# ENV="Reach-OctreeWithIntensity-Gazebo-v0"
# ENV="Reach-OctreeWithColor-Gazebo-v0"
## Grasp
# ENV="Grasp-Gazebo-v0"
# ENV="Grasp-Octree-Gazebo-v0"
# ENV="Grasp-OctreeWithIntensity-Gazebo-v0"
# ENV="Grasp-OctreeWithColor-Gazebo-v0"
## GraspPlanetary
# ENV="GraspPlanetary-Gazebo-v0"
# ENV="GraspPlanetary-MonoImage-Gazebo-v0"
# ENV="GraspPlanetary-ColorImage-Gazebo-v0"
# ENV="GraspPlanetary-DepthImage-Gazebo-v0"
# ENV="GraspPlanetary-DepthImageWithIntensity-Gazebo-v0"
# ENV="GraspPlanetary-DepthImageWithColor-Gazebo-v0"
# ENV="GraspPlanetary-Octree-Gazebo-v0"
ENV="GraspPlanetary-OctreeWithIntensity-Gazebo-v0"
# ENV="GraspPlanetary-OctreeWithColor-Gazebo-v0"

### Arguments
LAUNCH_ARGS=(
    "seed:=${SEED}"
    "robot_model:=${ROBOT_MODEL}"
    "env:=${ENV}"
    "check_env:=false"
    "render:=true"
    "enable_rviz:=true"
    "log_level:=warn"
)

### Launch script
## Keep every argument as a separate array element so that values containing
## whitespace survive intact
LAUNCH_CMD=(
    ros2 launch -a
    drl_grasping random_agent.launch.py
    "${LAUNCH_ARGS[@]}"
)

echo -e "\033[1;30m${LAUNCH_CMD[*]}\033[0m"

exec "${LAUNCH_CMD[@]}"
60 |
--------------------------------------------------------------------------------
/examples/ex_train.bash:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#### This script serves as an example of utilising `ros2 launch drl_grasping train.launch.py` and configuring some of its most common arguments.
#### When this script is called, the corresponding launch string is printed to STDOUT. Therefore, feel free to modify and use such command directly.
#### To view all arguments, run `ros2 launch drl_grasping train.launch.py --show-args`.

## Enable training dataset
ros2 run drl_grasping dataset_set_train.bash 2> /dev/null

### Arguments
## Random seed to use for both the environment and agent (-1 for random)
SEED="42"

## Robot to use during training
ROBOT_MODEL="panda"
# ROBOT_MODEL="lunalab_summit_xl_gen"

## ID of the environment
## Reach
# ENV="Reach-Gazebo-v0"
# ENV="Reach-ColorImage-Gazebo-v0"
# ENV="Reach-DepthImage-Gazebo-v0"
# ENV="Reach-Octree-Gazebo-v0"
# ENV="Reach-OctreeWithIntensity-Gazebo-v0"
# ENV="Reach-OctreeWithColor-Gazebo-v0"
## Grasp
ENV="Grasp-Gazebo-v0"
# ENV="Grasp-Octree-Gazebo-v0"
# ENV="Grasp-OctreeWithIntensity-Gazebo-v0"
# ENV="Grasp-OctreeWithColor-Gazebo-v0"
## GraspPlanetary
# ENV="GraspPlanetary-Gazebo-v0"
# ENV="GraspPlanetary-MonoImage-Gazebo-v0"
# ENV="GraspPlanetary-ColorImage-Gazebo-v0"
# ENV="GraspPlanetary-DepthImage-Gazebo-v0"
# ENV="GraspPlanetary-DepthImageWithIntensity-Gazebo-v0"
# ENV="GraspPlanetary-DepthImageWithColor-Gazebo-v0"
# ENV="GraspPlanetary-Octree-Gazebo-v0"
# ENV="GraspPlanetary-OctreeWithIntensity-Gazebo-v0"
# ENV="GraspPlanetary-OctreeWithColor-Gazebo-v0"

## Selection of RL algorithm
ALGO="sac"
# ALGO="td3"
# ALGO="tqc"

## Path to logs directory
LOG_FOLDER="${PWD}/drl_grasping_training/train/${ENV}/logs"

## Path to tensorboard logs directory
TENSORBOARD_LOG="${PWD}/drl_grasping_training/train/${ENV}/tensorboard_logs"

## Path to a trained agent to continue training (`**.zip`)
# TRAINED_AGENT_SESSION="1"
# TRAINED_AGENT_STEPS="0"
# TRAINED_AGENT="${LOG_FOLDER}/${ALGO}/${ENV}_${TRAINED_AGENT_SESSION}/rl_model_${TRAINED_AGENT_STEPS}_steps.zip"

## Path to a replay buffer that should be loaded before the training begins (`**.pkl`)
# PRELOAD_REPLAY_BUFFER=""

### Arguments
LAUNCH_ARGS=(
    "seed:=${SEED}"
    "robot_model:=${ROBOT_MODEL}"
    "env:=${ENV}"
    "algo:=${ALGO}"
    "log_folder:=${LOG_FOLDER}"
    "tensorboard_log:=${TENSORBOARD_LOG}"
    "save_freq:=10000"
    "save_replay_buffer:=true"
    "log_interval:=-1"
    "eval_freq:=10000"
    "eval_episodes:=20"
    "enable_rviz:=false"
    "log_level:=fatal"
)
# Default to empty so the checks also work when the variables are left commented out
if [[ -n "${TRAINED_AGENT:-}" ]]; then
    LAUNCH_ARGS+=("trained_agent:=${TRAINED_AGENT}")
fi
if [[ -n "${PRELOAD_REPLAY_BUFFER:-}" ]]; then
    LAUNCH_ARGS+=("preload_replay_buffer:=${PRELOAD_REPLAY_BUFFER}")
fi

### Launch script
## Keep every argument as a separate array element so that values containing
## whitespace (e.g. a ${PWD} with spaces) survive intact
LAUNCH_CMD=(
    ros2 launch -a
    drl_grasping train.launch.py
    "${LAUNCH_ARGS[@]}"
)

echo -e "\033[1;30m${LAUNCH_CMD[*]}\033[0m"

exec "${LAUNCH_CMD[@]}"
94 |
--------------------------------------------------------------------------------
/examples/ex_train_dreamerv2.bash:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#### This script serves as an example of utilising `ros2 launch drl_grasping train_dreamerv2.launch.py` and configuring some of its most common arguments.
#### When this script is called, the corresponding launch string is printed to STDOUT. Therefore, feel free to modify and use such command directly.
#### To view all arguments, run `ros2 launch drl_grasping train_dreamerv2.launch.py --show-args`.

## Enable training dataset
ros2 run drl_grasping dataset_set_train.bash 2> /dev/null

### Arguments
## Random seed to use for both the environment and agent (-1 for random)
SEED="42"

## Robot to use during training
# ROBOT_MODEL="panda"
ROBOT_MODEL="lunalab_summit_xl_gen"

## ID of the environment
## Reach
# ENV="Reach-ColorImage-Gazebo-v0"
## GraspPlanetary
ENV="GraspPlanetary-ColorImage-Gazebo-v0"

## Path to logs directory
LOG_FOLDER="${PWD}/drl_grasping_training/train/${ENV}/logs"

### Arguments
LAUNCH_ARGS=(
    "seed:=${SEED}"
    "robot_model:=${ROBOT_MODEL}"
    "env:=${ENV}"
    "log_folder:=${LOG_FOLDER}"
    "eval_freq:=10000"
    "enable_rviz:=false"
    "log_level:=fatal"
)

### Launch script
## Keep every argument as a separate array element so that values containing
## whitespace (e.g. a ${PWD} with spaces) survive intact
LAUNCH_CMD=(
    ros2 launch -a
    drl_grasping train_dreamerv2.launch.py
    "${LAUNCH_ARGS[@]}"
)

echo -e "\033[1;30m${LAUNCH_CMD[*]}\033[0m"

exec "${LAUNCH_CMD[@]}"
48 |
--------------------------------------------------------------------------------
/launch/random_agent.launch.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env -S ros2 launch
2 | """Test environment by running a random agent"""
3 |
4 | from os import path
5 | from typing import List
6 |
7 | from ament_index_python.packages import get_package_share_directory
8 | from launch_ros.actions import Node
9 | from launch_ros.substitutions import FindPackageShare
10 |
11 | from launch import LaunchDescription
12 | from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
13 | from launch.launch_description_sources import PythonLaunchDescriptionSource
14 | from launch.substitutions import LaunchConfiguration, PathJoinSubstitution
15 |
16 |
def generate_launch_description() -> LaunchDescription:
    """
    Generate the launch description for testing an environment with a random agent.

    Includes the simulation bring-up (`sim.launch.py` from this package) and
    starts the `random_agent.py` node with arguments forwarded from the
    declared launch arguments.
    """

    # Declare all launch arguments
    declared_arguments = generate_declared_arguments()

    # Get substitution for all arguments
    robot_model = LaunchConfiguration("robot_model")
    robot_name = LaunchConfiguration("robot_name")
    prefix = LaunchConfiguration("prefix")
    env = LaunchConfiguration("env")
    env_kwargs = LaunchConfiguration("env_kwargs")
    n_episodes = LaunchConfiguration("n_episodes")
    seed = LaunchConfiguration("seed")
    check_env = LaunchConfiguration("check_env")
    render = LaunchConfiguration("render")
    enable_rviz = LaunchConfiguration("enable_rviz")
    rviz_config = LaunchConfiguration("rviz_config")
    use_sim_time = LaunchConfiguration("use_sim_time")
    log_level = LaunchConfiguration("log_level")

    # List of included launch descriptions
    launch_descriptions = [
        # Configure and setup interface with simulated robots inside Ignition Gazebo
        IncludeLaunchDescription(
            PythonLaunchDescriptionSource(
                PathJoinSubstitution(
                    [
                        FindPackageShare("drl_grasping"),
                        "launch",
                        "sim",
                        "sim.launch.py",
                    ]
                )
            ),
            launch_arguments=[
                ("robot_model", robot_model),
                ("robot_name", robot_name),
                ("prefix", prefix),
                ("enable_rviz", enable_rviz),
                ("rviz_config", rviz_config),
                ("use_sim_time", use_sim_time),
                ("log_level", log_level),
            ],
        ),
    ]

    # List of nodes to be launched
    nodes = [
        # Node that runs a random agent against the selected environment
        Node(
            package="drl_grasping",
            executable="random_agent.py",
            output="log",
            arguments=[
                "--env",
                env,
                "--env-kwargs",
                env_kwargs,
                # Make sure `robot_model` is specified (with priority)
                # NOTE: a later `--env-kwargs` entry overrides any
                # `robot_model` the user passed via `env_kwargs`
                "--env-kwargs",
                ['robot_model:"', robot_model, '"'],
                "--n-episodes",
                n_episodes,
                "--seed",
                seed,
                "--check-env",
                check_env,
                "--render",
                render,
                # Remaining arguments are consumed by the ROS 2 middleware
                "--ros-args",
                "--log-level",
                log_level,
            ],
            parameters=[{"use_sim_time": use_sim_time}],
        ),
    ]

    return LaunchDescription(declared_arguments + launch_descriptions + nodes)
95 |
96 |
def generate_declared_arguments() -> List[DeclareLaunchArgument]:
    """
    Build the list of launch arguments declared by this launch script.
    """

    declared_arguments: List[DeclareLaunchArgument] = []

    # Robot model and its name
    declared_arguments.append(
        DeclareLaunchArgument(
            "robot_model",
            default_value="lunalab_summit_xl_gen",
            description="Name of the robot to use. Supported options are: 'panda' and 'lunalab_summit_xl_gen'.",
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "robot_name",
            default_value=LaunchConfiguration("robot_model"),
            description="Name of the robot.",
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "prefix",
            default_value="robot_",
            description="Prefix for all robot entities. If modified, then joint names in the configuration of controllers must also be updated.",
        )
    )

    # Environment and its parameters
    declared_arguments.append(
        DeclareLaunchArgument(
            "env",
            default_value="GraspPlanetary-OctreeWithColor-Gazebo-v0",
            description="Environment ID",
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "env_kwargs",
            default_value=['robot_model:"', LaunchConfiguration("robot_model"), '"'],
            description="Optional keyword argument to pass to the env constructor.",
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "n_episodes",
            default_value="1000",
            description="Overwrite the number of episodes.",
        )
    )

    # Random seed
    declared_arguments.append(
        DeclareLaunchArgument(
            "seed",
            default_value="69",
            description="Random generator seed.",
        )
    )

    # Flag to check environment
    declared_arguments.append(
        DeclareLaunchArgument(
            "check_env",
            default_value="True",
            description="Flag to check the environment before running the random agent.",
        )
    )

    # Flag to enable rendering
    declared_arguments.append(
        DeclareLaunchArgument(
            "render",
            default_value="True",
            description="Flag to enable rendering.",
        )
    )

    # Miscellaneous
    declared_arguments.append(
        DeclareLaunchArgument(
            "enable_rviz", default_value="true", description="Flag to enable RViz2."
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "rviz_config",
            default_value=path.join(
                get_package_share_directory("drl_grasping"), "rviz", "drl_grasping.rviz"
            ),
            description="Path to configuration for RViz2.",
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "use_sim_time",
            default_value="true",
            description="If true, use simulated clock.",
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "log_level",
            default_value="error",
            description="The level of logging that is applied to all ROS 2 nodes launched by this script.",
        )
    )

    return declared_arguments
175 |
--------------------------------------------------------------------------------
/launch/real/real_lunalab_summit_xl_gen.launch.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env -S ros2 launch
2 | """Configure and setup interface with real Summit XL-GEN (LunaLab variant)"""
3 |
4 | from os import path
5 | from typing import List
6 |
7 | from ament_index_python.packages import get_package_share_directory
8 | from launch_ros.actions import Node
9 | from launch_ros.substitutions import FindPackageShare
10 |
11 | from launch import LaunchDescription
12 | from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription, LogInfo
13 | from launch.launch_description_sources import PythonLaunchDescriptionSource
14 | from launch.substitutions import LaunchConfiguration, PathJoinSubstitution
15 |
16 |
def generate_launch_description() -> LaunchDescription:
    """
    Assemble the launch description for interfacing with the real Summit XL-GEN
    (LunaLab variant): declared launch arguments, the MoveIt 2 `move_group`
    launch and static transform publishers for the world and camera frames.
    """

    # Declare all launch arguments
    declared_arguments = generate_declared_arguments()

    # Get substitution for all arguments
    robot_name = LaunchConfiguration("robot_name")
    prefix = LaunchConfiguration("prefix")
    enable_rviz = LaunchConfiguration("enable_rviz")
    rviz_config = LaunchConfiguration("rviz_config")
    use_sim_time = LaunchConfiguration("use_sim_time")
    log_level = LaunchConfiguration("log_level")

    # List of included launch descriptions
    launch_descriptions = [
        # Launch move_group of MoveIt 2
        IncludeLaunchDescription(
            PythonLaunchDescriptionSource(
                PathJoinSubstitution(
                    [
                        # Pass the package name as a plain string for
                        # consistency with the other launch scripts (it was
                        # needlessly wrapped in a one-element list)
                        FindPackageShare("lunalab_summit_xl_gen_moveit_config"),
                        "launch",
                        "move_group_ros1_controllers.launch.py",
                    ]
                )
            ),
            launch_arguments=[
                ("name", robot_name),
                ("prefix", prefix),
                ("enable_rviz", enable_rviz),
                ("rviz_config", rviz_config),
                ("use_sim_time", use_sim_time),
                ("log_level", log_level),
            ],
        ),
    ]

    # List of nodes to be launched
    nodes = [
        # Static tf for world
        # Note: the legacy positional argument order of
        # static_transform_publisher is used here
        # (x y z yaw pitch roll frame_id child_frame_id)
        Node(
            package="tf2_ros",
            executable="static_transform_publisher",
            output="log",
            arguments=[
                # "--x",
                "0",
                # "--y",
                "0",
                # "--z",
                "0",
                # "--yaw",
                "0",
                # "--pitch",
                "0",
                # "--roll",
                "0",
                # "--frame-id",
                "drl_grasping_world",
                # "--child-frame-id",
                [prefix, "summit_xl_base_footprint"],
                "--ros-args",
                "--log-level",
                log_level,
            ],
            parameters=[{"use_sim_time": use_sim_time}],
        ),
        # Static tf for camera
        Node(
            package="tf2_ros",
            executable="static_transform_publisher",
            output="log",
            arguments=[
                # "--x",
                "0.222518",
                # "--y",
                "0.0152645",
                # "--z",
                "0.0862207",
                # "--yaw",
                "-0.00926349",
                # "--pitch",
                "0.730585",
                # "--roll",
                "0.0803686",
                # "--frame-id",
                [prefix, "j2s7s300_link_base"],
                # "--child-frame-id",
                "rs_d435",
                "--ros-args",
                "--log-level",
                log_level,
            ],
            parameters=[{"use_sim_time": use_sim_time}],
        ),
    ]

    # List for logging
    logs = [
        LogInfo(
            msg=[
                "Configuring drl_grasping for real Summit XL-GEN (LunaLab variant)",
            ],
        )
    ]

    return LaunchDescription(declared_arguments + launch_descriptions + nodes + logs)
124 |
125 |
def generate_declared_arguments() -> List[DeclareLaunchArgument]:
    """
    Build the list of launch arguments declared by this launch script.
    """

    declared_arguments: List[DeclareLaunchArgument] = []

    # Naming of the world and robot
    declared_arguments.append(
        DeclareLaunchArgument(
            "robot_name",
            default_value="lunalab_summit_xl_gen",
            description="Name of the robot.",
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "prefix",
            default_value="robot_",
            description="Prefix for all robot entities. If modified, then joint names in the configuration of controllers must also be updated.",
        )
    )

    # Miscellaneous
    declared_arguments.append(
        DeclareLaunchArgument(
            "enable_rviz", default_value="true", description="Flag to enable RViz2."
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "rviz_config",
            default_value=path.join(
                get_package_share_directory("drl_grasping"),
                "rviz",
                "drl_grasping_real_evaluation.rviz",
            ),
            description="Path to configuration for RViz2.",
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "use_sim_time",
            default_value="false",
            description="If true, use simulated clock.",
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "log_level",
            default_value="warn",
            description="The level of logging that is applied to all ROS 2 nodes launched by this script.",
        )
    )

    return declared_arguments
167 |
--------------------------------------------------------------------------------
/launch/train_dreamerv2.launch.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env -S ros2 launch
2 | """Train an RL agent"""
3 |
4 | from os import path
5 | from typing import List
6 |
7 | from ament_index_python.packages import get_package_share_directory
8 | from launch_ros.actions import Node
9 | from launch_ros.substitutions import FindPackageShare
10 |
11 | from launch import LaunchDescription
12 | from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
13 | from launch.launch_description_sources import PythonLaunchDescriptionSource
14 | from launch.substitutions import LaunchConfiguration, PathJoinSubstitution
15 |
16 |
def generate_launch_description() -> LaunchDescription:
    """
    Assemble the launch description for training a DreamerV2 agent: the
    declared launch arguments, the simulation interface (`sim.launch.py`) and
    the `train_dreamerv2.py` node.
    """

    # Declare all launch arguments
    declared_arguments = generate_declared_arguments()

    # Get substitution for all arguments
    robot_model = LaunchConfiguration("robot_model")
    robot_name = LaunchConfiguration("robot_name")
    prefix = LaunchConfiguration("prefix")
    env = LaunchConfiguration("env")
    env_kwargs = LaunchConfiguration("env_kwargs")
    seed = LaunchConfiguration("seed")
    log_folder = LaunchConfiguration("log_folder")
    eval_freq = LaunchConfiguration("eval_freq")
    verbose = LaunchConfiguration("verbose")
    enable_rviz = LaunchConfiguration("enable_rviz")
    rviz_config = LaunchConfiguration("rviz_config")
    use_sim_time = LaunchConfiguration("use_sim_time")
    log_level = LaunchConfiguration("log_level")

    # List of included launch descriptions
    launch_descriptions = [
        # Configure and setup interface with simulated robots inside Ignition Gazebo
        IncludeLaunchDescription(
            PythonLaunchDescriptionSource(
                PathJoinSubstitution(
                    [
                        FindPackageShare("drl_grasping"),
                        "launch",
                        "sim",
                        "sim.launch.py",
                    ]
                )
            ),
            launch_arguments=[
                ("robot_model", robot_model),
                ("robot_name", robot_name),
                ("prefix", prefix),
                ("enable_rviz", enable_rviz),
                ("rviz_config", rviz_config),
                ("use_sim_time", use_sim_time),
                ("log_level", log_level),
            ],
        ),
    ]

    # List of nodes to be launched
    nodes = [
        # Train node
        Node(
            package="drl_grasping",
            executable="train_dreamerv2.py",
            output="log",
            arguments=[
                "--env",
                env,
                "--env-kwargs",
                env_kwargs,
                # Make sure `robot_model` is specified (with priority)
                # NOTE(review): relies on the later `--env-kwargs` occurrence
                # overriding the earlier one in the script's parser — confirm
                "--env-kwargs",
                ['robot_model:"', robot_model, '"'],
                "--seed",
                seed,
                "--log-folder",
                log_folder,
                "--eval-freq",
                eval_freq,
                "--verbose",
                verbose,
                "--ros-args",
                "--log-level",
                log_level,
            ],
            parameters=[{"use_sim_time": use_sim_time}],
        ),
    ]

    return LaunchDescription(declared_arguments + launch_descriptions + nodes)
95 |
96 |
def generate_declared_arguments() -> List[DeclareLaunchArgument]:
    """
    Build the list of launch arguments declared by this launch script.
    """

    declared_arguments: List[DeclareLaunchArgument] = []

    # Robot model and its name
    declared_arguments.append(
        DeclareLaunchArgument(
            "robot_model",
            default_value="lunalab_summit_xl_gen",
            description="Name of the robot to use. Supported options are: 'panda' and 'lunalab_summit_xl_gen'.",
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "robot_name",
            default_value=LaunchConfiguration("robot_model"),
            description="Name of the robot.",
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "prefix",
            default_value="robot_",
            description="Prefix for all robot entities. If modified, then joint names in the configuration of controllers must also be updated.",
        )
    )

    # Environment and its parameters
    declared_arguments.append(
        DeclareLaunchArgument(
            "env",
            default_value="GraspPlanetary-OctreeWithColor-Gazebo-v0",
            description="Environment ID",
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "env_kwargs",
            default_value=['robot_model:"', LaunchConfiguration("robot_model"), '"'],
            description="Optional keyword argument to pass to the env constructor.",
        )
    )

    # Random seed
    declared_arguments.append(
        DeclareLaunchArgument(
            "seed",
            default_value="-1",
            description="Random generator seed.",
        )
    )

    # Logging
    declared_arguments.append(
        DeclareLaunchArgument(
            "log_folder",
            default_value="logs",
            description="Path to the log directory.",
        )
    )

    # Evaluation
    declared_arguments.append(
        DeclareLaunchArgument(
            "eval_freq",
            default_value="-1",
            description="Evaluate the agent every n steps (if negative, no evaluation).",
        )
    )

    # Verbosity
    declared_arguments.append(
        DeclareLaunchArgument(
            "verbose",
            default_value="1",
            description="Verbose mode (0: no output, 1: INFO).",
        )
    )

    # Miscellaneous
    declared_arguments.append(
        DeclareLaunchArgument(
            "enable_rviz", default_value="true", description="Flag to enable RViz2."
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "rviz_config",
            default_value=path.join(
                get_package_share_directory("drl_grasping"), "rviz", "drl_grasping.rviz"
            ),
            description="Path to configuration for RViz2.",
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "use_sim_time",
            default_value="true",
            description="If true, use simulated clock.",
        )
    )
    declared_arguments.append(
        DeclareLaunchArgument(
            "log_level",
            default_value="error",
            description="The level of logging that is applied to all ROS 2 nodes launched by this script.",
        )
    )

    return declared_arguments
176 |
--------------------------------------------------------------------------------
/package.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 | drl_grasping
4 | 2.0.0
5 | Application of Deep Reinforcement Learning to acquire a robust policy that allows robots to grasp diverse objects from compact 3D observations in the form of octrees.
6 | Andrej Orsula
7 | Andrej Orsula
8 | BSD
9 |
10 | ament_cmake
11 | ament_cmake_python
12 |
13 | ign_ros2_control
14 | lunalab_summit_xl_gen_description
15 | lunalab_summit_xl_gen_ign
16 | lunalab_summit_xl_gen_moveit_config
17 | moveit
18 | panda_description
19 | panda_moveit_config
20 | pymoveit2
21 | rclpy
22 | robot_state_publisher
23 | ros_ign_bridge
24 | ros_ign_gazebo
25 | rviz2
26 | tf2_ros
27 | urdf
28 | xacro
29 |
30 | ament_lint_auto
31 | ament_lint_common
32 |
33 |
34 | ament_cmake
35 |
36 |
37 |
--------------------------------------------------------------------------------
/python_requirements.txt:
--------------------------------------------------------------------------------
1 | --find-links https://download.pytorch.org/whl/cu113/torch_stable.html
2 | numpy==1.21.4
3 | open3d==0.13.0
4 | optuna==2.10.0
5 | pcg-gazebo==0.7.12
6 | pynput==1.7.6
7 | sb3-contrib==1.4.0
8 | scipy==1.7.2
9 | seaborn==0.11.2
10 | stable-baselines3[extra]==1.4.0
11 | torch==1.10.1+cu113
12 | torchaudio==0.10.1+cu113
13 | torchvision==0.11.2+cu113
14 | tqdm==4.62.3
15 | trimesh==3.9.35
16 | yacs==0.1.8
17 |
--------------------------------------------------------------------------------
/scripts/preload_replay_buffer.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import argparse
4 | import difflib
5 | import os
6 | import uuid
7 | from typing import Dict
8 |
9 | import gym
10 | import numpy as np
11 | import torch as th
12 | from stable_baselines3.common.utils import set_random_seed
13 |
14 | from drl_grasping import envs as drl_grasping_envs
15 | from drl_grasping.utils.exp_manager import ExperimentManager
16 | from drl_grasping.utils.utils import ALGOS, StoreDict, empty_str2none, str2bool
17 |
18 |
def main(args: Dict):
    """
    Set up an experiment and preload its replay buffer with demonstrations.

    Validates the requested gym environment, seeds all RNGs, configures torch
    threading, then delegates to `ExperimentManager` to build the model and
    collect demonstration transitions.
    """

    # Check if the selected environment is valid
    # If it could not be found, suggest the closest match
    registered_envs = set(gym.envs.registry.env_specs.keys())
    if args.env not in registered_envs:
        try:
            closest_match = difflib.get_close_matches(args.env, registered_envs, n=1)[0]
        except IndexError:
            closest_match = "'no close match found...'"
        raise ValueError(
            f"{args.env} not found in gym registry, you maybe meant {closest_match}?"
        )

    # If no specific seed is selected, choose a random one
    if args.seed < 0:
        args.seed = np.random.randint(2**32 - 1, dtype=np.int64).item()

    # Set the random seed across platforms
    set_random_seed(args.seed)

    # Setting num threads to 1 makes things run faster on cpu
    if args.num_threads > 0:
        if args.verbose > 1:
            print(f"Setting torch.num_threads to {args.num_threads}")
        th.set_num_threads(args.num_threads)

    # Verify that pre-trained agent exists before continuing to train it
    if args.trained_agent != "":
        assert args.trained_agent.endswith(".zip") and os.path.isfile(
            args.trained_agent
        ), "The trained_agent must be a valid path to a .zip file"

    # If enabled, ensure that the run has a unique ID
    uuid_str = f"_{uuid.uuid4()}" if args.uuid else ""

    # Enable preloading of replay buffer in the environment
    # Note: argparse leaves `env_kwargs` as None when `--env-kwargs` is not
    # provided; previously this crashed on `.update()` with AttributeError.
    if args.env_kwargs is None:
        args.env_kwargs = {}
    args.env_kwargs.update({"preload_replay_buffer": True})

    print("=" * 10, args.env, "=" * 10)
    print(f"Seed: {args.seed}")

    exp_manager = ExperimentManager(
        args,
        args.algo,
        args.env,
        args.log_folder,
        args.tensorboard_log,
        args.n_timesteps,
        args.eval_freq,
        args.eval_episodes,
        args.save_freq,
        args.hyperparams,
        args.env_kwargs,
        args.trained_agent,
        truncate_last_trajectory=args.truncate_last_trajectory,
        uuid_str=uuid_str,
        seed=args.seed,
        log_interval=args.log_interval,
        save_replay_buffer=args.save_replay_buffer,
        verbose=args.verbose,
        vec_env_type=args.vec_env,
    )

    # Prepare experiment
    model = exp_manager.setup_experiment()

    # Collect transitions for demonstration
    exp_manager.collect_demonstration(model)
89 |
90 |
if __name__ == "__main__":

    # Parse CLI arguments. `parse_known_args()` (below) silently ignores
    # unrecognized arguments, e.g. ROS-specific flags appended by launch files.
    parser = argparse.ArgumentParser()

    # Environment and its parameters
    parser.add_argument(
        "--env", type=str, default="Reach-Gazebo-v0", help="Environment ID"
    )
    parser.add_argument(
        "--env-kwargs",
        type=str,
        nargs="+",
        action=StoreDict,
        help="Optional keyword argument to pass to the env constructor",
    )
    parser.add_argument(
        "--vec-env",
        type=str,
        choices=["dummy", "subproc"],
        default="dummy",
        help="Type of VecEnv to use",
    )

    # Algorithm and training
    parser.add_argument(
        "--algo",
        type=str,
        choices=list(ALGOS.keys()),
        required=False,
        default="sac",
        help="RL algorithm to use during the training",
    )
    parser.add_argument(
        "-params",
        "--hyperparams",
        type=str,
        nargs="+",
        action=StoreDict,
        help="Optional RL hyperparameter overwrite (e.g. learning_rate:0.01 train_freq:10)",
    )
    parser.add_argument(
        "-n",
        "--n-timesteps",
        type=int,
        default=-1,
        help="Overwrite the number of timesteps",
    )
    parser.add_argument(
        "--num-threads",
        type=int,
        default=-1,
        help="Number of threads for PyTorch (-1 to use default)",
    )

    # Continue training an already trained agent
    parser.add_argument(
        "-i",
        "--trained-agent",
        type=str,
        default="",
        help="Path to a pretrained agent to continue training",
    )

    # Random seed (negative value selects a random seed in `main`)
    parser.add_argument("--seed", type=int, default=-1, help="Random generator seed")

    # Saving of model
    parser.add_argument(
        "--save-freq",
        type=int,
        default=10000,
        help="Save the model every n steps (if negative, no checkpoint)",
    )
    parser.add_argument(
        "--save-replay-buffer",
        type=str2bool,
        default=False,
        help="Save the replay buffer too (when applicable)",
    )

    # Logging
    parser.add_argument(
        "-f", "--log-folder", type=str, default="logs", help="Path to the log directory"
    )
    parser.add_argument(
        "-tb",
        "--tensorboard-log",
        type=empty_str2none,
        default="tensorboard_logs",
        help="Tensorboard log dir",
    )
    parser.add_argument(
        "--log-interval",
        type=int,
        default=-1,
        help="Override log interval (default: -1, no change)",
    )
    parser.add_argument(
        "-uuid",
        "--uuid",
        type=str2bool,
        default=False,
        help="Ensure that the run has a unique ID",
    )

    # Evaluation
    parser.add_argument(
        "--eval-freq",
        type=int,
        default=-1,
        help="Evaluate the agent every n steps (if negative, no evaluation)",
    )
    parser.add_argument(
        "--eval-episodes",
        type=int,
        default=5,
        help="Number of episodes to use for evaluation",
    )

    # Verbosity
    parser.add_argument(
        "--verbose", type=int, default=1, help="Verbose mode (0: no output, 1: INFO)"
    )

    # HER specifics
    parser.add_argument(
        "--truncate-last-trajectory",
        type=str2bool,
        default=True,
        help="When using HER with online sampling the last trajectory in the replay buffer will be truncated after reloading the replay buffer.",
    )

    args, unknown = parser.parse_known_args()

    main(args=args)
226 |
--------------------------------------------------------------------------------
/scripts/random_agent.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import argparse
4 | from typing import Dict
5 |
6 | import gym
7 | from stable_baselines3.common.env_checker import check_env
8 |
9 | from drl_grasping import envs as drl_grasping_envs
10 | from drl_grasping.utils.utils import StoreDict, str2bool
11 |
12 |
def main(args: Dict):
    """
    Run a random agent in the requested environment for `args.n_episodes`
    episodes, printing the accumulated reward of each episode.
    """

    # Create the environment
    # Note: `env_kwargs` is None when `--env-kwargs` is omitted on the CLI;
    # previously this crashed on `**args.env_kwargs` with TypeError.
    env = gym.make(args.env, **(args.env_kwargs or {}))

    # Initialize random seed
    env.seed(args.seed)

    # Check the environment
    if args.check_env:
        check_env(env, warn=True, skip_render_check=True)

    try:
        # Step environment for bunch of episodes
        for episode in range(args.n_episodes):

            # Initialize returned values
            done = False
            total_reward = 0

            # Reset the environment
            observation = env.reset()

            # Step through the current episode until it is done
            while not done:

                # Enable rendering
                if args.render:
                    env.render("human")

                # Sample random action
                action = env.action_space.sample()

                # Step the environment with the random action
                observation, reward, done, info = env.step(action)

                # Accumulate the reward
                total_reward += reward

            print(f"Episode #{episode}\n\treward: {total_reward}")
    finally:
        # Cleanup even if an episode raised an exception
        env.close()
55 |
56 |
if __name__ == "__main__":

    # Parse CLI arguments. `parse_known_args()` (below) silently ignores
    # unrecognized arguments, e.g. ROS-specific flags appended by launch files.
    parser = argparse.ArgumentParser()

    # Environment and its parameters
    parser.add_argument(
        "--env", type=str, default="Reach-Gazebo-v0", help="Environment ID"
    )
    parser.add_argument(
        "--env-kwargs",
        type=str,
        nargs="+",
        action=StoreDict,
        help="Optional keyword argument to pass to the env constructor",
    )

    # Number of episodes to run
    parser.add_argument(
        "-n",
        "--n-episodes",
        type=int,
        default=10000,
        help="Overwrite the number of episodes",
    )

    # Random seed
    parser.add_argument("--seed", type=int, default=69, help="Random generator seed")

    # Flag to check environment
    parser.add_argument(
        "--check-env",
        type=str2bool,
        default=True,
        help="Flag to check the environment before running the random agent",
    )

    # Flag to enable rendering
    parser.add_argument(
        "--render",
        type=str2bool,
        default=False,
        help="Flag to enable rendering",
    )

    args, unknown = parser.parse_known_args()

    main(args=args)
104 |
--------------------------------------------------------------------------------
/scripts/train_dreamerv2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env -S python3 -O
2 |
3 | import argparse
4 | import difflib
5 | from typing import Dict
6 |
7 | import dreamerv2.api as dv2
8 | import gym
9 | import numpy as np
10 |
11 | from drl_grasping import envs as drl_grasping_envs
12 | from drl_grasping.utils.utils import StoreDict
13 |
14 |
def main(args: Dict):
    """
    Train a DreamerV2 agent in the requested environment.

    Validates the gym environment ID, derives the DreamerV2 config from its
    defaults, seeds numpy and the environment, then starts training.
    """

    # Check if the selected environment is valid
    # If it could not be found, suggest the closest match
    registered_envs = set(gym.envs.registry.env_specs.keys())
    if args.env not in registered_envs:
        try:
            closest_match = difflib.get_close_matches(args.env, registered_envs, n=1)[0]
        except IndexError:
            closest_match = "'no close match found...'"
        raise ValueError(
            f"{args.env} not found in gym registry, you maybe meant {closest_match}?"
        )

    # If no specific seed is selected, choose a random one
    if args.seed < 0:
        args.seed = np.random.randint(2**32 - 1, dtype=np.int64).item()

    config, _ = dv2.defaults.update(
        {
            "logdir": args.log_folder,
            "eval_every": args.eval_freq,
            "prefill": 500,
            "pretrain": 100,
            "clip_rewards": "identity",
            "pred_discount": False,
            "replay": {
                "capacity": 1e6,
                "ongoing": False,
                "minlen": 10,
                "maxlen": 10,
                "prioritize_ends": True,
            },
            "dataset": {"batch": 16, "length": 10},
            "grad_heads": ["decoder", "reward"],
            "rssm": {"hidden": 200, "deter": 200},
            "model_opt": {"lr": 1e-4},
            "actor_opt": {"lr": 1e-5},
            "critic_opt": {"lr": 1e-5},
            "actor_ent": 1e-4,
            "render_size": [64, 64],
            "kl": {"free": 1.0},
        }
    ).parse_flags(known_only=True)

    # Set the random seed across platforms
    np.random.seed(args.seed)

    print("=" * 10, args.env, "=" * 10)
    print(f"Seed: {args.seed}")

    # Note: `env_kwargs` is None when `--env-kwargs` is omitted on the CLI;
    # previously this crashed on `**args.env_kwargs` with TypeError.
    env = gym.make(args.env, **(args.env_kwargs or {}))
    env.seed(args.seed)

    dv2.train(env, config)
70 |
71 |
if __name__ == "__main__":

    # Parse CLI arguments. `parse_known_args()` (below) silently ignores
    # unrecognized arguments, e.g. ROS-specific flags appended by launch files.
    parser = argparse.ArgumentParser()

    # Environment and its parameters
    parser.add_argument(
        "--env",
        type=str,
        default="GraspPlanetary-ColorImage-Gazebo-v0",
        help="Environment ID",
    )
    parser.add_argument(
        "--env-kwargs",
        type=str,
        nargs="+",
        action=StoreDict,
        help="Optional keyword argument to pass to the env constructor",
    )

    # Random seed (negative value selects a random seed in `main`)
    parser.add_argument("--seed", type=int, default=-1, help="Random generator seed")

    # Logging
    parser.add_argument(
        "-f", "--log-folder", type=str, default="logs", help="Path to the log directory"
    )

    # Evaluation
    parser.add_argument(
        "--eval-freq",
        type=int,
        default=10000,
        help="Evaluate the agent every n steps (if negative, no evaluation)",
    )

    # Verbosity
    parser.add_argument(
        "--verbose", type=int, default=1, help="Verbose mode (0: no output, 1: INFO)"
    )

    args, unknown = parser.parse_known_args()

    main(args=args)
115 |
--------------------------------------------------------------------------------
/scripts/utils/dataset/dataset_download_test.bash:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
## Download a fixed set of test models from the Ignition Fuel "googleresearch"
## collection (in parallel) and move them into a separate `models_test` dir.

COLLECTION_OWNER="googleresearch"
OWNER_DIR="${HOME}/.ignition/fuel/fuel.ignitionrobotics.org/${COLLECTION_OWNER}"
MODELS_TEST_DIR="models_test"

MODEL_NAMES=(
    3d_dollhouse_sofa
    dino_4
    nintendo_mario_action_figure
    android_figure_orange
    dpc_handmade_hat_brown
    olive_kids_robots_pencil_case
    bia_cordon_bleu_white_porcelain_utensil_holder_900028
    germanium_ge132
    schleich_african_black_rhino
    central_garden_flower_pot_goo_425
    grandmother
    schleich_s_bayala_unicorn_70432
    chelsea_lo_fl_rdheel_zaqrnhlefw8
    kong_puppy_teething_rubber_small_pink
    school_bus
    cole_hardware_butter_dish_square_red
    lenovo_yoga_2_11
    weisshai_great_white_shark
    cole_hardware_school_bell_solid_brass_38
    mini_fire_engine
)

# Refuse to run if models are already present (they would get mixed in)
if [[ -d "${OWNER_DIR}/models" ]]; then
    echo >&2 "Error: There are already some models inside '${OWNER_DIR}/models'. Please move them to a temporary location before continuing."
    exit 1
fi

# Limit concurrency to min(nproc, 16)
N_PARALLEL_DOWNLOADS=$(($(nproc) < 16 ? $(nproc) : 16))
for model_name in "${MODEL_NAMES[@]}"; do
    if [[ ! -d "${OWNER_DIR}/models/${model_name}" ]]; then
        MODEL_URI="https://fuel.ignitionrobotics.org/1.0/${COLLECTION_OWNER}/models/${model_name}"
        echo "Info: Downloading model '${MODEL_URI}'."
        ign fuel download -t model -u "${MODEL_URI}" &
    fi
    # Throttle: once the job count reaches the limit, wait for one to finish.
    # Use the `wc -l` output directly — the original `tr -d 0` deleted the
    # digit 0 from the count (e.g. "10" became "1"), breaking the throttle.
    while (($(jobs -p | wc -l) >= N_PARALLEL_DOWNLOADS)); do
        wait -n
    done
done
# Barrier: wait for all remaining background downloads
wait

mv "${OWNER_DIR}/models" "${OWNER_DIR}/${MODELS_TEST_DIR}" &&
    echo "Info: All downloaded models were moved to '${OWNER_DIR}/${MODELS_TEST_DIR}'."
52 |
--------------------------------------------------------------------------------
/scripts/utils/dataset/dataset_download_train.bash:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Download a fixed list of models from the "googleresearch" owner on Ignition
# Fuel into the local Fuel cache (training split of the object dataset).

# Fuel collection owner whose models are downloaded.
COLLECTION_OWNER="googleresearch"
# Local Ignition Fuel cache directory for this owner.
OWNER_DIR="${HOME}/.ignition/fuel/fuel.ignitionrobotics.org/${COLLECTION_OWNER}"
# Sibling directory of "models" that will hold the downloaded training split.
MODELS_TRAIN_DIR="models_train"

# Names of the Fuel models that make up the training dataset.
MODEL_NAMES=(
    3d_dollhouse_happy_brother
    ecoforms_plant_container_urn_55_mocha
    3d_dollhouse_lamp
    f5_trx_fg
    3d_dollhouse_refrigerator
    fire_engine
    3d_dollhouse_sink
    folic_acid
    3d_dollhouse_swing
    grandfather_doll
    3d_dollhouse_tablepurple
    granimals_20_wooden_abc_blocks_wagon
    3m_vinyl_tape_green_1_x_36_yd
    heavyduty_flashlight
    45oz_ramekin_asst_deep_colors
    imaginext_castle_ogre
    ace_coffee_mug_kristen_16_oz_cup
    lacing_sheep
    air_hogs_wind_flyers_set_airplane_red
    markings_letter_holder
    android_figure_panda
    mens_santa_cruz_thong_in_tan_r59c69darph
    animal_planet_foam_2headed_dragon
    mini_excavator
    baby_elements_stacking_cups
    mini_roller
    balancing_cactus
    my_little_pony_princess_celestia
    bia_porcelain_ramekin_with_glazed_rim_35_45_oz_cup
    nickelodeon_teenage_mutant_ninja_turtles_leonardo
    big_dot_aqua_pencil_case
    nikon_1_aw1_w11275mm_lens_silver
    black_and_decker_tr3500sd_2slice_toaster
    nintendo_yoshi_action_figure
    blackblack_nintendo_3dsxl
    nordic_ware_original_bundt_pan
    bradshaw_international_11642_7_qt_mp_plastic_bowl
    olive_kids_birdie_munch_n_lunch
    breyer_horse_of_the_year_2015
    peekaboo_roller
    brisk_iced_tea_lemon_12_12_fl_oz_355_ml_cans_144_fl_oz_426_lt
    playmates_industrial_cosplinter_teenage_mutant_ninja_turtle_action_figure
    brother_lc_1053pks_ink_cartridge_cyanmagentayellow_1pack
    playmates_nickelodeon_teenage_mutant_ninja_turtles_shredder
    bunny_racer
    retail_leadership_summit_ect3zqhyikx
    calphalon_kitchen_essentials_12_cast_iron_fry_pan_black
    room_essentials_mug_white_yellow
    canon_225226_ink_cartridges_blackcolor_cyan_magenta_yellow_6_count
    schleich_allosaurus
    chefmate_8_frypan
    schleich_hereford_bull
    chelsea_blkheelpmp_dwxltznxlzz
    schleich_lion_action_figure
    chicken_nesting
    schleich_spinosaurus_action_figure
    circo_fish_toothbrush_holder_14995988
    schleich_therizinosaurus_ln9cruulpqc
    closetmaid_premium_fabric_cube_red
    shaxon_100_molded_category_6_rj45rj45_shielded_patch_cord_white
    coast_guard_boat
    spiderman_titan_hero_12inch_action_figure_5hnn4mtkfsp
    cole_hardware_antislip_surfacing_material_white
    stacking_bear
    colton_wntr_chukka_y4jo0i8jqfw
    stacking_ring
    craftsman_grip_screwdriver_phillips_cushion
    thomas_friends_wooden_railway_talking_thomas_z7yi7ufhjrj
    crazy_shadow_2
    threshold_porcelain_pitcher_white
    crosley_alarm_clock_vintage_metal
    victor_reversible_bookend
    digital_camo_double_decker_lunch_bag
    weston_no_33_signature_sausage_tonic_12_fl_oz
    dino_3
    whale_whistle_6pcs_set
    dino_5
    wooden_abc_123_blocks_50_pack
    ecoforms_plant_container_qp6coral
    xyli_pure_xylitol
)
# Refuse to run if a live "models" directory already exists: the final `mv`
# below would otherwise mix previously cached models into the training split.
if [[ -d "${OWNER_DIR}/models" ]]; then
    echo >&2 "Error: There are already some models inside '${OWNER_DIR}/models'. Please move them to a temporary location before continuing."
    exit 1
fi

# Download models in parallel, capped at the number of CPUs (max 16).
N_PARALLEL_DOWNLOADS=$(($(nproc) < 16 ? $(nproc) : 16))
for model_name in "${MODEL_NAMES[@]}"; do
    if [[ ! -d "${OWNER_DIR}/models/${model_name}" ]]; then
        MODEL_URI="https://fuel.ignitionrobotics.org/1.0/${COLLECTION_OWNER}/models/${model_name}"
        echo "Info: Downloading model '${MODEL_URI}'."
        ign fuel download -t model -u "${MODEL_URI}" &
    fi
    # Throttle: block until a download slot frees up. Fix: the original piped
    # the count through `tr -d 0`, which deletes the digit 0 and corrupts
    # counts such as 10 -> 1 (and makes an empty result a syntax error);
    # `wc -l` already yields a plain number that bash arithmetic accepts.
    while (($(jobs -p | wc -l) >= N_PARALLEL_DOWNLOADS)); do
        wait -n # requires bash >= 4.3
    done
done
# Barrier: wait for all remaining background downloads to finish.
wait

mv "${OWNER_DIR}/models" "${OWNER_DIR}/${MODELS_TRAIN_DIR}" &&
    echo "Info: All downloaded models were moved to '${OWNER_DIR}/${MODELS_TRAIN_DIR}'."
--------------------------------------------------------------------------------
/scripts/utils/dataset/dataset_set_test.bash:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Activate the testing dataset by (re)pointing the live "models" directory,
# which Ignition Fuel reads from, at the cached test split.

COLLECTION_OWNER="googleresearch"
OWNER_DIR="${HOME}/.ignition/fuel/fuel.ignitionrobotics.org/${COLLECTION_OWNER}"
MODELS_TEST_DIR="models_test"

if [[ -d "${OWNER_DIR}/${MODELS_TEST_DIR}" ]]; then
    # -T treats the destination as a plain file, -f replaces a previous link.
    ln -sTf "${OWNER_DIR}/${MODELS_TEST_DIR}" "${OWNER_DIR}/models" &&
        echo "Info: All models under '${OWNER_DIR}/${MODELS_TEST_DIR}' are now symbolically linked to '${OWNER_DIR}/models'."
else
    # Fixed copy-paste bug: this script handles the *testing* dataset, but the
    # original error message said "training".
    echo >&2 "Error: Directory '${OWNER_DIR}/${MODELS_TEST_DIR}' does not exist. Setting of testing dataset skipped. Please make sure to download the dataset first."
fi
--------------------------------------------------------------------------------
/scripts/utils/dataset/dataset_set_train.bash:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Activate the training dataset by (re)pointing the live "models" directory,
# which Ignition Fuel reads from, at the cached training split.

COLLECTION_OWNER="googleresearch"
OWNER_DIR="${HOME}/.ignition/fuel/fuel.ignitionrobotics.org/${COLLECTION_OWNER}"
MODELS_TRAIN_DIR="models_train"

# Guard clause: nothing to link if the training split was never downloaded.
if [[ ! -d "${OWNER_DIR}/${MODELS_TRAIN_DIR}" ]]; then
    echo >&2 "Error: Directory '${OWNER_DIR}/${MODELS_TRAIN_DIR}' does not exist. Setting of training dataset skipped. Please make sure to download the dataset first."
else
    # -T treats the destination as a plain file, -f replaces a previous link.
    ln -sTf "${OWNER_DIR}/${MODELS_TRAIN_DIR}" "${OWNER_DIR}/models" &&
        echo "Info: All models under '${OWNER_DIR}/${MODELS_TRAIN_DIR}' are now symbolically linked to '${OWNER_DIR}/models'."
fi
--------------------------------------------------------------------------------
/scripts/utils/process_collection.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 |
3 | import argparse
4 |
5 | from drl_grasping.envs.models.utils import ModelCollectionRandomizer
6 |
7 |
def main(args=None):
    """Process all models from an Ignition Fuel collection.

    Args:
        args: Parsed ``argparse`` namespace providing ``owner``,
            ``collection``, ``server``, ``version`` and the three
            ``decimate_*`` settings declared by the CLI below.
            Must not be ``None`` (the attributes are accessed directly).
    """
    model_collection_randomizer = ModelCollectionRandomizer(
        owner=args.owner,
        collection=args.collection,
        server=args.server,
        server_version=args.version,
        unique_cache=True,
        enable_blacklisting=True,
    )
    print(f"Processing all models from owner [{args.owner}]...")
    # Coerce CLI values explicitly: the argparse defaults in this script are
    # declared as strings ("0.25", "40", "200"), so without conversion the
    # numeric decimation parameters would receive str values.
    model_collection_randomizer.process_all_models(
        decimation_fraction_of_visual=float(args.decimate_fraction),
        decimation_min_faces=int(args.decimate_min_faces),
        decimation_max_faces=int(args.decimate_max_faces),
        max_faces=40000,
        max_vertices=None,
        component_min_faces_fraction=0.1,
        component_max_volume_fraction=0.35,
        fix_mtl_texture_paths=True,
    )
    print("Processing finished")
31 |
if __name__ == "__main__":

    parser = argparse.ArgumentParser(
        description="Process all models from a collection. "
        + "If local cache already contains one or more models from the owner [-o], "
        + "these models will be processed and [-s, -c, -v] arguments ignored."
    )
    parser.add_argument(
        "-o",
        "--owner",
        action="store",
        default="GoogleResearch",
        help="Owner of the collection",
    )
    parser.add_argument(
        "-c",
        "--collection",
        action="store",
        default="Google Scanned Objects",
        help="Name of the collection",
    )
    parser.add_argument(
        "-s",
        "--server",
        action="store",
        default="https://fuel.ignitionrobotics.org",
        help="URI to Ignition Fuel server where the collection resides",
    )
    parser.add_argument(
        "-v",
        "--version",
        action="store",
        default="1.0",
        help="Version of the Fuel server",
    )
    # Numeric options declare an explicit type so parsed values (and the
    # defaults) are numbers rather than strings; previously the defaults were
    # the strings "0.25"/"40"/"200", which were forwarded to numeric
    # parameters of process_all_models().
    parser.add_argument(
        "--decimate_fraction",
        action="store",
        type=float,
        default=0.25,
        help="Fraction of faces collision geometry should have compared to visual geometry (min/max faces will be enforced)",
    )
    parser.add_argument(
        "--decimate_min_faces",
        action="store",
        type=int,
        default=40,
        help="Min number of faces for collision geometry",
    )
    parser.add_argument(
        "--decimate_max_faces",
        action="store",
        type=int,
        default=200,
        help="Max number of faces for collision geometry",
    )

    args = parser.parse_args()

    main(args)
89 |
--------------------------------------------------------------------------------