├── .gitignore ├── README.md ├── docker-compose.yml ├── experiments └── joint_efforts.txt ├── generate_usb_config.sh ├── host_install.sh ├── host_install_scripts ├── docker_compose_install.sh ├── docker_install.sh ├── nvidia_docker_install.sh ├── ros_install.sh └── widowx_install.sh └── widowx_envs ├── .Dockerfile.swn ├── .Dockerfile.swo ├── .Dockerfile.swp ├── Dockerfile ├── LICENSE ├── README.md ├── experiments └── bridge_data_v2 │ ├── collection_metadata.json │ └── conf.py ├── multicam_server ├── CMakeLists.txt ├── launch │ ├── cameras.launch │ └── streamer.launch ├── package.xml ├── setup.py └── src │ ├── __init__.py │ ├── multicam_server │ ├── __init__.py │ ├── camera_recorder.py │ └── topic_utils.py │ ├── start_streamers.py │ ├── streamer.py │ ├── usb_connector_chart_example.yml │ └── usbreset.c ├── requirements.txt ├── scripts ├── go_to_neutral_pose.py ├── go_to_sleep_pose.py ├── run.sh └── setup.sh ├── setup.py ├── widowx_controller ├── CMakeLists.txt ├── launch │ ├── launch.launch │ └── widowx_rs.launch ├── package.xml ├── setup.py ├── src │ └── widowx_controller │ │ ├── __init__.py │ │ ├── controller_base.py │ │ ├── custom_gripper_controller.py │ │ ├── velocity_controller.py │ │ ├── vr_controller_client.py │ │ ├── vr_controller_server.py │ │ └── widowx_controller.py └── srv │ ├── DisableController.srv │ ├── EnableController.srv │ ├── GetCartesianPose.srv │ ├── GetGripperDesiredState.srv │ ├── GetState.srv │ ├── GetVRButtons.srv │ ├── GotoNeutral.srv │ ├── MoveToEEP.srv │ ├── MoveToState.srv │ ├── OpenGripper.srv │ └── SetGripperPosition.srv └── widowx_envs ├── __init__.py ├── base ├── base_env.py ├── robot_base_env.py └── robot_configs.json ├── control_loops.py ├── policies ├── __init__.py ├── policy.py └── vr_teleop_policy.py ├── run_data_collection.py ├── teleop.py ├── trajectory_collector.py ├── utils ├── __init__.py ├── exceptions.py ├── grasp_utils.py ├── image_utils.py ├── metadata_helper.py ├── raw_saver.py ├── sync.py ├── transformation_utils.py └── utils.py ├── widowx_env.py └── widowx_env_service.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 160 | #.idea/ 161 | 162 | usb_connector_chart.yml 163 | code -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Bridge Data Robot 2 | 3 | Code for controlling Trossen WidowX robot arms. 4 | 5 | - `widowx_envs`: contains the `widowx_envs` Python package with all of the WidowX controller code. 6 | - `docker_compose.yml`: contains all of the docker-compose services that will be used to run the robot. 7 | 8 | ## Setup 9 | 10 | First, install the dependencies on the host machine by running `./host_install.sh`. 11 | 12 | Next, we need to build and launch the ROS service that communicates with the robot. This service is defined by the `robonet` entry in `docker-compose.yml`. It uses the `robonet-base` image which is built from the `widowx_envs` directory (see `widowx_envs/Dockerfile`). To build and run the `robonet-base` service, run: 13 | 14 | ```bash 15 | # first generate the usb config file 16 | ./generate_usb_config.sh 17 | 18 | # build and run the robonet service 19 | USB_CONNECTOR_CHART=$(pwd)/usb_connector_chart.yml docker compose up --build robonet 20 | ``` 21 | 22 | This builds the `robonet-base` image, which contains all of the ROS dependencies and the Python controller code from the `widowx_envs` directory. The USB connector chart is required to start the camera stream. You can get the USB device IDs by running `v4l2-ctl --list-devices`, `./generate_usb_config.sh` automatically generates the config file for you. 23 | 24 | Once this is running, you can execute commands in the running container like so: 25 | 26 | ```bash 27 | docker compose exec robonet bash -lic "go_sleep" 28 | ``` 29 | 30 | Explanation: 31 | - `docker compose exec`: execute a command in a running container 32 | - `robonet`: the service name (as specified in `docker-compose.yml`) 33 | - `bash`: the executable to run inside the container 34 | - `-l`: tells bash to open a login shell, sourcing `~/.bashrc` inside the container, which is required to set up a few ROS things and the correct Python virtual environment 35 | - `-i`: makes the shell interactive, in case you want to run an interactive command (like `python`) 36 | - `-c`: tells bash to execute the next argument 37 | - `go_sleep`: the string to be executed by bash; in this case, it's a utility script that is built in to the `robonet-base` image that moves the arm to the sleep position 38 | 39 | If you really want to, you can also attach a bash shell interactively using `docker compose exec robonet bash`. 40 | 41 | ### Using RealSense cameras 42 | 43 | The RealSense cameras require different drivers than RGB cameras. If you are using RealSenses, change the `camera_string` in `widowx_envs/scripts/run.sh` to `realsense:=true`. 44 | 45 | You will also need to update the device serial number in `widowx_envs/widowx_controller/launch/widowx_rs.launch` to match your cameras. 46 | 47 | ## Data collection 48 | 49 | First, make sure the `robonet-base` container is running using the above command. Then, run the following commands: 50 | 51 | ```bash 52 | # first create an empty directory to store the data 53 | mkdir -p $HOME/widowx_data 54 | 55 | # give sudo write access to the container 56 | # we can check the id by running `id` in the container 57 | sudo chown -R 1000:1002 $HOME/widowx_data 58 | 59 | # access the container 60 | docker compose exec robonet bash 61 | 62 | # start the data collection script 63 | python widowx_envs/widowx_envs/run_data_collection.py widowx_envs/experiments/bridge_data_v2/conf.py 64 | ``` 65 | 66 | You can specify a different directory to save the data in `docker-compose.yml`. 67 | 68 | At this point, the data_collection script will start initializing, and then throw the error: 69 | ```bash 70 | Device not found. Make sure that device is running and is connected over USB 71 | Run `adb devices` to verify that the device is visible. 72 | ``` 73 | 74 | This is expected, as our data collection requires the use of a Meta Quest VR headset to control the widowx arm. Turn on and connect the VR headset to the computer via USB. Make sure USB debugging is enabled and answer "Yes/Agree" to any prompts that appear in the headset. 75 | 76 | ## Evaluating policies 77 | 78 | There are two ways to interface a policy with the robot: (1) the docker compose service method or (2) the (newer) server-client method. In general, we recommend the server-client method. 79 | 80 | ### Docker compose service method 81 | 82 | The first method is to install the neccesary code and dependencies for running the policy in the same environment as the robot dependencies. This is done by creating a new docker compose service in `docker-compose.yml`. 83 | 84 | See `docker-compose.yml` for an example with the [bridge_data_v2](https://github.com/rail-berkeley/bridge_data_v2) codebase. Each codebase also needs a minimal Dockerfile that builds on top of the `robonet-base` image. The Dockerfile for the `bridge_data_v2` codebase looks like: 85 | 86 | ```Dockerfile 87 | FROM robonet-base:latest 88 | 89 | COPY requirements.txt /tmp/requirements.txt 90 | RUN ~/myenv/bin/pip install -r /tmp/requirements.txt 91 | RUN ~/myenv/bin/pip install --upgrade "jax[cuda11_pip]==0.4.13" -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html 92 | 93 | # this path will get mounted as a volume (see `docker-compose.yml`) 94 | ENV PYTHONPATH=${PYTHONPATH}:/home/robonet/code/bridge_data_v2 95 | 96 | # modify packages to work with python 3.8 (ros noetic needs python 3.8) 97 | # to avoid orbax checkpoint error, downgrade flax 98 | RUN ~/myenv/bin/pip install flax==0.6.11 99 | # to avoid typing errors, upgrade distrax 100 | RUN ~/myenv/bin/pip install distrax==0.1.3 101 | 102 | # avoid git safe directory errors 103 | RUN git config --global --add safe.directory /home/robonet/code/bridge_data_v2 104 | 105 | WORKDIR /home/robonet/code/bridge_data_v2 106 | ``` 107 | 108 | To run the service, clone the [bridge_data_v2](https://github.com/rail-berkeley/bridge_data_v2) repo into `bridge_data_robot/code`. 109 | 110 | Then, build the new service/container: 111 | 112 | ```bash 113 | docker compose build bridge_data_v2 114 | ``` 115 | 116 | Now, we can run commands in this container similar to the previous section. We just need to make sure the`robonet-base` container is running in the background (see the setup section). 117 | 118 | For instance, to run the `bridge_data_v2` evaluation script: 119 | 120 | ```bash 121 | docker compose run bridge_data_v2 bash -lic "python experiments/eval_gc.py ..." 122 | ``` 123 | 124 | To execute commands in the docker container with an interactive shell, you can again do `docker compose run bridge_data_v2 bash`. 125 | 126 | ### Server-client method 127 | 128 | With this setup, the robot is run as a server that receives actions and the policy acts as a client that sends actions. This "server-client" architecture allows us to both isolate robot controller and policy dependencies, as well as perform inference on a separate machine from the one used to control the robot (though in the simplest case, both the robot and policy can run on the same machine). 129 | 130 | First, run the server on the robot: 131 | 132 | ```bash 133 | docker compose exec robonet bash -lic "widowx_env_service --server" 134 | ``` 135 | 136 | Then, we'll create an environment to run the client. This can be on the same machine as the robot or a different machine. First, create a new python environment (e.g conda) with the policy dependencies (e.g [bridge_data_v2](https://github.com/rail-berkeley/bridge_data_v2)). Additionally, install the [edgeml](https://github.com/youliangtan/edgeml) library. Finally, install the `widowx_envs` package from this repo: 137 | 138 | ```bash 139 | cd widowx_envs 140 | pip install -e . 141 | ``` 142 | 143 | Now, we are ready to communicate with the server. To verify that everything is set up correctly run: 144 | 145 | ```bash 146 | python widowx_envs/widowx_env_service.py --client 147 | ``` 148 | 149 | This command assumes the server is running on `localhost` (i.e the same machine). If you're running the client on a different machine from the server, use `--ip` to specify the ip address of the server. 150 | 151 | The robot should execute a few predefined movements. See the `bridge_data_v2` evaluation script [here](https://github.com/rail-berkeley/bridge_data_v2/blob/main/experiments/eval.py) for an example of how to send actions to the server. 152 | 153 | Extra Util: To try teleop the robot arm remotely 154 | ```bash 155 | python3 widowx_envs/widowx_envs/teleop.py --ip 156 | ``` 157 | 158 | ## Troubleshooting 159 | 160 | ##### Permission errors 161 | 162 | If you run into following errors: 163 | 164 | ```bash 165 | Traceback (most recent call last): 166 | File "urllib3/connectionpool.py", line 677, in urlopen 167 | File "urllib3/connectionpool.py", line 392, in _make_request 168 | File "http/client.py", line 1277, in request 169 | File "http/client.py", line 1323, in _send_request 170 | File "http/client.py", line 1272, in endheaders 171 | File "http/client.py", line 1032, in _send_output 172 | File "http/client.py", line 972, in send 173 | File "docker/transport/unixconn.py", line 43, in connect 174 | PermissionError: [Errno 13] Permission denied 175 | ``` 176 | that can be fixed by running the following commands and subsequently restarting the PC (the log out and log back in is sometimes not sufficient): 177 | 178 | ```bash 179 | sudo groupadd docker 180 | sudo usermod -aG docker $USER 181 | ``` 182 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.9" 2 | services: 3 | robonet: 4 | image: robonet-base 5 | container_name: robonet_$USER 6 | build: 7 | context: widowx_envs 8 | entrypoint: ./widowx_envs/scripts/run.sh 9 | environment: 10 | - DISPLAY=:0 11 | user: robonet:1002 12 | stdin_open: true 13 | tty: true 14 | device_cgroup_rules: 15 | - 'c *:* rmw' 16 | network_mode: host 17 | runtime: nvidia 18 | volumes: 19 | - ${USB_CONNECTOR_CHART:-/dev/null}:/tmp/camera_connector_chart 20 | # if you want to be able to edit widowx_envs code without rebuilding the image, this 21 | # overwrites the widowx_envs directory that was copied in at build time 22 | - ./experiments:/home/robonet/experiments 23 | - /media/harddrive/:/home/robonet/trainingdata # TODO (YL): better way to mount a local directory 24 | # $HOME/widowx_data:/home/robonet/trainingdata 25 | - ./widowx_envs:/home/robonet/widowx_envs 26 | - /dev:/dev # for host tty access 27 | 28 | bridge_data_v2: 29 | image: robonet-bridge-data-v2 30 | container_name: robonet_${USER}_bridge_data_v2 31 | user: robonet:1002 32 | build: 33 | context: code/bridge_data_v2 34 | network_mode: host 35 | environment: 36 | - WANDB_API_KEY=putwandbkeyhere 37 | runtime: nvidia 38 | volumes: 39 | - ./widowx_envs:/home/robonet/widowx_envs 40 | - ./code/bridge_data_v2:/home/robonet/code/bridge_data_v2 41 | - ./experiments:/home/robonet/experiments 42 | - /mount/harddrive/:/home/robonet/trainingdata 43 | -------------------------------------------------------------------------------- /generate_usb_config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Initialize the YAML variables to empty strings 4 | blue="" 5 | yellow="" 6 | wrist="" 7 | d435="" 8 | 9 | # Run v4l2-ctl to fetch devices and parse them line by line 10 | while IFS= read -r line; do 11 | # Check for the device identifiers and store them accordingly 12 | if [[ $line == *"Piwebcam: UVC Camera"* ]]; then 13 | wrist=$(echo "$line" | awk -F '(' '{print $2}' | awk -F ')' '{print $1}') 14 | elif [[ $line == *"HD Pro Webcam C920"* ]] && [ -z "$blue" ]; then 15 | blue=$(echo "$line" | awk -F '(' '{print $2}' | awk -F ')' '{print $1}') 16 | elif [[ $line == *"HD Pro Webcam C920"* ]]; then 17 | yellow=$(echo "$line" | awk -F '(' '{print $2}' | awk -F ')' '{print $1}') 18 | elif [[ $line == *"Intel(R) RealSense(TM) Depth Ca"* ]]; then 19 | d435=$(echo "$line" | awk -F 'Ca ' '{print $2}' | awk -F ')' '{print $1}') 20 | fi 21 | done < <(v4l2-ctl --list-devices) 22 | 23 | # Print the generated YAML format 24 | cat << EOF > usb_connector_chart.yml 25 | blue: '$blue' 26 | yellow: '$yellow' 27 | wrist: '$wrist' 28 | D435: '$d435' 29 | EOF 30 | -------------------------------------------------------------------------------- /host_install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | 4 | read -r -p "Install ROS? [Y/n] " response 5 | case "$response" in 6 | [nN][oO]|[nN]) 7 | install_ros=false 8 | ;; 9 | *) 10 | install_ros=true 11 | ;; 12 | esac 13 | 14 | read -r -p "Install WidowX drivers? [Y/n] " response 15 | case "$response" in 16 | [nN][oO]|[nN]) 17 | install_widowx=false 18 | ;; 19 | *) 20 | install_widowx=true 21 | ;; 22 | esac 23 | 24 | read -r -p "Install Docker? [Y/n] " response 25 | case "$response" in 26 | [nN][oO]|[nN]) 27 | install_docker=false 28 | ;; 29 | *) 30 | install_docker=true 31 | read -r -p "Do you want to use Docker as a non-root user (without sudo)? [Y/n] " response 32 | case "$response" in 33 | [nN][oO]|[nN]) 34 | docker_without_sudo=false 35 | ;; 36 | *) 37 | docker_without_sudo=true 38 | echo "Remember to log out and back in for this to take effect after the script completes!" 39 | if [[ $EUID -eq 0 ]]; then 40 | echo "If you want to set up Docker as a non-root user run this script $0 as non-root user." 41 | exit 1 42 | fi 43 | ;; 44 | esac 45 | ;; 46 | esac 47 | 48 | 49 | read -r -p "Install nvidia-docker? [Y/n] " response 50 | case "$response" in 51 | [nN][oO]|[nN]) 52 | install_nv_docker=false 53 | ;; 54 | *) 55 | install_nv_docker=true 56 | echo "If you want to use Nvidia GPU in docker, please make sure that nvidia drivers are already installed. https://github.com/NVIDIA/nvidia-docker/wiki/Frequently-Asked-Questions#how-do-i-install-the-nvidia-driver" 57 | read -r -p "Do you want to continue? [Y/n] " response 58 | case "$response" in 59 | [nN][oO]|[nN]) 60 | exit 0 61 | esac 62 | ;; 63 | esac 64 | 65 | read -r -p "Install docker-compose? [Y/n] " response 66 | case "$response" in 67 | [nN][oO]|[nN]) 68 | install_docker_compose=false 69 | ;; 70 | *) 71 | install_docker_compose=true 72 | ;; 73 | esac 74 | 75 | full_path=$(realpath $0) 76 | dir_path=$(dirname $full_path)/host_install_scripts 77 | 78 | if [ "$install_ros" = "true" ]; then 79 | $dir_path/ros_install.sh 80 | fi 81 | if [ "$install_widowx" = "true" ]; then 82 | $dir_path/widowx_install.sh 83 | fi 84 | if [ "$install_docker" = "true" ]; then 85 | $dir_path/docker_install.sh $docker_without_sudo 86 | fi 87 | if [ "$install_nv_docker" = "true" ]; then 88 | $dir_path/nvidia_docker_install.sh 89 | fi 90 | if [ "$install_docker_compose" = "true" ]; then 91 | $dir_path/docker_compose_install.sh 92 | fi 93 | 94 | echo "All done!" 95 | -------------------------------------------------------------------------------- /host_install_scripts/docker_compose_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | sudo curl -L "https://github.com/docker/compose/releases/download/1.29.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose 4 | sudo chmod +x /usr/local/bin/docker-compose -------------------------------------------------------------------------------- /host_install_scripts/docker_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | docker_without_sudo=$1 4 | curl -fsSL https://get.docker.com -o /tmp/get-docker.sh || (echo "get-docker.sh download failed" && exit 1) 5 | sudo sh /tmp/get-docker.sh || (echo "Docker install failed" && exit 1) 6 | sudo systemctl --now enable docker || (echo "Docker startup failed" && exit 1) 7 | 8 | if [ "$docker_without_sudo" = "true" ]; then 9 | if [[ $EUID -eq 0 ]]; then 10 | echo "If you want to set up Docker as a non-root user run this script $0 as non-root user." 11 | exit 1 12 | fi 13 | sudo usermod -aG docker $USER 14 | fi -------------------------------------------------------------------------------- /host_install_scripts/nvidia_docker_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | distribution=$(. /etc/os-release;echo $ID$VERSION_ID) \ 4 | && curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | sudo apt-key add - \ 5 | && curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | sudo tee /etc/apt/sources.list.d/nvidia-docker.list 6 | sudo apt-get update 7 | sudo apt-get install -y nvidia-docker2 8 | sudo systemctl restart docker 9 | sudo docker run --rm --gpus all nvidia/cuda:11.0-base nvidia-smi > /dev/null || (echo "nvidia-docker test failed" && exit 1) 10 | -------------------------------------------------------------------------------- /host_install_scripts/ros_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eu 2 | 3 | # The BSD License 4 | # Copyright (c) 2018 PickNik Consulting 5 | # Copyright (c) 2014 OROCA and ROS Korea Users Group 6 | 7 | #set -x 8 | 9 | function usage { 10 | # Print out usage of this script. 11 | echo >&2 "usage: $0" 12 | echo >&2 "[-h|--help] Print help message." 13 | exit 0 14 | } 15 | 16 | # Parse command line. If the number of argument differs from what is expected, call `usage` function. 17 | OPT=`getopt -o h -l help -- $*` 18 | eval set -- $OPT 19 | while [ -n "$1" ] ; do 20 | case $1 in 21 | -h|--help) usage ;; 22 | --) shift; break;; 23 | *) echo "Unknown option($1)"; usage;; 24 | esac 25 | done 26 | 27 | if ! command -v lsb_release &> /dev/null 28 | then 29 | sudo apt-get install lsb-release 30 | fi 31 | 32 | version=`lsb_release -sc` 33 | echo "" 34 | echo "INSTALLING ROS --------------------------------" 35 | echo "" 36 | echo "Checking the Ubuntu version" 37 | case $version in 38 | "trusty" | "xenial" | "bionic" | "focal") 39 | ;; 40 | *) 41 | echo "ERROR: This script will only work on Trusty / Xenial / Bionic / Focal. Exit." 42 | exit 0 43 | esac 44 | 45 | case $version in 46 | "trusty") 47 | ROS_DISTRO="indigo" 48 | ;; 49 | "xenial") 50 | ROS_DISTRO="kinetic" 51 | ;; 52 | "bionic") 53 | ROS_DISTRO="melodic" 54 | ;; 55 | "focal") 56 | ROS_DISTRO="noetic" 57 | ;; 58 | esac 59 | 60 | relesenum=`grep DISTRIB_DESCRIPTION /etc/*-release | awk -F 'Ubuntu ' '{print $2}' | awk -F ' LTS' '{print $1}'` 61 | if [ "$relesenum" = "14.04.2" ] 62 | then 63 | echo "Your ubuntu version is $relesenum" 64 | echo "Install the libgl1-mesa-dev-lts-utopic package to solve the dependency issues for the ROS installation specifically on $relesenum" 65 | sudo apt-get install -y libgl1-mesa-dev-lts-utopic 66 | else 67 | echo "Your ubuntu version is $relesenum" 68 | fi 69 | 70 | echo "Add the ROS repository" 71 | if [ ! -e /etc/apt/sources.list.d/ros-latest.list ]; then 72 | sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list' 73 | fi 74 | 75 | echo "Download the ROS keys" 76 | sudo apt-key adv --keyserver 'hkp://keyserver.ubuntu.com:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654 77 | 78 | sudo apt update 79 | 80 | echo "Installing ROS $ROS_DISTRO" 81 | 82 | # Support for Python 3 in Noetic 83 | if [ "$ROS_DISTRO" = "noetic" ] 84 | then 85 | sudo apt install -y \ 86 | python3-rosdep \ 87 | python3-rosinstall \ 88 | python3-bloom \ 89 | python3-rosclean \ 90 | python3-wstool \ 91 | python3-pip \ 92 | python3-catkin-lint \ 93 | python3-catkin-tools \ 94 | python3-rosinstall \ 95 | ros-$ROS_DISTRO-desktop-full 96 | else 97 | sudo apt install -y \ 98 | python-rosdep \ 99 | python-rosinstall \ 100 | python-bloom \ 101 | python-rosclean \ 102 | python-wstool \ 103 | python-pip \ 104 | python-catkin-lint \ 105 | python-catkin-tools \ 106 | python-rosinstall \ 107 | ros-$ROS_DISTRO-desktop-full 108 | fi 109 | 110 | # Only init if it has not already been done before 111 | if [ ! -e /etc/ros/rosdep/sources.list.d/20-default.list ]; then 112 | sudo rosdep init 113 | fi 114 | rosdep update 115 | 116 | if ! command -v roscore &> /dev/null 117 | then 118 | echo "source /opt/ros/$ROS_DISTRO/setup.bash" >> ~/.bashrc 119 | fi 120 | 121 | echo "Done installing ROS" 122 | 123 | exit 0 124 | -------------------------------------------------------------------------------- /host_install_scripts/widowx_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | wget -q https://raw.githubusercontent.com/Interbotix/interbotix_ros_core/main/interbotix_ros_xseries/interbotix_xs_sdk/99-interbotix-udev.rules -O /tmp/99-interbotix-udev.rules || (echo "interbotix-udev.rules download failed" && exit 1) 4 | sudo cp /tmp/99-interbotix-udev.rules /etc/udev/rules.d/ 5 | sudo udevadm control --reload-rules && sudo udevadm trigger 6 | -------------------------------------------------------------------------------- /widowx_envs/.Dockerfile.swn: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rail-berkeley/bridge_data_robot/b841131ecd512bafb303075bd8f8b677e0bf9f1f/widowx_envs/.Dockerfile.swn -------------------------------------------------------------------------------- /widowx_envs/.Dockerfile.swo: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rail-berkeley/bridge_data_robot/b841131ecd512bafb303075bd8f8b677e0bf9f1f/widowx_envs/.Dockerfile.swo -------------------------------------------------------------------------------- /widowx_envs/.Dockerfile.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rail-berkeley/bridge_data_robot/b841131ecd512bafb303075bd8f8b677e0bf9f1f/widowx_envs/.Dockerfile.swp -------------------------------------------------------------------------------- /widowx_envs/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 2 | 3 | ENV LANG=C.UTF-8 LC_ALL=C.UTF-8 TZ=America/Los_Angeles 4 | ARG USER_ID=robonet 5 | ARG DEBIAN_FRONTEND=noninteractive 6 | 7 | SHELL ["/bin/bash", "-c"] 8 | 9 | RUN apt-get update -y && apt-get install -y --no-install-recommends \ 10 | build-essential \ 11 | git \ 12 | python3-pip \ 13 | python3-dev \ 14 | vim \ 15 | wget \ 16 | curl \ 17 | lsb-release \ 18 | sudo \ 19 | android-tools-adb \ 20 | libglew-dev \ 21 | patchelf \ 22 | libosmesa6-dev \ 23 | python3-venv \ 24 | python3-cffi \ 25 | v4l-utils \ 26 | keyboard-configuration \ 27 | tzdata \ 28 | unzip \ 29 | ffmpeg \ 30 | git-lfs \ 31 | && apt-get clean \ 32 | && rm -rf /var/lib/apt/lists/* 33 | 34 | 35 | # Installs ROS 36 | RUN sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list' 37 | RUN apt-key adv --keyserver 'hkp://keyserver.ubuntu.com:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654 38 | RUN apt-get update -y && apt-get install -y \ 39 | ros-noetic-ros-base \ 40 | ros-noetic-realsense2-camera \ 41 | ros-noetic-video-stream-opencv \ 42 | && apt-get clean \ 43 | && rm -rf /var/lib/apt/lists/* 44 | 45 | # based on https://askubuntu.com/a/1168971: enable running sudo commands as $USER_ID 46 | RUN adduser --disabled-password --gecos '' ${USER_ID} 47 | RUN adduser ${USER_ID} sudo 48 | RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers 49 | 50 | # from now on run build commands as user $USER_ID 51 | USER ${USER_ID} 52 | 53 | # install interbotic_ros_arms repo 54 | RUN curl 'https://raw.githubusercontent.com/Interbotix/interbotix_ros_manipulators/834edcbe6b955497d10ebc20ea9242c3f61e8fd1/interbotix_ros_xsarms/install/amd64/xsarm_amd64_install.sh' \ 55 | > /tmp/xsarm_amd64_install.sh \ 56 | && chmod +x /tmp/xsarm_amd64_install.sh \ 57 | && echo "n" | /tmp/xsarm_amd64_install.sh \ 58 | && rm /tmp/xsarm_amd64_install.sh 59 | ENV PYTHONPATH="${PYTHONPATH}:~/interbotix_ws/src/interbotix_ros_toolboxes/interbotix_xs_toolbox" 60 | 61 | # install python dependencies 62 | COPY requirements.txt /tmp/requirements.txt 63 | RUN source /opt/ros/noetic/setup.bash && source ~/interbotix_ws/devel/setup.bash && python3 -m venv --system-site-packages ~/myenv 64 | RUN source ~/myenv/bin/activate && pip install wheel && pip install -r /tmp/requirements.txt 65 | 66 | # add widowx_envs code for build step 67 | COPY . /home/${USER_ID}/widowx_envs 68 | RUN ln -s ~/widowx_envs ~/interbotix_ws/src/ 69 | ENV PYTHONPATH="${PYTHONPATH}:/home/${USER_ID}/interbotix_ws/src/widowx_envs" 70 | 71 | ENV ROBONETV2_ARM=wx250s 72 | ENV DATA=/home/robonet/trainingdata 73 | ENV EXP=/home/robonet/experiments 74 | ENV REALSENSE_SERIAL=920312072629 75 | 76 | # build interbox ros packages 77 | RUN source /opt/ros/noetic/setup.bash \ 78 | && source ~/interbotix_ws/devel/setup.bash \ 79 | && cd ~/interbotix_ws \ 80 | && catkin_make \ 81 | && touch ~/.built 82 | 83 | # add utility functions 84 | RUN sudo ln -s ~/widowx_envs/scripts/go_to_sleep_pose.py /usr/local/bin/go_sleep 85 | RUN sudo ln -s ~/widowx_envs/scripts/go_to_neutral_pose.py /usr/local/bin/go_neutral 86 | 87 | RUN echo 'source ~/myenv/bin/activate' >> ~/.bashrc 88 | RUN echo 'source /opt/ros/noetic/setup.bash' >> ~/.bashrc 89 | RUN echo 'source ~/interbotix_ws/devel/setup.bash' >> ~/.bashrc 90 | 91 | WORKDIR /home/${USER_ID} 92 | 93 | # Clone and install the edgeml library 94 | RUN git clone https://github.com/youliangtan/edgeml && \ 95 | cd edgeml && \ 96 | pip3 install -e . 97 | 98 | # Create alias to python3 widowx_envs/widowx_envs/widowx_env_service.py 99 | # This is a workaround for pip3 install -e . 100 | RUN echo 'alias widowx_env_service="python3 ~/widowx_envs/widowx_envs/widowx_env_service.py"' >> ~/.bashrc 101 | -------------------------------------------------------------------------------- /widowx_envs/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Robotics and AI Lab @ BAIR 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /widowx_envs/README.md: -------------------------------------------------------------------------------- 1 | # widowx_envs 2 | 3 | ## Directory Structure 4 | 5 | - experiments: Contains the configuration files for the experiments 6 | - multicam_server: rospkg which handles multi camera streaming setup 7 | - scripts: high-level scritps/entrypoints to run widowx_envs 8 | - widowx_controller: Low-level scripts that handles communication with robot and VR controller 9 | - widowx_envs: Contains the gym environment for the robot arm 10 | 11 | ## Supported robots 12 | (https://github.com/Interbotix/interbotix_ros_manipulators/tree/main/interbotix_ros_xsarms): 13 | - PincherX 100 Robot Arm 14 | - PincherX 150 Robot Arm 15 | - ReactorX 150 Robot Arm 16 | - ReactorX 200 Robot Arm 17 | - WidowX 200 Robot Arm 18 | - WidowX 250 Robot Arm 19 | - WidowX 250 Robot Arm 6DOF 20 | - ViperX 250 Robot Arm 21 | - ViperX 300 Robot Arm 22 | - ViperX 300 Robot Arm 6DOF 23 | - PincherX 100 Mobile Robot Arm 24 | - WidowX 200 Mobile Robot Arm 25 | - WidowX 250 Mobile Robot Arm 6DOF 26 | -------------------------------------------------------------------------------- /widowx_envs/experiments/bridge_data_v2/collection_metadata.json: -------------------------------------------------------------------------------- 1 | {} 2 | -------------------------------------------------------------------------------- /widowx_envs/experiments/bridge_data_v2/conf.py: -------------------------------------------------------------------------------- 1 | """ Hyperparameters for Large Scale Data Collection """ 2 | import os.path 3 | 4 | BASE_DIR = '/'.join(str.split(__file__, '/')[:-1]) 5 | current_dir = os.path.dirname(os.path.realpath(__file__)) 6 | 7 | from multicam_server.topic_utils import IMTopic 8 | from widowx_envs.widowx_env import VR_WidowX 9 | from widowx_envs.control_loops import TimedLoop 10 | from widowx_envs.policies.vr_teleop_policy import VRTeleopPolicy 11 | 12 | env_params = { 13 | 'camera_topics': [IMTopic('/D435/color/image_raw'), 14 | #IMTopic('/yellow/image_raw'), 15 | IMTopic('/blue/image_raw'), 16 | IMTopic('/wrist/image_raw') 17 | ], 18 | 'depth_camera_topics': [IMTopic('/D435/depth/image_rect_raw', dtype='16UC1')], 19 | 'gripper_attached': 'custom', 20 | 'skip_move_to_neutral': True, 21 | 'move_to_rand_start_freq': -1, 22 | 'fix_zangle': 0.1, 23 | 'move_duration': 0.2, 24 | 'adaptive_wait': True, 25 | 'action_clipping': None 26 | } 27 | 28 | agent = { 29 | 'type': TimedLoop, 30 | 'env': (VR_WidowX, env_params), 31 | 'recreate_env': (False, 1), 32 | 'T': 500, 33 | 'image_height': 480, 34 | 'image_width': 640, 35 | 'make_final_gif': False, 36 | 'video_format': 'mp4', 37 | } 38 | 39 | policy = { 40 | 'type': VRTeleopPolicy, 41 | } 42 | 43 | config = { 44 | 'current_dir' : current_dir, 45 | 'collection_metadata' : current_dir + '/collection_metadata.json', 46 | 'start_index':0, 47 | 'end_index': 500, 48 | 'agent': agent, 49 | 'policy': policy, 50 | 'save_format': ['raw'], 51 | 'make_diagnostics': False, 52 | 'record_floor_height': False 53 | } 54 | -------------------------------------------------------------------------------- /widowx_envs/multicam_server/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | project(multicam_server) 3 | 4 | catkin_python_setup() 5 | 6 | ## Find catkin macros and libraries 7 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 8 | ## is used, also find other catkin packages 9 | find_package(catkin REQUIRED COMPONENTS 10 | rospy 11 | std_msgs 12 | message_generation 13 | ) 14 | 15 | catkin_package() 16 | 17 | ## Specify additional locations of header files 18 | ## Your package locations should be listed before other locations 19 | include_directories( 20 | # include 21 | ${catkin_INCLUDE_DIRS} 22 | ) 23 | 24 | catkin_install_python(PROGRAMS # add executable python scripts here 25 | DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 26 | ) 27 | -------------------------------------------------------------------------------- /widowx_envs/multicam_server/launch/cameras.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | -------------------------------------------------------------------------------- /widowx_envs/multicam_server/launch/streamer.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | -------------------------------------------------------------------------------- /widowx_envs/multicam_server/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | multicam_server 4 | 0.0.0 5 | The multicam_server package 6 | 7 | 8 | 9 | 10 | jonathan 11 | 12 | 13 | 14 | 15 | 16 | TODO 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | message_generation 41 | 42 | 43 | 44 | 45 | 46 | message_runtime 47 | video_stream_opencv 48 | 49 | 50 | 51 | 52 | catkin 53 | rospy 54 | std_msgs 55 | rospy 56 | std_msgs 57 | rospy 58 | std_msgs 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | -------------------------------------------------------------------------------- /widowx_envs/multicam_server/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from catkin_pkg.python_setup import generate_distutils_setup 3 | 4 | # fetch values from package.xml 5 | setup_args = generate_distutils_setup( 6 | packages=['multicam_server'], 7 | package_dir={'': 'src'}) 8 | 9 | setup(**setup_args) 10 | -------------------------------------------------------------------------------- /widowx_envs/multicam_server/src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rail-berkeley/bridge_data_robot/b841131ecd512bafb303075bd8f8b677e0bf9f1f/widowx_envs/multicam_server/src/__init__.py -------------------------------------------------------------------------------- /widowx_envs/multicam_server/src/multicam_server/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rail-berkeley/bridge_data_robot/b841131ecd512bafb303075bd8f8b677e0bf9f1f/widowx_envs/multicam_server/src/multicam_server/__init__.py -------------------------------------------------------------------------------- /widowx_envs/multicam_server/src/multicam_server/camera_recorder.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import rospy 3 | import os 4 | from threading import Lock, Semaphore 5 | from cv_bridge import CvBridge 6 | import cv2 7 | from sensor_msgs.msg import Image as Image_msg 8 | from sensor_msgs.msg import CameraInfo 9 | import copy 10 | import hashlib 11 | import logging 12 | 13 | 14 | class LatestObservation(object): 15 | def __init__(self, create_tracker=False, save_buffer=False): 16 | self.img_cv2 = None 17 | self.tstamp_img = None 18 | self.img_msg = None 19 | self.mutex = Lock() 20 | if save_buffer: 21 | self.reset_saver() 22 | if create_tracker: 23 | self.reset_tracker() 24 | 25 | def reset_tracker(self): 26 | self.cv2_tracker = cv2.TrackerMIL_create() 27 | self.bbox = None 28 | self.track_itr = 0 29 | 30 | def reset_saver(self): 31 | self.save_itr = 0 32 | 33 | 34 | class CameraRecorder: 35 | TRACK_SKIP = 2 # the camera publisher works at 60 FPS but camera itself only goes at 30 36 | MAX_REPEATS = 100 # camera errors after 10 repeated frames in a row 37 | 38 | def __init__(self, topic_data, opencv_tracking=False, save_videos=False): 39 | """ 40 | :param topic_data: 41 | :param opencv_tracking: 42 | :param save_videos: 43 | :param server_id: if not None run in server mode. If the inference code requires python3 the camera server 44 | can still be launched in python2. 45 | """ 46 | self._tracking_enabled, self._save_vides = opencv_tracking, save_videos 47 | 48 | self._latest_image = LatestObservation(self._tracking_enabled, self._save_vides) 49 | 50 | self._is_tracking = False 51 | if self._tracking_enabled: 52 | self.box_height = 80 53 | 54 | self.bridge = CvBridge() 55 | self._is_first_status, self._status_sem = True, Semaphore(value=0) 56 | self._cam_height, self._cam_width = None, None 57 | self._last_hash, self._num_repeats = None, 0 58 | self._last_hash_get_image = None 59 | if self._save_vides: 60 | self._buffers = [] 61 | self._saving = False 62 | 63 | self._topic_data = topic_data 64 | self._image_dtype = topic_data.dtype 65 | self._topic_name = topic_data.name 66 | 67 | if topic_data.info_name is not None: 68 | self._camera_info_name = topic_data.info_name 69 | else: 70 | # Trys to guess the correct camera info name based on ROS conventions. 71 | # If this code is hanging, manually set the info_name in the conf file when defining the IMTopic 72 | self._camera_info_name= os.path.join(os.path.split(self._topic_name)[0], "camera_info") 73 | print("Trying to read camera info from ", self._camera_info_name) 74 | 75 | self._camera_info = rospy.wait_for_message(self._camera_info_name, CameraInfo) 76 | 77 | print("Successfully read camera info") 78 | 79 | 80 | rospy.Subscriber(topic_data.name, Image_msg, self.store_latest_im) 81 | logger = logging.getLogger('robot_logger') 82 | logger.debug('downing sema on topic: {}'.format(topic_data.name)) 83 | success = self._status_sem.acquire(timeout=5) 84 | if not success: 85 | print('Still waiting for an image to arrive at CameraRecorder... Topic name:', self._topic_name) 86 | self._status_sem.acquire() 87 | logger.info("Cameras {} subscribed: stream is {}x{}".format(self._topic_data.name, self._cam_width, self._cam_height)) 88 | 89 | def _cam_start_tracking(self, lt_ob, point): 90 | lt_ob.reset_tracker() 91 | lt_ob.bbox = np.array([int(point[1] - self.box_height / 2.), 92 | int(point[0] - self.box_height / 2.), 93 | self.box_height, self.box_height]).astype(np.int64) 94 | 95 | lt_ob.cv2_tracker.init(lt_ob.img_cv2, tuple(lt_ob.bbox)) 96 | lt_ob.track_itr = 0 97 | 98 | def start_tracking(self, start_points): 99 | assert self._tracking_enabled 100 | n_desig, xy_dim = start_points.shape 101 | if n_desig != 1: 102 | raise NotImplementedError("opencv_tracking requires 1 designated pixel") 103 | if xy_dim != 2: 104 | raise ValueError("Requires XY pixel location") 105 | 106 | self._latest_image.mutex.acquire() 107 | self._cam_start_tracking(self._latest_image, start_points[0]) 108 | self._is_tracking = True 109 | self._latest_image.mutex.release() 110 | rospy.sleep(2) # sleep a bit for first few messages to initialize tracker 111 | 112 | logging.getLogger('robot_logger').info("TRACKING INITIALIZED") 113 | 114 | def end_tracking(self): 115 | self._latest_image.mutex.acquire() 116 | self._is_tracking = False 117 | self._latest_image.reset_tracker() 118 | self._latest_image.mutex.release() 119 | 120 | def _bbox2point(self, bbox): 121 | point = np.array([int(bbox[1]), int(bbox[0])]) \ 122 | + np.array([self.box_height / 2, self.box_height / 2]) 123 | return point.astype(np.int32) 124 | 125 | def get_track(self): 126 | assert self._tracking_enabled, "OPENCV TRACKING IS NOT ENABLED" 127 | assert self._is_tracking, "RECORDER IS NOT TRACKING" 128 | 129 | points = np.zeros((1, 2), dtype=np.int64) 130 | self._latest_image.mutex.acquire() 131 | points[0] = self._bbox2point(self._latest_image.bbox) 132 | self._latest_image.mutex.release() 133 | 134 | return points.astype(np.int64) 135 | 136 | def get_image(self, arg=None): 137 | self._latest_image.mutex.acquire() 138 | time_stamp, img_cv2 = self._latest_image.tstamp_img, self._latest_image.img_cv2 139 | current_hash = hashlib.sha256(img_cv2.tostring()).hexdigest() 140 | if self._last_hash_get_image is not None: 141 | if current_hash == self._last_hash_get_image: 142 | # logging.getLogger('robot_logger').error('Repeated images, camera queried too fast!') 143 | # rospy.signal_shutdown('shutting down.') 144 | print('repated images for get_image method!') 145 | self._last_hash_get_image = current_hash 146 | self._latest_image.mutex.release() 147 | return time_stamp, img_cv2 148 | 149 | def start_recording(self, reset_buffer=False): 150 | assert self._save_vides, "Video saving not enabled!" 151 | 152 | self._latest_image.mutex.acquire() 153 | if reset_buffer: 154 | self.reset_recording() 155 | self._saving = True 156 | self._latest_image.mutex.release() 157 | 158 | def stop_recording(self): 159 | assert self._save_vides, "Video saving not enabled!" 160 | self._latest_image.mutex.acquire() 161 | self._saving = False 162 | self._latest_image.mutex.release() 163 | 164 | def reset_recording(self): 165 | assert self._save_vides, "Video saving not enabled!" 166 | assert not self._saving, "Can't reset while saving (run stop_recording first)" 167 | 168 | old_buffers = self._buffers 169 | self._buffers = [] 170 | self._latest_image.reset_saver() 171 | return old_buffers 172 | 173 | def _proc_image(self, latest_obsv, data): 174 | latest_obsv.img_msg = data 175 | latest_obsv.tstamp_img = data.header.stamp 176 | 177 | cv_image = self.bridge.imgmsg_to_cv2(data, self._image_dtype) 178 | if len(cv_image.shape) > 2 and cv_image.shape[2] > 3: 179 | cv_image = cv_image[:, :, :3] 180 | latest_obsv.img_cv2 = copy.deepcopy(self._topic_data.process_image(cv_image)) 181 | 182 | if self._tracking_enabled and self._is_tracking: 183 | if latest_obsv.track_itr % self.TRACK_SKIP == 0: 184 | _, bbox = latest_obsv.cv2_tracker.update(latest_obsv.img_cv2) 185 | latest_obsv.bbox = np.array(bbox).astype(np.int32).reshape(-1) 186 | latest_obsv.track_itr += 1 187 | 188 | def store_latest_im(self, data): 189 | self._latest_image.mutex.acquire() 190 | # if self._latest_image.tstamp_img is not None: 191 | # print('{} delta t image {}'.format(self._topic_name, rospy.get_time() - self._latest_image.tstamp_img)) 192 | 193 | t0 = rospy.get_time() 194 | self._proc_image(self._latest_image, data) 195 | 196 | current_hash = hashlib.sha256(self._latest_image.img_cv2.tostring()).hexdigest() 197 | if self._is_first_status: 198 | self._cam_height, self._cam_width = self._latest_image.img_cv2.shape[:2] 199 | self._is_first_status = False 200 | self._status_sem.release() 201 | elif self._last_hash == current_hash: 202 | if self._num_repeats < self.MAX_REPEATS: 203 | self._num_repeats += 1 204 | else: 205 | logging.getLogger('robot_logger').error(f'Too many repeated images.\ 206 | Check camera with topic {self._topic_name}!') 207 | rospy.signal_shutdown('Too many repeated images. Check camera!') 208 | else: 209 | self._num_repeats = 0 210 | self._last_hash = current_hash 211 | 212 | if self._save_vides and self._saving: 213 | if self._latest_image.save_itr % self.TRACK_SKIP == 0: 214 | self._buffers.append(copy.deepcopy(self._latest_image.img_cv2)[:, :, ::-1]) 215 | self._latest_image.save_itr += 1 216 | self._latest_image.mutex.release() 217 | # print('time process image', rospy.get_time() - t0) 218 | 219 | @property 220 | def img_width(self): 221 | return self._cam_width 222 | 223 | @property 224 | def img_height(self): 225 | return self._cam_height 226 | 227 | @property 228 | def camera_info(self): 229 | return self._camera_info 230 | 231 | 232 | 233 | 234 | if __name__ == '__main__': 235 | from multicam_server.topic_utils import IMTopic 236 | rospy.init_node("camera_rec_test") 237 | imtopic = IMTopic('/cam1/image_raw') 238 | rec = CameraRecorder(imtopic) 239 | 240 | r = rospy.Rate(5) # 10hz 241 | start_time = rospy.get_time() 242 | for t in range(20): 243 | print('t{} before get image {}'.format(t, rospy.get_time() - start_time)) 244 | t0 = rospy.get_time() 245 | tstamp, im = rec.get_image() 246 | # print('get image took', rospy.get_time() - t0) 247 | 248 | im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB) 249 | 250 | t1 = rospy.get_time() 251 | cv2.imwrite(os.environ['EXP'] + '/test_image_t{}_{}.jpg'.format(t, rospy.get_time() - start_time), im) 252 | # print('save took ', rospy.get_time() - t1) 253 | 254 | r.sleep() 255 | -------------------------------------------------------------------------------- /widowx_envs/multicam_server/src/multicam_server/topic_utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import cv2 4 | from dataclasses import dataclass, asdict 5 | from typing import Any, Type 6 | 7 | 8 | @dataclass 9 | class IMTopic: 10 | """ 11 | Configuration for an image topic. 12 | """ 13 | name: str 14 | width: int = 640 15 | height: int = 480 16 | top: int = 0 17 | bot: int = 0 18 | right: int = 0 19 | left: int = 0 20 | dtype: str = "bgr8" 21 | flip: bool = False 22 | info_name: str = None 23 | 24 | def process_image(self, img): 25 | # Check for overcrop conditions 26 | assert self.bot + self.top <= img.shape[0], "Overcrop! bot + top crop >= image height!" 27 | assert self.right + self.left <= img.shape[1], "Overcrop! right + left crop >= image width!" 28 | 29 | # If bot or right is negative, set to value that crops the entire image 30 | bot = self.bot if self.bot > 0 else -(img.shape[0] + 10) 31 | right = self.right if self.right > 0 else -(img.shape[1] + 10) 32 | 33 | # Crop image 34 | img = img[self.top:-bot, self.left:-right] 35 | 36 | # Flip image if necessary 37 | if self.flip: 38 | img = img[::-1, ::-1] 39 | 40 | # Resize image if necessary 41 | if (self.height, self.width) != img.shape[:2]: 42 | return cv2.resize(img, (self.width, self.height), interpolation=cv2.INTER_AREA) 43 | return img 44 | 45 | @classmethod 46 | def from_dict(cls: Type[Any], data: dict) -> Any: 47 | return cls(**data) 48 | 49 | def to_dict(self): 50 | return asdict(self) 51 | -------------------------------------------------------------------------------- /widowx_envs/multicam_server/src/start_streamers.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | 3 | from streamer import Streamer 4 | import os 5 | import yaml 6 | import re 7 | import shutil 8 | import subprocess 9 | import rospy 10 | 11 | ############################################################################## 12 | 13 | def get_param(parameter_name): 14 | if not rospy.has_param(parameter_name): 15 | rospy.logerr('Parameter %s not provided. Exiting.', parameter_name) 16 | exit(1) 17 | return rospy.get_param(parameter_name) 18 | 19 | 20 | def load_connector_chart(): 21 | config_path = get_param("~camera_connector_chart") 22 | if not os.path.exists(config_path): 23 | rospy.logerr(f"The usb connector chart in path {config_path} does not exist. You can use the example usb_connector_chart_example.yml as a template.") 24 | rospy.logerr("you can find the usb outlets of the webcams by running $v4l2-ctl --list-devices") 25 | exit(1) 26 | return yaml.load(open(config_path, 'r'), Loader=yaml.CLoader) 27 | 28 | 29 | def get_dev(output_string, usb_id): 30 | lines = output_string.decode().split('\n') 31 | for i, line in enumerate(lines): 32 | if usb_id in line: 33 | return re.search('video(\d+)', lines[i + 1]).group(1) 34 | raise ValueError('usb_id {} not found!'.format(usb_id)) 35 | 36 | 37 | def reset_usb(reset_names): 38 | if shutil.which('usbreset') is None: 39 | current_dir = os.path.dirname(os.path.realpath(__file__)) 40 | res = subprocess.call(f'gcc {current_dir}/usbreset.c -o /usr/local/bin/usbreset') 41 | if not res == 0: 42 | rospy.logerr(f'usbreset install exit code: {res}') 43 | raise ValueError('could not install usbreset !') 44 | res = subprocess.run(['lsusb'], stdout=subprocess.PIPE) 45 | lines = res.stdout.decode().split('\n') 46 | for line in lines: 47 | for name in reset_names: 48 | if name in line: 49 | numbers = re.findall(r'(\d\d\d)', line)[:2] 50 | rospy.loginfo('resetting usb with lsusb line: {}'.format(line)) 51 | cmd = 'sudo usbreset /dev/bus/usb/{}/{}'.format(*numbers) 52 | res = subprocess.call(cmd.split()) 53 | if not res == 0: 54 | rospy.logerr(f'exit code: {res}') 55 | raise ValueError('could not reset !') 56 | 57 | 58 | def process_camera_connector_chart(): 59 | connector_chart_dict = load_connector_chart() 60 | 61 | res = subprocess.run(['v4l2-ctl', '--list-devices'], stdout=subprocess.PIPE) 62 | output_string = res.stdout 63 | 64 | # reset_names = set() 65 | # for key, value in connector_chart_dict.items(): 66 | # reset_names.add(value[1]) 67 | # rospy.loginfo(f"reseting lsusb names {reset_names}") 68 | # reset_usb(reset_names) 69 | 70 | providers = [] 71 | topic_names = [] 72 | for topic_name, usb_id in connector_chart_dict.items(): 73 | dev_number = get_dev(output_string, usb_id) 74 | providers.append(dev_number) 75 | topic_names.append(topic_name) 76 | return providers, topic_names 77 | 78 | 79 | def populate_params(): 80 | params = {} 81 | params['fps'] = get_param("~fps") 82 | params['frame_id'] = get_param("~frame_id") 83 | params['retry_on_fail'] = get_param("~retry_on_fail") 84 | params['buffer_queue_size'] = get_param("~buffer_queue_size") 85 | params['python_node'] = get_param("~python_node") 86 | return params 87 | 88 | ############################################################################## 89 | 90 | def main(): 91 | base_call = "roslaunch multicam_server streamer.launch" 92 | rospy.init_node('start_streamers', anonymous=True) # node name is provided in the roslaunch file 93 | topic_names = [] 94 | if get_param("~camera_connector_chart"): 95 | video_stream_providers, topic_names = process_camera_connector_chart() 96 | else: 97 | video_stream_provider = get_param("~video_stream_provider") 98 | parsed_video_stream_provider = eval(video_stream_provider) 99 | if isinstance(parsed_video_stream_provider, list): 100 | video_stream_providers = parsed_video_stream_provider 101 | elif isinstance(parsed_video_stream_provider, int): 102 | video_stream_providers = [parsed_video_stream_provider] 103 | else: 104 | rospy.logerr("Pass either list or an integer as video_stream_provider to video_stream_opencv node.") 105 | rospy.loginfo(f"Arguments provided: {video_stream_provider}") 106 | for i in range(len(video_stream_providers)): 107 | topic_names.append(f'camera{i}') 108 | 109 | processes = [] 110 | for index, [video_stream_provider, topic_name] in enumerate(zip(video_stream_providers, topic_names)): 111 | full_params = {'video_stream_provider': video_stream_provider, 'camera_name': topic_name, 'node_name': f'streamer_{index}'} 112 | full_params.update(populate_params()) 113 | appended_string = '' 114 | for key, val in full_params.items(): 115 | appended_string += key + ':=' + str(val) + ' ' 116 | proc = subprocess.Popen((base_call + ' ' + appended_string).split()) 117 | processes.append(proc) 118 | while not rospy.is_shutdown(): 119 | rospy.sleep(0.1) 120 | for proc in processes: 121 | proc.kill() 122 | proc.communicate() 123 | 124 | if __name__ == '__main__': 125 | main() 126 | -------------------------------------------------------------------------------- /widowx_envs/multicam_server/src/streamer.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/env python3 2 | 3 | import cv2 4 | import rospy 5 | import threading 6 | import os 7 | from sensor_msgs.msg import Image 8 | from cv_bridge import CvBridge 9 | import copy 10 | 11 | ############################################################################## 12 | 13 | class Streamer: 14 | def __init__(self): 15 | self.parse_rosparam() 16 | self.full_resource_path = "/dev/video" + str(self._video_stream_provider) 17 | success = self.setup_capture_device() 18 | if not success: 19 | return 20 | self.publisher = rospy.Publisher(self._topic_name + '/image_raw', Image, queue_size=1) 21 | self._buffer = [] 22 | self.bridge = CvBridge() 23 | self._lock = threading.Lock() 24 | self.start_capture() 25 | self.start_publishing() 26 | 27 | def setup_capture_device(self): 28 | success = False 29 | if not os.path.exists(self.full_resource_path): 30 | rospy.logerr("Device '%s' does not exist.", self.full_resource_path) 31 | return success 32 | rospy.loginfo("Trying to open resource: '%s' for topic '%s'", self.full_resource_path, self._topic_name) 33 | self.cap = cv2.VideoCapture(self.full_resource_path) 34 | if not self.cap.isOpened(): 35 | rospy.logerr(f"Error opening resource: {self.full_resource_path}") 36 | rospy.loginfo("opencv VideoCapture can't open it.") 37 | rospy.loginfo("The device '%s' is possibly in use. You could try reconnecting the camera.", self.full_resource_path) 38 | if self.cap.isOpened(): 39 | rospy.loginfo(f"Correctly opened resource {self.full_resource_path}.") 40 | success = True 41 | return success 42 | 43 | @staticmethod 44 | def get_param(parameter_name): 45 | if not rospy.has_param(parameter_name): 46 | rospy.logerr('Parameter %s not provided. Exiting.', parameter_name) 47 | exit(1) 48 | return rospy.get_param(parameter_name) 49 | 50 | def parse_rosparam(self): 51 | self._fps = self.get_param("~fps") 52 | self._frame_id = self.get_param("~frame_id") 53 | self._retry_on_fail = self.get_param("~retry_on_fail") 54 | self._buffer_queue_size = self.get_param("~buffer_queue_size") 55 | self._topic_name = self.get_param("~camera_name") 56 | self._video_stream_provider = self.get_param("~video_stream_provider") 57 | 58 | def start_capture(self): 59 | self._capture_thread = threading.Thread(target=self.capture) 60 | self._capture_thread.start() 61 | 62 | def capture(self): 63 | # running at full speed the camera allows 64 | while not rospy.is_shutdown(): 65 | rval, frame = self.cap.read() 66 | if not rval: 67 | rospy.logwarn(f"The frame has not been captured. You could try reconnecting the camera resource {self.full_resource_path}.") 68 | rospy.sleep(3) 69 | if self._retry_on_fail: 70 | rospy.loginfo(f"Searching for the device {self.full_resource_path}...") 71 | self.setup_capture_device(exit_on_error=False) 72 | else: 73 | reading = [frame, rospy.Time()] 74 | with self._lock: 75 | while(len(self._buffer) > self._buffer_queue_size): 76 | self._buffer.pop(0) 77 | self._buffer.append(reading) 78 | 79 | def publish_image(self, reading): 80 | img = reading[0] 81 | time = reading[1] 82 | imgmsg = self.bridge.cv2_to_imgmsg(img, 'bgr8') 83 | imgmsg.header.frame_id = self._frame_id 84 | imgmsg.header.stamp = time 85 | self.publisher.publish(imgmsg) 86 | 87 | def start_publishing(self): 88 | self._publishing_thread = threading.Thread(target=self.publishing) 89 | self._publishing_thread.start() 90 | 91 | def publishing(self): 92 | rate = rospy.Rate(self._fps) 93 | while not rospy.is_shutdown(): 94 | reading = None 95 | with self._lock: 96 | if self._buffer: 97 | reading = self._buffer[0] 98 | self._buffer.pop(0) 99 | if reading is not None: 100 | self.publish_image(reading) 101 | rate.sleep() 102 | 103 | def main(): 104 | rospy.init_node('streamer', anonymous=True) 105 | streamer = Streamer() 106 | rospy.spin() 107 | 108 | ############################################################################## 109 | 110 | if __name__ == '__main__': 111 | main() -------------------------------------------------------------------------------- /widowx_envs/multicam_server/src/usb_connector_chart_example.yml: -------------------------------------------------------------------------------- 1 | #rename this file to "usb_connector_chart_user.yml" 2 | cam0: 'usb-0000:00:14.0-10.2' 3 | cam1: 'usb-0000:00:14.0-10.4' 4 | cam2: 'usb-0000:00:14.0-9.3' 5 | cam3: 'usb-0000:08:00.0-2' 6 | cam4: "usb-0000:00:14.0-9.2" -------------------------------------------------------------------------------- /widowx_envs/multicam_server/src/usbreset.c: -------------------------------------------------------------------------------- 1 | /* usbreset -- send a USB port reset to a USB device */ 2 | 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | 9 | #include 10 | 11 | 12 | int main(int argc, char **argv) 13 | { 14 | const char *filename; 15 | int fd; 16 | int rc; 17 | 18 | if (argc != 2) { 19 | fprintf(stderr, "Usage: usbreset device-filename\n"); 20 | return 1; 21 | } 22 | filename = argv[1]; 23 | 24 | fd = open(filename, O_WRONLY); 25 | if (fd < 0) { 26 | perror("Error opening output file"); 27 | return 1; 28 | } 29 | 30 | printf("Resetting USB device %s\n", filename); 31 | rc = ioctl(fd, USBDEVFS_RESET, 0); 32 | if (rc < 0) { 33 | perror("Error in ioctl"); 34 | return 1; 35 | } 36 | printf("Reset successful\n"); 37 | 38 | close(fd); 39 | return 0; 40 | } -------------------------------------------------------------------------------- /widowx_envs/requirements.txt: -------------------------------------------------------------------------------- 1 | pillow 2 | protobuf 3 | funcsigs 4 | future 5 | imageio 6 | imageio-ffmpeg<0.5.0 7 | matplotlib 8 | moviepy 9 | opencv-python 10 | numpy>=1.23.4 11 | pyquaternion 12 | scikit-learn 13 | scikit-image 14 | scipy 15 | six 16 | requests 17 | nvidia_smi 18 | rospkg 19 | modern_robotics==1.1.1 20 | gym 21 | tqdm 22 | transformations 23 | ipdb 24 | joblib 25 | pickle5 26 | h5py 27 | funcsigs 28 | numba 29 | git+https://github.com/rail-berkeley/oculus_reader.git 30 | -------------------------------------------------------------------------------- /widowx_envs/scripts/go_to_neutral_pose.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | if __name__ == '__main__': 4 | from widowx_envs.widowx_env import StateReachingWidowX 5 | env = StateReachingWidowX() 6 | env.move_to_neutral() 7 | -------------------------------------------------------------------------------- /widowx_envs/scripts/go_to_sleep_pose.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | if __name__ == '__main__': 4 | from widowx_envs.widowx_env import StateReachingWidowX 5 | env = StateReachingWidowX() 6 | env.move_to_neutral() 7 | env._controller.bot.arm.go_to_sleep_pose() 8 | -------------------------------------------------------------------------------- /widowx_envs/scripts/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | bash $(dirname "$0")/setup.sh || exit 1 4 | 5 | python_node_string='python_node:=false' 6 | camera_string='realsense:=true' 7 | 8 | source /opt/ros/noetic/setup.bash 9 | source ~/interbotix_ws/devel/setup.bash 10 | source ~/myenv/bin/activate 11 | 12 | # using 'exec' here is very important because roslaunch needs to do some cleanup after it exits 13 | # so when the container is killed the SIGTERM needs to be passed to roslaunch 14 | exec roslaunch widowx_controller widowx_rs.launch \ 15 | ${video_stream_provider_string} camera_connector_chart:=/tmp/camera_connector_chart \ 16 | serial_no_camera1:=${REALSENSE_SERIAL} \ 17 | python_node:=false realsense:=true 18 | -------------------------------------------------------------------------------- /widowx_envs/scripts/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | if [[ -z "${ROBONETV2_ARM}" ]]; then 4 | echo 'Env variable "ROBONETV2_ARM" is not set. Please define it based on https://github.com/Interbotix/interbotix_ros_manipulators/tree/main/interbotix_ros_xsarms' 5 | echo 'For instance in case of WidowX 250 Robot Arm 6DOF use:' 6 | echo 'echo "export ROBONETV2_ARM=wx250s" >> ~/.bashrc && source ~/.bashrc' 7 | exit 1 8 | fi 9 | 10 | cd 11 | if [ ! -f ".built" ]; then 12 | cd ~/interbotix_ws && catkin_make && touch ~/.built 13 | fi 14 | -------------------------------------------------------------------------------- /widowx_envs/setup.py: -------------------------------------------------------------------------------- 1 | import setuptools 2 | 3 | setuptools.setup( 4 | name='widowx_envs', 5 | version='0.0.1', 6 | packages=setuptools.find_packages(), 7 | license='MIT License', 8 | long_description=open('README.md').read(), 9 | entry_points={ 10 | 'console_scripts': [ 11 | 'widowx_env_service = widowx_envs.widowx_env_service:main', 12 | ], 13 | }, 14 | ) 15 | -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/CMakeLists.txt: -------------------------------------------------------------------------------- 1 | cmake_minimum_required(VERSION 2.8.3) 2 | project(widowx_controller) 3 | 4 | ## Compile as C++11, supported in ROS Kinetic and newer 5 | # add_compile_options(-std=c++11) 6 | 7 | catkin_python_setup() 8 | 9 | ## Find catkin macros and libraries 10 | ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) 11 | ## is used, also find other catkin packages 12 | find_package(catkin REQUIRED COMPONENTS 13 | rospy 14 | std_msgs 15 | message_generation 16 | ) 17 | 18 | ## Generate services in the 'srv' folder 19 | add_service_files( 20 | FILES 21 | GotoNeutral.srv 22 | OpenGripper.srv 23 | MoveToEEP.srv 24 | MoveToState.srv 25 | GetGripperDesiredState.srv 26 | GetCartesianPose.srv 27 | GetState.srv 28 | GetVRButtons.srv 29 | EnableController.srv 30 | DisableController.srv 31 | SetGripperPosition.srv 32 | ) 33 | 34 | ## Generate added messages and services with any dependencies listed here 35 | generate_messages( 36 | DEPENDENCIES 37 | std_msgs 38 | ) 39 | 40 | catkin_package() 41 | 42 | include_directories( 43 | ${catkin_INCLUDE_DIRS} 44 | ) 45 | 46 | catkin_install_python(PROGRAMS # add executable python files here 47 | DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} 48 | ) 49 | -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/launch/launch.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/launch/widowx_rs.launch: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/package.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | widowx_controller 4 | 0.0.0 5 | Package for controlling WidowX robots 6 | 7 | 8 | 9 | 10 | jonathan 11 | 12 | 13 | 14 | 15 | 16 | TODO 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | message_generation 39 | 40 | 41 | 42 | 43 | 44 | message_runtime 45 | 46 | 47 | 48 | 49 | catkin 50 | rospy 51 | std_msgs 52 | rospy 53 | std_msgs 54 | rospy 55 | std_msgs 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from catkin_pkg.python_setup import generate_distutils_setup 3 | 4 | # fetch values from package.xml 5 | setup_args = generate_distutils_setup( 6 | packages=['widowx_controller'], 7 | package_dir={'': 'src'}) 8 | 9 | setup(**setup_args) 10 | -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/src/widowx_controller/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rail-berkeley/bridge_data_robot/b841131ecd512bafb303075bd8f8b677e0bf9f1f/widowx_envs/widowx_controller/src/widowx_controller/__init__.py -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/src/widowx_controller/controller_base.py: -------------------------------------------------------------------------------- 1 | # /usr/bin/env python3 2 | 3 | from abc import ABC, abstractmethod 4 | from typing import Optional 5 | 6 | ############################################################################## 7 | 8 | class GripperControllerBase(ABC): 9 | 10 | @abstractmethod 11 | def __init__(self, robot_name): 12 | pass 13 | 14 | @property 15 | def des_pos(self) -> Optional[float]: 16 | return None 17 | 18 | @des_pos.setter 19 | @abstractmethod 20 | def des_pos(self, value): 21 | pass 22 | 23 | @abstractmethod 24 | def get_gripper_pos(self): 25 | pass 26 | 27 | @abstractmethod 28 | def open(self): 29 | pass 30 | 31 | @abstractmethod 32 | def close(self): 33 | pass 34 | 35 | @abstractmethod 36 | def set_continuous_position(self, target): 37 | pass 38 | 39 | @abstractmethod 40 | def get_continuous_position(self): 41 | pass 42 | 43 | @abstractmethod 44 | def is_moving(self): 45 | pass 46 | 47 | @abstractmethod 48 | def get_gripper_target_position(self): 49 | pass 50 | 51 | 52 | ############################################################################## 53 | 54 | class RobotControllerBase(ABC): 55 | 56 | @abstractmethod 57 | def __init__(self, robot_name, print_debug): 58 | pass 59 | 60 | @abstractmethod 61 | def move_to_state(self, target_xyz, target_zangle, duration=1.5): 62 | pass 63 | 64 | @abstractmethod 65 | def set_moving_time(self, moving_time): 66 | pass 67 | 68 | @abstractmethod 69 | def move_to_eep(self, target_pose, duration=1.5): 70 | """ 71 | :param target_pose: Cartesian pose (x,y,z, quat). 72 | :param duration: Total time trajectory will take before ending 73 | """ 74 | pass 75 | 76 | @abstractmethod 77 | def set_joint_angles(self, target_positions, duration=4): 78 | pass 79 | 80 | @abstractmethod 81 | def move_to_neutral(self, duration=4): 82 | pass 83 | 84 | @abstractmethod 85 | def get_joint_angles(self): 86 | """Get current joint angles""" 87 | pass 88 | 89 | @abstractmethod 90 | def get_joint_effort(self): 91 | """Get current joint efforts""" 92 | pass 93 | 94 | @abstractmethod 95 | def get_joint_angles_velocity(self): 96 | """Get current joint angle velocities""" 97 | pass 98 | 99 | @abstractmethod 100 | def get_cartesian_pose(self, matrix=False): 101 | pass 102 | 103 | def get_state(self): 104 | """Get a tuple of (joint_angles, joint_angles_velocity, cartesian_pose)""" 105 | return self.get_joint_angles(), \ 106 | self.get_joint_angles_velocity(), \ 107 | self.get_cartesian_pose() 108 | 109 | @abstractmethod 110 | def open_gripper(self, wait=False): 111 | pass 112 | 113 | @abstractmethod 114 | def close_gripper(self, wait=False): 115 | pass 116 | -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/src/widowx_controller/custom_gripper_controller.py: -------------------------------------------------------------------------------- 1 | import rospy 2 | import numpy as np 3 | from threading import Lock 4 | from sensor_msgs.msg import JointState 5 | import time 6 | 7 | from std_msgs.msg import Float64 8 | from widowx_controller.srv import OpenGripper, OpenGripperResponse 9 | from widowx_controller.srv import GetGripperDesiredState, GetGripperDesiredStateResponse 10 | 11 | try: 12 | # older version of interbotix sdk 13 | from interbotix_xs_sdk.msg import JointSingleCommand 14 | except: 15 | # newer version of interbotix sdk 16 | from interbotix_xs_msgs.msg import JointSingleCommand 17 | 18 | from widowx_controller.controller_base import GripperControllerBase 19 | 20 | 21 | class GripperController(GripperControllerBase): 22 | def __init__(self, robot_name, create_node=False, upper_limit=0.035, lower_limit=0.010, des_pos_max=1, des_pos_min=0): 23 | if create_node: 24 | rospy.init_node('gripper_controller') 25 | assert des_pos_max >= des_pos_min, "gripper des_pos_max has to be >= des_pos_min" 26 | self.des_pos_max = des_pos_max 27 | self.des_pos_min = des_pos_min 28 | self._upper_limit = upper_limit 29 | self._lower_limit = lower_limit 30 | assert self._upper_limit > self._lower_limit 31 | if not create_node: 32 | rospy.Timer(rospy.Duration(0.02), self.update_gripper_pwm) 33 | 34 | self._joint_lock = Lock() 35 | self._des_pos_lock = Lock() 36 | 37 | self._angles = {} 38 | self._velocities = {} 39 | self._pub_gripper_command = rospy.Publisher(f"/{robot_name}/commands/joint_single", JointSingleCommand, queue_size=3) 40 | rospy.Subscriber(f"/{robot_name}/joint_states", JointState, self._joint_callback) 41 | 42 | self._moving = False 43 | self._time_movement_started = None 44 | self._grace_period_until_can_be_marked_as_stopped = 0.1 45 | self._des_pos = None 46 | self.des_pos = self._upper_limit 47 | 48 | @property 49 | def des_pos(self): 50 | return self._des_pos 51 | 52 | @des_pos.setter 53 | def des_pos(self, value): 54 | if value != self._des_pos: 55 | with self._des_pos_lock: 56 | self._moving = True 57 | self._time_movement_started = time.time() 58 | self._des_pos = value 59 | 60 | def get_gripper_pos(self): 61 | with self._joint_lock: 62 | if 'left_finger' not in self._angles: 63 | print(" WARNING!! left_finger should be avail in gripper") 64 | return 0.0 65 | return self._angles['left_finger'] 66 | 67 | def _joint_callback(self, msg): 68 | with self._joint_lock: 69 | for name, position, velocity in zip(msg.name, msg.position, msg.velocity): 70 | self._angles[name] = position 71 | self._velocities[name] = velocity 72 | 73 | def open(self): 74 | self.des_pos = self._upper_limit 75 | 76 | def close(self): 77 | self.des_pos = self._lower_limit 78 | 79 | def set_continuous_position(self, target): 80 | target_clipped = np.clip(target, self.des_pos_min, self.des_pos_max) 81 | if target != target_clipped: 82 | print('Warning target gripper pos outside of range', target) 83 | self.des_pos = self.denormalize(target_clipped) 84 | 85 | def get_continuous_position(self): 86 | gripper_pos = self.get_gripper_pos() 87 | return self.normalize(gripper_pos) 88 | 89 | def normalize(self, x): 90 | return (self.des_pos_max - self.des_pos_min) * (x - self._lower_limit) / (self._upper_limit - self._lower_limit) + self.des_pos_min 91 | 92 | def denormalize(self, x): 93 | return (x - self.des_pos_min) * (self._upper_limit - self._lower_limit) / (self.des_pos_max - self.des_pos_min) + self._lower_limit 94 | 95 | def is_moving(self): 96 | return self._moving 97 | 98 | def get_gripper_target_position(self): 99 | des_pos_normed = self.normalize(self.des_pos) 100 | assert des_pos_normed <= self.des_pos_max and des_pos_normed >= self.des_pos_min 101 | return des_pos_normed 102 | 103 | def update_gripper_pwm(self, event): 104 | with self._des_pos_lock: 105 | moving = self._moving 106 | des_pos = self.des_pos 107 | 108 | if moving: 109 | gripper_pos = self.get_gripper_pos() 110 | ctrl = (des_pos - gripper_pos)*300 111 | pwm = self.get_gripper_pwm(ctrl) 112 | gripper_command = JointSingleCommand('gripper', pwm) 113 | self._pub_gripper_command.publish(gripper_command) 114 | 115 | def get_gripper_pwm(self, pressure): 116 | """ 117 | :param pressure: range -1, 1 118 | :return: pwm 119 | """ 120 | pressure = np.clip(pressure, -1, 1) 121 | # offset = 150 122 | offset = 0 123 | if pressure < 0: 124 | gripper_pwm = -(offset + int(-pressure * 350)) 125 | if pressure >= 0: 126 | gripper_pwm = offset + int(pressure * 350) 127 | time_since_movements_started = time.time() - self._time_movement_started 128 | # if abs(self._velocities['gripper']) == 0.0 and time_since_movements_started > self._grace_period_until_can_be_marked_as_stopped: 129 | # # set 0 to stop sending commands by setting self.moving = False 130 | # gripper_pwm = 0 131 | # self._moving = False 132 | # self._time_movement_started = None 133 | return gripper_pwm 134 | 135 | 136 | class GripperControllerServer(GripperController): 137 | def __init__(self, robot_name, create_node=True, upper_limit=0.037, lower_limit=0.010, des_pos_max=1, des_pos_min=0): 138 | super(GripperControllerServer, self).__init__(robot_name, create_node, upper_limit, lower_limit, des_pos_max, des_pos_min) 139 | 140 | rospy.Service('open_gripper', OpenGripper, self.open_gripper_service) 141 | # rospy.Service('set_gripper_position', SetGripperPosition, self.set_gripper_position_service) 142 | rospy.Service('get_gripper_desired_state', GetGripperDesiredState, self.get_gripper_desired_state_service) 143 | rospy.Subscriber("/gripper_despos", Float64, self._gripper_despos_callback) 144 | 145 | def _gripper_despos_callback(self, msg): 146 | self.set_continuous_position(msg.data) 147 | 148 | def open_gripper_service(self, req): 149 | self.open() 150 | return OpenGripperResponse() 151 | 152 | def get_gripper_desired_state_service(self, req): 153 | return GetGripperDesiredStateResponse(self.get_gripper_target_position()) 154 | 155 | 156 | def run(): 157 | controller = GripperControllerServer(robot_name='wx250s') 158 | rospy.sleep(0.1) 159 | r = rospy.Rate(50) 160 | while not rospy.is_shutdown(): 161 | controller.update_gripper_pwm(None) 162 | r.sleep() 163 | 164 | if __name__ == '__main__': 165 | run() 166 | -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/src/widowx_controller/velocity_controller.py: -------------------------------------------------------------------------------- 1 | """ 2 | NOTE (YL) This VelocityController is not working, 3 | mainly serves as a backup from the original code. 4 | 5 | This is moved here from the original widowx_controller.py file, to 6 | make the code more readable and cleaner. 7 | """ 8 | 9 | from __future__ import print_function 10 | import numpy as np 11 | import rospy 12 | import time 13 | import pickle as pkl 14 | from pyquaternion import Quaternion 15 | 16 | try: 17 | # older version of interbotix sdk 18 | from interbotix_xs_sdk.msg import JointGroupCommand 19 | except: 20 | # newer version of interbotix sdk 21 | from interbotix_xs_msgs.msg import JointGroupCommand 22 | 23 | from widowx_controller.widowx_controller import WidowX_Controller 24 | from visual_mpc.envs.util.teleop.server import SpaceMouseRemoteReader 25 | 26 | from widowx_envs.utils.exceptions import Environment_Exception 27 | 28 | from modern_robotics.core import \ 29 | JacobianSpace, Adjoint, MatrixLog6, se3ToVec, TransInv, FKinSpace 30 | 31 | ############################################################################## 32 | 33 | 34 | def compute_joint_velocities_from_cartesian(Slist, M, T, thetalist_current): 35 | """Computes inverse kinematics in the space frame for an open chain robot 36 | 37 | :param Slist: The joint screw axes in the space frame when the 38 | manipulator is at the home position, in the format of a 39 | matrix with axes as the columns 40 | :param M: The home configuration of the end-effector 41 | :param T: The desired end-effector configuration Tsd 42 | :param thetalist_current: An initial guess of joint angles that are close to 43 | satisfying Tsd 44 | """ 45 | thetalist = np.array(thetalist_current).copy() 46 | Tsb = FKinSpace(M, Slist, thetalist) 47 | Vs = np.dot(Adjoint(Tsb), 48 | se3ToVec(MatrixLog6(np.dot(TransInv(Tsb), T)))) 49 | theta_vel = np.dot(np.linalg.pinv(JacobianSpace(Slist, 50 | thetalist)), Vs) 51 | return theta_vel 52 | 53 | ############################################################################## 54 | 55 | 56 | class WidowXVelocityController(WidowX_Controller): 57 | def __init__(self, *args, **kwargs): 58 | super(WidowXVelocityController, self).__init__(*args, **kwargs) 59 | self.bot.dxl.robot_set_operating_modes("group", "arm", "velocity") 60 | self.bot.arm.set_trajectory_time(moving_time=0.2, accel_time=0.05) 61 | 62 | # TODO: do we need the SpaceMouseRemoteReader? 63 | # TODO(YL): WARNING: This package is not avail, fix this 64 | self.space_mouse = SpaceMouseRemoteReader() 65 | rospy.Timer(rospy.Duration(0.02), self.update_robot_cmds) 66 | self.last_update_cmd = time.time() 67 | self.enable_cmd_thread = False 68 | self.do_reset = False 69 | self.task_stage = 0 70 | self.num_task_stages = 1e9 71 | 72 | def update_robot_cmds(self, event): 73 | reading = self.space_mouse.get_reading() 74 | if reading is not None and self.enable_cmd_thread: 75 | # print('delta t cmd update, ', time.time() - self.last_update_cmd) 76 | self.last_update_cmd = time.time() 77 | if reading['left'] and reading['right'] or reading['left_and_right']: 78 | self.task_stage += 1 79 | self.task_stage = np.clip(self.task_stage, 0, self.num_task_stages) 80 | if self.task_stage == self.num_task_stages: 81 | print('resetting!') 82 | self.do_reset = True 83 | rospy.sleep(1.0) 84 | # t0 = time.time() 85 | self.apply_spacemouse_action(reading) 86 | # print('apply action time', time.time() - t0) 87 | 88 | def apply_spacemouse_action(self, readings): 89 | if readings is None: 90 | print('readings are None!') 91 | return 92 | if self.custom_gripper_controller: 93 | if readings['left']: 94 | self._gripper.open() 95 | if readings['right']: 96 | self._gripper.close() 97 | else: 98 | if readings['left']: 99 | self.bot.open_gripper() 100 | if readings['right']: 101 | self.bot.close_gripper() 102 | 103 | if self.enable_rotation: 104 | # t1 = time.time() 105 | pose = self.get_cartesian_pose(matrix=True) 106 | # print('get pose time', time.time() - t1) 107 | 108 | current_quat = Quaternion(matrix=pose[:3, :3]) 109 | translation_scale = 0.1 110 | commanded_translation_velocity = readings['xyz'] * translation_scale 111 | new_pos = pose[:3, 3] + commanded_translation_velocity 112 | 113 | # rotation_scale = 0.3 114 | rotation_scale = 0.4 115 | commanded_rotation_velocity = readings['rot'] * rotation_scale 116 | if self.enable_rotation == '4dof': 117 | commanded_rotation_velocity = commanded_rotation_velocity[2] 118 | new_rot = Quaternion(axis=[0, 0, 1], angle=commanded_rotation_velocity) * current_quat 119 | elif self.enable_rotation == '6dof': 120 | new_rot = Quaternion(axis=[1, 0, 0], angle=commanded_rotation_velocity[0]) * \ 121 | Quaternion(axis=[0, 1, 0], angle=commanded_rotation_velocity[1]) * \ 122 | Quaternion(axis=[0, 0, 1], angle=commanded_rotation_velocity[2]) * current_quat 123 | else: 124 | raise NotImplementedError 125 | 126 | new_transform = np.eye(4) 127 | new_transform[:3, :3] = new_rot.rotation_matrix 128 | new_transform[:3, 3] = new_pos 129 | else: 130 | new_transform = self.get_cartesian_pose(matrix=True) 131 | new_transform[:3, 3] += readings['xyz'] * 0.1 132 | 133 | # t2 = time.time() 134 | joint_velocities = compute_joint_velocities_from_cartesian( 135 | self.bot.robot_des.Slist, self.bot.robot_des.M, 136 | new_transform, self.get_joint_angles() 137 | ) 138 | # print('compute joint vel time', time.time() - t2) 139 | self.cap_joint_limits(joint_velocities) 140 | try: 141 | # TODO(YL): where joint_commands is defined? cant find it 142 | joint_commands = JointCommands(joint_velocities) 143 | self.bot.core.pub_group.publish(joint_commands) 144 | except: 145 | print('could not set joint velocity!') 146 | 147 | def stop_motors(self): 148 | joint_commands = JointGroupCommand(self.bot.arm.group_name, ) 149 | self.bot.core.pub_group.publish(joint_commands) 150 | self.bot.core.pub_group.publish(JointCommands()) 151 | 152 | def move_to_state(self, target_xyz, target_zangle, duration=2): 153 | pose = np.eye(4) 154 | rot = Quaternion(axis=[0, 0, 1], angle=target_zangle) * Quaternion(matrix=self.default_rot) 155 | pose[:3, :3] = rot.rotation_matrix 156 | pose[:3, 3] = target_xyz 157 | joint_pos, success = self.bot.set_ee_pose_matrix( 158 | pose, 159 | custom_guess=self.get_joint_angles(), 160 | moving_time=2, 161 | execute=False 162 | ) 163 | if success: 164 | self.move_to_pos_with_velocity_ctrl(joint_pos) 165 | return True 166 | else: 167 | print('no kinematics solution found!') 168 | raise Environment_Exception 169 | 170 | def cap_joint_limits(self, ctrl): 171 | for i in range(self.qn): 172 | if self.get_joint_angles()[i] < self._lower_joint_limits[i]: 173 | print('ctrl', ctrl) 174 | print('ja', self.get_joint_angles()) 175 | print('limit', self._lower_joint_limits) 176 | print('lower joint angle limit violated for j{}'.format(i + 1)) 177 | if ctrl[i] < 0: 178 | print('setting to zero') 179 | ctrl[i] = 0 180 | if self.get_joint_angles()[i] > self._upper_joint_limits[i]: 181 | print('ctrl', ctrl) 182 | print('ja', self.get_joint_angles()) 183 | print('limit', self._lower_joint_limits) 184 | print('upper joint angle limit violated for j{}'.format(i + 1)) 185 | if ctrl[i] > 0: 186 | print('setting to zero') 187 | ctrl[i] = 0 188 | 189 | def move_to_neutral(self, duration=4): 190 | self.move_to_pos_with_velocity_ctrl(self.neutral_joint_angles, duration=duration) 191 | 192 | def move_to_pos_with_velocity_ctrl(self, des, duration=3): 193 | nsteps = 30 194 | per_step = float(duration)/nsteps 195 | 196 | tstart = time.time() 197 | current = self.get_joint_angles() 198 | error = des - current 199 | while (time.time() - tstart) < duration and np.linalg.norm(error) > 0.15: 200 | current = self.get_joint_angles() 201 | error = des - current 202 | ctrl = error * 0.8 203 | max_speed = 0.5 204 | ctrl = np.clip(ctrl, -max_speed, max_speed) 205 | self.cap_joint_limits(ctrl) 206 | self.bot.core.pub_group.publish(JointCommands(ctrl)) 207 | self._last_healthy_tstamp = rospy.get_time() 208 | rospy.sleep(per_step) 209 | self.bot.core.pub_group.publish(JointCommands(np.zeros(self._qn))) 210 | 211 | 212 | ############################################################################## 213 | 214 | if __name__ == '__main__': 215 | dir = '/mount/harddrive/spt/trainingdata/realworld/can_pushing_line/2020-09-04_09-28-29/raw/traj_group0/traj2' 216 | dict = pkl.load(open(dir + '/policy_out.pkl', "rb")) 217 | actions = np.stack([d['actions'] for d in dict], axis=0) 218 | dict = pkl.load(open(dir + '/obs_dict.pkl', "rb")) 219 | states = dict['raw_state'] 220 | 221 | # TODO: check if renaming is required for widowx to widowx_controller 222 | controller = WidowXVelocityController('widowx', True) 223 | 224 | rospy.sleep(2) 225 | controller.move_to_neutral() 226 | # controller.move_to_state(states[0, :3], target_zangle=states[0, 3]) 227 | controller.move_to_state(states[0, :3], target_zangle=0.) 228 | 229 | prev_eef = controller.get_cartesian_pose()[:3] 230 | for t in range(20): 231 | # low_bound = np.array([1.12455181e-01, 8.52311223e-05, 3.23975718e-02, -2.02, -0.55]) + np.array([0, 0, 0.05, 0, 0]) 232 | # high_bound = np.array([0.29880695, 0.22598613, 0.15609235, 1.52631092, 1.39]) 233 | # x, y, z, theta = np.random.uniform(low_bound[:4], high_bound[:4]) 234 | controller.apply_endeffector_velocity(actions[t]/0.2) 235 | 236 | new_eef = controller.get_cartesian_pose()[:3] 237 | print("current eef pos", new_eef[:3]) 238 | print('desired eef pos', states[t, :3]) 239 | print('delta', states[t, :3] - new_eef[:3]) 240 | rospy.sleep(0.2) 241 | -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/src/widowx_controller/vr_controller_client.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | import numpy as np 4 | import rospy 5 | 6 | from widowx_controller.srv import DisableController 7 | from widowx_controller.srv import EnableController 8 | from widowx_controller.srv import GetCartesianPose 9 | from widowx_controller.srv import GetGripperDesiredState 10 | from widowx_controller.srv import GetState 11 | from widowx_controller.srv import GetVRButtons 12 | from widowx_controller.srv import GotoNeutral 13 | from widowx_controller.srv import MoveToEEP 14 | from widowx_controller.srv import MoveToState 15 | from widowx_controller.srv import OpenGripper 16 | from widowx_controller.srv import SetGripperPosition 17 | from widowx_envs.utils.exceptions import Environment_Exception 18 | 19 | from widowx_controller.controller_base import RobotControllerBase 20 | 21 | # TODO: abstract class for all controllers: 22 | # with internal ROS init, thus higher level impl doesnt need to interact 23 | # with ROS directly, middleware agnostic 24 | 25 | class WidowX_VRContollerClient(RobotControllerBase): 26 | def __init__(self, print_debug=False): 27 | rospy.init_node("vr_controller_client") 28 | logger = logging.getLogger('robot_logger') 29 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 30 | logger.setLevel(logging.DEBUG) 31 | 32 | log_level = logging.INFO 33 | if print_debug: 34 | log_level = logging.DEBUG 35 | ch = logging.StreamHandler() 36 | ch.setLevel(log_level) 37 | ch.setFormatter(formatter) 38 | logger.addHandler(ch) 39 | 40 | def move_to_neutral(self, duration=4): 41 | rospy.wait_for_service('go_to_neutral') 42 | try: 43 | goto_neutral = rospy.ServiceProxy('go_to_neutral', GotoNeutral) 44 | goto_neutral(duration) 45 | except rospy.ServiceException as e: 46 | print("Service call failed: %s" % e) 47 | 48 | def move_to_eep(self, target_pose, duration=1.5): 49 | rospy.wait_for_service('move_to_eep') 50 | try: 51 | service_func = rospy.ServiceProxy('move_to_eep', MoveToEEP) 52 | service_func(target_pose, duration) 53 | except rospy.ServiceException as e: 54 | print("Service call failed: %s" % e) 55 | 56 | def move_to_state(self, startpos, zangle, duration=1.5): 57 | rospy.wait_for_service('move_to_state') 58 | try: 59 | service_func = rospy.ServiceProxy('move_to_state', MoveToState) 60 | resp = service_func(startpos, zangle, duration) 61 | if not resp.success: 62 | raise Environment_Exception 63 | except rospy.ServiceException as e: 64 | print("Service call failed: %s" % e) 65 | 66 | 67 | def get_cartesian_pose(self, matrix=False): 68 | assert matrix 69 | rospy.wait_for_service('get_cartesian_pose') 70 | try: 71 | service_func = rospy.ServiceProxy('get_cartesian_pose', GetCartesianPose) 72 | return np.array(service_func().eep).reshape(4,4) 73 | except rospy.ServiceException as e: 74 | print("Service call failed: %s" % e) 75 | 76 | def get_state(self): 77 | rospy.wait_for_service('get_state') 78 | try: 79 | service_func = rospy.ServiceProxy('get_state', GetState) 80 | response = service_func() 81 | return np.array(response.joint_angles), np.array(response.joint_velocities), np.array(response.cartesian_pose) 82 | except rospy.ServiceException as e: 83 | print("Service call failed: %s" % e) 84 | 85 | def get_vr_buttons(self): 86 | rospy.wait_for_service('get_vr_buttons') 87 | try: 88 | service_func = rospy.ServiceProxy('get_vr_buttons', GetVRButtons) 89 | response = service_func() 90 | return {'handle': bool(response.handle), 'A': bool(response.a), 'B': bool(response.b), 'RJ': bool(response.rj)} 91 | except rospy.ServiceException as e: 92 | print("Service call failed: %s" % e) 93 | 94 | 95 | def enable_controller(self): 96 | rospy.wait_for_service('enable_controller') 97 | try: 98 | service_func = rospy.ServiceProxy('enable_controller', EnableController) 99 | service_func() 100 | except rospy.ServiceException as e: 101 | print("Service call failed: %s" % e) 102 | 103 | def disable_controller(self): 104 | rospy.wait_for_service('disable_controller') 105 | try: 106 | service_func = rospy.ServiceProxy('disable_controller', DisableController) 107 | service_func() 108 | except rospy.ServiceException as e: 109 | print("Service call failed: %s" % e) 110 | 111 | def get_gripper_desired_state(self): 112 | rospy.wait_for_service('get_gripper_desired_state') 113 | try: 114 | service_func = rospy.ServiceProxy('get_gripper_desired_state', GetGripperDesiredState) 115 | return service_func().des_pos 116 | except rospy.ServiceException as e: 117 | print("Service call failed: %s" % e) 118 | 119 | def open_gripper(self, wait=False): 120 | print('opening gripper') 121 | rospy.wait_for_service('open_gripper') 122 | try: 123 | service_func = rospy.ServiceProxy('open_gripper', OpenGripper) 124 | service_func() 125 | except rospy.ServiceException as e: 126 | print("Service call failed: %s" % e) 127 | 128 | def set_gripper_position(self, position): 129 | rospy.wait_for_service('set_gripper_position') 130 | try: 131 | service_func = rospy.ServiceProxy('set_gripper_position', SetGripperPosition) 132 | service_func(position) 133 | except rospy.ServiceException as e: 134 | print("Service call failed: %s" % e) 135 | -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/src/widowx_controller/vr_controller_server.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/python3 2 | 3 | import rospy 4 | import time 5 | from oculus_reader import OculusReader 6 | from widowx_controller.widowx_controller import WidowX_Controller 7 | from widowx_envs.control_loops import Environment_Exception 8 | import widowx_envs.utils.transformation_utils as tr 9 | from pyquaternion import Quaternion 10 | import numpy as np 11 | 12 | from std_msgs.msg import Float64 13 | 14 | from widowx_controller.srv import GotoNeutral, GotoNeutralResponse 15 | from widowx_controller.srv import MoveToEEP, MoveToEEPResponse 16 | from widowx_controller.srv import MoveToState, MoveToStateResponse 17 | from widowx_controller.srv import GetCartesianPose, GetCartesianPoseResponse 18 | from widowx_controller.srv import GetState, GetStateResponse 19 | from widowx_controller.srv import GetVRButtons, GetVRButtonsResponse 20 | from widowx_controller.srv import EnableController, EnableControllerResponse 21 | from widowx_controller.srv import DisableController, DisableControllerResponse 22 | 23 | from widowx_controller.widowx_controller import publish_transform 24 | 25 | ############################################################################## 26 | 27 | class VR_WidowX_ControllerServer(WidowX_Controller): 28 | def __init__(self, grasp_mode='continuous', *args, **kwargs): 29 | super(VR_WidowX_ControllerServer, self).__init__(*args, **kwargs) 30 | 31 | self.reader = OculusReader() 32 | 33 | self._moving_time = 0.05 34 | self.prev_handle_press = False 35 | self.reference_vr_transform = None 36 | self.reference_robot_transform = None 37 | 38 | self.do_reset = False 39 | self.task_stage = 0 40 | self.num_task_stages = 1e9 41 | self.last_update_time = time.time() 42 | 43 | self._control_loop_active = True 44 | rospy.Service('go_to_neutral', GotoNeutral, self.goto_neutral_service) 45 | rospy.Service('move_to_eep', MoveToEEP, self.move_to_eep_service) 46 | rospy.Service('move_to_state', MoveToState, self.move_to_state_service) 47 | rospy.Service('get_cartesian_pose', GetCartesianPose, self.get_cartesian_pose_service) 48 | rospy.Service('get_state', GetState, self.get_state_service) 49 | rospy.Service('get_vr_buttons', GetVRButtons, self.get_vr_buttons_service) 50 | rospy.Service('enable_controller', EnableController, self.enable_controller_service) 51 | rospy.Service('disable_controller', DisableController, self.disable_controller_service) 52 | 53 | self.pub_gripper_command = rospy.Publisher("/gripper_despos", Float64, queue_size=3) 54 | self.grasp_mode = grasp_mode 55 | 56 | self.last_pressed_times = {} 57 | 58 | def _init_gripper(self, gripper_attached, gripper_params): 59 | pass 60 | 61 | def goto_neutral_service(self, req): 62 | print('moving to neutral: seconds:', req.duration) 63 | self._control_loop_active = False 64 | self.move_to_neutral(req.duration) 65 | self._control_loop_active = True 66 | return GotoNeutralResponse() 67 | 68 | def move_to_eep_service(self, req): 69 | self._control_loop_active = False 70 | des_transform = np.array(req.des_eep).reshape(4,4) 71 | self.move_to_eep(des_transform, req.duration) 72 | self._control_loop_active = True 73 | return MoveToEEPResponse() 74 | 75 | def move_to_state_service(self, req): 76 | self._control_loop_active = False 77 | try: 78 | self.move_to_state(req.target_xyz, req.target_zangle, req.duration) 79 | success = True 80 | except Environment_Exception: 81 | success = False 82 | self._control_loop_active = True 83 | return MoveToStateResponse(success) 84 | 85 | def get_cartesian_pose_service(self, req): 86 | pose = self.get_cartesian_pose(matrix=True) 87 | return GetCartesianPoseResponse(pose.flatten()) 88 | 89 | def get_state_service(self, req): 90 | joint_angles, joint_velocities, cartesian_pose = self.get_state() 91 | return GetStateResponse(joint_angles, joint_velocities, cartesian_pose) 92 | 93 | def get_vr_buttons_service(self, req): 94 | def check_press(key): 95 | if key in self.last_pressed_times: 96 | if time.time() - self.last_pressed_times[key] < 0.5: 97 | return True 98 | return False 99 | resp = GetVRButtonsResponse(int(check_press('RG')), int(check_press('A')), int(check_press('B')), int(check_press('RJ'))) 100 | return resp 101 | 102 | def enable_controller_service(self, req): 103 | self._control_loop_active = True 104 | return EnableControllerResponse() 105 | 106 | def disable_controller_service(self, req): 107 | self._control_loop_active = False 108 | return DisableControllerResponse() 109 | 110 | def get_pose_and_button(self): 111 | poses, buttons = self.reader.get_transformations_and_buttons() 112 | 113 | # store in dict the times buttons were pressed last. 114 | for key, value in buttons.items(): 115 | if not isinstance(value, tuple): 116 | if value: 117 | self.last_pressed_times[key] = time.time() 118 | 119 | if 'r' not in poses: 120 | return None, None, None, None 121 | return poses['r'], buttons['RTr'], buttons['rightTrig'][0], buttons['RG'] 122 | 123 | def set_gripper_position(self, position): 124 | self.pub_gripper_command.publish(Float64(position)) 125 | 126 | def update_robot_cmds(self, event): 127 | # print("time update cmds", time.time() - self.last_update_time) 128 | self.last_update_time = time.time() 129 | t1 = time.time() 130 | current_vr_transform, trigger, trigger_continuous, handle_button = self.get_pose_and_button() 131 | if current_vr_transform is None: 132 | return 133 | elif not self._control_loop_active: 134 | self.prev_handle_press = False 135 | return 136 | else: 137 | if not self.prev_handle_press and handle_button: 138 | print("resetting reference pose") 139 | self.reference_vr_transform = self.oculus_to_robot(current_vr_transform) 140 | self.initial_vr_offset = tr.RpToTrans(np.eye(3), self.reference_vr_transform[:3, 3]) 141 | self.reference_vr_transform = tr.TransInv(self.initial_vr_offset).dot(self.reference_vr_transform) ## 142 | 143 | self.reference_robot_transform = self.get_cartesian_pose(matrix=True) 144 | self.bot.arm.set_trajectory_time(moving_time=self._moving_time, accel_time=self._moving_time * 0.5) 145 | 146 | if not handle_button: 147 | self.reference_vr_transform = None 148 | self.reference_robot_transform = self.get_cartesian_pose(matrix=True) 149 | self.prev_handle_press = False 150 | return 151 | self.prev_handle_press = True 152 | 153 | print('gripper set point', 1 - trigger_continuous) 154 | self.set_gripper_position(1 - trigger_continuous) 155 | 156 | current_vr_transform = self.oculus_to_robot(current_vr_transform) 157 | current_vr_transform = tr.TransInv(self.initial_vr_offset).dot(current_vr_transform) ## 158 | 159 | publish_transform(current_vr_transform, 'currentvr_robotsystem') 160 | publish_transform(self.reference_vr_transform, 'reference_vr_transform') 161 | 162 | delta_vr_transform = current_vr_transform.dot(tr.TransInv(self.reference_vr_transform)) 163 | publish_transform(self.reference_robot_transform, 'reference_robot_transform') 164 | 165 | M_rob, v_rob = tr.TransToRp(self.reference_robot_transform) 166 | M_delta, v_delta = tr.TransToRp(delta_vr_transform) 167 | new_robot_transform = tr.RpToTrans(M_delta.dot(M_rob), v_rob + v_delta) 168 | # new_robot_transform = delta_vr_transform.dot(self.reference_robot_transform) 169 | 170 | publish_transform(new_robot_transform, 'des_robot_transform') 171 | 172 | delta_translation_norm = np.linalg.norm(self.get_cartesian_pose(matrix=True)[:3, 3] - new_robot_transform[:3, 3]) 173 | # if delta_translation_norm > 0.15: 174 | if delta_translation_norm > 0.2: 175 | print('delta transform norm, too large: ', delta_translation_norm) 176 | import pdb; pdb.set_trace() 177 | 178 | try: 179 | tset = time.time() 180 | solution, success = self.bot.arm.set_ee_pose_matrix_fast(new_robot_transform, custom_guess=self.get_joint_angles()) 181 | print("time for setting pos", time.time() - tset) 182 | except rospy.service.ServiceException: 183 | print('stuck during move') 184 | import pdb; 185 | pdb.set_trace() 186 | self.move_to_neutral() 187 | 188 | loop_time = time.time() - t1 189 | print("loop time", loop_time) 190 | if loop_time > 0.02: 191 | print('Warning: Control loop is slow!') 192 | 193 | def oculus_to_robot(self, current_vr_transform): 194 | current_vr_transform = tr.RpToTrans(Quaternion(axis=[0, 0, 1], angle=-np.pi / 2).rotation_matrix, 195 | np.zeros(3)).dot( 196 | tr.RpToTrans(Quaternion(axis=[1, 0, 0], angle=np.pi / 2).rotation_matrix, np.zeros(3))).dot( 197 | current_vr_transform) 198 | return current_vr_transform 199 | 200 | 201 | def run(): 202 | controller = VR_WidowX_ControllerServer(robot_name='wx250s', print_debug=True, 203 | gripper_attached='custom', 204 | enable_rotation='6dof') 205 | controller.set_moving_time(1) 206 | controller.move_to_neutral(duration=3) 207 | controller.set_moving_time(controller._moving_time) 208 | while not rospy.is_shutdown(): 209 | controller.update_robot_cmds(None) 210 | 211 | 212 | if __name__ == '__main__': 213 | run() 214 | -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/src/widowx_controller/widowx_controller.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/python3 2 | 3 | import numpy as np 4 | import rospy 5 | from pyquaternion import Quaternion 6 | from sensor_msgs.msg import JointState 7 | from geometry_msgs.msg import TransformStamped 8 | 9 | from threading import Lock 10 | import logging 11 | import os 12 | import time 13 | 14 | import tf2_ros 15 | from transformations import quaternion_from_matrix 16 | 17 | from interbotix_xs_modules.arm import InterbotixArmXSInterface, InterbotixArmXSInterface, \ 18 | InterbotixRobotXSCore, InterbotixGripperXSInterface 19 | 20 | try: 21 | # older version of interbotix sdk 22 | from interbotix_xs_sdk.msg import JointGroupCommand 23 | except: 24 | # newer version of interbotix sdk 25 | from interbotix_xs_msgs.msg import JointGroupCommand 26 | from interbotix_xs_msgs.srv import Reboot 27 | 28 | from widowx_envs.utils.exceptions import Environment_Exception 29 | from modern_robotics.core import JacobianSpace, Adjoint, MatrixLog6, se3ToVec, TransInv, FKinSpace 30 | import widowx_envs.utils.transformation_utils as tr 31 | 32 | from widowx_controller.custom_gripper_controller import GripperController 33 | from widowx_controller.controller_base import RobotControllerBase 34 | 35 | import numpy as np 36 | from numba import jit 37 | 38 | ############################################################################## 39 | 40 | # NOTE: experimental values 41 | ABS_MAX_JOINT_EFFORTS = np.array([800, 1000, 600.0, 600.0, 600.0, 700.0]) * 1.7 42 | NEUTRAL_JOINT_STATE = np.array([-0.13192235, -0.76238847, 0.44485444, 43 | -0.01994175, 1.7564081, -0.15953401]) 44 | DEFAULT_ROTATION = np.array([[0 , 0, 1.0], 45 | [0, 1.0, 0], 46 | [-1.0, 0, 0]]) 47 | 48 | ############################################################################## 49 | 50 | @jit() 51 | def ModifiedIKinSpace(Slist, M, T, thetalist0, eomg, ev, maxiterations=40): 52 | """ 53 | ModifiedIKinSpace - Inverse Kinematics in the Space Frame 54 | this exposed the max_iterations parameter to the user 55 | 56 | # original source: 57 | https://github.com/NxRLab/ModernRobotics/blob/master/packages/Python/modern_robotics/core.py 58 | """ 59 | thetalist = np.array(thetalist0).copy() 60 | i = 0 61 | Tsb = FKinSpace(M,Slist, thetalist) 62 | Vs = np.dot(Adjoint(Tsb), \ 63 | se3ToVec(MatrixLog6(np.dot(TransInv(Tsb), T)))) 64 | err = np.linalg.norm([Vs[0], Vs[1], Vs[2]]) > eomg \ 65 | or np.linalg.norm([Vs[3], Vs[4], Vs[5]]) > ev 66 | while err and i < maxiterations: 67 | thetalist = thetalist \ 68 | + np.dot(np.linalg.pinv(JacobianSpace(Slist, \ 69 | thetalist)), Vs) 70 | i = i + 1 71 | Tsb = FKinSpace(M, Slist, thetalist) 72 | Vs = np.dot(Adjoint(Tsb), \ 73 | se3ToVec(MatrixLog6(np.dot(TransInv(Tsb), T)))) 74 | err = np.linalg.norm([Vs[0], Vs[1], Vs[2]]) > eomg \ 75 | or np.linalg.norm([Vs[3], Vs[4], Vs[5]]) > ev 76 | if err: 77 | print('IKinSpace: did not converge') 78 | print('Vs', Vs) 79 | return (thetalist, not err) 80 | 81 | ############################################################################## 82 | 83 | def publish_transform(transform, name, parent_name='wx250s/base_link'): 84 | """TODO(YL): it's bad to reinit the broadcaster every time, improve this""" 85 | translation = transform[:3, 3] 86 | 87 | br = tf2_ros.TransformBroadcaster() 88 | t = TransformStamped() 89 | 90 | t.header.stamp = rospy.Time.now() 91 | t.header.frame_id = parent_name 92 | t.child_frame_id = name 93 | t.transform.translation.x = translation[0] 94 | t.transform.translation.y = translation[1] 95 | t.transform.translation.z = translation[2] 96 | 97 | quat = quaternion_from_matrix(transform) 98 | t.transform.rotation.w = quat[0] 99 | t.transform.rotation.x = quat[1] 100 | t.transform.rotation.y = quat[2] 101 | t.transform.rotation.z = quat[3] 102 | 103 | # print('publish transofrm', name) 104 | br.sendTransform(t) 105 | 106 | 107 | class ModifiedInterbotixManipulatorXS(object): 108 | def __init__(self, robot_model, group_name="arm", gripper_name="gripper", robot_name=None, moving_time=2.0, accel_time=0.3, gripper_pressure=0.5, gripper_pressure_lower_limit=150, gripper_pressure_upper_limit=350, init_node=True): 109 | self.dxl = InterbotixRobotXSCore(robot_model, robot_name, init_node) 110 | self.arm = ModifiedInterbotixArmXSInterface(self.dxl, robot_model, group_name, moving_time, accel_time) 111 | if gripper_name is not None: 112 | self.gripper = InterbotixGripperXSInterface(self.dxl, gripper_name, gripper_pressure, gripper_pressure_lower_limit, gripper_pressure_upper_limit) 113 | 114 | 115 | class ModifiedInterbotixArmXSInterface(InterbotixArmXSInterface): 116 | def __init__(self, *args, **kwargs): 117 | super(ModifiedInterbotixArmXSInterface, self).__init__(*args, **kwargs) 118 | self.waist_index = self.group_info.joint_names.index("waist") 119 | 120 | def set_ee_pose_matrix_fast(self, T_sd, custom_guess=None, execute=True): 121 | """ 122 | this version of set_ee_pose_matrix does not set the velocity profile registers in the servos and therefore runs faster 123 | """ 124 | if (custom_guess is None): 125 | initial_guesses = self.initial_guesses 126 | else: 127 | initial_guesses = [custom_guess] 128 | 129 | for guess in initial_guesses: 130 | theta_list, success = ModifiedIKinSpace(self.robot_des.Slist, self.robot_des.M, T_sd, guess, 0.001, 0.001) 131 | solution_found = True 132 | 133 | # Check to make sure a solution was found and that no joint limits were violated 134 | if success: 135 | theta_list = [int(elem * 1000)/1000.0 for elem in theta_list] 136 | for x in range(self.group_info.num_joints): 137 | if not (self.group_info.joint_lower_limits[x] <= theta_list[x] <= self.group_info.joint_upper_limits[x]): 138 | solution_found = False 139 | break 140 | else: 141 | solution_found = False 142 | 143 | if solution_found: 144 | if execute: 145 | self.publish_positions_fast(theta_list) 146 | self.T_sb = T_sd 147 | return theta_list, True 148 | else: 149 | rospy.loginfo("Guess failed to converge...") 150 | 151 | rospy.loginfo("No valid pose could be found") 152 | return theta_list, False 153 | 154 | def publish_positions_fast(self, positions): 155 | self.joint_commands = list(positions) 156 | joint_commands = JointGroupCommand(self.group_name, self.joint_commands) 157 | self.core.pub_group.publish(joint_commands) 158 | self.T_sb = FKinSpace(self.robot_des.M, self.robot_des.Slist, self.joint_commands) 159 | 160 | ############################################################################## 161 | 162 | class WidowX_Controller(RobotControllerBase): 163 | def __init__(self, robot_name, print_debug, gripper_params, 164 | enable_rotation='6dof', 165 | gripper_attached='custom', 166 | normal_base_angle=0): 167 | """ 168 | gripper_attached: either "custom" or "default" 169 | """ 170 | print('waiting for widowx_controller to be set up...') 171 | self.bot = ModifiedInterbotixManipulatorXS(robot_model=robot_name) 172 | # TODO: Why was this call necessary in the visual_foresight? With the new SDK it messes the motor parameters 173 | # self.bot.dxl.robot_set_operating_modes("group", "arm", "position", profile_type="velocity", profile_velocity=131, profile_acceleration=15) 174 | 175 | if gripper_params is None: 176 | gripper_params = {} 177 | 178 | self._robot_name = robot_name 179 | rospy.on_shutdown(self.clean_shutdown) 180 | 181 | logger = logging.getLogger('robot_logger') 182 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 183 | log_level = logging.WARN 184 | if print_debug: 185 | log_level = logging.DEBUG 186 | ch = logging.StreamHandler() 187 | ch.setLevel(log_level) 188 | ch.setFormatter(formatter) 189 | logger.addHandler(ch) 190 | self._init_gripper(gripper_attached, gripper_params) 191 | 192 | self._joint_lock = Lock() 193 | self._angles, self._velocities, self._effort = {}, {}, {} 194 | rospy.Subscriber(f"/{robot_name}/joint_states", JointState, self._joint_callback) 195 | time.sleep(1) 196 | self._n_errors = 0 197 | 198 | self._upper_joint_limits = np.array(self.bot.arm.group_info.joint_upper_limits) 199 | self._lower_joint_limits = np.array(self.bot.arm.group_info.joint_lower_limits) 200 | self._qn = self.bot.arm.group_info.num_joints 201 | 202 | self.joint_names = self.bot.arm.group_info.joint_names 203 | self.default_rot = np.dot(tr.eulerAnglesToRotationMatrix([0, 0, normal_base_angle]), DEFAULT_ROTATION) 204 | 205 | # self.neutral_joint_angles = np.zeros(self._qn) 206 | # self.neutral_joint_angles[0] = normal_base_angle 207 | # self.neutral_joint_angles[-2] = np.pi / 2 208 | self.neutral_joint_angles = NEUTRAL_JOINT_STATE 209 | self.enable_rotation = enable_rotation 210 | 211 | def reboot_motor(self, joint_name: str): 212 | """Experimental function to reboot the motor 213 | Supported joint names: 214 | - waist, shoulder, elbow, forearm_roll, 215 | - wrist_angle, wrist_rotate, gripper, left_finger, right_finger 216 | """ 217 | rospy.wait_for_service('/wx250s/reboot_motors') 218 | try: 219 | reboot_motors = rospy.ServiceProxy('/wx250s/reboot_motors', Reboot) 220 | response = reboot_motors(cmd_type='single', name=joint_name, 221 | enable=True, smart_reboot=True) 222 | return response 223 | except rospy.ServiceException as e: 224 | print("Service call failed:", e) 225 | 226 | def clean_shutdown(self): 227 | pid = os.getpid() 228 | logging.getLogger('robot_logger').info('Exiting example w/ pid: {}'.format(pid)) 229 | logging.shutdown() 230 | os.kill(pid, 9) 231 | 232 | def move_to_state(self, target_xyz, target_zangle, duration=1.5): 233 | new_pose = np.eye(4) 234 | new_pose[:3, -1] = target_xyz 235 | new_quat = Quaternion(axis=np.array([0.0, 0.0, 1.0]), angle=target_zangle) * Quaternion(matrix=self.default_rot) 236 | new_pose[:3, :3] = new_quat.rotation_matrix 237 | self.move_to_eep(new_pose, duration) 238 | 239 | def set_moving_time(self, moving_time): 240 | self.bot.arm.set_trajectory_time(moving_time=moving_time*1.25, accel_time=moving_time * 0.5) 241 | 242 | def move_to_eep(self, target_pose, duration=1.5, blocking=True, check_effort=True, step=True): 243 | try: 244 | if step and not blocking: 245 | # this is a call from the `step` function so we use a custom faster way to set the ee pose 246 | solution, success = self.bot.arm.set_ee_pose_matrix_fast(target_pose, custom_guess=self.get_joint_angles(), execute=True) 247 | else: 248 | if not step: # we want to move fast when it is step() call 249 | self.set_moving_time(moving_time=duration) 250 | solution, success = self.bot.arm.set_ee_pose_matrix(target_pose, custom_guess=self.get_joint_angles(), 251 | moving_time=duration, accel_time=duration * 0.45, blocking=blocking) 252 | 253 | self.des_joint_angles = solution 254 | 255 | if not success: 256 | print('no IK solution found, do nothing') 257 | # self.open_gripper() 258 | # self.move_to_neutral() 259 | # raise Environment_Exception 260 | 261 | if check_effort: 262 | if np.max(np.abs(self.get_joint_effort()) - ABS_MAX_JOINT_EFFORTS) > 10: 263 | print('violation ', np.abs(self.get_joint_effort()) - ABS_MAX_JOINT_EFFORTS) 264 | print('motor number: ', np.argmax(np.abs(self.get_joint_effort()) - ABS_MAX_JOINT_EFFORTS)) 265 | print('max effort reached: ', self.get_joint_effort()) 266 | print('max effort allowed ', max_effort_abs_values) 267 | # self.open_gripper() 268 | # self.move_to_neutral() 269 | # raise Environment_Exception 270 | 271 | except rospy.service.ServiceException: 272 | print('stuck during move') 273 | import pdb; pdb.set_trace() 274 | self.move_to_neutral() 275 | 276 | def set_joint_angles(self, target_positions, duration=4): 277 | target_positions_to_reach = [target_positions] 278 | if len(target_positions_to_reach) > 1000: 279 | print('set_joint_angles failed') 280 | raise Environment_Exception 281 | try: 282 | while target_positions_to_reach: 283 | target_position = target_positions_to_reach[-1] 284 | success = self.bot.arm.set_joint_positions(target_position, moving_time=duration) 285 | if success is False: 286 | intermediate_pos = np.mean([self.get_joint_angles(), target_position], axis=0) 287 | target_positions_to_reach.append(intermediate_pos) 288 | else: 289 | target_positions_to_reach.pop() 290 | except rospy.service.ServiceException: 291 | print('stuck during motion') 292 | import pdb; pdb.set_trace() 293 | 294 | def check_motor_status_and_reboot(self): 295 | # print("checking motor status") 296 | status_codes = self.bot.dxl.robot_get_motor_registers("group", "all", "Hardware_Error_Status") 297 | # print(status_codes.values) 298 | # import ipdb; ipdb.set_trace() 299 | if len(status_codes.values) < 7: 300 | print("Some motor went wrong!") 301 | self.bot.dxl.robot_reboot_motors("group", "all", enable=True, smart_reboot=True) 302 | print("robot rebooted") 303 | self.move_to_neutral() 304 | raise Environment_Exception 305 | 306 | def move_to_neutral(self, duration=4): 307 | print('moving to neutral..') 308 | try: 309 | self.bot.arm.publish_positions(self.neutral_joint_angles, moving_time=duration) 310 | # print("Error in neutral position", np.linalg.norm(self.neutral_joint_angles - self.get_joint_angles())) 311 | # import ipdb; ipdb.set_trace() 312 | if np.linalg.norm(self.neutral_joint_angles - self.get_joint_angles()) > 0.1: 313 | print("moving to neutral failed!") 314 | self.check_motor_status_and_reboot() 315 | except rospy.service.ServiceException: 316 | print('stuck during reset') 317 | import pdb; pdb.set_trace() 318 | 319 | def _joint_callback(self, msg): 320 | with self._joint_lock: 321 | for name, position, velocity, effort in zip(msg.name, msg.position, msg.velocity, msg.effort): 322 | self._angles[name] = position 323 | self._velocities[name] = velocity 324 | self._effort[name] = effort 325 | 326 | def get_joint_angles(self): 327 | ''' 328 | Returns current joint angles 329 | ''' 330 | with self._joint_lock: 331 | try: 332 | return np.array([self._angles[k] for k in self.joint_names]) 333 | except KeyError: 334 | return None 335 | 336 | def get_joint_effort(self): 337 | ''' 338 | Returns current joint angles 339 | ''' 340 | with self._joint_lock: 341 | try: 342 | return np.array([self._effort[k] for k in self.joint_names]) 343 | except KeyError: 344 | return None 345 | 346 | def get_joint_angles_velocity(self): 347 | ''' 348 | Returns velocities for joints 349 | ''' 350 | with self._joint_lock: 351 | try: 352 | return np.array([self._velocities[k] for k in self.joint_names]) 353 | except KeyError: 354 | return None 355 | 356 | def get_cartesian_pose(self, matrix=False): 357 | """Returns cartesian end-effector pose""" 358 | joint_positions = list(self.bot.dxl.joint_states.position[self.bot.arm.waist_index:(self._qn + self.bot.arm.waist_index)]) 359 | pose = FKinSpace(self.bot.arm.robot_des.M, self.bot.arm.robot_des.Slist, joint_positions) 360 | if matrix: 361 | return pose 362 | else: 363 | return np.concatenate([pose[:3, -1], np.array(Quaternion(matrix=pose[:3, :3]).elements)]) 364 | 365 | def _init_gripper(self, gripper_attached, gripper_params): 366 | if gripper_attached == 'custom': 367 | self._gripper = GripperController(robot_name=self.bot.dxl.robot_name, des_pos_max=gripper_params.des_pos_max, des_pos_min=gripper_params.des_pos_min) 368 | self.custom_gripper_controller = True 369 | elif gripper_attached == 'custom_narrow': 370 | self._gripper = GripperController(robot_name=self.bot.dxl.robot_name, upper_limit=0.022, des_pos_max=gripper_params.des_pos_max, des_pos_min=gripper_params.des_pos_min) 371 | self.custom_gripper_controller = True 372 | elif gripper_attached == 'default': 373 | self.custom_gripper_controller = False 374 | else: 375 | raise ValueError("gripper_attached value has to be either 'custom', 'custom_narrow' or 'default'") 376 | 377 | def get_gripper_desired_position(self): 378 | if self.custom_gripper_controller: 379 | return self._gripper.get_gripper_target_position() 380 | else: 381 | return self.des_gripper_state 382 | 383 | def set_continuous_gripper_position(self, target): 384 | assert self.custom_gripper_controller 385 | self._gripper.set_continuous_position(target) 386 | 387 | def get_gripper_position(self): 388 | assert self.custom_gripper_controller # we need the joint_states subscriber to keep track of gripper position 389 | # TODO: for standard gripper we could use WidowX_Controller self._angles to return the current position similarly as custom_gripper_controller 390 | return self.get_continuous_gripper_position() 391 | 392 | def get_continuous_gripper_position(self): 393 | assert self.custom_gripper_controller 394 | return self._gripper.get_continuous_position() 395 | 396 | def wait_until_gripper_position_reached(self): 397 | if self.custom_gripper_controller: 398 | goal_reached = self.get_gripper_desired_position() - 0.075 < self.get_continuous_gripper_position() < self.get_gripper_desired_position() + 0.075 399 | if goal_reached: 400 | # don't wait if we are already at the target 401 | return 402 | # otherwise wait until gripper stopped moving 403 | still_moving = True 404 | previous_position = self.get_continuous_gripper_position() 405 | while still_moving: 406 | time.sleep(0.05) 407 | current_position = self.get_continuous_gripper_position() 408 | still_moving = not (previous_position - 0.01 < current_position < previous_position + 0.01) 409 | previous_position = current_position 410 | # wait a bit to give time for gripper to exert force on the object 411 | time.sleep(0.1) 412 | 413 | def open_gripper(self, wait=False): 414 | if self.custom_gripper_controller: 415 | self._gripper.open() 416 | else: 417 | self.des_gripper_state = np.array([1]) 418 | self.bot.gripper.open() 419 | 420 | def close_gripper(self, wait=False): 421 | if self.custom_gripper_controller: 422 | self._gripper.close() 423 | else: 424 | self.des_gripper_state = np.array([0]) 425 | self.bot.gripper.close() 426 | -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/srv/DisableController.srv: -------------------------------------------------------------------------------- 1 | --- -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/srv/EnableController.srv: -------------------------------------------------------------------------------- 1 | --- -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/srv/GetCartesianPose.srv: -------------------------------------------------------------------------------- 1 | --- 2 | float32[] eep 3 | -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/srv/GetGripperDesiredState.srv: -------------------------------------------------------------------------------- 1 | --- 2 | float32 des_pos 3 | -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/srv/GetState.srv: -------------------------------------------------------------------------------- 1 | --- 2 | float32[] joint_angles 3 | float32[] joint_velocities 4 | float32[] cartesian_pose 5 | -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/srv/GetVRButtons.srv: -------------------------------------------------------------------------------- 1 | --- 2 | int32 handle 3 | int32 a 4 | int32 b 5 | int32 rj 6 | -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/srv/GotoNeutral.srv: -------------------------------------------------------------------------------- 1 | float32 duration 2 | --- -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/srv/MoveToEEP.srv: -------------------------------------------------------------------------------- 1 | float32[] des_eep 2 | float32 duration 3 | --- -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/srv/MoveToState.srv: -------------------------------------------------------------------------------- 1 | float32[] target_xyz 2 | float32 target_zangle 3 | float32 duration 4 | --- 5 | int32 success -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/srv/OpenGripper.srv: -------------------------------------------------------------------------------- 1 | --- -------------------------------------------------------------------------------- /widowx_envs/widowx_controller/srv/SetGripperPosition.srv: -------------------------------------------------------------------------------- 1 | float32 des_pos 2 | --- -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/__init__.py: -------------------------------------------------------------------------------- 1 | name = "widowx_envs" 2 | -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/base/base_env.py: -------------------------------------------------------------------------------- 1 | import random 2 | import numpy as np 3 | from widowx_envs.utils.utils import AttrDict 4 | from widowx_envs.utils.utils import Configurable 5 | 6 | class BaseEnv(Configurable): 7 | 8 | def step(self, action): 9 | """ 10 | Applies the action and steps simulation 11 | :param action: action at time-step 12 | :return: obs dict where: 13 | -each key is an observation at that step 14 | -keys are constant across entire datastep (e.x. every-timestep has 'state' key) 15 | -keys corresponding to numpy arrays should have constant shape every timestep (for caching) 16 | -images should be placed in the 'images' key in a (ncam, ...) array 17 | """ 18 | raise NotImplementedError 19 | 20 | def current_obs(self): 21 | """ 22 | :return: Current environment observation dict 23 | """ 24 | raise NotImplementedError 25 | 26 | def reset(self): 27 | """ 28 | Resets the environment. Returns initial observation as well as information needed to recreate initialization 29 | 30 | :return: obs dict (look at step(self, action) for documentation) 31 | reset_state - All the information needed to recreate environment (can be None if not possible) 32 | """ 33 | raise NotImplementedError 34 | 35 | def valid_rollout(self): 36 | """ 37 | Checks if the environment is currently in a valid state 38 | Common invalid states include: 39 | - object falling out of bin 40 | - mujoco error during rollout 41 | :return: bool value that is False if rollout isn't valid 42 | """ 43 | raise NotImplementedError 44 | 45 | def goal_reached(self): 46 | """ 47 | Checks if the environment hit a goal (if environment has goals) 48 | - e.x. if goal is to lift object should return true if object lifted by gripper 49 | :return: whether or not environment reached goal state 50 | """ 51 | raise NotImplementedError("Environment has No Goal") 52 | 53 | def has_goal(self): 54 | """ 55 | :return: Whether or not environment has a goal 56 | """ 57 | return False 58 | 59 | def render(self): 60 | """ Renders the enviornment. 61 | Implements custom rendering support. If mode is: 62 | 63 | - dual: renders both left and main cameras 64 | - left: renders only left camera 65 | - main: renders only main (front) camera 66 | :param mode: Mode to render with (dual by default) 67 | :return: uint8 numpy array with rendering from sim 68 | """ 69 | raise NotImplementedError("Rendering not implemented in Base Env") 70 | 71 | @property 72 | def adim(self): 73 | """ 74 | :return: Environment's action dimension 75 | """ 76 | raise NotImplementedError 77 | 78 | @property 79 | def sdim(self): 80 | """ 81 | :return: Environment's state dimension 82 | """ 83 | raise NotImplementedError 84 | 85 | def seed(self, seed=None): 86 | random.seed(seed) 87 | np.random.seed(seed) 88 | 89 | def eval(self): 90 | """ 91 | return environment statistics, like distance to goal etc. 92 | :param agentdata: 93 | :return: 94 | """ 95 | pass 96 | 97 | @staticmethod 98 | def get_goal_states_from_obsdict(obs_dict): 99 | """ 100 | :param obs_dict: 101 | :return: goal_arm_pose, goal_obj_pose 102 | """ 103 | raise NotImplementedError 104 | 105 | 106 | @staticmethod 107 | def default_ncam(): 108 | """ 109 | Static class function that describes how many cameras this environment has by default 110 | 111 | NOTE: This can be called by BaseEnv.default_ncam() but CANNOT be called by instatiated class object 112 | Usage is typically for agent to infer default ncam attribute before class creation 113 | 114 | :return: default n_cam (usually 2) 115 | """ 116 | return 2 117 | 118 | def save_recording(self, save_worker, i_traj): 119 | raise NotImplementedError 120 | -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/base/robot_configs.json: -------------------------------------------------------------------------------- 1 | { 2 | "wx250s": { 3 | "bound": [[0.19, -0.08, 0.05, -1.57, 0], [0.31, 0.08, 0.055, 1.57, 0]] 4 | }, 5 | "wx250": { 6 | "bound": [[0.19, -0.08, 0.05, -1.57, 0], [0.31, 0.08, 0.055, 1.57, 0]] 7 | }, 8 | "wx200": { 9 | "bound": [[0.19, -0.08, 0.05, -1.57, 0], [0.31, 0.08, 0.055, 1.57, 0]] 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/policies/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rail-berkeley/bridge_data_robot/b841131ecd512bafb303075bd8f8b677e0bf9f1f/widowx_envs/widowx_envs/policies/__init__.py -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/policies/policy.py: -------------------------------------------------------------------------------- 1 | """ This file defines the base class for the policy. """ 2 | import abc, six 3 | import pickle as pkl 4 | import numpy as np 5 | import pdb 6 | 7 | from widowx_envs.utils.utils import AttrDict, Configurable 8 | 9 | class Policy(Configurable): 10 | """Abstract class for policy.""" 11 | def _default_hparams(self): 12 | dict = AttrDict( 13 | ngpu=1, 14 | gpu_id=0, 15 | ) 16 | default_dict = super()._default_hparams() 17 | default_dict.update(dict) 18 | return default_dict 19 | 20 | def act(self, *args): 21 | """ 22 | Args: 23 | Request necessary arguments in definition 24 | (see Agent code) 25 | Returns: 26 | A dict of outputs D 27 | -One key in D, 'actions' should have the action for this time-step 28 | """ 29 | raise NotImplementedError("Must be implemented in subclass.") 30 | 31 | def reset(self): 32 | pass 33 | 34 | def set_log_dir(self, dir): 35 | self.traj_log_dir = dir 36 | 37 | 38 | class DummyPolicy(Policy): 39 | def __init__(self, ag_params, policyparams): 40 | """ Computes actions from states/observations. """ 41 | pass 42 | 43 | def act(self, *args): 44 | return {'actions': None} 45 | 46 | def reset(self): 47 | return None 48 | 49 | 50 | class ReplayActions(Policy): 51 | def __init__(self, ag_params, policyparams): 52 | """ Computes actions from states/observations. """ 53 | self._hp = self._default_hparams() 54 | self._override_defaults(policyparams) 55 | self.policy_out = pkl.load(open(self._hp.load_file + '/policy_out.pkl', 'rb')) 56 | self.env = ag_params.env 57 | 58 | def _default_hparams(self): 59 | dict = AttrDict( 60 | load_file="", 61 | type=None, 62 | ) 63 | default_dict = super(Policy, self)._default_hparams() 64 | default_dict.update(dict) 65 | return default_dict 66 | 67 | def act(self, t): 68 | return self.policy_out[t] 69 | 70 | def reset(self): 71 | return None 72 | 73 | 74 | class NullPolicy(Policy): 75 | """ 76 | Returns 0 for all timesteps 77 | """ 78 | def __init__(self, ag_params, policyparams): 79 | self._adim = ag_params['adim'] 80 | self._hp = self._default_hparams() 81 | self._override_defaults(policyparams) 82 | 83 | # def _default_hparams(self): 84 | # default_dict = { 85 | # 'wait_for_user': False 86 | # } 87 | # parent_params = super(NullPolicy, self)._default_hparams() 88 | # for k in default_dict.keys(): 89 | # parent_params.add_hparam(k, default_dict[k]) 90 | # return parent_params 91 | 92 | def act(self): 93 | return {'actions': np.zeros(self._adim)} -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/policies/vr_teleop_policy.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/python3 2 | 3 | from widowx_envs.policies.policy import Policy 4 | from widowx_envs.utils.utils import AttrDict 5 | from widowx_envs.control_loops import Environment_Exception 6 | import widowx_envs.utils.transformation_utils as tr 7 | 8 | import numpy as np 9 | import time 10 | 11 | from pyquaternion import Quaternion 12 | from widowx_controller.widowx_controller import publish_transform 13 | 14 | ############################################################################## 15 | 16 | class VRTeleopPolicy(Policy): 17 | def __init__(self, ag_params, policyparams): 18 | 19 | """ Computes actions from states/observations. """ 20 | self._hp = self._default_hparams() 21 | self._override_defaults(policyparams) 22 | 23 | self.last_pressed_times = {} 24 | self.env = ag_params.env_handle 25 | 26 | self.reader = self.env.oculus_reader 27 | # self.prev_vr_transform = None 28 | self.action_space = self.env._hp.action_mode 29 | 30 | self.prev_handle_press = False 31 | self.reference_vr_transform = None 32 | self.reference_robot_transform = None 33 | self.internal_counter = 0 34 | self.internal_counter_default_policy = 0 35 | 36 | def _default_hparams(self): 37 | dict = AttrDict( 38 | load_file="", 39 | type=None, 40 | policy_T=None, 41 | ) 42 | default_dict = super(Policy, self)._default_hparams() 43 | default_dict.update(dict) 44 | return default_dict 45 | 46 | 47 | def get_pose_and_button(self): 48 | poses, buttons = self.reader.get_transformations_and_buttons() 49 | if poses == {}: 50 | return None, None, None, None 51 | return poses['r'], buttons['RTr'], buttons['rightTrig'][0], buttons['RG'] 52 | 53 | 54 | def act_use_fixed_reference(self, t, i_tr, images, task_id): 55 | # print("time update cmds", time.time() - self.last_update_time) 56 | self.last_update_time = time.time() 57 | t1 = time.time() 58 | current_vr_transform, trigger, trigger_continuous, handle_button = self.get_pose_and_button() 59 | if current_vr_transform is None: 60 | return self.get_default_action(t, i_tr, images, task_id) 61 | else: 62 | if not self.prev_handle_press and handle_button: 63 | print("resetting reference pose") 64 | self.internal_counter_default_policy = 0 65 | self.reference_vr_transform = self.oculus_to_robot(current_vr_transform) 66 | self.initial_vr_offset = tr.RpToTrans(np.eye(3), self.reference_vr_transform[:3, 3]) 67 | self.reference_vr_transform = tr.TransInv(self.initial_vr_offset).dot(self.reference_vr_transform) ## 68 | 69 | self.reference_robot_transform, _ = self.env.get_target_state() 70 | if self.action_space == '3trans1rot': 71 | self.reference_robot_transform = self.zero_out_pitchroll(self.reference_robot_transform) 72 | self.prev_commanded_transform = self.reference_robot_transform 73 | 74 | if not handle_button: 75 | self.internal_counter = 0 76 | self.internal_counter_default_policy += 1 77 | self.reference_vr_transform = None 78 | self.reference_robot_transform, _ = self.env.get_target_state() 79 | self.prev_handle_press = False 80 | if self.action_space == '3trans1rot': 81 | self.reference_robot_transform = self.zero_out_pitchroll(self.reference_robot_transform) 82 | self.prev_commanded_transform = self.reference_robot_transform 83 | return self.get_default_action(t, i_tr, images, task_id) 84 | self.prev_handle_press = True 85 | self.internal_counter += 1 86 | 87 | current_vr_transform = self.oculus_to_robot(current_vr_transform) 88 | current_vr_transform = tr.TransInv(self.initial_vr_offset).dot(current_vr_transform) ## 89 | 90 | publish_transform(current_vr_transform, 'currentvr_robotsystem') 91 | delta_vr_transform = current_vr_transform.dot(tr.TransInv(self.reference_vr_transform)) 92 | 93 | publish_transform(self.reference_robot_transform, 'reference_robot_transform') 94 | M_rob, p_rob = tr.TransToRp(self.reference_robot_transform) 95 | M_delta, p_delta = tr.TransToRp(delta_vr_transform) 96 | new_robot_transform = tr.RpToTrans(M_delta.dot(M_rob), p_rob + p_delta) 97 | 98 | if self.action_space == '3trans1rot': 99 | new_robot_transform = self.zero_out_pitchroll(new_robot_transform) 100 | if self.action_space == '3trans': 101 | new_robot_transform = self.zero_out_yawpitchroll(new_robot_transform) 102 | publish_transform(new_robot_transform, 'des_robot_transform') 103 | 104 | prev_target_pos, _ = self.env.get_target_state() 105 | delta_robot_transform = new_robot_transform.dot(tr.TransInv(prev_target_pos)) 106 | publish_transform(delta_robot_transform, 'delta_robot_transform') 107 | self.prev_commanded_transform = new_robot_transform 108 | 109 | des_gripper_position = (1 - trigger_continuous) 110 | actions = tr.transform2action_local(delta_robot_transform, des_gripper_position, self.env._controller.get_cartesian_pose()[:3]) 111 | 112 | if self.env._hp.action_mode == '3trans1rot': 113 | actions = np.concatenate([actions[:3], np.array([actions[5]]), np.array([des_gripper_position])]) # only use the yaw rotation 114 | if self.env._hp.action_mode == '3trans': 115 | actions = np.concatenate([actions[:3], np.array([des_gripper_position])]) # only use the yaw rotation 116 | 117 | if np.linalg.norm(actions[:3]) > 0.5: 118 | print('delta transform too large!') 119 | print('Press c and enter to continue') 120 | import pdb; pdb.set_trace() 121 | raise Environment_Exception 122 | 123 | output = {'actions': actions, 'new_robot_transform':new_robot_transform, 'delta_robot_transform': delta_robot_transform, 'policy_type': 'VRTeleopPolicy'} 124 | 125 | if self._hp.policy_T and self.internal_counter >= self._hp.policy_T: 126 | output['done'] = True 127 | 128 | return output 129 | 130 | def act(self, t=None, i_tr=None, images=None, task_id=None): 131 | return self.act_use_fixed_reference(t, i_tr, images, task_id) 132 | 133 | def get_action(self, obs_np, task_id_vec=None): 134 | dict = self.act(images=obs_np, task_id=task_id_vec) 135 | return dict['actions'], {'policy_type': dict['policy_type']} 136 | 137 | def get_default_action(self, t, i_tr, images, task_id): 138 | return self.get_zero_action() 139 | 140 | def get_zero_action(self): 141 | if self.env._hp.action_mode == '3trans3rot': 142 | actions = np.concatenate([np.zeros(6), np.array([1])]) 143 | elif self.env._hp.action_mode == '3trans1rot': 144 | actions = np.concatenate([np.zeros(4), np.array([1])]) 145 | elif self.env._hp.action_mode == '3trans': 146 | actions = np.concatenate([np.zeros(3), np.array([1])]) 147 | else: 148 | raise NotImplementedError 149 | return {'actions': actions, 'new_robot_transform':np.eye(4), 'delta_robot_transform': np.eye(4), 'policy_type': 'VRTeleopPolicy'} 150 | 151 | def zero_out_pitchroll(self, new_robot_transform): 152 | rot, xyz = tr.TransToRp(new_robot_transform) 153 | euler = tr.rotationMatrixToEulerAngles(rot.dot(self.env._controller.default_rot.transpose()), check_error_thresh=1e-5) 154 | euler[:2] = np.zeros(2) # zeroing out pitch roll 155 | new_rot = tr.eulerAnglesToRotationMatrix(euler).dot(self.env._controller.default_rot) 156 | new_robot_transform = tr.RpToTrans(new_rot, xyz) 157 | return new_robot_transform 158 | 159 | def zero_out_yawpitchroll(self, new_robot_transform): 160 | rot, xyz = tr.TransToRp(new_robot_transform) 161 | euler = tr.rotationMatrixToEulerAngles(rot.dot(self.env._controller.default_rot.transpose()), check_error_thresh=1e-5) 162 | euler = np.zeros(3) # zeroing out yaw pitch roll 163 | new_rot = tr.eulerAnglesToRotationMatrix(euler).dot(self.env._controller.default_rot) 164 | new_robot_transform = tr.RpToTrans(new_rot, xyz) 165 | return new_robot_transform 166 | 167 | def oculus_to_robot(self, current_vr_transform): 168 | current_vr_transform = tr.RpToTrans(Quaternion(axis=[0, 0, 1], angle=np.pi / 2).rotation_matrix, 169 | np.zeros(3)).dot( 170 | tr.RpToTrans(Quaternion(axis=[1, 0, 0], angle=np.pi / 2).rotation_matrix, np.zeros(3))).dot( 171 | current_vr_transform) 172 | return current_vr_transform 173 | 174 | def reset(self): 175 | self.internal_counter = 0 176 | self.internal_counter_default_policy = 0 177 | self.prev_vr_transform = None # used for self.act_use_deltas only 178 | 179 | # used for act_use_fixed_reference only: 180 | self.prev_handle_press = False 181 | self.reference_vr_transform = None 182 | self.reference_robot_transform = None 183 | -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/run_data_collection.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import importlib.util # replaced: import imp due to deprecation 3 | 4 | import json 5 | from widowx_envs.utils.utils import ask_confirm, save_config 6 | import datetime 7 | import os 8 | from widowx_envs.trajectory_collector import TrajectoryCollector 9 | import numpy as np 10 | 11 | class DataCollectionManager(object): 12 | def __init__(self, save_dir_prefix='', args_in=None, hyperparams=None): 13 | """ 14 | :param save_dir_prefix: will be added to the experiment data and training data save paths 15 | specified in the variables $VMPC_DATA and $VMPC_EXP 16 | :param args_in: 17 | :param hyperparams: 18 | """ 19 | parser = argparse.ArgumentParser(description='run parallel data collection') 20 | parser.add_argument('experiment', type=str, help='path to the experiment configuraiton file including mod_hyper.py') 21 | parser.add_argument('--gpu_id', type=int, help='the starting gpu_id', default=0) 22 | parser.add_argument('--ngpu', type=int, help='the number of gpus to use', default=1) 23 | parser.add_argument('--prefix', default='', help='prefixes for data and exp dirs') 24 | 25 | args = parser.parse_args(args_in) 26 | if hyperparams is None: 27 | spec = importlib.util.spec_from_file_location('hyperparams', args.experiment) 28 | hyperparams = importlib.util.module_from_spec(spec) 29 | spec.loader.exec_module(hyperparams) 30 | self.hyperparams = hyperparams.config 31 | # hyperparams = imp.load_source('hyperparams', args.experiment) # TODO: (YL) backup test then removed 32 | # self.hyperparams = hyperparams.config 33 | else: 34 | self.hyperparams = hyperparams 35 | self.args = args 36 | if save_dir_prefix != '': 37 | self.save_dir_prefix = save_dir_prefix 38 | else: 39 | self.save_dir_prefix = args.prefix 40 | self.time_prefix = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") 41 | 42 | def run(self): 43 | hyperparams = self.hyperparams 44 | assert 'collection_metadata' in hyperparams 45 | 46 | self.set_paths(hyperparams) 47 | save_config(hyperparams, hyperparams['data_save_dir']) 48 | 49 | s = TrajectoryCollector(hyperparams) 50 | 51 | metadata_destination = hyperparams['data_save_dir'] 52 | meta_data_dict = json.load(open(hyperparams['collection_metadata'], 'r')) 53 | meta_data_dict['date_time'] = datetime.datetime.now().strftime("_%Y-%m-%d_%H-%M-%S") 54 | 55 | if hyperparams['record_floor_height']: 56 | # sample a trajectory to determine floor height 57 | print('#################################################') 58 | print('#################################################') 59 | print("Move the gripper all the way to the lowest point of the workspace and end the trajectory.") 60 | _, obs_dict, _ = s.agent.sample(s.policies[0], 0) 61 | floor_height = np.min(obs_dict['full_state'][:, 2]) 62 | meta_data_dict['floor_height'] = floor_height 63 | 64 | print('#################################################') 65 | print('#################################################') 66 | for k, v in meta_data_dict.items(): 67 | print('{}: {}'.format(k,v)) 68 | ask_confirm('Is the meta data correct? press y/n') 69 | with open(os.path.join(metadata_destination, "collection_metadata.json"), 'w') as outfile: 70 | json.dump(meta_data_dict, outfile) 71 | 72 | s.run() 73 | 74 | def set_paths(self, hyperparams): 75 | """ 76 | set two directories: 77 | log_dir is for experiment logs, visuals, tensorboards stuff etc. 78 | data_save_dir is for collected datasets 79 | the subpath after the experiments folder is appended to the $VMPC_DATA and $VMPC_EXP directories respectively 80 | """ 81 | project_key = 'robonetv2' 82 | 83 | assert 'experiments' in self.args.experiment 84 | subpath = hyperparams['current_dir'].partition('experiments')[2] 85 | hyperparams['data_save_dir'] = os.path.join(os.environ['DATA'], project_key, subpath.strip("/"), self.save_dir_prefix.strip("/")) 86 | if self.time_prefix != "": 87 | hyperparams['data_save_dir'] = hyperparams['data_save_dir'] + '/' + self.time_prefix 88 | hyperparams['log_dir'] = os.path.join(os.environ['EXP'], project_key, subpath.strip("/"), self.save_dir_prefix.strip("/")) 89 | print('setting data_save_dir to', hyperparams['data_save_dir']) 90 | print('setting log_dir to', hyperparams['log_dir']) 91 | self.hyperparams = hyperparams 92 | 93 | if __name__ == '__main__': 94 | DataCollectionManager().run() 95 | -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/teleop.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import numpy as np 5 | import cv2 6 | from widowx_envs.widowx_env_service import WidowXClient, WidowXConfigs 7 | 8 | def show_video(client, full_image=True): 9 | """ 10 | This shows the video from the camera for a given duration. 11 | Full image is the image before resized to default 256x256. 12 | """ 13 | res = client.get_observation() 14 | if res is None: 15 | print("No observation available... waiting") 16 | return 17 | 18 | if full_image: 19 | img = res["full_image"] 20 | img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR) 21 | else: 22 | img = res["image"] 23 | img = (img.reshape(3, 256, 256).transpose(1, 2, 0) * 255).astype(np.uint8) 24 | cv2.imshow("Robot Camera", img) 25 | cv2.waitKey(20) # 20 ms 26 | 27 | print_yellow = lambda x: print("\033[93m {}\033[00m" .format(x)) 28 | 29 | def print_help(): 30 | print_yellow(" Teleop Controls:") 31 | print_yellow(" w, s : move forward/backward") 32 | print_yellow(" a, d : move left/right") 33 | print_yellow(" z, c : move up/down") 34 | print_yellow(" i, k: rotate yaw") 35 | print_yellow(" j, l: rotate pitch") 36 | print_yellow(" n m: rotate roll") 37 | print_yellow(" space: toggle gripper") 38 | print_yellow(" r: reset robot") 39 | print_yellow(" q: quit") 40 | 41 | def main(): 42 | parser = argparse.ArgumentParser(description='Teleoperation for WidowX Robot') 43 | parser.add_argument('--ip', type=str, default='localhost') 44 | parser.add_argument('--port', type=int, default=5556) 45 | args = parser.parse_args() 46 | 47 | client = WidowXClient(host=args.ip, port=args.port) 48 | client.init(WidowXConfigs.DefaultEnvParams, image_size=256) 49 | 50 | print_help() 51 | cv2.namedWindow("Robot Camera") 52 | is_open = 1 53 | running = True 54 | while running: 55 | # Check for key press 56 | key = cv2.waitKey(100) & 0xFF 57 | 58 | # escape key to quit 59 | if key == ord('q'): 60 | print("Quitting teleoperation.") 61 | running = False 62 | continue 63 | 64 | # Handle key press for robot control 65 | # translation 66 | if key == ord('w'): 67 | client.step_action(np.array([0.01, 0, 0, 0, 0, 0, is_open])) 68 | elif key == ord('s'): 69 | client.step_action(np.array([-0.01, 0, 0, 0, 0, 0, is_open])) 70 | elif key == ord('a'): 71 | client.step_action(np.array([0, 0.01, 0, 0, 0, 0, is_open])) 72 | elif key == ord('d'): 73 | client.step_action(np.array([0, -0.01, 0, 0, 0, 0, is_open])) 74 | elif key == ord('z'): 75 | client.step_action(np.array([0, 0, 0.01, 0, 0, 0, is_open])) 76 | elif key == ord('c'): 77 | client.step_action(np.array([0, 0, -0.01, 0, 0, 0, is_open])) 78 | 79 | # rotation 80 | elif key == ord('i'): 81 | client.step_action(np.array([0, 0, 0, 0.01, 0, 0, is_open])) 82 | elif key == ord('k'): 83 | client.step_action(np.array([0, 0, 0, -0.01, 0, 0, is_open])) 84 | elif key == ord('j'): 85 | client.step_action(np.array([0, 0, 0, 0, 0.01, 0, is_open])) 86 | elif key == ord('l'): 87 | client.step_action(np.array([0, 0, 0, 0, -0.01, 0, is_open])) 88 | elif key == ord('n'): 89 | client.step_action(np.array([0, 0, 0, 0, 0, 0.01, is_open])) 90 | elif key == ord('m'): 91 | client.step_action(np.array([0, 0, 0, 0, 0, -0.01, is_open])) 92 | 93 | # space bar to change gripper state 94 | elif key == ord(' '): 95 | is_open = 1 - is_open 96 | print("Gripper is now: ", is_open) 97 | client.step_action(np.array([0, 0, 0, 0, 0, 0, is_open])) 98 | elif key == ord('r'): 99 | print("Resetting robot...") 100 | client.reset() 101 | print_help() 102 | 103 | show_video(client) 104 | 105 | client.stop() # Properly stop the client 106 | cv2.destroyAllWindows() 107 | print("Teleoperation ended.") 108 | 109 | if __name__ == "__main__": 110 | main() 111 | -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/trajectory_collector.py: -------------------------------------------------------------------------------- 1 | import os 2 | import os.path 3 | import sys 4 | import numpy as np 5 | from widowx_envs.utils.utils import timed, AttrDict, Configurable 6 | from widowx_envs.utils.raw_saver import RawSaver 7 | 8 | sys.path.append('/'.join(str.split(__file__, '/')[:-2])) 9 | 10 | 11 | class TrajectoryCollector(Configurable): 12 | 13 | def __init__(self, config, gpu_id=0, ngpu=1): 14 | self._hp = self._default_hparams() 15 | self._override_defaults(config) 16 | if self._hp.log_dir is not "": 17 | self._hp.agent['log_dir'] = self._hp.log_dir 18 | self.agent = self._hp.agent['type'](self._hp.agent) 19 | self.agent._hp.env_handle = self.agent.env 20 | self.agent._hp.gpu_id = gpu_id 21 | self.agent._hp.ngpu = ngpu 22 | 23 | if isinstance(self._hp.policy, list): # in case multiple policies are used 24 | self.policies = [] 25 | for policy_param in self._hp.policy: 26 | self.policies.append(policy_param['type'](self.agent._hp, policy_param)) 27 | else: 28 | self.policies = [self._hp.policy['type'](self.agent._hp, self._hp.policy)] 29 | 30 | self.trajectory_list = [] 31 | self.im_score_list = [] 32 | try: 33 | os.remove(self._hp.agent['image_dir']) 34 | except: 35 | pass 36 | 37 | self.savers = {} 38 | 39 | self.efforts = [] 40 | 41 | if 'raw' in self._hp.save_format: 42 | self.savers['raw'] = RawSaver(self._hp.data_save_dir) 43 | 44 | 45 | def _default_hparams(self): 46 | default_dict = AttrDict({ 47 | 'save_format': ['hdf5', 'raw'], 48 | 'save_data': True, 49 | 'agent': {}, 50 | 'policy': {}, 51 | 'start_index': -1, 52 | 'end_index': -1, 53 | 'ntraj': -1, 54 | 'gpu_id': -1, 55 | 'current_dir': '', 56 | 'traj_per_file': 1, 57 | 'data_save_dir': '', 58 | 'log_dir': '', 59 | 'split_train_val_test': True, 60 | 'write_traj_summaries': False, 61 | 'collection_metadata': None, 62 | 'make_diagnostics': False 63 | }) 64 | return default_dict 65 | 66 | def run(self): 67 | for i in range(self._hp.start_index, self._hp.end_index+1): 68 | for policy in self.policies: 69 | self.take_sample(i, policy) 70 | 71 | @timed('traj sample time: ') 72 | def take_sample(self, index, policy): 73 | """ 74 | :param index: run a single trajectory with index 75 | :return: 76 | """ 77 | agent_data, obs_dict, policy_out = self.agent.sample(policy, index) 78 | if self._hp.save_data: 79 | self.save_data(index, agent_data, obs_dict, policy_out) 80 | if self._hp.make_diagnostics: 81 | self.make_diagnostics(obs_dict) 82 | 83 | @timed('savingtime: ') 84 | def save_data(self, itr, agent_data, obs_dict, policy_outputs): 85 | for name, saver in self.savers.items(): # if directly saving data 86 | saver.save_traj(itr, agent_data, obs_dict, policy_outputs) 87 | 88 | def make_diagnostics(self, obs): 89 | effort = obs['joint_effort'] 90 | max_effort = np.max(np.abs(effort), 0) 91 | print('max effort', max_effort) 92 | self.efforts.append(max_effort) 93 | max_over_all_runs = np.max(np.stack(self.efforts, axis=0), axis=0) 94 | print('max over all runs', max_over_all_runs) 95 | 96 | xpos = obs['state'][:, 0] 97 | des_xpos = obs['desired_state'][:, 0] 98 | 99 | images = obs['images'].astype(np.float32)/255 100 | image_delta = np.abs(images[1:][:, 0] - images[:-1][:, 0]) 101 | tlen = image_delta.shape[0] 102 | image_delta = np.mean(image_delta.reshape(tlen, -1), axis=1) 103 | 104 | import matplotlib.pyplot as plt 105 | import matplotlib 106 | matplotlib.use('agg') 107 | plt.figure() 108 | ax = plt.gca() 109 | ax.plot(xpos, label='xpos') 110 | plt.plot(des_xpos, label='despos') 111 | xvalues = range(1, 1+ image_delta.shape[0]) 112 | plt.plot(xvalues, image_delta, label='image_delta') 113 | ax.legend() 114 | plt.grid() 115 | print('saving figure', self._hp.data_save_dir + '/diagnostics.png') 116 | plt.savefig(self._hp.data_save_dir + '/diagnostics.png') 117 | # import pdb; pdb.set_trace() 118 | 119 | -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/utils/__init__.py: -------------------------------------------------------------------------------- 1 | from .utils import * 2 | -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/utils/exceptions.py: -------------------------------------------------------------------------------- 1 | class Bad_Traj_Exception(Exception): 2 | def __init__(self): 3 | pass 4 | 5 | 6 | class Image_Exception(Exception): 7 | def __init__(self): 8 | pass 9 | 10 | 11 | class Environment_Exception(Exception): 12 | def __init__(self): 13 | pass 14 | -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/utils/grasp_utils.py: -------------------------------------------------------------------------------- 1 | from sklearn.linear_model import LinearRegression 2 | import numpy as np 3 | import time 4 | from sklearn.preprocessing import PolynomialFeatures 5 | 6 | 7 | def compute_robot_transformation_matrix(a, b): 8 | lr = LinearRegression(fit_intercept=False).fit(a, b) 9 | return lr.coef_.T 10 | 11 | 12 | def convert_obs_to_image(obs, transpose=False): 13 | print("taking picture...") 14 | image = np.uint8(np.reshape(obs['image'] * 255, (3, 64, 64))) 15 | if transpose: image = np.transpose(image, (1, 2, 0)) 16 | # print("image.shape", image.shape) 17 | return image 18 | 19 | 20 | def rgb_to_robot_coords(rgb_coords, transmatrix): 21 | # add vector of 1s as feature to the pc_coords. 22 | assert len(rgb_coords.shape) <= 2 23 | if len(rgb_coords.shape) == 1: 24 | rgb_coords = np.array(rgb_coords[None]) 25 | poly = PolynomialFeatures(2) 26 | rgb_coords = poly.fit_transform(rgb_coords) 27 | 28 | if transmatrix is not None: 29 | robot_coords = rgb_coords @ transmatrix 30 | return np.squeeze(robot_coords) 31 | 32 | def get_image_obs(env, image_xyz=None, skip_move_to_neutral=False): 33 | joint_angles = env._controller.get_joint_angles() 34 | if image_xyz is None: 35 | if not skip_move_to_neutral: 36 | env.move_to_neutral(0.5) 37 | # else: 38 | # env.reset() 39 | else: 40 | env.move_to_state(image_xyz, target_zangle=0, duration=0.5) 41 | time.sleep(0.2) # wait for camera to catch up 42 | obs = env.current_obs() 43 | env._controller.set_joint_angles(joint_angles, 0.5) 44 | return obs 45 | 46 | 47 | def get_image(env, transpose=True, image_xyz=None, skip_move_to_neutral=False): 48 | obs = get_image_obs(env, image_xyz, skip_move_to_neutral) 49 | return convert_obs_to_image(obs, transpose=transpose) 50 | 51 | 52 | def execute_reach(env, reach_policy, reachpoint, noise=0.0): 53 | reach_policy.reset(reach_point=reachpoint) 54 | for i in range(6): 55 | action, _ = reach_policy.get_action() 56 | 57 | # noise 58 | noise_dims = 2 59 | noise_stds = [noise] * noise_dims + [0] * (len(action) - noise_dims) 60 | action = np.random.normal(loc=action, scale=noise_stds) 61 | action = np.clip(action, -1.0, 1.0) 62 | # import ipdb; ipdb.set_trace() 63 | obs, _, _, _ = env.step(action) 64 | return obs 65 | -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/utils/image_utils.py: -------------------------------------------------------------------------------- 1 | import cv2 2 | import moviepy.editor as mpy 3 | import os 4 | import numpy as np 5 | from PIL import Image, ImageDraw 6 | from PIL import Image 7 | 8 | def resize_store(t, target_array, input_array): 9 | target_img_height, target_img_width = target_array.shape[2:4] 10 | 11 | if (target_img_height, target_img_width) == input_array.shape[1:3]: 12 | for i in range(input_array.shape[0]): 13 | target_array[t, i] = input_array[i] 14 | else: 15 | for i in range(input_array.shape[0]): 16 | target_array[t, i] = cv2.resize(input_array[i], (target_img_width, target_img_height), 17 | interpolation=cv2.INTER_AREA) 18 | 19 | 20 | def npy_to_gif(im_list, filename, fps=4): 21 | save_dir = '/'.join(str.split(filename, '/')[:-1]) 22 | 23 | if not os.path.exists(save_dir): 24 | print('creating directory: ', save_dir) 25 | os.makedirs(save_dir) 26 | 27 | clip = mpy.ImageSequenceClip(im_list, fps=fps) 28 | clip.write_gif(filename + '.gif') 29 | 30 | 31 | def npy_to_mp4(im_list, filename, fps=4): 32 | save_dir = '/'.join(str.split(filename, '/')[:-1]) 33 | 34 | if not os.path.exists(save_dir): 35 | print('creating directory: ', save_dir) 36 | os.mkdir(save_dir) 37 | 38 | clip = mpy.ImageSequenceClip(im_list, fps=fps) 39 | clip.write_videofile(filename + '.mp4') 40 | 41 | def draw_text_image(text, background_color=(255,255,255), image_size=(30, 64), dtype=np.float32): 42 | 43 | text_image = Image.new('RGB', image_size[::-1], background_color) 44 | draw = ImageDraw.Draw(text_image) 45 | if text: 46 | draw.text((4, 0), text, fill=(0, 0, 0)) 47 | if dtype == np.float32: 48 | return np.array(text_image).astype(np.float32)/255. 49 | else: 50 | return np.array(text_image) 51 | 52 | 53 | def draw_text_onimage(text, image, color=(255, 0, 0)): 54 | if image.dtype == np.float32: 55 | image = (image*255.).astype(np.uint8) 56 | assert image.dtype == np.uint8 57 | text_image = Image.fromarray(image) 58 | draw = ImageDraw.Draw(text_image) 59 | draw.text((4, 0), text, fill=color) 60 | return np.array(text_image).astype(np.float32)/255. 61 | -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/utils/metadata_helper.py: -------------------------------------------------------------------------------- 1 | import h5py 2 | import pandas as pd 3 | import numpy as np 4 | import glob 5 | import os 6 | from tqdm import tqdm 7 | from multiprocessing import Pool, cpu_count 8 | import hashlib 9 | import io 10 | import random 11 | 12 | 13 | class MetaDataContainer: 14 | def __init__(self, base_path, meta_data): 15 | self._meta_data = meta_data 16 | self._base_path = base_path 17 | 18 | def get_file_metadata(self, fname): 19 | fname = fname.split('/')[-1] 20 | return self._meta_data.loc[fname] 21 | 22 | def select_objects(self, obj_class_name): 23 | if isinstance(obj_class_name, str): 24 | return self._meta_data[[obj_class_name in x for x in self._meta_data['object_classes']]] 25 | return self._meta_data[[set(obj_class_name) == set(x) for x in self._meta_data['object_classes']]] 26 | 27 | @property 28 | def frame(self): 29 | return self._meta_data 30 | 31 | @property 32 | def files(self): 33 | return ['{}/{}'.format(self._base_path, f) for f in self.frame.index] 34 | 35 | def get_shuffled_files(self, rng=None): 36 | files = ['{}/{}'.format(self._base_path, f) for f in self.frame.index] 37 | if rng: 38 | rng.shuffle(files) 39 | else: 40 | random.shuffle(files) 41 | return files 42 | 43 | @property 44 | def base_path(self): 45 | return self._base_path 46 | 47 | def __getitem__(self, arg): 48 | return MetaDataContainer(self._base_path, self._meta_data[arg]) 49 | 50 | def __contains__(self, item): 51 | return item in self._meta_data 52 | 53 | def __repr__(self): 54 | return repr(self._meta_data) 55 | 56 | def __str__(self): 57 | return str(self._meta_data) 58 | 59 | def __eq__(self, other): 60 | return self._meta_data == other 61 | 62 | def __ne__(self, other): 63 | return self._meta_data != other 64 | 65 | def __lt__(self, other): 66 | return self._meta_data < other 67 | 68 | def __le__(self, other): 69 | return self._meta_data <= other 70 | 71 | def __gt__(self, other): 72 | return self._meta_data > other 73 | 74 | def __ge__(self, other): 75 | return self._meta_data >= other 76 | 77 | def keys(self): 78 | return self._meta_data.keys() 79 | 80 | def __len__(self): 81 | return len(self._meta_data) 82 | 83 | 84 | def load_metadata_dict(fname): 85 | if not os.path.exists(fname) or not os.path.isfile(fname): 86 | raise IOError("can't find {}".format(fname)) 87 | buf = open(fname, 'rb').read() 88 | # with h5py.File(io.BytesIO(buf)) as hf: 89 | with h5py.File(fname, 'r') as hf: 90 | meta_data_dict = {'file_version': hf['file_version'][()]} 91 | 92 | meta_data_dict['sha256'] = hashlib.sha256(buf).hexdigest() 93 | 94 | if len(hf['policy']['actions'].shape) == 1: # when single time-steps saved in hdf5 instead of video 95 | meta_data_dict['sdim'] = hf['env']['state'].shape[0] 96 | meta_data_dict['adim'] = hf['policy']['actions'].shape[0] 97 | meta_data_dict['frame_dim'] = hf['env']['image'].attrs['shape'][:2] 98 | else: # whole video saved in hdf5: 99 | meta_data_dict['sdim'] = hf['env']['state'].shape[1] 100 | meta_data_dict['state_T'] = hf['env']['state'].shape[0] 101 | 102 | meta_data_dict['adim'] = hf['policy']['actions'].shape[1] 103 | meta_data_dict['action_T'] =hf['policy']['actions'].shape[0] 104 | 105 | # assumes all cameras have same attributes (if they exist) 106 | n_cams = hf['env'].attrs.get('n_cams', 0) 107 | if n_cams: 108 | meta_data_dict['ncam'] = n_cams 109 | 110 | if hf['env'].attrs['cam_encoding'] == 'mp4': 111 | meta_data_dict['frame_dim'] = hf['env']['cam0_video']['frames'].attrs['shape'][:2] 112 | meta_data_dict['img_T'] = hf['env']['cam0_video']['frames'].attrs['T'] 113 | meta_data_dict['img_encoding'] = 'mp4' 114 | meta_data_dict['image_format'] = hf['env']['cam0_video']['frames'].attrs['image_format'] 115 | else: 116 | meta_data_dict['frame_dim'] = hf['env']['cam0_video']['frame0'].attrs['shape'][:2] 117 | meta_data_dict['image_format'] = hf['env']['cam0_video']['frame0'].attrs['image_format'] 118 | meta_data_dict['img_encoding'] = 'jpg' 119 | meta_data_dict['img_T'] = len(hf['env']['cam0_video']) 120 | 121 | # TODO: remove misc field and shift all to meta-data 122 | for k in hf['misc'].keys(): 123 | assert k not in meta_data_dict, "key {} already present!".format(k) 124 | meta_data_dict[k] = hf['misc'][k][()] 125 | 126 | for k in hf['metadata'].attrs.keys(): 127 | assert k not in meta_data_dict, "key {} already present!".format(k) 128 | meta_data_dict[k] = hf['metadata'].attrs[k] 129 | 130 | if 'low_bound' not in meta_data_dict and 'low_bound' in hf['env']: 131 | meta_data_dict['low_bound'] = hf['env']['low_bound'][0] 132 | 133 | if 'high_bound' not in meta_data_dict and 'high_bound' in hf['env']: 134 | meta_data_dict['high_bound'] = hf['env']['high_bound'][0] 135 | 136 | return meta_data_dict 137 | 138 | def get_metadata_frame(files): 139 | if isinstance(files, str): 140 | base_path = files 141 | files = sorted(glob.glob('{}/*.hdf5'.format(files))) 142 | if not files: 143 | raise ValueError('no hdf5 files found at {}/*.hdf5'.format(base_path)) 144 | 145 | if os.path.exists('{}/meta_data.pkl'.format(base_path)): 146 | meta_data = pd.read_pickle('{}/meta_data.pkl'.format(base_path), compression='gzip') 147 | 148 | registered_fnames = set([f for f in meta_data.index]) 149 | loaded_fnames = set([f.split('/')[-1] for f in files]) 150 | 151 | if loaded_fnames == registered_fnames: 152 | return meta_data 153 | os.remove('{}/meta_data.pkl'.format(base_path)) 154 | print('regenerating meta_data file!') 155 | elif isinstance(files, (list, tuple)): 156 | base_path=None 157 | files = sorted(files) 158 | else: 159 | raise ValueError("Must be path to files or list/tuple of filenames") 160 | 161 | parallel = True 162 | if not parallel: 163 | meta_data = [load_metadata_dict(f) for f in files] 164 | else: 165 | with Pool(cpu_count()) as p: 166 | meta_data = list(tqdm(p.imap(load_metadata_dict, files), total=len(files))) 167 | 168 | data_frame = pd.DataFrame(meta_data, index=[f.split('/')[-1] for f in files]) 169 | if base_path: 170 | data_frame.to_pickle("{}/meta_data.pkl".format(base_path), compression='gzip') 171 | return data_frame 172 | 173 | 174 | def load_metadata(files): 175 | print('loading metadata') 176 | base_path = files 177 | if isinstance(files, (tuple, list)): 178 | base_path = '' 179 | else: 180 | files = base_path = os.path.expanduser(base_path) 181 | 182 | return MetaDataContainer(base_path, get_metadata_frame(files)) 183 | 184 | 185 | if __name__ == '__main__': 186 | import argparse 187 | import pdb 188 | 189 | parser = argparse.ArgumentParser(description="calculates or loads meta_data frame") 190 | parser.add_argument('path', help='path to files containing hdf5 dataset') 191 | args = parser.parse_args() 192 | data_frame = load_metadata(args.path) 193 | pdb.set_trace() 194 | print('loaded frame') 195 | -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/utils/raw_saver.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import pickle as pkl 4 | import cv2 5 | import copy 6 | from widowx_envs.utils.utils import AttrDict 7 | import glob 8 | 9 | import numpy as np 10 | 11 | def count_trajs(save_dir): 12 | 13 | base_folder = '/'.join(str.split(save_dir, '/')[:-1]) 14 | trajs = glob.glob(base_folder + '/*/raw/traj_group*/*') 15 | return len(trajs) 16 | 17 | class RawSaver(): 18 | def __init__(self, save_dir, ngroup=1000): 19 | self.save_dir = save_dir 20 | self.ngroup = ngroup 21 | 22 | def save_traj(self, itr, agent_data=None, obs_dict=None, policy_outputs=None, reward_data=None): 23 | 24 | print("##################################") 25 | print('saving trajectory number {}'.format(count_trajs(self.save_dir) + 1)) 26 | print("##################################") 27 | 28 | igrp = itr // self.ngroup 29 | group_folder = os.path.join(self.save_dir , 'raw/traj_group{}'.format(igrp)) 30 | if not os.path.exists(group_folder): 31 | os.makedirs(group_folder) 32 | 33 | traj_folder = os.path.join(group_folder , 'traj{}'.format(itr)) 34 | if os.path.exists(traj_folder): 35 | print('trajectory folder {} already exists, deleting the folder'.format(traj_folder)) 36 | shutil.rmtree(traj_folder) 37 | 38 | os.makedirs(traj_folder) 39 | print('creating: ', traj_folder) 40 | 41 | if 'images' in obs_dict: 42 | images = obs_dict['images'] 43 | T, n_cams = images.shape[:2] 44 | for i in range(n_cams): 45 | os.mkdir(traj_folder + '/images{}'.format(i)) 46 | for t in range(T): 47 | for i in range(n_cams): 48 | args = AttrDict(i=i, images=images, t=t, traj_folder=traj_folder) 49 | save_single(args) 50 | if 'depth_images' in obs_dict: 51 | depth_images = obs_dict['depth_images'] 52 | T, n_cams = depth_images.shape[:2] 53 | for i in range(n_cams): 54 | os.mkdir(traj_folder + '/depth_images{}'.format(i)) 55 | for t in range(T): 56 | for i in range(n_cams): 57 | cv2.imwrite('{}/depth_images{}/im_{}.png'.format(traj_folder, i, t), depth_images[t, i]) 58 | 59 | if agent_data is not None: 60 | with open('{}/agent_data.pkl'.format(traj_folder), 'wb') as file: 61 | pkl.dump(agent_data, file) 62 | if obs_dict is not None: 63 | obs_dict_no_image = copy.deepcopy(obs_dict) 64 | obs_dict_no_image.pop('images') 65 | if "depth_images" in obs_dict_no_image: 66 | obs_dict_no_image.pop('depth_images') 67 | with open('{}/obs_dict.pkl'.format(traj_folder), 'wb') as file: 68 | pkl.dump(obs_dict_no_image, file) 69 | if policy_outputs is not None: 70 | with open('{}/policy_out.pkl'.format(traj_folder), 'wb') as file: 71 | pkl.dump(policy_outputs, file) 72 | if reward_data is not None: 73 | with open('{}/reward_data.pkl'.format(traj_folder), 'wb') as file: 74 | pkl.dump(reward_data, file) 75 | 76 | class RawSaverRailRL(RawSaver): 77 | def __init__(self, save_dir, ngroup=1000): 78 | super(RawSaverRailRL, self).__init__(save_dir, ngroup=1000) 79 | self.i_traj = 0 80 | 81 | def save(self, path): 82 | obs = convert_listofdicts2dictoflists(path['full_observations']) 83 | self.save_traj(self.i_traj, 84 | {'agent_infos': path['agent_infos'], 'env_infos': path['env_infos']}, 85 | obs, path['actions'], path['rewards']) 86 | print('saving traj {} done'.format(self.i_traj)) 87 | self.i_traj += 1 88 | 89 | 90 | def convert_listofdicts2dictoflists(list): 91 | obs_dict = {} 92 | for key in list[0].keys(): 93 | vecs = [] 94 | for tstep in list: 95 | vecs.append(tstep[key]) 96 | obs_dict[key] = np.stack(vecs, 0) 97 | return obs_dict 98 | 99 | 100 | def save_single(arg): 101 | cv2.imwrite('{}/images{}/im_{}.jpg'.format(arg.traj_folder, arg.i, arg.t), arg.images[arg.t, arg.i, :, :, ::-1]) 102 | return True 103 | 104 | 105 | -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/utils/sync.py: -------------------------------------------------------------------------------- 1 | from multiprocessing import Value, Lock 2 | 3 | 4 | class SyncCounter: 5 | def __init__(self, base_value=0): 6 | self._lock = Lock() 7 | self._value = Value('i', base_value) 8 | 9 | @property 10 | def ret_increment(self): 11 | with self._lock: 12 | ret_val = self._value.value 13 | self._value.value += 1 14 | return ret_val 15 | 16 | @property 17 | def value(self): 18 | with self._lock: 19 | ret_val = self._value.value 20 | return ret_val 21 | 22 | 23 | class ManagedSyncCounter(SyncCounter): 24 | def __init__(self, manager, base_value=0): 25 | self._lock, self._value = manager.Lock(), manager.Value('i', base_value) 26 | -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/utils/transformation_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import math 3 | 4 | def TransToRp(T): 5 | """Converts a homogeneous transformation matrix into a rotation matrix 6 | and position vector 7 | 8 | :param T: A homogeneous transformation matrix 9 | :return R: The corresponding rotation matrix, 10 | :return p: The corresponding position vector. 11 | 12 | Example Input: 13 | T = np.array([[1, 0, 0, 0], 14 | [0, 0, -1, 0], 15 | [0, 1, 0, 3], 16 | [0, 0, 0, 1]]) 17 | Output: 18 | (np.array([[1, 0, 0], 19 | [0, 0, -1], 20 | [0, 1, 0]]), 21 | np.array([0, 0, 3])) 22 | """ 23 | T = np.array(T) 24 | return T[0: 3, 0: 3], T[0: 3, 3] 25 | 26 | def RpToTrans(R, p): 27 | """Converts a rotation matrix and a position vector into homogeneous 28 | transformation matrix 29 | 30 | :param R: A 3x3 rotation matrix 31 | :param p: A 3-vector 32 | :return: A homogeneous transformation matrix corresponding to the inputs 33 | 34 | Example Input: 35 | R = np.array([[1, 0, 0], 36 | [0, 0, -1], 37 | [0, 1, 0]]) 38 | p = np.array([1, 2, 5]) 39 | Output: 40 | np.array([[1, 0, 0, 1], 41 | [0, 0, -1, 2], 42 | [0, 1, 0, 5], 43 | [0, 0, 0, 1]]) 44 | """ 45 | return np.r_[np.c_[R, p], [[0, 0, 0, 1]]] 46 | 47 | def TransInv(T): 48 | """Inverts a homogeneous transformation matrix 49 | 50 | :param T: A homogeneous transformation matrix 51 | :return: The inverse of T 52 | Uses the structure of transformation matrices to avoid taking a matrix 53 | inverse, for efficiency. 54 | 55 | Example input: 56 | T = np.array([[1, 0, 0, 0], 57 | [0, 0, -1, 0], 58 | [0, 1, 0, 3], 59 | [0, 0, 0, 1]]) 60 | Output: 61 | np.array([[1, 0, 0, 0], 62 | [0, 0, 1, -3], 63 | [0, -1, 0, 0], 64 | [0, 0, 0, 1]]) 65 | """ 66 | R, p = TransToRp(T) 67 | Rt = np.array(R).T 68 | return np.r_[np.c_[Rt, -np.dot(Rt, p)], [[0, 0, 0, 1]]] 69 | 70 | 71 | 72 | # Calculates Rotation Matrix given euler angles. 73 | def eulerAnglesToRotationMatrix(theta): 74 | 75 | R_x = np.array([[1, 0, 0 ], 76 | [0, math.cos(theta[0]), -math.sin(theta[0]) ], 77 | [0, math.sin(theta[0]), math.cos(theta[0]) ] 78 | ]) 79 | 80 | R_y = np.array([[math.cos(theta[1]), 0, math.sin(theta[1]) ], 81 | [0, 1, 0 ], 82 | [-math.sin(theta[1]), 0, math.cos(theta[1]) ] 83 | ]) 84 | 85 | R_z = np.array([[math.cos(theta[2]), -math.sin(theta[2]), 0], 86 | [math.sin(theta[2]), math.cos(theta[2]), 0], 87 | [0, 0, 1] 88 | ]) 89 | 90 | R = np.dot(R_z, np.dot( R_y, R_x )) 91 | 92 | return R 93 | 94 | # Checks if a matrix is a valid rotation matrix. 95 | def isRotationMatrix(R, thresh=1e-6): 96 | Rt = np.transpose(R) 97 | shouldBeIdentity = np.dot(Rt, R) 98 | I = np.identity(3, dtype = R.dtype) 99 | n = np.linalg.norm(I - shouldBeIdentity) 100 | return n < thresh 101 | 102 | 103 | # Calculates rotation matrix to euler angles 104 | # The result is the same as MATLAB except the order 105 | # of the euler angles ( x and z are swapped ). 106 | def rotationMatrixToEulerAngles(R, check_error_thresh=1e-6): 107 | 108 | assert(isRotationMatrix(R, check_error_thresh)) 109 | 110 | sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0]) 111 | 112 | singular = sy < 1e-6 113 | 114 | if not singular : 115 | x = math.atan2(R[2,1] , R[2,2]) 116 | y = math.atan2(-R[2,0], sy) 117 | z = math.atan2(R[1,0], R[0,0]) 118 | else : 119 | x = math.atan2(-R[1,2], R[1,1]) 120 | y = math.atan2(-R[2,0], sy) 121 | z = 0 122 | 123 | return np.array([x, y, z]) 124 | 125 | 126 | def transform2state(eep, desired_gripper_position, default_rotation): 127 | assert eep.shape == (4, 4) 128 | if isinstance(desired_gripper_position, float): 129 | desired_gripper_position = np.array([desired_gripper_position]) 130 | assert desired_gripper_position.shape == (1,) 131 | 132 | rot, xyz = TransToRp(eep) 133 | euler = rotationMatrixToEulerAngles(rot.dot(default_rotation.transpose()), check_error_thresh=1e-5) 134 | return np.concatenate([xyz, euler, desired_gripper_position]) 135 | 136 | def state2transform(state, default_rotation): 137 | assert state.shape == (7,) 138 | xyz, euler, gripper_state = state[:3], state[3:6], state[6:] 139 | trans = RpToTrans(eulerAnglesToRotationMatrix(euler).dot(default_rotation), xyz) 140 | return trans, gripper_state 141 | 142 | def action2transform_local(actions, current_eef_position): 143 | """ 144 | actions: [xyz deltas, and euler rotation angles, grasp_action], the rotation is around the position of the end-effector (axes are the same as world) 145 | current_eef_position: xyz of current end-effector 146 | 147 | return: transformation (4x4) and desired gripper state 148 | """ 149 | assert current_eef_position.shape == (3,) 150 | assert actions.shape == (7,) 151 | 152 | Teef = RpToTrans(np.eye(3), current_eef_position) 153 | 154 | xyz, euler, gripper_state = actions[:3], actions[3:6], actions[6] 155 | trans = RpToTrans(eulerAnglesToRotationMatrix(euler), xyz) 156 | 157 | trans = Teef.dot(trans).dot(TransInv(Teef)) 158 | return trans, gripper_state 159 | 160 | def transform2action_local(transform, gripper_action, current_eef_position): 161 | """ 162 | transform: 4x4 matrix, transform between current eef_pose in world coordinates to next eef_pose 163 | current_eef_position: xyz of current end-effector 164 | 165 | return: [xyz deltas, and euler rotation angles, grasp_action], the rotation is around the position of the end-effector (axes are the same as world) 166 | """ 167 | assert current_eef_position.shape == (3,) 168 | assert transform.shape == (4, 4) 169 | Teef = RpToTrans(np.eye(3), current_eef_position) 170 | transform = TransInv(Teef).dot(transform).dot(Teef) 171 | 172 | if isinstance(gripper_action, float): 173 | gripper_action = np.array([gripper_action]) 174 | assert gripper_action.shape == (1,) 175 | 176 | rot, xyz = TransToRp(transform) 177 | euler = rotationMatrixToEulerAngles(rot, check_error_thresh=1e-5) 178 | return np.concatenate([xyz, euler, gripper_action]) 179 | -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/utils/utils.py: -------------------------------------------------------------------------------- 1 | import time 2 | from contextlib import contextmanager 3 | import os 4 | import sys 5 | from funcsigs import signature, Parameter 6 | import numpy as np 7 | import json 8 | import inspect 9 | import yaml 10 | 11 | class AttrDict(dict): 12 | __setattr__ = dict.__setitem__ 13 | 14 | def __getattr__(self, attr): 15 | # Take care that getattr() raises AttributeError, not KeyError. 16 | # Required e.g. for hasattr(), deepcopy and OrderedDict. 17 | try: 18 | return self.__getitem__(attr) 19 | except KeyError: 20 | raise AttributeError("Attribute %r not found" % attr) 21 | 22 | def __getstate__(self): return self 23 | def __setstate__(self, d): self = d 24 | 25 | 26 | def np_unstack(array, axis): 27 | arr = np.split(array, array.shape[axis], axis) 28 | arr = [a.squeeze() for a in arr] 29 | return arr 30 | 31 | 32 | def get_policy_args(policy, obs, t, i_tr, step_data=None): 33 | """ 34 | Generalized way to get data from agent and pass to policy 35 | :param policy: Subclass of Policy 36 | :param obs: obs_dict from agent 37 | :param t: current timestep 38 | :param i_tr: current traj number 39 | :param step_data: dict of step specific data passed in by agent 40 | :return: dictionary mapping keys that the policy requests (by way of argument in policy.act) to their value 41 | """ 42 | 43 | policy_args = {} 44 | policy_signature = signature(policy.act) # Gets arguments required by policy 45 | for arg in policy_signature.parameters: # Fills out arguments according to their keyword 46 | if arg == 'args': 47 | return {} 48 | value = policy_signature.parameters[arg].default 49 | if arg in obs: 50 | value = obs[arg] 51 | elif step_data is not None and arg in step_data: 52 | value = step_data[arg] 53 | 54 | # everthing that is not cached in post_process_obs is assigned here: 55 | elif arg == 't': 56 | value = t 57 | elif arg == 'i_tr': 58 | value = i_tr 59 | elif arg == 'obs': # policy can ask for all arguments from environment 60 | value = obs 61 | elif arg == 'step_data': 62 | value = step_data 63 | elif arg == 'goal_pos': 64 | value = step_data['goal_pos'] 65 | 66 | if value is Parameter.empty: 67 | # required parameters MUST be set by agent 68 | raise ValueError("Required Policy Param {} not set in agent".format(arg)) 69 | policy_args[arg] = value 70 | # import pdb; pdb.set_trace() 71 | return policy_args 72 | 73 | class Configurable(object): 74 | def _override_defaults(self, policyparams, identical_default_ok=False): 75 | if policyparams is None: 76 | return 77 | for name, value in policyparams.items(): 78 | # if name not in self._hp.keys(): 79 | # raise ValueError('key {} not in hparam'.format(name)) 80 | # print('overriding param {} to value {}'.format(name, value)) 81 | # if value == getattr(self._hp, name) and not identical_default_ok: 82 | # raise ValueError("attribute {} is identical to default value {} !!".format(name, self._hp[name])) 83 | self._hp[name] = value 84 | 85 | def _default_hparams(self): 86 | return AttrDict() 87 | 88 | 89 | @contextmanager 90 | def timing(text): 91 | start = time.time() 92 | yield 93 | elapsed = time.time() - start 94 | print("{} {}".format(text, elapsed)) 95 | 96 | 97 | class timed: 98 | """ A function decorator that prints the elapsed time """ 99 | 100 | def __init__(self, text): 101 | """ Decorator parameters """ 102 | self.text = text 103 | 104 | def __call__(self, func): 105 | """ Wrapping """ 106 | 107 | def wrapper(*args, **kwargs): 108 | with timing(self.text): 109 | result = func(*args, **kwargs) 110 | return result 111 | 112 | return wrapper 113 | 114 | 115 | def map_dict(fn, d): 116 | """takes a dictionary and applies the function to every element""" 117 | return type(d)(map(lambda kv: (kv[0], fn(kv[1])), d.items())) 118 | 119 | 120 | def make_recursive(fn, *argv, **kwargs): 121 | """ Takes a fn and returns a function that can apply fn on tensor structure 122 | which can be a single tensor, tuple or a list. """ 123 | 124 | def recursive_map(tensors): 125 | if tensors is None: 126 | return tensors 127 | elif isinstance(tensors, list) or isinstance(tensors, tuple): 128 | return type(tensors)(map(recursive_map, tensors)) 129 | elif isinstance(tensors, dict): 130 | return type(tensors)(map_dict(recursive_map, tensors)) 131 | else: 132 | try: 133 | return fn(tensors, *argv, **kwargs) 134 | except Exception as e: 135 | print("The following error was raised when recursively applying a function:") 136 | print(e) 137 | raise ValueError("Type {} not supported for recursive map".format(type(tensors))) 138 | 139 | return recursive_map 140 | 141 | 142 | def map_recursive(fn, tensors): 143 | return make_recursive(fn)(tensors) 144 | 145 | 146 | def save_config(confs, exp_conf_path): 147 | print('saving config to ', exp_conf_path) 148 | def func(x): 149 | if inspect.isclass(x): 150 | return x.__name__ 151 | elif hasattr(x, 'name'): 152 | return x.name 153 | else: 154 | return x 155 | confs = map_recursive(func, confs) 156 | 157 | if not os.path.exists(exp_conf_path): 158 | os.makedirs(exp_conf_path) 159 | with open(exp_conf_path + '/config.json', 'w') as f: 160 | json.dump(confs, f, indent=4) 161 | 162 | 163 | if sys.version_info[0] == 2: 164 | input_fn = raw_input 165 | else: 166 | input_fn = input 167 | 168 | def ask_confirm(text): 169 | print(text) 170 | valid = False 171 | response = False 172 | while not valid: 173 | str = input_fn() 174 | valid = True 175 | if str == 'y': 176 | response = True 177 | elif str == 'n': 178 | response = False 179 | else: 180 | valid = False 181 | return response 182 | 183 | def read_yaml_file(path): 184 | content = yaml.load(open(os.path.expanduser(path), 'r'), Loader=yaml.CLoader) 185 | if content is None: 186 | return {} 187 | else: 188 | return content 189 | 190 | -------------------------------------------------------------------------------- /widowx_envs/widowx_envs/widowx_env_service.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/python3 2 | # This is the highest level interface to interact with the widowx setup. 3 | 4 | import time 5 | import cv2 6 | import argparse 7 | import numpy as np 8 | import logging 9 | import traceback 10 | 11 | from typing import Optional, Tuple, Any 12 | from widowx_envs.utils.exceptions import Environment_Exception 13 | 14 | # install from: https://github.com/youliangtan/edgeml 15 | from edgeml.action import ActionClient, ActionServer, ActionConfig 16 | from edgeml.internal.utils import mat_to_jpeg, jpeg_to_mat, compute_hash 17 | 18 | ############################################################################## 19 | 20 | 21 | class WidowXConfigs: 22 | DefaultEnvParams = { 23 | "fix_zangle": 0.1, 24 | "move_duration": 0.2, 25 | "adaptive_wait": True, 26 | "move_to_rand_start_freq": 1, 27 | "override_workspace_boundaries": [ 28 | [0.1, -0.15, -0.1, -1.57, 0], 29 | [0.45, 0.25, 0.18, 1.57, 0], 30 | ], 31 | "action_clipping": "xyz", 32 | "catch_environment_except": False, 33 | "start_state": [0.3, 0.0, 0.15, 0, 0, 0, 1], # pose when reset is called 34 | "skip_move_to_neutral": False, 35 | "return_full_image": False, 36 | "camera_topics": [{"name": "/blue/image_raw"}], 37 | } 38 | 39 | DefaultActionConfig = ActionConfig( 40 | port_number=5556, 41 | action_keys=["init", "move", "gripper", "reset", "step_action", "reboot_motor"], 42 | observation_keys=["image", "state", "full_image"], 43 | broadcast_port=5556 + 1, 44 | ) 45 | 46 | 47 | def print_red(x): return print("\033[91m{}\033[00m".format(x)) 48 | 49 | ############################################################################## 50 | 51 | 52 | class WidowXStatus: 53 | NO_CONNECTION = 0 54 | SUCCESS = 1 55 | EXECUTION_FAILURE = 2 56 | NOT_INITIALIZED = 3 57 | 58 | ############################################################################## 59 | 60 | 61 | class WidowXActionServer(): 62 | """ 63 | This is the highest level abstraction of the widowx setup. We will run 64 | this as a server, and we can have multiple clients connect to it and 65 | reveives the observation and control the widowx robot. 66 | """ 67 | 68 | def __init__(self, port: int = 5556, testing: bool = False): 69 | edgeml_config = WidowXConfigs.DefaultActionConfig 70 | edgeml_config.port_number = port 71 | edgeml_config.broadcast_port = port + 1 72 | 73 | self.testing = testing # TODO: remove this soon 74 | self.bridge_env = None 75 | self.__server = ActionServer(edgeml_config, 76 | obs_callback=self.__observe, 77 | act_callback=self.__action, 78 | log_level=logging.WARNING) 79 | 80 | self._env_params = {} # default nothing 81 | self._image_size = None # default None 82 | self._action_methods = { 83 | "init": self.__init, 84 | "gripper": self.__gripper, 85 | "move": self.__move, 86 | "step_action": self.__step_action, 87 | "reset": self.__reset, 88 | "reboot_motor": self.__reboot_motor, 89 | } 90 | 91 | def start(self, threaded: bool = False): 92 | """ 93 | This starts the server. Default is blocking. 94 | """ 95 | self.__server.start(threaded) 96 | 97 | def stop(self): 98 | """Stop the server.""" 99 | self.__server.stop() 100 | return WidowXStatus.SUCCESS 101 | 102 | def init_robot(self, env_params, image_size): 103 | """Public method to init the robot""" 104 | if self.bridge_env: 105 | del self.bridge_env 106 | 107 | from widowx_envs.widowx_env import BridgeDataRailRLPrivateWidowX 108 | from multicam_server.topic_utils import IMTopic 109 | from tf.transformations import quaternion_from_euler 110 | from tf.transformations import quaternion_matrix 111 | 112 | # brute force way to convert json to IMTopic 113 | _env_params = env_params.copy() 114 | cam_imtopic = [] 115 | for cam in _env_params["camera_topics"]: 116 | imtopic_obj = IMTopic.from_dict(cam) 117 | cam_imtopic.append(imtopic_obj) 118 | _env_params["camera_topics"] = cam_imtopic 119 | 120 | def get_tf_mat(pose): 121 | # convert pose to a 4x4 tf matrix, rpy to quat 122 | quat = quaternion_from_euler(pose[3], pose[4], pose[5]) 123 | tf_mat = quaternion_matrix(quat) 124 | tf_mat[:3, 3] = pose[:3] 125 | return tf_mat 126 | 127 | self.get_tf_mat = get_tf_mat 128 | self.bridge_env = BridgeDataRailRLPrivateWidowX( 129 | _env_params, fixed_image_size=image_size) 130 | print("Initialized bridge env.") 131 | 132 | def hard_reset(self) -> bool: 133 | """This conduct a hard reset the Widowxenv""" 134 | if self._image_size is None: 135 | print_red("env was not init before") 136 | return False 137 | self.init_robot(self._env_params, self._image_size) 138 | 139 | def __action(self, type: str, req_payload: dict) -> dict: 140 | if type not in self._action_methods: 141 | return {"status": WidowXStatus.EXECUTION_FAILURE} 142 | if type != "init" and self.bridge_env is None: 143 | print_red("WARNING: env not initialized.") 144 | return {"status": WidowXStatus.NOT_INITIALIZED} 145 | 146 | status = self._action_methods[type](req_payload) 147 | return {"status": status} 148 | 149 | def __init(self, payload) -> WidowXStatus: 150 | do_reinit = not self._env_params == payload["env_params"] 151 | self._env_params = payload["env_params"] 152 | self._image_size = payload["image_size"] 153 | 154 | if self.testing: 155 | print_red("WARNING: Running in testing mode, \ 156 | no env will be initialized.") 157 | return WidowXStatus.NOT_INITIALIZED 158 | 159 | elif not do_reinit: 160 | print_red("env already initialized") 161 | self.__reset({}) 162 | return WidowXStatus.SUCCESS 163 | 164 | self.init_robot(payload["env_params"], payload["image_size"]) 165 | return WidowXStatus.SUCCESS 166 | 167 | def __reboot_motor(self, payload) -> WidowXStatus: 168 | joint_name = payload["joint_name"] 169 | print(f"Experimental: Rebooting motor {joint_name}") 170 | self.bridge_env.controller().reboot_motor(joint_name) 171 | return WidowXStatus.SUCCESS 172 | 173 | def __observe(self, types: list) -> dict: 174 | if self.bridge_env: 175 | # we will default return image and proprio only 176 | obs = self.bridge_env.current_obs() 177 | obs = { 178 | "image": obs["image"], 179 | "state": obs["state"], 180 | "full_image": mat_to_jpeg(obs["full_image"][0]) # faster 181 | } 182 | else: 183 | # use dummy img with random noise 184 | img = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8) 185 | obs = {"image": img, "state": {}, "full_image": img} 186 | print_red("WARNING: No bridge env not initialized.") 187 | return obs 188 | 189 | def __gripper(self, payload) -> WidowXStatus: 190 | if payload["open"] > 0.5: # convert to bool, for future float support 191 | self.bridge_env.controller().open_gripper() 192 | else: 193 | self.bridge_env.controller().close_gripper() 194 | return WidowXStatus.SUCCESS 195 | 196 | def __move(self, payload) -> WidowXStatus: 197 | pose = payload["pose"] 198 | if pose.shape == (4, 4): 199 | eep = pose 200 | else: 201 | eep = self.get_tf_mat(pose) 202 | try: 203 | self.bridge_env.controller().move_to_eep( 204 | eep, 205 | duration=payload["duration"], 206 | blocking=payload["blocking"], 207 | step=False, 208 | ) 209 | self.bridge_env._reset_previous_qpos() 210 | except Environment_Exception as e: 211 | print_red("Move execution error: {}".format(e)) 212 | return WidowXStatus.EXECUTION_FAILURE 213 | return WidowXStatus.SUCCESS 214 | 215 | def __step_action(self, payload) -> WidowXStatus: 216 | self.bridge_env.step(payload["action"], blocking=payload["blocking"]) 217 | return WidowXStatus.SUCCESS 218 | 219 | def __reset(self, payload) -> WidowXStatus: 220 | # NOTE(YL): in bridge env, the entire process is very stateful, 221 | # even reset might not be enough to reset the state 222 | # this will require further debug, a full refactoring 223 | # self.bridge_env.controller().move_to_neutral(duration=1.0) 224 | # self.bridge_env.controller().open_gripper() 225 | self.bridge_env.reset() 226 | obs = self.bridge_env.current_obs() 227 | print("JOINTS: ", obs["joints"]) 228 | self.bridge_env.start() 229 | return WidowXStatus.SUCCESS 230 | 231 | ############################################################################## 232 | 233 | 234 | class WidowXClient(): 235 | def __init__(self, 236 | host: str = "localhost", 237 | port: int = 5556, 238 | ): 239 | """ 240 | Args: 241 | :param host: the host ip address 242 | :param port: the port number 243 | """ 244 | edgeml_config = WidowXConfigs.DefaultActionConfig 245 | edgeml_config.port_number = port 246 | edgeml_config.broadcast_port = port + 1 247 | self.__client = ActionClient(host, edgeml_config) 248 | print("Initialized widowx client.") 249 | 250 | def init(self, 251 | env_params: dict, 252 | image_size: int = 256, 253 | ) -> WidowXStatus: 254 | """ 255 | Initialize the environment. 256 | :param env_params: a dict of env params 257 | :param image_size: the size of the image to return 258 | """ 259 | payload = {"env_params": env_params, 260 | "image_size": image_size} 261 | res = self.__client.act("init", payload) 262 | return WidowXStatus.NO_CONNECTION if res is None else res["status"] 263 | 264 | def move(self, 265 | pose: np.ndarray, 266 | duration: float = 1.0, 267 | blocking: bool = False, 268 | ) -> WidowXStatus: 269 | """ 270 | Command the arm to move to a given pose in space. 271 | :param pose: dim of 6, [x, y, z, roll, pitch, yaw] or 272 | a 4x4 tf matrix 273 | :param duration: time to move to the pose. Not implemented 274 | :param blocking: whether to block the server until the move is done 275 | """ 276 | assert len(pose) == 6 or pose.shape == (4, 4), "invalid pose shape" 277 | _payload = {"pose": pose, "duration": duration, "blocking": blocking} 278 | res = self.__client.act("move", _payload) 279 | return WidowXStatus.NO_CONNECTION if res is None else res["status"] 280 | 281 | def move_gripper(self, state: float) -> WidowXStatus: 282 | """Open or close the gripper. 1.0 is open, 0.0 is closed.""" 283 | res = self.__client.act("gripper", {"open": state}) 284 | return WidowXStatus.NO_CONNECTION if res is None else res["status"] 285 | 286 | def step_action(self, action: np.ndarray, blocking=False) -> WidowXStatus: 287 | """ 288 | Step the action. size of 5 (3trans) or 7 (3trans1rot) 289 | Note that the action is in relative space. 290 | """ 291 | assert len(action) in [5, 7], "invalid action shape" 292 | res = self.__client.act("step_action", {"action": action, 293 | "blocking": blocking}) 294 | return WidowXStatus.NO_CONNECTION if res is None else res["status"] 295 | 296 | def reset(self) -> WidowXStatus: 297 | """Reset the arm to the neutral position.""" 298 | res = self.__client.act("reset", {}) 299 | return WidowXStatus.NO_CONNECTION if res is None else res["status"] 300 | 301 | def get_observation(self) -> Optional[dict]: 302 | """ 303 | Get the current camera image and proprioceptive state. 304 | :return a dict of observations 305 | """ 306 | res = self.__client.obs() 307 | if res is None: 308 | return None 309 | # NOTE: this is a lossy conversion, but faster data transfer 310 | res["full_image"] = jpeg_to_mat(res["full_image"]) 311 | return res 312 | 313 | def stop(self): 314 | """Stop the client.""" 315 | self.__client.stop() 316 | 317 | def reboot_motor(self, joint_name: str): 318 | """Experimentation: Force Reboot the motor. 319 | Supported joint names: 320 | - waist, shoulder, elbow, forearm_roll, 321 | - wrist_angle, wrist_rotate, gripper, left_finger, right_finger 322 | """ 323 | self.__client.act("reboot_motor", {"joint_name": joint_name}) 324 | 325 | ############################################################################## 326 | 327 | 328 | def show_video(client, duration, full_image=True): 329 | """This shows the video from the camera for a given duration.""" 330 | start = time.time() 331 | while (time.time() - start) < duration: 332 | res = client.get_observation() 333 | if res is None: 334 | print("No observation available... waiting") 335 | continue 336 | 337 | if full_image: 338 | img = res["full_image"] 339 | else: 340 | img = res["image"] 341 | # if img.shape[0] != 3: # sanity check to make sure it's not flattened 342 | img = (img.reshape(3, 256, 256).transpose(1, 2, 0) * 255).astype(np.uint8) 343 | cv2.imshow("img", img) 344 | cv2.waitKey(10) # 10 ms 345 | 346 | 347 | def main(): 348 | parser = argparse.ArgumentParser() 349 | parser.add_argument('--server', action='store_true') 350 | parser.add_argument('--client', action='store_true') 351 | parser.add_argument('--ip', type=str, default='localhost') 352 | parser.add_argument('--port', type=int, default=5556) 353 | parser.add_argument('--test', action='store_true', help='run in test mode') 354 | args = parser.parse_args() 355 | 356 | if args.server: 357 | widowx_server = WidowXActionServer(port=args.port, testing=args.test) 358 | 359 | # try capture errors and hard restart the server 360 | while True: 361 | try: 362 | # this is a blocking call 363 | widowx_server.start() 364 | except Exception as e: 365 | if e == KeyboardInterrupt: 366 | break 367 | print(traceback.format_exc()) 368 | print_red(f"{e}, Restarting server...") 369 | widowx_server.stop() 370 | widowx_server.hard_reset() 371 | widowx_server.stop() 372 | 373 | if args.client: 374 | widowx_client = WidowXClient(host=args.ip, port=args.port) 375 | 376 | # NOTE: this normally takes 10 seconds when first time init 377 | widowx_client.init(WidowXConfigs.DefaultEnvParams, image_size=256) 378 | 379 | # This ensures that the robot is ready to be controlled 380 | obs = None 381 | while obs is None: 382 | obs = widowx_client.get_observation() 383 | time.sleep(1) 384 | print("Waiting for robot to be ready...") 385 | 386 | # Coordinate Convention: 387 | # - x: forward 388 | # - y: left 389 | # - z: up 390 | 391 | # move left up with slanted gripper 392 | res = widowx_client.move(np.array([0.2, 0.1, 0.3, 0, 0.47, 0.3])) 393 | assert args.test or res == WidowXStatus.SUCCESS, "move failed" 394 | show_video(widowx_client, duration=1.5) 395 | 396 | # test reboot motor, the gripper should get loosed for a quick moment 397 | widowx_client.reboot_motor("wrist_angle") 398 | show_video(widowx_client, duration=2) 399 | 400 | # NOTE, use blocking to make sure the qpos is reset after the move 401 | # this is important so that step_action works in this initial position 402 | res = widowx_client.move(np.array([0.2, 0.1, 0.3, 0, 1.57, 0.]), blocking=True) 403 | show_video(widowx_client, duration=0.5) 404 | 405 | # close gripper 406 | print("Closing gripper...") 407 | res = widowx_client.move_gripper(0.0) 408 | assert args.test or res == WidowXStatus.SUCCESS, "gripper failed" 409 | show_video(widowx_client, duration=2.5) 410 | 411 | print("Run step_action for 25 steps") 412 | for _ in range(25): 413 | start_time = time.time() 414 | widowx_client.step_action(np.array([-0.005, 0.005, 0.005, 0, 0, 0, 0])) 415 | print(f"Time taken for each step: {time.time() - start_time}") 416 | show_video(widowx_client, duration=0.5) 417 | 418 | # move right down 419 | res = widowx_client.move(np.array([0.2, -0.1, 0.1, 0, 1.57, 0])) 420 | assert args.test or res == WidowXStatus.SUCCESS, "move failed" 421 | show_video(widowx_client, duration=1.5) 422 | 423 | # open gripper 424 | print("Opening gripper...") 425 | res = widowx_client.move_gripper(1.0) 426 | assert args.test or res == WidowXStatus.SUCCESS, "gripper failed" 427 | show_video(widowx_client, duration=2.5) 428 | 429 | widowx_client.stop() 430 | print("Done all") 431 | 432 | 433 | if __name__ == '__main__': 434 | main() 435 | --------------------------------------------------------------------------------