├── .gitignore ├── LICENSE ├── README.md ├── bash ├── collect_data.sh ├── install_deps.sh ├── install_rlbench.sh ├── start_eval.sh ├── start_eval_ksteps.sh ├── start_runs.sh └── start_train.sh ├── conf ├── backbone │ ├── mlp_3dp.yaml │ ├── pointmlp.yaml │ ├── pointnet.yaml │ └── resnet_dp.yaml ├── collect_demos_train.yaml ├── collect_demos_valid.yaml ├── eval.yaml ├── experiment │ ├── adaflow.yaml │ ├── diffusion_policy.yaml │ ├── dp3.yaml │ ├── pointflowmatch.yaml │ ├── pointflowmatch_ddim.yaml │ ├── pointflowmatch_images.yaml │ ├── pointflowmatch_real.yaml │ └── pointflowmatch_so3.yaml ├── model │ ├── ddim.yaml │ ├── flow.yaml │ ├── flow_5p.yaml │ ├── flow_se3.yaml │ ├── flow_so3.yaml │ ├── flow_so3delta.yaml │ └── flow_target.yaml ├── train.yaml └── trainer_eval.yaml ├── pfp ├── __init__.py ├── backbones │ ├── mlp_3dp.py │ ├── pointmlp.py │ ├── pointnet.py │ └── resnet_dp.py ├── common │ ├── fm_utils.py │ ├── o3d_utils.py │ ├── se3_utils.py │ └── visualization.py ├── data │ ├── dataset_images.py │ ├── dataset_pcd.py │ └── replay_buffer.py ├── envs │ ├── base_env.py │ ├── rlbench_env.py │ └── rlbench_runner.py └── policy │ ├── base_policy.py │ ├── ddim_policy.py │ ├── fm_5p_policy.py │ ├── fm_policy.py │ ├── fm_se3_policy.py │ ├── fm_so3_policy.py │ ├── fm_so3delta_policy.py │ └── fm_target_policy.py ├── pyproject.toml ├── sandbox ├── augmentation.py ├── learning_rate.py ├── pypose_play.py ├── state_5p.py ├── vis_random_traj.py ├── vis_t_schedule.py └── vis_urdf.py ├── scripts ├── collect_demos.py ├── convert_data.py ├── evaluate.py ├── plots.py ├── print_ablation.py ├── print_experiments.py ├── train.py ├── train_real.py ├── trainer_eval.py └── vis_dataset.py ├── toy_circle ├── model.py ├── train_fm_euclid.py ├── train_fm_euclid_biased.py ├── train_fm_euclid_biased_inference.py ├── train_fm_so3.py ├── train_fm_so3_forward.py └── visualize_results.ipynb └── urdfs └── panda ├── meshes ├── Pandagripper_coll_1.dae ├── Pandagrippervisual_vis_1.dae ├── Pandagrippervisual_vis_2.dae ├── Pandagrippervisual_vis_3.dae ├── Pandagrippervisual_vis_4.dae ├── Pandagrippervisual_vis_5.dae ├── Pandaleftfingervisible_vis_1.dae ├── Pandaleftfingervisible_vis_2.dae ├── Pandalink0visual_vis_1.dae ├── Pandalink1respondable_coll_1.dae ├── Pandalink1visual_vis_1.dae ├── Pandalink2respondable_coll_1.dae ├── Pandalink2visual_vis_1.dae ├── Pandalink3respondable_coll_1.dae ├── Pandalink3visual_vis_1.dae ├── Pandalink3visual_vis_2.dae ├── Pandalink4respondable_coll_1.dae ├── Pandalink4visual_vis_1.dae ├── Pandalink4visual_vis_2.dae ├── Pandalink5respondable_coll_1.dae ├── Pandalink5respondable_coll_2.dae ├── Pandalink5respondable_coll_3.dae ├── Pandalink5respondable_coll_4.dae ├── Pandalink5respondable_coll_5.dae ├── Pandalink5respondable_coll_6.dae ├── Pandalink5respondable_coll_7.dae ├── Pandalink5visual_vis_1.dae ├── Pandalink5visual_vis_2.dae ├── Pandalink6respondable_coll_1.dae ├── Pandalink6visual_vis_1.dae ├── Pandalink6visual_vis_2.dae ├── Pandalink6visual_vis_3.dae ├── Pandalink6visual_vis_4.dae ├── Pandalink6visual_vis_5.dae ├── Pandalink6visual_vis_6.dae ├── Pandalink7respondable_coll_1.dae ├── Pandalink7visual_vis_1.dae ├── Pandalink7visual_vis_2.dae ├── Pandalink7visual_vis_3.dae ├── Pandalink7visual_vis_4.dae ├── Pandalink7visual_vis_5.dae ├── Pandarightfingervisual_vis_1.dae ├── Pandarightfingervisual_vis_2.dae └── robot_base_coll_1.dae ├── panda.urdf └── panda_gripper.urdf /.gitignore: -------------------------------------------------------------------------------- 1 | **/__pycache__/** 2 | 
**/outputs/** 3 | **/multirun/** 4 | **/wandb/** 5 | **/ckpt/** 6 | **/demos/** 7 | **.html 8 | **/toy_circle/results/** 9 | *plot.png 10 | *.svg -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # PointFlowMatch: Learning Robotic Manipulation Policies from Point Clouds with Conditional Flow Matching 2 | 3 | Repository providing the source code for the paper "Learning Robotic Manipulation Policies from Point Clouds with Conditional Flow Matching". See the [project website](http://pointflowmatch.cs.uni-freiburg.de/) for more information. Please cite the paper as follows: 4 | 5 | @article{chisari2024learning, 6 | title={Learning Robotic Manipulation Policies from Point Clouds with Conditional Flow Matching}, 7 | shorttitle={PointFlowMatch}, 8 | author={Chisari, Eugenio and Heppert, Nick and Argus, Max and Welschehold, Tim and Brox, Thomas and Valada, Abhinav}, 9 | journal={Conference on Robot Learning (CoRL)}, 10 | year={2024} 11 | } 12 | 13 | ## Installation 14 | 15 | - Add env variables to your `.bashrc` 16 | 17 | ```bash 18 | export COPPELIASIM_ROOT=${HOME}/CoppeliaSim 19 | export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$COPPELIASIM_ROOT 20 | export QT_QPA_PLATFORM_PLUGIN_PATH=$COPPELIASIM_ROOT 21 | ``` 22 | 23 | - Install dependencies 24 | 25 | ```bash 26 | conda create --name pfp_env python=3.10 27 | conda activate pfp_env 28 | bash bash/install_deps.sh 29 | bash bash/install_rlbench.sh 30 | 31 | # Get diffusion_policy from my branch 32 | cd .. 33 | git clone git@github.com:chisarie/diffusion_policy.git && cd diffusion_policy && git checkout develop/eugenio 34 | pip install -e ../diffusion_policy 35 | 36 | # 3dp install 37 | cd .. 38 | git clone git@github.com:YanjieZe/3D-Diffusion-Policy.git && cd 3D-Diffusion-Policy 39 | cd 3D-Diffusion-Policy && pip install -e . && cd .. 40 | 41 | # If running locally (doesn't work on Ubuntu 18): 42 | pip install rerun-sdk==0.15.1 43 | ``` 44 | 45 | ## Pretrained Weights Download 46 | 47 | Here you can find the pretrained checkpoints of our PointFlowMatch policies for different RLBench environments. Download and unzip them in the `ckpt` folder.
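For example, to fetch the unplug-charger checkpoint from the table below (assuming each archive unpacks into a folder named after the checkpoint; adjust the target if your layout differs):

```bash
mkdir -p ckpt
wget http://pointflowmatch.cs.uni-freiburg.de/download/1717446544-didactic-woodpecker.zip
unzip 1717446544-didactic-woodpecker.zip -d ckpt/
```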
48 | 49 | | unplug charger | close door | open box | open fridge | frame hanger | open oven | books on shelf | shoes out of box | 50 | | ------------- | ------------- | ------------- | ------------- | ------------- | ------------- | ------------- | ------------- | 51 | | [1717446544-didactic-woodpecker](http://pointflowmatch.cs.uni-freiburg.de/download/1717446544-didactic-woodpecker.zip) | [1717446607-uppish-grebe](http://pointflowmatch.cs.uni-freiburg.de/download/1717446607-uppish-grebe.zip) | [1717446558-qualified-finch](http://pointflowmatch.cs.uni-freiburg.de/download/1717446558-qualified-finch.zip) | [1717446565-astute-stingray](http://pointflowmatch.cs.uni-freiburg.de/download/1717446565-astute-stingray.zip) | [1717446708-analytic-cuckoo](http://pointflowmatch.cs.uni-freiburg.de/download/1717446708-analytic-cuckoo.zip) | [1717446706-natural-scallop](http://pointflowmatch.cs.uni-freiburg.de/download/1717446706-natural-scallop.zip) | [1717446594-astute-panda](http://pointflowmatch.cs.uni-freiburg.de/download/1717446594-astute-panda.zip) | [1717447341-indigo-quokka](http://pointflowmatch.cs.uni-freiburg.de/download/1717447341-indigo-quokka.zip) | 52 | 53 | ## Evaluation 54 | 55 | To reproduce the results from the paper, run: 56 | 57 | ```bash 58 | python scripts/evaluate.py log_wandb=True env_runner.env_config.vis=False policy.ckpt_name=<ckpt_name> 59 | ``` 60 | 61 | Where `<ckpt_name>` is the folder name of the selected checkpoint. Each checkpoint will be automatically evaluated on the correct environment. 62 | 63 | ## Training 64 | 65 | To train your own policies instead of using the pretrained checkpoints, you first need to collect demonstrations: 66 | 67 | ```bash 68 | bash bash/collect_data.sh 69 | ``` 70 | 71 | Then, you can train your own policies: 72 | 73 | ```bash 74 | python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=<task_name> +experiment=<experiment_name> 75 | ``` 76 | 77 | Valid task names are all those supported by RLBench. In this work, we used the following tasks: `unplug_charger`, `close_door`, `open_box`, `open_fridge`, `take_frame_off_hanger`, `open_oven`, `put_books_on_bookshelf`, `take_shoes_out_of_box`. 78 | 79 | Valid experiment names are the following, and they represent the different baselines we tested: `adaflow`, `diffusion_policy`, `dp3`, `pointflowmatch`, `pointflowmatch_images`, `pointflowmatch_ddim`, `pointflowmatch_so3`.
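For example, to train the main PointFlowMatch model on the `open_fridge` task:

```bash
python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=open_fridge +experiment=pointflowmatch
```

Any combination of the task and experiment names above works the same way.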
-------------------------------------------------------------------------------- /bash/collect_data.sh: -------------------------------------------------------------------------------- 1 | python scripts/collect_demos.py --config-name=collect_demos_train save_data=True env_config.vis=False env_config.task_name=unplug_charger 2 | python scripts/collect_demos.py --config-name=collect_demos_valid save_data=True env_config.vis=False env_config.task_name=unplug_charger 3 | 4 | python scripts/collect_demos.py --config-name=collect_demos_train save_data=True env_config.vis=False env_config.task_name=close_door 5 | python scripts/collect_demos.py --config-name=collect_demos_valid save_data=True env_config.vis=False env_config.task_name=close_door 6 | 7 | python scripts/collect_demos.py --config-name=collect_demos_train save_data=True env_config.vis=False env_config.task_name=open_box 8 | python scripts/collect_demos.py --config-name=collect_demos_valid save_data=True env_config.vis=False env_config.task_name=open_box 9 | 10 | python scripts/collect_demos.py --config-name=collect_demos_train save_data=True env_config.vis=False env_config.task_name=open_fridge 11 | python scripts/collect_demos.py --config-name=collect_demos_valid save_data=True env_config.vis=False env_config.task_name=open_fridge 12 | 13 | python scripts/collect_demos.py --config-name=collect_demos_train save_data=True env_config.vis=False env_config.task_name=take_frame_off_hanger 14 | python scripts/collect_demos.py --config-name=collect_demos_valid save_data=True env_config.vis=False env_config.task_name=take_frame_off_hanger 15 | 16 | python scripts/collect_demos.py --config-name=collect_demos_train save_data=True env_config.vis=False env_config.task_name=open_oven 17 | python scripts/collect_demos.py --config-name=collect_demos_valid save_data=True env_config.vis=False env_config.task_name=open_oven 18 | 19 | python scripts/collect_demos.py --config-name=collect_demos_train save_data=True env_config.vis=False env_config.task_name=put_books_on_bookshelf 20 | python scripts/collect_demos.py --config-name=collect_demos_valid save_data=True env_config.vis=False env_config.task_name=put_books_on_bookshelf 21 | 22 | python scripts/collect_demos.py --config-name=collect_demos_train save_data=True env_config.vis=False env_config.task_name=take_shoes_out_of_box 23 | python scripts/collect_demos.py --config-name=collect_demos_valid save_data=True env_config.vis=False env_config.task_name=take_shoes_out_of_box -------------------------------------------------------------------------------- /bash/install_deps.sh: -------------------------------------------------------------------------------- 1 | if ! [[ -n "${CONDA_PREFIX}" ]]; then 2 | echo "You are not inside a conda environment. Please activate your environment first." 3 | exit 1 4 | fi 5 | 6 | pip install torch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 --index-url https://download.pytorch.org/whl/cu121 7 | pip install fvcore iopath 8 | pip install --no-index --no-cache-dir pytorch3d==0.7.5 -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py310_cu121_pyt212/download.html 9 | # Or if on cpu: 10 | # pip3 install torch==2.1.2 torchvision==0.16.2 torchaudio==2.1.2 --index-url https://download.pytorch.org/whl/cpu 11 | pip install -e . 
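# (the editable install above leaves *.egg-info build metadata in the repo root; the next line cleans it up)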
12 | rm -R *.egg-info 13 | 14 | # Pypose 15 | pip install --no-deps pypose 16 | -------------------------------------------------------------------------------- /bash/install_rlbench.sh: -------------------------------------------------------------------------------- 1 | if ! [[ -n "${CONDA_PREFIX}" ]]; then 2 | echo "You are not inside a conda environment. Please activate your environment first." 3 | exit 1 4 | fi 5 | 6 | if ! [[ -n "${COPPELIASIM_ROOT}" ]]; then 7 | echo "COPPELIASIM_ROOT is not defined." 8 | exit 1 9 | fi 10 | 11 | # Download Coppelia sim if not present 12 | if ! [[-e $COPPELIASIM_ROOT]]; then 13 | wget https://downloads.coppeliarobotics.com/V4_1_0/CoppeliaSim_Edu_V4_1_0_Ubuntu20_04.tar.xz 14 | mkdir -p $COPPELIASIM_ROOT && tar -xf CoppeliaSim_Edu_V4_1_0_Ubuntu20_04.tar.xz -C $COPPELIASIM_ROOT --strip-components 1 15 | rm -rf CoppeliaSim_Edu_V4_1_0_Ubuntu20_04.tar.xz 16 | fi 17 | 18 | # Install PyRep and RLBench 19 | pip install -r https://raw.githubusercontent.com/stepjam/PyRep/master/requirements.txt 20 | pip install git+https://github.com/stepjam/PyRep.git 21 | pip install git+https://github.com/stepjam/RLBench.git 22 | -------------------------------------------------------------------------------- /bash/start_eval.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu_id=$1 4 | ckpt_name=$2 5 | k_steps=${3:-50} # Default k_steps is 50 6 | seed=${4:-0} # Default seed is 0 7 | if [ $seed -ne 0 ]; then 8 | tmux new-session -d -s "eval_${ckpt_name}_k${k_steps}_${seed}" 9 | tmux send-keys -t "eval_${ckpt_name}_k${k_steps}_${seed}" "conda activate pfp_env && CUDA_VISIBLE_DEVICES=$gpu_id WANDB__SERVICE_WAIT=300 xvfb-run -a python scripts/evaluate.py log_wandb=True env_runner.env_config.vis=False policy.ckpt_name=$ckpt_name seed=$seed policy.num_k_infer=$k_steps" Enter 10 | else 11 | for seed in 5678 2468 1357; do 12 | tmux new-session -d -s "eval_${ckpt_name}_k${k_steps}_${seed}" 13 | tmux send-keys -t "eval_${ckpt_name}_k${k_steps}_${seed}" "conda activate pfp_env && CUDA_VISIBLE_DEVICES=$gpu_id WANDB__SERVICE_WAIT=300 xvfb-run -a python scripts/evaluate.py log_wandb=True env_runner.env_config.vis=False policy.ckpt_name=$ckpt_name seed=$seed policy.num_k_infer=$k_steps" Enter 14 | done 15 | fi -------------------------------------------------------------------------------- /bash/start_eval_ksteps.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu_id=$1 4 | ckpt_name=$2 5 | seed=${4:-0} # Default seed is 0 6 | for k_steps in 1 2 4 8; do 7 | if [ $seed -ne 0 ]; then 8 | tmux new-session -d -s "eval_${ckpt_name}_k${k_steps}_${seed}" 9 | tmux send-keys -t "eval_${ckpt_name}_k${k_steps}_${seed}" "conda activate pfp_env && CUDA_VISIBLE_DEVICES=$gpu_id WANDB__SERVICE_WAIT=300 xvfb-run -a python scripts/evaluate.py log_wandb=True env_runner.env_config.vis=False policy.ckpt_name=$ckpt_name seed=$seed policy.num_k_infer=$k_steps" Enter 10 | else 11 | for seed in 5678 2468 1357; do 12 | tmux new-session -d -s "eval_${ckpt_name}_k${k_steps}_${seed}" 13 | tmux send-keys -t "eval_${ckpt_name}_k${k_steps}_${seed}" "conda activate pfp_env && CUDA_VISIBLE_DEVICES=$gpu_id WANDB__SERVICE_WAIT=300 xvfb-run -a python scripts/evaluate.py log_wandb=True env_runner.env_config.vis=False policy.ckpt_name=$ckpt_name seed=$seed policy.num_k_infer=$k_steps" Enter 14 | done 15 | seed=0 16 | fi 17 | done -------------------------------------------------------------------------------- 
/bash/start_runs.sh: -------------------------------------------------------------------------------- 1 | # Just examples to copy paste in the tmux terminal 2 | 3 | # WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=0 python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=unplug_charger 4 | # WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=0 python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=unplug_charger model=flow_se3 5 | 6 | # WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=1 python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=close_door 7 | # WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=1 python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=close_door model=flow_se3 8 | 9 | # WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=2 python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=open_box 10 | # WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=2 python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=open_box model=flow_se3 11 | 12 | # WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=3 python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=open_fridge 13 | # WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=3 python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=open_fridge model=flow_se3 14 | 15 | # WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=4 python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=take_frame_off_hanger 16 | # WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=4 python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=take_frame_off_hanger model=flow_se3 17 | 18 | # WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=5 python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=open_oven 19 | # WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=5 python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=open_oven model=flow_se3 20 | 21 | # WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=6 python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=put_books_on_bookshelf 22 | # WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=6 python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=put_books_on_bookshelf model=flow_se3 23 | 24 | # WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=7 python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=take_shoes_out_of_box 25 | # WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=7 python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=take_shoes_out_of_box model=flow_se3 26 | 27 | WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=0 python scripts/train.py --multirun log_wandb=True dataloader.num_workers=8 task_name=unplug_charger +experiment=pfp_so3,pfp_ddim,dp3,adaflow 28 | WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=0 python scripts/train.py --multirun log_wandb=True dataloader.num_workers=8 task_name=unplug_charger +experiment=pfp_euclid,pfp_images,diffusion_policy 29 | 30 | WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=1 python scripts/train.py --multirun log_wandb=True dataloader.num_workers=8 task_name=close_door +experiment=pfp_so3,pfp_ddim,dp3,adaflow 31 | WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=1 python scripts/train.py --multirun log_wandb=True dataloader.num_workers=8 task_name=close_door +experiment=pfp_euclid,pfp_images,diffusion_policy 32 | 33 | WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=2 python scripts/train.py --multirun log_wandb=True dataloader.num_workers=8 task_name=open_box 
+experiment=pfp_so3,pfp_euclid,pfp_ddim,pfp_images,dp3,adaflow,diffusion_policy 34 | 35 | WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=3 python scripts/train.py --multirun log_wandb=True dataloader.num_workers=8 task_name=open_fridge +experiment=pfp_so3,pfp_euclid,pfp_ddim,pfp_images,dp3,adaflow,diffusion_policy 36 | 37 | WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=4 python scripts/train.py --multirun log_wandb=True dataloader.num_workers=8 task_name=take_frame_off_hanger +experiment=pfp_so3,pfp_euclid,pfp_ddim,pfp_images,dp3,adaflow,diffusion_policy 38 | 39 | WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=5 python scripts/train.py --multirun log_wandb=True dataloader.num_workers=8 task_name=open_oven +experiment=pfp_so3,pfp_euclid,pfp_ddim,pfp_images,dp3,adaflow,diffusion_policy 40 | 41 | WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=6 python scripts/train.py --multirun log_wandb=True dataloader.num_workers=8 task_name=put_books_on_bookshelf +experiment=pfp_so3,pfp_euclid,pfp_ddim,pfp_images,dp3,adaflow,diffusion_policy 42 | 43 | WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=7 python scripts/train.py --multirun log_wandb=True dataloader.num_workers=8 task_name=take_shoes_out_of_box +experiment=pfp_so3,pfp_euclid,pfp_ddim,pfp_images,dp3,adaflow,diffusion_policy -------------------------------------------------------------------------------- /bash/start_train.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gpu_id=$1 4 | task_name=$2 5 | experiment=$3 6 | tmux new-session -d -s "train_${gpu_id}_${task_name}_${experiment}" 7 | tmux send-keys -t "train_${gpu_id}_${task_name}_${experiment}" "conda activate pfp_env && WANDB__SERVICE_WAIT=300 CUDA_VISIBLE_DEVICES=$gpu_id python scripts/train.py log_wandb=True dataloader.num_workers=8 task_name=$task_name +experiment=$experiment" Enter 8 | -------------------------------------------------------------------------------- /conf/backbone/mlp_3dp.yaml: -------------------------------------------------------------------------------- 1 | _target_: pfp.backbones.mlp_3dp.MLP3DP 2 | in_channels: "${eval: '6 if ${dataset.use_pc_color} else 3'}" 3 | out_channels: ${obs_features_dim} -------------------------------------------------------------------------------- /conf/backbone/pointmlp.yaml: -------------------------------------------------------------------------------- 1 | _target_: pfp.backbones.pointmlp.PointMLP 2 | points: ${dataset.n_points} 3 | input_channels: "${eval: '6 if ${dataset.use_pc_color} else 3'}" 4 | embed_dim: ${obs_features_dim} -------------------------------------------------------------------------------- /conf/backbone/pointnet.yaml: -------------------------------------------------------------------------------- 1 | _target_: pfp.backbones.pointnet.PointNetBackbone 2 | embed_dim: ${obs_features_dim} 3 | input_channels: "${eval: '6 if ${dataset.use_pc_color} else 3'}" 4 | input_transform: False 5 | use_group_norm: False 6 | -------------------------------------------------------------------------------- /conf/backbone/resnet_dp.yaml: -------------------------------------------------------------------------------- 1 | _target_: pfp.backbones.resnet_dp.ResnetDP 2 | shape_meta: 3 | # acceptable types: rgb, low_dim 4 | obs: 5 | img_0: 6 | shape: [3, 128, 128] 7 | type: rgb 8 | img_1: 9 | shape: [3, 128, 128] 10 | type: rgb 11 | img_2: 12 | shape: [3, 128, 128] 13 | type: rgb 14 | img_3: 15 | shape: [3, 128, 128] 16 | type: rgb 17 | img_4: 18 | shape: [3, 128, 128] 19 | type: rgb 20 
| robot_state: 21 | shape: [10] 22 | type: low_dim 23 | -------------------------------------------------------------------------------- /conf/collect_demos_train.yaml: -------------------------------------------------------------------------------- 1 | mode: train 2 | seed: 1234 3 | num_episodes: 100 4 | save_data: False 5 | 6 | 7 | env_config: 8 | task_name: take_lid_off_saucepan 9 | voxel_size: 0.01 10 | n_points: 5500 11 | headless: True 12 | vis: True -------------------------------------------------------------------------------- /conf/collect_demos_valid.yaml: -------------------------------------------------------------------------------- 1 | mode: valid 2 | seed: 5678 3 | num_episodes: 10 4 | save_data: False 5 | 6 | 7 | env_config: 8 | task_name: open_fridge 9 | voxel_size: 0.01 10 | n_points: 5500 11 | headless: True 12 | vis: False -------------------------------------------------------------------------------- /conf/eval.yaml: -------------------------------------------------------------------------------- 1 | seed: 5678 2 | log_wandb: False 3 | 4 | env_runner: 5 | num_episodes: 100 6 | max_episode_length: 200 7 | verbose: True 8 | env_config: 9 | voxel_size: 0.01 10 | headless: True 11 | vis: True 12 | 13 | 14 | policy: 15 | ckpt_name: 1717446544-didactic-woodpecker 16 | ckpt_episode: ep1500 # latest, ep1500, ep1000 17 | num_k_infer: 50 18 | # Uncomment the following to override settings used during training 19 | # flow_schedule: linear # linear | cosine | exp 20 | # exp_scale: 4.0 -------------------------------------------------------------------------------- /conf/experiment/adaflow.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - override /model: flow 4 | - override /backbone: resnet_dp 5 | 6 | obs_mode: rgb # pcd | rgb 7 | n_pred_steps: 16 # Must be divisible by 4 8 | obs_features_dim: 512 9 | x_dim: "${eval: '${obs_features_dim} * 5 + ${y_dim}'}" 10 | 11 | dataset: 12 | n_obs_steps: ${n_obs_steps} 13 | n_pred_steps: ${n_pred_steps} 14 | subs_factor: 1 15 | use_pcd_color: null 16 | n_points: null 17 | 18 | dataloader: 19 | batch_size: 64 20 | 21 | model: 22 | _target_: pfp.policy.fm_policy.FMPolicyImage 23 | augment_data: False # done in backbone 24 | 25 | optimizer: 26 | _target_: torch.optim.AdamW 27 | lr: 1.0e-4 28 | betas: [0.95, 0.999] 29 | eps: 1.0e-8 30 | weight_decay: 1.0e-6 31 | 32 | lr_scheduler: 33 | name: cosine # constant | cosine | linear | ... 34 | num_warmup_steps: 500 -------------------------------------------------------------------------------- /conf/experiment/diffusion_policy.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - override /model: ddim 4 | - override /backbone: resnet_dp 5 | 6 | obs_mode: rgb # pcd | rgb 7 | n_pred_steps: 16 # Must be divisible by 4 8 | obs_features_dim: 512 9 | x_dim: "${eval: '${obs_features_dim} * 5 + ${y_dim}'}" 10 | 11 | dataset: 12 | n_obs_steps: ${n_obs_steps} 13 | n_pred_steps: ${n_pred_steps} 14 | subs_factor: 1 15 | use_pcd_color: null 16 | n_points: null 17 | 18 | dataloader: 19 | batch_size: 64 20 | 21 | model: 22 | _target_: pfp.policy.ddim_policy.DDIMPolicyImage 23 | augment_data: False # done in backbone 24 | 25 | optimizer: 26 | _target_: torch.optim.AdamW 27 | lr: 1.0e-4 28 | betas: [0.95, 0.999] 29 | eps: 1.0e-8 30 | weight_decay: 1.0e-6 31 | 32 | lr_scheduler: 33 | name: cosine # constant | cosine | linear | ... 
34 | num_warmup_steps: 500 -------------------------------------------------------------------------------- /conf/experiment/dp3.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - override /model: ddim 4 | - override /backbone: mlp_3dp 5 | 6 | n_pred_steps: 4 7 | use_ema: True 8 | 9 | dataset: 10 | subs_factor: 1 11 | 12 | dataloader: 13 | batch_size: 128 14 | 15 | optimizer: 16 | _target_: torch.optim.AdamW 17 | lr: 1.0e-4 18 | betas: [0.95, 0.999] 19 | eps: 1.0e-8 20 | weight_decay: 1.0e-6 21 | 22 | lr_scheduler: 23 | name: cosine # constant | cosine | linear | ... 24 | num_warmup_steps: 500 -------------------------------------------------------------------------------- /conf/experiment/pointflowmatch.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - override /model: flow 4 | -------------------------------------------------------------------------------- /conf/experiment/pointflowmatch_ddim.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - override /model: ddim 4 | -------------------------------------------------------------------------------- /conf/experiment/pointflowmatch_images.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - override /model: flow 4 | - override /backbone: resnet_dp 5 | 6 | obs_mode: rgb # pcd | rgb 7 | obs_features_dim: 512 8 | x_dim: "${eval: '${obs_features_dim} * 5 + ${y_dim}'}" 9 | 10 | dataset: 11 | use_pcd_color: null 12 | n_points: null 13 | 14 | model: 15 | _target_: pfp.policy.fm_policy.FMPolicyImage 16 | augment_data: False # done in backbone -------------------------------------------------------------------------------- /conf/experiment/pointflowmatch_real.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - override /model: flow 4 | 5 | model: 6 | norm_pcd_center: [0.4, 0, 0.4] 7 | augment_data: True -------------------------------------------------------------------------------- /conf/experiment/pointflowmatch_so3.yaml: -------------------------------------------------------------------------------- 1 | # @package _global_ 2 | defaults: 3 | - override /model: flow_so3 4 | -------------------------------------------------------------------------------- /conf/model/ddim.yaml: -------------------------------------------------------------------------------- 1 | _target_: pfp.policy.ddim_policy.DDIMPolicy 2 | x_dim: ${x_dim} 3 | y_dim: ${y_dim} 4 | n_obs_steps: ${n_obs_steps} 5 | n_pred_steps: ${n_pred_steps} 6 | num_k_train: 100 7 | num_k_infer: 10 8 | norm_pcd_center: [0.4, 0.0, 1.4] 9 | augment_data: False 10 | 11 | obs_encoder: ${backbone} 12 | 13 | diffusion_net: 14 | _target_: diffusion_policy.model.diffusion.conditional_unet1d.ConditionalUnet1D 15 | input_dim: ${y_dim} 16 | global_cond_dim: "${eval: '${x_dim} * ${n_obs_steps}'}" 17 | diffusion_step_embed_dim: 256 18 | down_dims: [256, 512, 1024] 19 | kernel_size: 5 20 | n_groups: 8 21 | cond_predict_scale: True 22 | 23 | noise_scheduler_train: 24 | _target_: diffusers.schedulers.scheduling_ddim.DDIMScheduler 25 | num_train_timesteps: ${model.num_k_train} 26 | beta_start: 0.0001 27 | beta_end: 0.02 28 | beta_schedule: squaredcos_cap_v2 29 | clip_sample: True 30 | set_alpha_to_one: True 31 | steps_offset: 
0 32 | prediction_type: epsilon 33 | # rescale_betas_zero_snr: True 34 | # prediction_type: v_prediction 35 | # timestep_spacing: trailing 36 | 37 | loss_weights: 38 | xyz: 10.0 39 | rot6d: 10.0 40 | grip: 1.0 41 | -------------------------------------------------------------------------------- /conf/model/flow.yaml: -------------------------------------------------------------------------------- 1 | _target_: pfp.policy.fm_policy.FMPolicy 2 | x_dim: ${x_dim} 3 | y_dim: ${y_dim} 4 | n_obs_steps: ${n_obs_steps} 5 | n_pred_steps: ${n_pred_steps} 6 | num_k_infer: 10 7 | time_conditioning: True 8 | norm_pcd_center: [0.4, 0.0, 1.4] 9 | augment_data: False 10 | noise_type: gaussian # gaussian | trajectory | igso3 11 | noise_scale: 1.0 12 | loss_type: l2 # l2 | l1 13 | flow_schedule: exp # linear | cosine | exp 14 | exp_scale: 4.0 15 | snr_sampler: uniform # uniform | logit_normal 16 | 17 | obs_encoder: ${backbone} 18 | 19 | diffusion_net: 20 | _target_: diffusion_policy.model.diffusion.conditional_unet1d.ConditionalUnet1D 21 | input_dim: ${y_dim} 22 | global_cond_dim: "${eval: '${x_dim} * ${n_obs_steps}'}" 23 | diffusion_step_embed_dim: "${eval: '256 if ${model.time_conditioning} else 0'}" 24 | down_dims: [256, 512, 1024] 25 | kernel_size: 5 26 | n_groups: 8 27 | cond_predict_scale: True 28 | use_dropout: False 29 | 30 | loss_weights: 31 | xyz: 10.0 32 | rot6d: 10.0 33 | grip: 1.0 34 | -------------------------------------------------------------------------------- /conf/model/flow_5p.yaml: -------------------------------------------------------------------------------- 1 | _target_: pfp.policy.fm_5p_policy.FM5PPolicy 2 | x_dim: ${x_dim} 3 | y_dim: ${y_dim} 4 | n_obs_steps: ${n_obs_steps} 5 | n_pred_steps: ${n_pred_steps} 6 | num_k_infer: 10 7 | time_conditioning: True 8 | norm_pcd_center: [0.4, 0.0, 1.4] 9 | augment_data: False 10 | noise_type: gaussian # gaussian | trajectory 11 | noise_scale: 1.0 12 | loss_type: l2 # l2 | l1 13 | flow_schedule: exp # linear | cosine | exp 14 | exp_scale: 4.0 15 | 16 | obs_encoder: ${backbone} 17 | 18 | diffusion_net: 19 | _target_: diffusion_policy.model.diffusion.conditional_unet1d.ConditionalUnet1D 20 | input_dim: 16 21 | global_cond_dim: "${eval: '${x_dim} * ${n_obs_steps}'}" 22 | diffusion_step_embed_dim: "${eval: '256 if ${model.time_conditioning} else 0'}" 23 | down_dims: [256, 512, 1024] 24 | kernel_size: 5 25 | n_groups: 8 26 | cond_predict_scale: True 27 | 28 | loss_weights: 29 | 5p: 10.0 30 | grip: 1.0 31 | -------------------------------------------------------------------------------- /conf/model/flow_se3.yaml: -------------------------------------------------------------------------------- 1 | _target_: pfp.policy.fm_se3_policy.FMSE3Policy 2 | x_dim: ${x_dim} 3 | y_dim: ${y_dim} 4 | n_obs_steps: ${n_obs_steps} 5 | n_pred_steps: ${n_pred_steps} 6 | num_k_infer: 10 7 | norm_pcd_center: [0.4, 0.0, 1.4] 8 | augment_data: False 9 | loss_type: l2 # l2 | l1 10 | flow_schedule: exp # linear | cosine | exp 11 | exp_scale: 4.0 12 | 13 | obs_encoder: ${backbone} 14 | 15 | diffusion_net: 16 | _target_: diffusion_policy.model.diffusion.conditional_unet1d.ConditionalUnet1D 17 | input_dim: ${y_dim} 18 | output_dim: 7 19 | global_cond_dim: "${eval: '${x_dim} * ${n_obs_steps}'}" 20 | diffusion_step_embed_dim: 256 21 | down_dims: [256, 512, 1024] 22 | kernel_size: 5 23 | n_groups: 8 24 | cond_predict_scale: True 25 | 26 | loss_weights: 27 | twist: 10.0 28 | grip: 1.0 29 | -------------------------------------------------------------------------------- 
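The `flow_schedule` / `exp_scale` keys shared by these flow configs control how the `num_k_infer` integration steps are spaced at inference time; they are implemented by `get_timesteps` in `pfp/common/fm_utils.py` further down. A minimal sketch of inspecting the exponential schedule from `conf/model/flow.yaml` (the Euler update in the comments is an assumption about how the policy consumes these steps, not code from this repo):

```python
import torch

from pfp.common.fm_utils import get_timesteps

# Step start times t0 and step sizes dt for 10 inference steps, matching
# the `flow_schedule: exp`, `exp_scale: 4.0` defaults of conf/model/flow.yaml.
t0, dt = get_timesteps("exp", k_steps=10, exp_scale=4.0)
assert torch.isclose(dt.sum(), torch.tensor(1.0))  # step sizes sum to 1, covering t in [0, 1]
assert dt[0] > dt[-1]  # exp schedule: coarse steps early, fine steps near t = 1
# A plain Euler rollout of a learned velocity field v(y, t) would then read:
# for t, step in zip(t0, dt):
#     y = y + step * v(y, t)
```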
/conf/model/flow_so3.yaml: -------------------------------------------------------------------------------- 1 | _target_: pfp.policy.fm_so3_policy.FMSO3Policy 2 | x_dim: ${x_dim} 3 | y_dim: ${y_dim} 4 | n_obs_steps: ${n_obs_steps} 5 | n_pred_steps: ${n_pred_steps} 6 | num_k_infer: 10 7 | norm_pcd_center: [0.4, 0.0, 1.4] 8 | augment_data: False 9 | loss_type: l2 # l2 | l1 10 | flow_schedule: exp # linear | cosine | exp 11 | exp_scale: 4.0 12 | snr_sampler: uniform # uniform | logit_normal 13 | noise_type: uniform # uniform | biased 14 | 15 | obs_encoder: ${backbone} 16 | 17 | diffusion_net: 18 | _target_: diffusion_policy.model.diffusion.conditional_unet1d.ConditionalUnet1D 19 | input_dim: ${y_dim} 20 | output_dim: 7 21 | global_cond_dim: "${eval: '${x_dim} * ${n_obs_steps}'}" 22 | diffusion_step_embed_dim: 256 23 | down_dims: [256, 512, 1024] 24 | kernel_size: 5 25 | n_groups: 8 26 | cond_predict_scale: True 27 | 28 | loss_weights: 29 | xyz: 10.0 30 | so3: 10.0 31 | grip: 1.0 32 | -------------------------------------------------------------------------------- /conf/model/flow_so3delta.yaml: -------------------------------------------------------------------------------- 1 | _target_: pfp.policy.fm_so3delta_policy.FMSO3DeltaPolicy 2 | x_dim: ${x_dim} 3 | y_dim: ${y_dim} 4 | n_obs_steps: ${n_obs_steps} 5 | n_pred_steps: ${n_pred_steps} 6 | num_k_infer: 10 7 | norm_pcd_center: [0.4, 0.0, 1.4] 8 | augment_data: False 9 | loss_type: l2 # l2 | l1 10 | flow_schedule: exp # linear | cosine | exp 11 | exp_scale: 4.0 12 | 13 | obs_encoder: ${backbone} 14 | 15 | diffusion_net: 16 | _target_: diffusion_policy.model.diffusion.conditional_unet1d.ConditionalUnet1D 17 | input_dim: ${y_dim} 18 | # output_dim: 10 19 | global_cond_dim: "${eval: '${x_dim} * ${n_obs_steps}'}" 20 | diffusion_step_embed_dim: 256 21 | down_dims: [256, 512, 1024] 22 | kernel_size: 5 23 | n_groups: 8 24 | cond_predict_scale: True 25 | 26 | loss_weights: 27 | xyz: 10.0 28 | rot6d: 10.0 29 | grip: 1.0 30 | -------------------------------------------------------------------------------- /conf/model/flow_target.yaml: -------------------------------------------------------------------------------- 1 | _target_: pfp.policy.fm_target_policy.FMTargetPolicy 2 | x_dim: ${x_dim} 3 | y_dim: ${y_dim} 4 | n_obs_steps: ${n_obs_steps} 5 | n_pred_steps: ${n_pred_steps} 6 | num_k_infer: 10 7 | time_conditioning: False 8 | norm_pcd_center: [0.4, 0.0, 1.4] 9 | augment_data: False 10 | loss_type: l2 # l2 | l1 11 | flow_schedule: exp # linear | cosine | exp 12 | exp_scale: 4.0 13 | 14 | obs_encoder: ${backbone} 15 | 16 | diffusion_net: 17 | _target_: diffusion_policy.model.diffusion.conditional_unet1d.ConditionalUnet1D 18 | input_dim: ${y_dim} 19 | global_cond_dim: "${eval: '${x_dim} * ${n_obs_steps}'}" 20 | diffusion_step_embed_dim: "${eval: '256 if ${model.time_conditioning} else 0'}" 21 | down_dims: [256, 512, 1024] 22 | kernel_size: 5 23 | n_groups: 8 24 | cond_predict_scale: True 25 | 26 | loss_weights: 27 | xyz: 10.0 28 | rot6d: 10.0 29 | grip: 1.0 30 | -------------------------------------------------------------------------------- /conf/train.yaml: -------------------------------------------------------------------------------- 1 | seed: 1234 2 | epochs: 1500 3 | log_wandb: False 4 | task_name: unplug_charger 5 | obs_features_dim: 256 6 | y_dim: 10 # (xyz, rot6d, g) 7 | x_dim: "${eval: '${obs_features_dim} + ${y_dim}'}" 8 | n_obs_steps: 2 9 | n_pred_steps: 32 # Must be divisible by 4 10 | use_ema: True 11 | save_each_n_epochs: 500 12 | 
obs_mode: pcd # pcd | rgb 13 | run_name: null # set this to continue training from previous ckpt 14 | 15 | 16 | # env_runner: 17 | # num_episodes: 20 18 | # max_episode_length: 200 19 | # task_name: ${task_name} 20 | # env_config: 21 | # seed: 1996 22 | # lowdim_obs: False 23 | 24 | 25 | dataset: 26 | n_obs_steps: ${n_obs_steps} 27 | n_pred_steps: ${n_pred_steps} 28 | subs_factor: 3 29 | use_pc_color: False 30 | n_points: 4096 31 | 32 | 33 | dataloader: 34 | batch_size: 128 35 | num_workers: 0 36 | # pin_memory: True 37 | 38 | 39 | optimizer: 40 | _target_: torch.optim.AdamW 41 | lr: 3.0e-5 42 | betas: [0.95, 0.999] 43 | eps: 1.0e-8 44 | weight_decay: 1.0e-6 45 | 46 | lr_scheduler: 47 | name: cosine # constant | cosine | linear | ... 48 | num_warmup_steps: 5000 49 | 50 | 51 | defaults: 52 | - model: flow 53 | - backbone: pointnet -------------------------------------------------------------------------------- /conf/trainer_eval.yaml: -------------------------------------------------------------------------------- 1 | seed: 5678 2 | log_wandb: False 3 | run_name: 1716560279-subtle-kestrel # previous ckpt 4 | model: 5 | num_k_infer: 5 6 | -------------------------------------------------------------------------------- /pfp/__init__.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import random 3 | import pathlib 4 | import numpy as np 5 | from dataclasses import dataclass 6 | 7 | 8 | @dataclass 9 | class DATA_DIRS: 10 | ROOT = pathlib.Path(__file__).parents[1] / "demos" 11 | PFP = ROOT / "sim" 12 | PFP_REAL = ROOT / "real" 13 | 14 | 15 | @dataclass 16 | class REPO_DIRS: 17 | ROOT = pathlib.Path(__file__).parents[1] 18 | CKPT = ROOT / "ckpt" 19 | URDFS = ROOT / "urdfs" 20 | 21 | 22 | DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") 23 | 24 | 25 | def set_seeds(seed=0): 26 | """Sets all seeds.""" 27 | torch.manual_seed(seed) 28 | torch.cuda.manual_seed_all(seed) 29 | np.random.seed(seed) 30 | random.seed(seed) 31 | -------------------------------------------------------------------------------- /pfp/backbones/mlp_3dp.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from diffusion_policy_3d.model.vision.pointnet_extractor import ( 4 | PointNetEncoderXYZRGB, 5 | PointNetEncoderXYZ, 6 | ) 7 | 8 | 9 | class MLP3DP(nn.Module): 10 | def __init__(self, in_channels: int, out_channels: int): 11 | super().__init__() 12 | if in_channels == 3: 13 | self.backbone = PointNetEncoderXYZ( 14 | in_channels=in_channels, 15 | out_channels=out_channels, 16 | use_layernorm=True, 17 | final_norm="layernorm", 18 | normal_channel=False, 19 | ) 20 | elif in_channels == 6: 21 | self.backbone = PointNetEncoderXYZRGB( 22 | in_channels=in_channels, 23 | out_channels=out_channels, 24 | use_layernorm=True, 25 | final_norm="layernorm", 26 | normal_channel=False, 27 | ) 28 | else: 29 | raise ValueError("Invalid number of input channels for MLP3DP") 30 | return 31 | 32 | def forward(self, pcd: torch.Tensor, robot_state_obs: torch.Tensor = None) -> torch.Tensor: 33 | B = pcd.shape[0] 34 | # Flatten the batch and time dimensions 35 | pcd = pcd.float().reshape(-1, *pcd.shape[2:]) 36 | robot_state_obs = robot_state_obs.float().reshape(-1, *robot_state_obs.shape[2:]) 37 | # Encode all point clouds (across time steps and batch size) 38 | encoded_pcd = self.backbone(pcd) 39 | nx = torch.cat([encoded_pcd, robot_state_obs], dim=1) 40 | # Reshape back to the batch dimension. 
Now the features of each time step are concatenated 41 | nx = nx.reshape(B, -1) 42 | return nx 43 | -------------------------------------------------------------------------------- /pfp/backbones/pointnet.py: -------------------------------------------------------------------------------- 1 | """ Adapted from https://github.com/dyson-ai/hdp/blob/main/rk_diffuser/models/pointnet.py """ 2 | 3 | import numpy as np 4 | import torch 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | import torch.nn.parallel 8 | import torch.utils.data 9 | from torch.autograd import Variable 10 | from diffusion_policy.common.pytorch_util import replace_submodules 11 | 12 | 13 | class STN3d(nn.Module): 14 | def __init__(self): 15 | super(STN3d, self).__init__() 16 | self.conv1 = torch.nn.Conv1d(3, 64, 1) 17 | self.conv2 = torch.nn.Conv1d(64, 128, 1) 18 | self.conv3 = torch.nn.Conv1d(128, 1024, 1) 19 | self.fc1 = nn.Linear(1024, 512) 20 | self.fc2 = nn.Linear(512, 256) 21 | self.fc3 = nn.Linear(256, 9) 22 | self.relu = nn.ReLU() 23 | 24 | self.bn1 = nn.BatchNorm1d(64) 25 | self.bn2 = nn.BatchNorm1d(128) 26 | self.bn3 = nn.BatchNorm1d(1024) 27 | # self.bn4 = nn.BatchNorm1d(512) 28 | # self.bn5 = nn.BatchNorm1d(256) 29 | 30 | self.bn4 = nn.LayerNorm(512) 31 | self.bn5 = nn.LayerNorm(256) 32 | 33 | def forward(self, x): 34 | batchsize = x.size()[0] 35 | x = F.relu(self.bn1(self.conv1(x))) 36 | x = F.relu(self.bn2(self.conv2(x))) 37 | x = F.relu(self.bn3(self.conv3(x))) 38 | x = torch.max(x, 2, keepdim=True)[0] 39 | x = x.view(-1, 1024) 40 | 41 | x = F.relu(self.bn4(self.fc1(x))) 42 | x = F.relu(self.bn5(self.fc2(x))) 43 | x = self.fc3(x) 44 | 45 | iden = ( 46 | Variable(torch.from_numpy(np.array([1, 0, 0, 0, 1, 0, 0, 0, 1]).astype(np.float32))) 47 | .view(1, 9) 48 | .repeat(batchsize, 1) 49 | ) 50 | if x.is_cuda: 51 | iden = iden.cuda() 52 | x = x + iden 53 | x = x.view(-1, 3, 3) 54 | return x 55 | 56 | 57 | class STNkd(nn.Module): 58 | def __init__(self, k=64): 59 | super(STNkd, self).__init__() 60 | self.conv1 = torch.nn.Conv1d(k, 64, 1) 61 | self.conv2 = torch.nn.Conv1d(64, 128, 1) 62 | self.conv3 = torch.nn.Conv1d(128, 1024, 1) 63 | self.fc1 = nn.Linear(1024, 512) 64 | self.fc2 = nn.Linear(512, 256) 65 | self.fc3 = nn.Linear(256, k * k) 66 | self.relu = nn.ReLU() 67 | 68 | self.bn1 = nn.BatchNorm1d(64) 69 | self.bn2 = nn.BatchNorm1d(128) 70 | self.bn3 = nn.BatchNorm1d(1024) 71 | # self.bn4 = nn.BatchNorm1d(512) 72 | # self.bn5 = nn.BatchNorm1d(256) 73 | 74 | self.bn4 = nn.LayerNorm(512) 75 | self.bn5 = nn.LayerNorm(256) 76 | 77 | self.k = k 78 | 79 | def forward(self, x): 80 | batchsize = x.size()[0] 81 | x = F.relu(self.bn1(self.conv1(x))) 82 | x = F.relu(self.bn2(self.conv2(x))) 83 | x = F.relu(self.bn3(self.conv3(x))) 84 | x = torch.max(x, 2, keepdim=True)[0] 85 | x = x.view(-1, 1024) 86 | 87 | x = F.relu(self.bn4(self.fc1(x))) 88 | x = F.relu(self.bn5(self.fc2(x))) 89 | x = self.fc3(x) 90 | 91 | iden = ( 92 | Variable(torch.from_numpy(np.eye(self.k).flatten().astype(np.float32))) 93 | .view(1, self.k * self.k) 94 | .repeat(batchsize, 1) 95 | ) 96 | if x.is_cuda: 97 | iden = iden.cuda() 98 | x = x + iden 99 | x = x.view(-1, self.k, self.k) 100 | return x 101 | 102 | 103 | class PointNetfeat(nn.Module): 104 | def __init__(self, input_channels: int, input_transform: bool, feature_transform=False): 105 | super(PointNetfeat, self).__init__() 106 | self.input_transform = input_transform 107 | if self.input_transform: 108 | self.stn = STNkd(k=input_channels) 109 | self.conv1 = 
torch.nn.Conv1d(input_channels, 64, 1) 110 | self.conv2 = torch.nn.Conv1d(64, 128, 1) 111 | self.conv3 = torch.nn.Conv1d(128, 1024, 1) 112 | self.bn1 = nn.BatchNorm1d(64) 113 | self.bn2 = nn.BatchNorm1d(128) 114 | self.bn3 = nn.BatchNorm1d(1024) 115 | self.feature_transform = feature_transform 116 | if self.feature_transform: 117 | self.fstn = STNkd(k=64) 118 | 119 | def forward(self, x): 120 | b = x.size(0) 121 | if len(x.shape) == 4: 122 | x = x.view(b, -1, 3).permute(0, 2, 1).contiguous() 123 | 124 | if self.input_transform: 125 | trans = self.stn(x) 126 | x = x.transpose(2, 1) 127 | x = torch.bmm(x, trans) 128 | x = x.transpose(2, 1) 129 | else: 130 | trans = None 131 | 132 | x = F.relu(self.bn1(self.conv1(x))) 133 | 134 | if self.feature_transform: 135 | trans_feat = self.fstn(x) 136 | x = x.transpose(2, 1) 137 | x = torch.bmm(x, trans_feat) 138 | x = x.transpose(2, 1) 139 | else: 140 | trans_feat = None 141 | 142 | x = F.relu(self.bn2(self.conv2(x))) 143 | x = self.bn3(self.conv3(x)) 144 | x = torch.max(x, 2, keepdim=True)[0] 145 | x = x.view(-1, 1024) 146 | return x 147 | 148 | 149 | class PointNetCls(nn.Module): 150 | def __init__(self, k=2, feature_transform=False): 151 | super(PointNetCls, self).__init__() 152 | self.feature_transform = feature_transform 153 | self.feat = PointNetfeat(global_feat=True, feature_transform=feature_transform) 154 | self.fc1 = nn.Linear(1024, 512) 155 | self.fc2 = nn.Linear(512, 256) 156 | self.fc3 = nn.Linear(256, k) 157 | self.dropout = nn.Dropout(p=0.3) 158 | self.bn1 = nn.BatchNorm1d(512) 159 | self.bn2 = nn.BatchNorm1d(256) 160 | self.relu = nn.ReLU() 161 | 162 | def forward(self, x): 163 | x, trans, trans_feat = self.feat(x) 164 | x = F.relu(self.bn1(self.fc1(x))) 165 | x = F.relu(self.bn2(self.dropout(self.fc2(x)))) 166 | x = self.fc3(x) 167 | return F.log_softmax(x, dim=1), trans, trans_feat 168 | 169 | 170 | class PointNetDenseCls(nn.Module): 171 | def __init__(self, k=2, feature_transform=False): 172 | super(PointNetDenseCls, self).__init__() 173 | self.k = k 174 | self.feature_transform = feature_transform 175 | self.feat = PointNetfeat(global_feat=False, feature_transform=feature_transform) 176 | self.conv1 = torch.nn.Conv1d(1088, 512, 1) 177 | self.conv2 = torch.nn.Conv1d(512, 256, 1) 178 | self.conv3 = torch.nn.Conv1d(256, 128, 1) 179 | self.conv4 = torch.nn.Conv1d(128, self.k, 1) 180 | self.bn1 = nn.BatchNorm1d(512) 181 | self.bn2 = nn.BatchNorm1d(256) 182 | self.bn3 = nn.BatchNorm1d(128) 183 | 184 | def forward(self, x): 185 | batchsize = x.size()[0] 186 | n_pts = x.size()[2] 187 | x, trans, trans_feat = self.feat(x) 188 | x = F.relu(self.bn1(self.conv1(x))) 189 | x = F.relu(self.bn2(self.conv2(x))) 190 | x = F.relu(self.bn3(self.conv3(x))) 191 | x = self.conv4(x) 192 | x = x.transpose(2, 1).contiguous() 193 | x = F.log_softmax(x.view(-1, self.k), dim=-1) 194 | x = x.view(batchsize, n_pts, self.k) 195 | return x, trans, trans_feat 196 | 197 | 198 | class PointNetBackbone(nn.Module): 199 | def __init__( 200 | self, 201 | embed_dim: int, 202 | input_channels: int, 203 | input_transform: bool, 204 | use_group_norm: bool = False, 205 | ): 206 | super().__init__() 207 | assert input_channels in [3, 6], "Input channels must be 3 or 6" 208 | self.backbone = nn.Sequential( 209 | PointNetfeat(input_channels, input_transform), 210 | nn.Mish(), 211 | nn.Linear(1024, 512), 212 | nn.Mish(), 213 | nn.Linear(512, embed_dim), 214 | ) 215 | if use_group_norm: 216 | self.backbone = replace_submodules( 217 | root_module=self.backbone, 218 | 
predicate=lambda x: isinstance(x, nn.BatchNorm1d), 219 | func=lambda x: nn.GroupNorm( 220 | num_groups=x.num_features // 16, num_channels=x.num_features 221 | ), 222 | ) 223 | return 224 | 225 | def forward(self, pcd: torch.Tensor, robot_state_obs: torch.Tensor = None) -> torch.Tensor: 226 | B = pcd.shape[0] 227 | # Flatten the batch and time dimensions 228 | pcd = pcd.float().reshape(-1, *pcd.shape[2:]) 229 | robot_state_obs = robot_state_obs.float().reshape(-1, *robot_state_obs.shape[2:]) 230 | # Permute [B, P, C] -> [B, C, P] 231 | pcd = pcd.permute(0, 2, 1) 232 | # Encode all point clouds (across time steps and batch size) 233 | encoded_pcd = self.backbone(pcd) 234 | nx = torch.cat([encoded_pcd, robot_state_obs], dim=1) 235 | # Reshape back to the batch dimension. Now the features of each time step are concatenated 236 | nx = nx.reshape(B, -1) 237 | return nx 238 | -------------------------------------------------------------------------------- /pfp/backbones/resnet_dp.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | from diffusion_policy.model.vision.model_getter import get_resnet 4 | from diffusion_policy.model.vision.multi_image_obs_encoder import MultiImageObsEncoder 5 | 6 | 7 | class ResnetDP(nn.Module): 8 | def __init__(self, shape_meta: dict): 9 | super().__init__() 10 | rgb_model = get_resnet(name="resnet18") 11 | self.backbone = MultiImageObsEncoder( 12 | shape_meta=shape_meta, 13 | rgb_model=rgb_model, 14 | crop_shape=(76, 76), 15 | random_crop=True, 16 | use_group_norm=True, 17 | share_rgb_model=False, 18 | imagenet_norm=True, 19 | ) 20 | return 21 | 22 | def forward(self, images: torch.Tensor, robot_state_obs: torch.Tensor = None) -> torch.Tensor: 23 | B = images.shape[0] 24 | # Flatten the batch and time dimensions 25 | images = images.reshape(-1, *images.shape[2:]).permute(0, 1, 4, 2, 3) 26 | robot_state_obs = robot_state_obs.float().reshape(-1, *robot_state_obs.shape[2:]) 27 | # Encode all observations (across time steps and batch size) 28 | obs_dict = {f"img_{i}": images[:, i] for i in range(images.shape[1])} 29 | obs_dict["robot_state"] = robot_state_obs 30 | nx = self.backbone(obs_dict) 31 | # Reshape back to the batch dimension. 
Now the features of each time step are concatenated 32 | nx = nx.reshape(B, -1) 33 | return nx 34 | -------------------------------------------------------------------------------- /pfp/common/fm_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def get_timesteps(schedule: str, k_steps: int, exp_scale: float = 1.0): 5 | t = torch.linspace(0, 1, k_steps + 1)[:-1] 6 | if schedule == "linear": 7 | dt = torch.ones(k_steps) / k_steps 8 | elif schedule == "cosine": 9 | dt = torch.cos(t * torch.pi) + 1 10 | dt /= torch.sum(dt) 11 | elif schedule == "exp": 12 | dt = torch.exp(-t * exp_scale) 13 | dt /= torch.sum(dt) 14 | else: 15 | raise ValueError(f"Invalid schedule: {schedule}") 16 | t0 = torch.cat((torch.zeros(1), torch.cumsum(dt, dim=0)[:-1])) 17 | return t0, dt 18 | -------------------------------------------------------------------------------- /pfp/common/o3d_utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import functools 3 | import numpy as np 4 | import open3d as o3d 5 | 6 | 7 | def make_pcd( 8 | xyz: np.ndarray, 9 | rgb: np.ndarray, 10 | ) -> o3d.geometry.PointCloud: 11 | points = o3d.utility.Vector3dVector(xyz.reshape(-1, 3)) 12 | colors = o3d.utility.Vector3dVector(rgb.reshape(-1, 3).astype(np.float64) / 255) 13 | pcd = o3d.geometry.PointCloud(points) 14 | pcd.colors = colors 15 | return pcd 16 | 17 | 18 | def merge_pcds( 19 | voxel_size: float, 20 | n_points: int, 21 | pcds: list[o3d.geometry.PointCloud], 22 | ws_aabb: o3d.geometry.AxisAlignedBoundingBox, 23 | ) -> o3d.geometry.PointCloud: 24 | merged_pcd = functools.reduce(lambda a, b: a + b, pcds, o3d.geometry.PointCloud()) 25 | merged_pcd = merged_pcd.crop(ws_aabb) 26 | downsampled_pcd = merged_pcd.voxel_down_sample(voxel_size=voxel_size) 27 | if len(downsampled_pcd.points) > n_points: 28 | ratio = n_points / len(downsampled_pcd.points) 29 | downsampled_pcd = downsampled_pcd.random_down_sample(ratio) 30 | if len(downsampled_pcd.points) < n_points: 31 | # Append zeros to make the point cloud have the desired number of points 32 | num_missing_points = n_points - len(downsampled_pcd.points) 33 | zeros = np.zeros((num_missing_points, 3)) 34 | zeros_pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(zeros)) 35 | zeros_pcd.colors = o3d.utility.Vector3dVector(zeros) 36 | downsampled_pcd += zeros_pcd 37 | return downsampled_pcd 38 | -------------------------------------------------------------------------------- /pfp/common/se3_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from spatialmath.base import r2q 4 | from spatialmath.base.transforms3d import isrot 5 | 6 | try: 7 | from pytorch3d.ops import corresponding_points_alignment 8 | except ImportError: 9 | print("pytorch3d not installed") 10 | from pfp import DEVICE 11 | 12 | 13 | def transform_th(transform: torch.Tensor, points: torch.Tensor) -> torch.Tensor: 14 | """Apply a 4x4 transformation matrix to a set of points.""" 15 | new_points = points @ transform[..., :3, :3].mT + transform[..., :3, 3] 16 | return new_points 17 | 18 | 19 | def vec_projection_np(v: np.ndarray, e: np.ndarray) -> np.ndarray: 20 | """Project vector v onto unit vector e.""" 21 | proj = np.sum(v * e, axis=-1, keepdims=True) * e 22 | return proj 23 | 24 | 25 | def vec_projection_th(v: torch.Tensor, e: torch.Tensor) -> torch.Tensor: 26 | """Project vector v onto unit 
vector e.""" 27 | proj = torch.sum(v * e, dim=-1, keepdim=True) * e 28 | return proj 29 | 30 | 31 | def grahm_schmidt_np(v1: np.ndarray, v2: np.ndarray) -> np.ndarray: 32 | """Compute orthonormal basis from two vectors.""" 33 | v1 = v1.astype(np.float64) 34 | v2 = v2.astype(np.float64) 35 | u1 = v1 36 | e1 = u1 / np.linalg.norm(u1, axis=-1, keepdims=True) 37 | u2 = v2 - vec_projection_np(v2, e1) 38 | e2 = u2 / np.linalg.norm(u2, axis=-1, keepdims=True) 39 | e3 = np.cross(e1, e2, axis=-1) 40 | rot_matrix = np.concatenate([e1[..., None], e2[..., None], e3[..., None]], axis=-1) 41 | return rot_matrix 42 | 43 | 44 | def grahm_schmidt_th(v1: torch.Tensor, v2: torch.Tensor) -> torch.Tensor: 45 | """Compute orthonormal basis from two vectors.""" 46 | u1 = v1 47 | e1 = u1 / torch.norm(u1, dim=-1, keepdim=True) 48 | u2 = v2 - vec_projection_th(v2, e1) 49 | e2 = u2 / torch.norm(u2, dim=-1, keepdim=True) 50 | e3 = torch.cross(e1, e2, dim=-1) 51 | rot_matrix = torch.cat( 52 | [e1.unsqueeze(dim=-1), e2.unsqueeze(dim=-1), e3.unsqueeze(dim=-1)], dim=-1 53 | ) 54 | return rot_matrix 55 | 56 | 57 | def pfp_to_pose_np(robot_states: np.ndarray) -> np.ndarray: 58 | """Convert pfp state (T, 10) to 4x4 poses (T, 4, 4).""" 59 | T = robot_states.shape[0] 60 | poses = np.eye(4)[np.newaxis, ...] 61 | poses = np.tile(poses, (T, 1, 1)) 62 | poses[:, :3, 3] = robot_states[:, :3] 63 | poses[:, :3, :3] = grahm_schmidt_np(robot_states[:, 3:6], robot_states[:, 6:9]) 64 | return poses 65 | 66 | 67 | def pfp_to_pose_th(robot_states: torch.Tensor) -> torch.Tensor: 68 | """Convert pfp state (B, T, 10) to 4x4 poses (B, T, 4, 4) and gripper (B, T, 1).""" 69 | B = robot_states.shape[0] 70 | T = robot_states.shape[1] 71 | poses = ( 72 | torch.eye(4, device=robot_states.device) 73 | .unsqueeze(0) 74 | .unsqueeze(0) 75 | .expand(B, T, 4, 4) 76 | .contiguous() 77 | ) 78 | poses[..., :3, 3] = robot_states[..., :3] 79 | poses[..., :3, :3] = grahm_schmidt_th(robot_states[..., 3:6], robot_states[..., 6:9]) 80 | gripper = robot_states[..., -1:] 81 | return poses, gripper 82 | 83 | 84 | def rot6d_to_quat_np(rot6d: np.ndarray, order: str = "xyzs") -> np.ndarray: 85 | """Convert 6d rotation matrix to quaternion.""" 86 | rot = grahm_schmidt_np(rot6d[:3], rot6d[3:]) 87 | quat = r2q(rot, order=order) 88 | return quat 89 | 90 | 91 | def rot6d_to_rot_np(rot6d: np.ndarray) -> np.ndarray: 92 | """Convert 6d rotation matrix to 3x3 rotation matrix.""" 93 | rot = grahm_schmidt_np(rot6d[:3], rot6d[3:]) 94 | return rot 95 | 96 | 97 | def check_valid_rot(rot: np.ndarray) -> bool: 98 | """Check if the 3x3 rotation matrix is valid.""" 99 | valid = isrot(rot, check=True, tol=1e10) 100 | return valid 101 | 102 | 103 | def get_canonical_5p_th() -> torch.Tensor: 104 | """Return the (5,3) canonical 5points representation of the franka hand.""" 105 | gripper_width = 0.08 106 | left_y = 0.5 * gripper_width 107 | right_y = -0.5 * gripper_width 108 | mid_z = -0.041 109 | top_z = -0.1034 110 | a = [0, 0, top_z] 111 | b = [0, left_y, mid_z] 112 | c = [0, right_y, mid_z] 113 | d = [0, left_y, 0] 114 | e = [0, right_y, 0] 115 | pose_5p = torch.tensor([a, b, c, d, e]) 116 | return pose_5p 117 | 118 | 119 | def pfp_to_state5p_th(robot_states: torch.Tensor) -> torch.Tensor: 120 | """ 121 | Convert pfp state (B, T, 10) to 5points representation (B, T, 16). 
122 | 5p: [x0, y0, z0, x1, y1, z1, x2, y2, z2, x3, y3, z3, x4, y4, z4, gripper] 123 | """ 124 | device = robot_states.device 125 | poses, gripper = pfp_to_pose_th(robot_states) 126 | canonical_5p = get_canonical_5p_th().to(device) 127 | canonical_5p_homog = torch.cat([canonical_5p, torch.ones(5, 1, device=device)], dim=-1) 128 | poses_5p_homog = (poses @ canonical_5p_homog.mT).mT 129 | poses_5p = poses_5p_homog[..., :3].contiguous().flatten(start_dim=-2) 130 | state5p = torch.cat([poses_5p, gripper], dim=-1) 131 | return state5p 132 | 133 | 134 | def state5p_to_pfp_th(state5p: torch.Tensor) -> torch.Tensor: 135 | """ 136 | Convert 5points representation (B, T, 16) to pfp state (B, T, 10) using svd projection. 137 | """ 138 | device = state5p.device 139 | leading_dims = state5p.shape[0:2] 140 | # Flatten the batch and time dimensions 141 | state5p = state5p.reshape(-1, *state5p.shape[2:]) 142 | poses_5p, gripper = state5p[..., :-1], state5p[..., -1:] 143 | poses_5p = poses_5p.reshape(-1, 5, 3) 144 | canonical_5p = get_canonical_5p_th().expand(poses_5p.shape[0], 5, 3).to(device) 145 | with torch.cuda.amp.autocast(enabled=False): 146 | result = corresponding_points_alignment(canonical_5p, poses_5p) 147 | rotations = result.R.mT 148 | translations = result.T 149 | pfp_state = torch.cat([translations, rotations[..., 0], rotations[..., 1], gripper], dim=-1) 150 | # Reshape back to the batch and time dimensions 151 | pfp_state = pfp_state.reshape(*leading_dims, -1) 152 | return pfp_state 153 | 154 | 155 | def init_random_traj_th(B: int, T: int, noise_scale: float) -> torch.Tensor: 156 | """ 157 | B: batch size 158 | T: number of time steps 159 | """ 160 | # Position 161 | random_xyz = torch.randn((B, 1, 3), device=DEVICE) * noise_scale 162 | direction = torch.randn((B, 1, 3), device=DEVICE) 163 | direction = direction / torch.norm(direction, dim=-1, keepdim=True) 164 | t = torch.linspace(0, 1, T, device=DEVICE).unsqueeze(0).unsqueeze(-1) 165 | random_xyz = random_xyz + t * direction 166 | 167 | # Rotation 6d 168 | random_r1 = torch.randn((B, 1, 3), device=DEVICE) 169 | random_r1 = random_r1 / torch.norm(random_r1, dim=-1, keepdim=True) 170 | random_r2 = torch.randn((B, 1, 3), device=DEVICE) 171 | random_r2 = random_r2 - vec_projection_th(random_r2, random_r1) 172 | random_r2 = random_r2 / torch.norm(random_r2, dim=-1, keepdim=True) 173 | random_r6d = torch.cat([random_r1, random_r2], dim=-1) 174 | random_r6d = random_r6d.expand(B, T, 6) 175 | 176 | # Gripper 177 | gripper = torch.ones((B, T, 1), device=DEVICE) 178 | 179 | random_traj = torch.cat([random_xyz, random_r6d, gripper], dim=-1) 180 | return random_traj 181 | -------------------------------------------------------------------------------- /pfp/common/visualization.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import trimesh 3 | import numpy as np 4 | import open3d as o3d 5 | from yourdfpy.urdf import URDF 6 | from pfp.common.se3_utils import pfp_to_pose_np 7 | 8 | try: 9 | import rerun as rr 10 | except ImportError: 11 | print("WARNING: Rerun not installed. 
Visualization will not work.")
12 | 
13 | 
14 | class RerunViewer:
15 |     def __init__(self, name: str, addr: str = None):
16 |         rr.init(name)
17 |         if addr is None:
18 |             addr = "127.0.0.1"
19 |         port = ":9876"
20 |         rr.connect(addr + port)
21 |         RerunViewer.clear()
22 |         return
23 | 
24 |     @staticmethod
25 |     def add_obs_dict(obs_dict: dict, timestep: int = None):
26 |         if timestep is not None:
27 |             rr.set_time_sequence("timestep", timestep)
28 |         RerunViewer.add_rgb("rgb", obs_dict["image"])
29 |         RerunViewer.add_depth("depth", obs_dict["depth"])
30 |         RerunViewer.add_np_pointcloud(
31 |             "vis/pointcloud",
32 |             points=obs_dict["point_cloud"][:, :3],
33 |             colors_uint8=obs_dict["point_cloud"][:, 3:],
34 |         )
35 |         return
36 | 
37 |     @staticmethod
38 |     def add_o3d_pointcloud(name: str, pointcloud: o3d.geometry.PointCloud, radii: float = None):
39 |         points = np.asanyarray(pointcloud.points)
40 |         colors = np.asanyarray(pointcloud.colors) if pointcloud.has_colors() else None
41 |         colors_uint8 = (colors * 255).astype(np.uint8) if pointcloud.has_colors() else None
42 |         RerunViewer.add_np_pointcloud(name, points, colors_uint8, radii)
43 |         return
44 | 
45 |     @staticmethod
46 |     def add_np_pointcloud(
47 |         name: str, points: np.ndarray, colors_uint8: np.ndarray = None, radii: float = None
48 |     ):
49 |         rr_points = rr.Points3D(positions=points, colors=colors_uint8, radii=radii)
50 |         rr.log(name, rr_points)
51 |         return
52 | 
53 |     @staticmethod
54 |     def add_axis(name: str, pose: np.ndarray, size: float = 0.004, timeless: bool = False):
55 |         mesh = trimesh.creation.axis(origin_size=size, transform=pose)
56 |         RerunViewer.add_mesh_trimesh(name, mesh, timeless)
57 |         return
58 | 
59 |     @staticmethod
60 |     def add_aabb(name: str, centers: np.ndarray, extents: np.ndarray, timeless=False):
61 |         rr.log(name, rr.Boxes3D(centers=centers, sizes=extents), timeless=timeless)
62 |         return
63 | 
64 |     @staticmethod
65 |     def add_mesh_trimesh(name: str, mesh: trimesh.Trimesh, timeless: bool = False):
66 |         # Handle colors
67 |         if mesh.visual.kind in ["vertex", "face"]:
68 |             vertex_colors = mesh.visual.vertex_colors
69 |         elif mesh.visual.kind == "texture":
70 |             vertex_colors = mesh.visual.to_color().vertex_colors
71 |         else:
72 |             vertex_colors = None
73 |         # Log mesh
74 |         rr_mesh = rr.Mesh3D(
75 |             vertex_positions=mesh.vertices,
76 |             vertex_colors=vertex_colors,
77 |             vertex_normals=mesh.vertex_normals,
78 |             indices=mesh.faces,
79 |         )
80 |         rr.log(name, rr_mesh, timeless=timeless)
81 |         return
82 | 
83 |     @staticmethod
84 |     def add_mesh_list_trimesh(name: str, meshes: list[trimesh.Trimesh]):
85 |         for i, mesh in enumerate(meshes):
86 |             RerunViewer.add_mesh_trimesh(name + f"/{i}", mesh)
87 |         return
88 | 
89 |     @staticmethod
90 |     def add_rgb(name: str, rgb_uint8: np.ndarray):
91 |         if rgb_uint8.shape[0] == 3:
92 |             # CHW -> HWC
93 |             rgb_uint8 = np.transpose(rgb_uint8, (1, 2, 0))
94 |         rr.log(name, rr.Image(rgb_uint8))
95 | 
96 |     @staticmethod
97 |     def add_depth(name: str, depth: np.ndarray):
98 |         rr.log(name, rr.DepthImage(depth))
99 | 
100 |     @staticmethod
101 |     def add_traj(name: str, traj: np.ndarray):
102 |         """
103 |         name: str
104 |         traj: np.ndarray (T, 10)
105 |         """
106 |         poses = pfp_to_pose_np(traj)
107 |         for i, pose in enumerate(poses):
108 |             RerunViewer.add_axis(name + f"/{i}t", pose)
109 |         return
110 | 
111 |     @staticmethod
112 |     def clear():
113 |         rr.log("vis", rr.Clear(recursive=True))
114 |         return
115 | 
116 | 
117 | class RerunTraj:
118 |     def __init__(self) -> None:
119 |         self.traj_shape = None
120 |         return
121 | 
122 |     def add_traj(self, name: str, traj:
np.ndarray, size: float = 0.004):
123 |         """
124 |         name: str
125 |         traj: np.ndarray (T, 10)
126 |         """
127 |         if self.traj_shape is None or self.traj_shape != traj.shape:
128 |             self.traj_shape = traj.shape
129 |             for i in range(traj.shape[0]):
130 |                 RerunViewer.add_axis(name + f"/{i}t", np.eye(4), size)
131 |         poses = pfp_to_pose_np(traj)
132 |         for i, pose in enumerate(poses):
133 |             rr.log(
134 |                 name + f"/{i}t",
135 |                 rr.Transform3D(mat3x3=pose[:3, :3], translation=pose[:3, 3]),
136 |             )
137 |         return
138 | 
139 | 
140 | class RerunURDF:
141 |     def __init__(self, name: str, urdf_path: str, meshes_root: str):
142 |         self.name = name
143 |         self.urdf: URDF = URDF.load(urdf_path, mesh_dir=meshes_root)
144 |         return
145 | 
146 |     def update_vis(
147 |         self,
148 |         joint_state: list | np.ndarray,
149 |         root_pose: np.ndarray = np.eye(4),
150 |         name_suffix: str = "",
151 |     ):
152 |         self._update_joints(joint_state)
153 |         scene = self.urdf.scene
154 |         trimeshes = self._scene_to_trimeshes(scene)
155 |         trimeshes = [t.apply_transform(root_pose) for t in trimeshes]
156 |         RerunViewer.add_mesh_list_trimesh(self.name + name_suffix, trimeshes)
157 |         return
158 | 
159 |     def _update_joints(self, joint_state: list | np.ndarray):
160 |         assert len(joint_state) == len(self.urdf.actuated_joints), "Wrong number of joint values."
161 |         self.urdf.update_cfg(joint_state)
162 |         return
163 | 
164 |     def _scene_to_trimeshes(self, scene: trimesh.Scene) -> list[trimesh.Trimesh]:
165 |         """
166 |         Convert a trimesh.Scene to a list of trimesh.Trimesh.
167 | 
168 |         Skips objects that are not an instance of trimesh.Trimesh.
169 |         """
170 |         trimeshes = []
171 |         scene_dump = scene.dump()
172 |         geometries = [scene_dump] if not isinstance(scene_dump, list) else scene_dump
173 |         for geometry in geometries:
174 |             if isinstance(geometry, trimesh.Trimesh):
175 |                 trimeshes.append(geometry)
176 |             elif isinstance(geometry, trimesh.Scene):
177 |                 trimeshes.extend(self._scene_to_trimeshes(geometry))
178 |         return trimeshes
179 | 
-------------------------------------------------------------------------------- /pfp/data/dataset_images.py: --------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | import torch
3 | import numpy as np
4 | from diffusion_policy.common.sampler import SequenceSampler
5 | from pfp.data.replay_buffer import RobotReplayBuffer
6 | from pfp import DATA_DIRS
7 | 
8 | 
9 | class RobotDatasetImages(torch.utils.data.Dataset):
10 |     def __init__(
11 |         self,
12 |         data_path: str,
13 |         n_obs_steps: int,
14 |         n_pred_steps: int,
15 |         subs_factor: int = 1,  # 1 means no subsampling
16 |         **kwargs,  # extra dataset config keys are ignored
17 |     ) -> None:
18 |         """
19 |         Note: the sampled sequence covers n_obs_steps + n_pred_steps frames (in subsampled steps).
20 |         """
21 |         replay_buffer = RobotReplayBuffer.create_from_path(data_path, mode="r")
22 |         data_keys = ["robot_state", "images"]
23 |         data_key_first_k = {"images": n_obs_steps * subs_factor}
24 |         self.sampler = SequenceSampler(
25 |             replay_buffer=replay_buffer,
26 |             sequence_length=(n_obs_steps + n_pred_steps) * subs_factor - (subs_factor - 1),
27 |             pad_before=(n_obs_steps - 1) * subs_factor,
28 |             pad_after=(n_pred_steps - 1) * subs_factor + (subs_factor - 1),
29 |             keys=data_keys,
30 |             key_first_k=data_key_first_k,
31 |         )
32 |         self.n_obs_steps = n_obs_steps
33 |         self.n_prediction_steps = n_pred_steps
34 |         self.subs_factor = subs_factor
35 |         self.rng = np.random.default_rng()
36 |         return
37 | 
38 |     def __len__(self) -> int:
39 |         return len(self.sampler)
40 | 
41 |     def __getitem__(self, idx: int)
-> tuple[torch.Tensor, ...]: 42 | sample: dict[str, np.ndarray] = self.sampler.sample_sequence(idx) 43 | cur_step_i = self.n_obs_steps * self.subs_factor 44 | images = sample["images"][: cur_step_i : self.subs_factor] 45 | robot_state_obs = sample["robot_state"][: cur_step_i : self.subs_factor] 46 | robot_state_pred = sample["robot_state"][cur_step_i :: self.subs_factor] 47 | return images, robot_state_obs, robot_state_pred 48 | 49 | 50 | if __name__ == "__main__": 51 | dataset = RobotDatasetImages( 52 | data_path=DATA_DIRS.PFP / "open_fridge" / "train", 53 | n_obs_steps=2, 54 | n_pred_steps=8, 55 | subs_factor=5, 56 | ) 57 | i = 20 58 | obs, robot_state_obs, robot_state_pred = dataset[i] 59 | print("robot_state_obs: ", robot_state_obs) 60 | print("robot_state_pred: ", robot_state_pred) 61 | print("done") 62 | -------------------------------------------------------------------------------- /pfp/data/dataset_pcd.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import torch 3 | import numpy as np 4 | import pypose as pp 5 | from diffusion_policy.common.sampler import SequenceSampler 6 | from pfp.data.replay_buffer import RobotReplayBuffer 7 | from pfp.common.se3_utils import transform_th 8 | from pfp import DATA_DIRS 9 | 10 | 11 | def rand_range(low: float, high: float, size: tuple[int], device) -> torch.Tensor: 12 | return torch.rand(size, device=device) * (high - low) + low 13 | 14 | 15 | def augment_pcd_data(batch: tuple[torch.Tensor, ...]) -> tuple[torch.Tensor, ...]: 16 | pcd, robot_state_obs, robot_state_pred = batch 17 | BT_robot_obs = robot_state_obs.shape[:-1] 18 | BT_robot_pred = robot_state_pred.shape[:-1] 19 | 20 | # sigma=(sigma_transl, sigma_rot_rad) 21 | transform = pp.randn_SE3(sigma=(0.1, 0.2), device=pcd.device).matrix() 22 | 23 | pcd[..., :3] = transform_th(transform, pcd[..., :3]) 24 | robot_obs_pseudoposes = robot_state_obs[..., :9].reshape(*BT_robot_obs, 3, 3) 25 | robot_pred_pseudoposes = robot_state_pred[..., :9].reshape(*BT_robot_pred, 3, 3) 26 | robot_obs_pseudoposes = transform_th(transform, robot_obs_pseudoposes) 27 | robot_pred_pseudoposes = transform_th(transform, robot_pred_pseudoposes) 28 | robot_state_obs[..., :9] = robot_obs_pseudoposes.reshape(*BT_robot_obs, 9) 29 | robot_state_pred[..., :9] = robot_pred_pseudoposes.reshape(*BT_robot_pred, 9) 30 | 31 | # We shuffle the points, i.e. 
shuffle pcd along dim=2 (B, T, P, 3)
32 |     idx = torch.randperm(pcd.shape[2])
33 |     pcd = pcd[:, :, idx, :]
34 |     return pcd, robot_state_obs, robot_state_pred
35 | 
36 | 
37 | class RobotDatasetPcd(torch.utils.data.Dataset):
38 |     def __init__(
39 |         self,
40 |         data_path: str,
41 |         n_obs_steps: int,
42 |         n_pred_steps: int,
43 |         use_pc_color: bool,
44 |         n_points: int,
45 |         subs_factor: int = 1,  # 1 means no subsampling
46 |     ) -> None:
47 |         """
48 |         Note: the sampled sequence covers n_obs_steps + n_pred_steps frames (in subsampled steps).
49 |         """
50 |         replay_buffer = RobotReplayBuffer.create_from_path(data_path, mode="r")
51 |         data_keys = ["robot_state", "pcd_xyz"]
52 |         data_key_first_k = {"pcd_xyz": n_obs_steps * subs_factor}
53 |         if use_pc_color:
54 |             data_keys.append("pcd_color")
55 |             data_key_first_k["pcd_color"] = n_obs_steps * subs_factor
56 |         self.sampler = SequenceSampler(
57 |             replay_buffer=replay_buffer,
58 |             sequence_length=(n_obs_steps + n_pred_steps) * subs_factor - (subs_factor - 1),
59 |             pad_before=(n_obs_steps - 1) * subs_factor,
60 |             pad_after=(n_pred_steps - 1) * subs_factor + (subs_factor - 1),
61 |             keys=data_keys,
62 |             key_first_k=data_key_first_k,
63 |         )
64 |         self.n_obs_steps = n_obs_steps
65 |         self.n_prediction_steps = n_pred_steps
66 |         self.subs_factor = subs_factor
67 |         self.use_pc_color = use_pc_color
68 |         self.n_points = n_points
69 |         self.rng = np.random.default_rng()
70 |         return
71 | 
72 |     def __len__(self) -> int:
73 |         return len(self.sampler)
74 | 
75 |     def __getitem__(self, idx: int) -> tuple[torch.Tensor, ...]:
76 |         sample: dict[str, np.ndarray] = self.sampler.sample_sequence(idx)
77 |         cur_step_i = self.n_obs_steps * self.subs_factor
78 |         pcd = sample["pcd_xyz"][: cur_step_i : self.subs_factor]
79 |         if self.use_pc_color:
80 |             pcd_color = sample["pcd_color"][: cur_step_i : self.subs_factor]
81 |             pcd_color = pcd_color.astype(np.float32) / 255.0
82 |             pcd = np.concatenate([pcd, pcd_color], axis=-1)
83 |         robot_state_obs = sample["robot_state"][: cur_step_i : self.subs_factor].astype(np.float32)
84 |         robot_state_pred = sample["robot_state"][cur_step_i :: self.subs_factor].astype(np.float32)
85 |         # Randomly subsample the pcd points
86 |         if pcd.shape[1] > self.n_points:
87 |             random_indices = np.random.choice(pcd.shape[1], self.n_points, replace=False)
88 |             pcd = pcd[:, random_indices]
89 |         return pcd, robot_state_obs, robot_state_pred
90 | 
91 | 
92 | if __name__ == "__main__":
93 |     dataset = RobotDatasetPcd(
94 |         data_path=DATA_DIRS.PFP / "open_fridge" / "train",
95 |         n_obs_steps=2,
96 |         n_pred_steps=8,
97 |         subs_factor=5,
98 |         use_pc_color=False,
99 |         n_points=4096,
100 |     )
101 |     i = 20
102 |     obs, robot_state_obs, robot_state_pred = dataset[i]
103 |     print("robot_state_obs: ", robot_state_obs)
104 |     print("robot_state_pred: ", robot_state_pred)
105 |     print("done")
106 | 
-------------------------------------------------------------------------------- /pfp/data/replay_buffer.py: --------------------------------------------------------------------------------
1 | from __future__ import annotations
2 | import zarr
3 | import numpy as np
4 | from diffusion_policy.common.replay_buffer import ReplayBuffer
5 | from diffusion_policy.codecs.imagecodecs_numcodecs import register_codec, Jpeg2k
6 | 
7 | register_codec(Jpeg2k)
8 | 
9 | 
10 | class RobotReplayBuffer(ReplayBuffer):
11 |     def __init__(self, root: zarr.Group):
12 |         super().__init__(root)
13 |         self.jpeg_compressor = Jpeg2k()
14 |         return
15 | 
16 |     def add_episode_from_list(self, data_list: list[dict[str, np.ndarray]], **kwargs):
17 |         """
18 |         data_list is a list of dictionaries, where each dictionary contains the data for one step.
19 |         """
20 |         data_dict = dict()
21 |         for key in data_list[0].keys():
22 |             data_dict[key] = np.stack([x[key] for x in data_list])
23 |         self.add_episode(data_dict, **kwargs)
24 |         return
25 | 
26 |     def add_episode_from_list_compressed(self, data_list: list[dict[str, np.ndarray]], **kwargs):
27 |         """
28 |         data_list is a list of dictionaries, where each dictionary contains the data for one step.
29 |         WARNING: decoding (i.e. reading) is broken.
30 |         """
31 |         data_dict = {key: np.stack([x[key] for x in data_list]) for key in data_list[0].keys()}
32 |         # get the keys starting with 'rgb*'
33 |         rgb_keys = [key for key in data_dict.keys() if key.startswith("rgb")]
34 |         rgb_shapes = [data_list[0][key].shape for key in rgb_keys]
35 |         chunks = {rgb_keys[i]: (1, *rgb_shapes[i]) for i in range(len(rgb_keys))}
36 |         compressors = {key: self.jpeg_compressor for key in rgb_keys}
37 |         self.add_episode(data_dict, chunks, compressors, **kwargs)
38 |         return
39 | 
-------------------------------------------------------------------------------- /pfp/envs/base_env.py: --------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | 
3 | 
4 | class BaseEnv(ABC):
5 |     """
6 |     The base abstract class for all envs.
7 |     """
8 | 
9 |     @abstractmethod
10 |     def reset(self):
11 |         pass
12 | 
13 |     @abstractmethod
14 |     def reset_rng(self):
15 |         pass
16 | 
17 |     @abstractmethod
18 |     def step(self, action):
19 |         pass
20 | 
21 |     @abstractmethod
22 |     def get_obs(self):
23 |         pass
24 | 
-------------------------------------------------------------------------------- /pfp/envs/rlbench_env.py: --------------------------------------------------------------------------------
1 | import time
2 | import numpy as np
3 | import open3d as o3d
4 | import spatialmath.base as sm
5 | from pyrep.const import RenderMode
6 | from pfp.envs.base_env import BaseEnv
7 | from pyrep.errors import IKError
8 | from rlbench.environment import Environment
9 | from rlbench.backend.observation import Observation
10 | from rlbench.backend.exceptions import InvalidActionError
11 | from rlbench.action_modes.action_mode import MoveArmThenGripper
12 | from rlbench.action_modes.gripper_action_modes import Discrete
13 | from rlbench.action_modes.arm_action_modes import EndEffectorPoseViaPlanning
14 | from rlbench.observation_config import ObservationConfig, CameraConfig
15 | from rlbench.utils import name_to_task_class
16 | from pfp.common.visualization import RerunViewer as RV
17 | from pfp.common.o3d_utils import make_pcd, merge_pcds
18 | from pfp.common.se3_utils import rot6d_to_quat_np, pfp_to_pose_np
19 | 
20 | try:
21 |     import rerun as rr
22 | except ImportError:
23 |     print("WARNING: Rerun not installed. Visualization will not work.")
24 | 
25 | 
26 | class RLBenchEnv(BaseEnv):
27 |     """
28 |     DT = 0.05 (50ms/20Hz)
29 |     robot_state = [px, py, pz, r00, r10, r20, r01, r11, r21, gripper]
30 |     The pose is that of the tool tip ("ttip") frame, with x pointing backwards, y pointing left, and z pointing down.
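    For example, given a single state row s of shape (10,), the full 4x4 end-effector
    pose can be recovered with pfp_to_pose_np(s[np.newaxis])[0], which rebuilds the
    rotation matrix from the two stored columns via Gram-Schmidt orthonormalization.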
31 |     """
32 | 
33 |     def __init__(
34 |         self,
35 |         task_name: str,
36 |         voxel_size: float,
37 |         n_points: int,
38 |         use_pc_color: bool,
39 |         headless: bool,
40 |         vis: bool,
41 |         obs_mode: str = "pcd",
42 |     ):
43 |         assert obs_mode in ["pcd", "rgb"], "Invalid obs_mode"
44 |         self.obs_mode = obs_mode
45 |         # image_size=(128, 128)
46 |         self.voxel_size = voxel_size
47 |         self.n_points = n_points
48 |         self.use_pc_color = use_pc_color
49 |         camera_config = CameraConfig(
50 |             rgb=True,
51 |             depth=False,
52 |             mask=False,
53 |             point_cloud=True,
54 |             image_size=(128, 128),
55 |             render_mode=RenderMode.OPENGL,
56 |         )
57 |         obs_config = ObservationConfig(
58 |             left_shoulder_camera=camera_config,
59 |             right_shoulder_camera=camera_config,
60 |             overhead_camera=camera_config,
61 |             wrist_camera=camera_config,
62 |             front_camera=camera_config,
63 |             gripper_matrix=True,
64 |             gripper_joint_positions=True,
65 |         )
66 |         # EE pose is (X,Y,Z,Qx,Qy,Qz,Qw)
67 |         action_mode = MoveArmThenGripper(
68 |             arm_action_mode=EndEffectorPoseViaPlanning(), gripper_action_mode=Discrete()
69 |         )
70 |         self.env = Environment(
71 |             action_mode,
72 |             obs_config=obs_config,
73 |             headless=headless,
74 |         )
75 |         self.env.launch()
76 |         self.task = self.env.get_task(name_to_task_class(task_name))
77 |         self.robot_position = self.env._robot.arm.get_position()
78 |         self.ws_aabb = o3d.geometry.AxisAlignedBoundingBox(
79 |             min_bound=(self.robot_position[0] + 0.1, -0.65, self.robot_position[2] - 0.05),
80 |             max_bound=(1, 0.65, 2),
81 |         )
82 |         self.vis = vis
83 |         self.last_obs = None
84 |         if self.vis:
85 |             RV.add_axis("vis/origin", np.eye(4), size=0.01, timeless=True)
86 |             RV.add_aabb(
87 |                 "vis/ws_aabb", self.ws_aabb.get_center(), self.ws_aabb.get_extent(), timeless=True
88 |             )
89 |         return
90 | 
91 |     def reset(self):
92 |         self.task.reset()
93 |         return
94 | 
95 |     def reset_rng(self):
96 |         return
97 | 
98 |     def step(self, robot_state: np.ndarray):
99 |         ee_position = robot_state[:3]
100 |         ee_quat = rot6d_to_quat_np(robot_state[3:9])
101 |         gripper = robot_state[-1:]
102 |         action = np.concatenate([ee_position, ee_quat, gripper])
103 |         reward, terminate = self._step_safe(action)
104 |         return reward, terminate
105 | 
106 |     def _step_safe(self, action: np.ndarray, recursion_depth=0):
107 |         if recursion_depth > 15:
108 |             print("Warning: Recursion depth limit reached.")
109 |             return 0.0, True
110 |         try:
111 |             _, reward, terminate = self.task.step(action)
112 |         except (IKError, InvalidActionError) as e:
113 |             print(e)
114 |             cur_position = self.last_obs.gripper_pose[:3]
115 |             des_position = action[:3]
116 |             new_position = cur_position + (des_position - cur_position) * 0.25
117 | 
118 |             cur_quat = self.last_obs.gripper_pose[3:]
119 |             cur_quat = np.array([cur_quat[3], cur_quat[0], cur_quat[1], cur_quat[2]])  # xyzw -> wxyz
120 |             des_quat = action[3:7]
121 |             des_quat = np.array([des_quat[3], des_quat[0], des_quat[1], des_quat[2]])  # xyzw -> wxyz
122 |             new_quat = sm.qslerp(cur_quat, des_quat, 0.25, shortest=True)
123 |             new_quat = np.array([new_quat[1], new_quat[2], new_quat[3], new_quat[0]])  # wxyz -> xyzw
124 | 
125 |             new_action = np.concatenate([new_position, new_quat, action[-1:]])
126 |             reward, terminate = self._step_safe(new_action, recursion_depth + 1)
127 |         return reward, terminate
128 | 
129 |     def get_obs(self) -> tuple[np.ndarray, ...]:
130 |         obs_rlbench = self.task.get_observation()
131 |         self.last_obs = obs_rlbench
132 |         robot_state = self.get_robot_state(obs_rlbench)
133 |         if self.obs_mode == "pcd":
134 |             pcd_o3d = self.get_pcd(obs_rlbench)
135 |             pcd = np.asarray(pcd_o3d.points)
136 |             if self.use_pc_color:
137
| pcd_color = np.asarray(pcd_o3d.colors, dtype=np.float32) 138 | pcd = np.concatenate([pcd, pcd_color], axis=-1) 139 | obs = pcd 140 | elif self.obs_mode == "rgb": 141 | obs = self.get_images(obs_rlbench) 142 | return robot_state, obs 143 | 144 | def get_robot_state(self, obs: Observation) -> np.ndarray: 145 | ee_position = obs.gripper_matrix[:3, 3] 146 | ee_rot6d = obs.gripper_matrix[:3, :2].flatten(order="F") 147 | gripper = np.array([obs.gripper_open]) 148 | robot_state = np.concatenate([ee_position, ee_rot6d, gripper]) 149 | return robot_state 150 | 151 | def get_pcd(self, obs: Observation) -> o3d.geometry.PointCloud: 152 | right_pcd = make_pcd(obs.right_shoulder_point_cloud, obs.right_shoulder_rgb) 153 | left_pcd = make_pcd(obs.left_shoulder_point_cloud, obs.left_shoulder_rgb) 154 | overhead_pcd = make_pcd(obs.overhead_point_cloud, obs.overhead_rgb) 155 | front_pcd = make_pcd(obs.front_point_cloud, obs.front_rgb) 156 | wrist_pcd = make_pcd(obs.wrist_point_cloud, obs.wrist_rgb) 157 | pcd_list = [right_pcd, left_pcd, overhead_pcd, front_pcd, wrist_pcd] 158 | pcd = merge_pcds(self.voxel_size, self.n_points, pcd_list, self.ws_aabb) 159 | return pcd 160 | 161 | def get_images(self, obs: Observation) -> np.ndarray: 162 | images = np.stack( 163 | ( 164 | obs.right_shoulder_rgb, 165 | obs.left_shoulder_rgb, 166 | obs.overhead_rgb, 167 | obs.front_rgb, 168 | obs.wrist_rgb, 169 | ) 170 | ) 171 | return images 172 | 173 | def vis_step(self, robot_state: np.ndarray, obs: np.ndarray, prediction: np.ndarray = None): 174 | """ 175 | robot_state: the current robot state (10,) 176 | obs: either pcd or images 177 | - pcd: the current point cloud (N, 6) or (N, 3) 178 | - images: the current images (5, H, W, 3) 179 | prediction: the full trajectory of robot states (T, 10) 180 | """ 181 | VIS_FLOW = False 182 | if not self.vis: 183 | return 184 | rr.set_time_seconds("time", time.time()) 185 | 186 | # Point cloud 187 | if self.obs_mode == "pcd": 188 | pcd = obs 189 | pcd_xyz = pcd[:, :3] 190 | pcd_color = (pcd[:, 3:6] * 255).astype(np.uint8) if self.use_pc_color else None 191 | RV.add_np_pointcloud("vis/pcd_obs", points=pcd_xyz, colors_uint8=pcd_color, radii=0.003) 192 | 193 | # RGB images 194 | elif self.obs_mode == "rgb": 195 | images = obs 196 | for i, img in enumerate(images): 197 | RV.add_rgb(f"vis/rgb_obs_{i}", img) 198 | 199 | # EE State 200 | ee_pose = pfp_to_pose_np(robot_state[np.newaxis, ...]).squeeze() 201 | RV.add_axis("vis/ee_state", ee_pose) 202 | rr.log("plot/gripper_state", rr.Scalar(robot_state[-1])) 203 | 204 | if prediction is None: 205 | return 206 | 207 | # EE predictions 208 | final_pred = prediction[-1] 209 | if VIS_FLOW: 210 | for traj in prediction: 211 | RV.add_traj("vis/traj_k", traj) 212 | else: 213 | RV.add_traj("vis/ee_pred", final_pred) 214 | 215 | # Gripper action prediction 216 | rr.log("plot/gripper_pred", rr.Scalar(final_pred[0, -1])) 217 | return 218 | 219 | def close(self): 220 | self.env.shutdown() 221 | return 222 | 223 | 224 | if __name__ == "__main__": 225 | env = RLBenchEnv( 226 | "close_microwave", 227 | voxel_size=0.01, 228 | n_points=5500, 229 | use_pc_color=False, 230 | headless=True, 231 | vis=True, 232 | ) 233 | env.reset() 234 | for i in range(1000): 235 | robot_state, pcd = env.get_obs() 236 | next_robot_state = robot_state.copy() 237 | next_robot_state[:3] += np.array([-0.005, 0.005, 0.0]) 238 | env.step(next_robot_state) 239 | env.close() 240 | -------------------------------------------------------------------------------- /pfp/envs/rlbench_runner.py: 
-------------------------------------------------------------------------------- 1 | import wandb 2 | from tqdm import tqdm 3 | from pfp.envs.rlbench_env import RLBenchEnv 4 | from pfp.policy.base_policy import BasePolicy 5 | 6 | 7 | class RLBenchRunner: 8 | def __init__( 9 | self, 10 | num_episodes: int, 11 | max_episode_length: int, 12 | env_config: dict, 13 | verbose=False, 14 | ) -> None: 15 | self.env: RLBenchEnv = RLBenchEnv(**env_config) 16 | self.num_episodes = num_episodes 17 | self.max_episode_length = max_episode_length 18 | self.verbose = verbose 19 | return 20 | 21 | def run(self, policy: BasePolicy): 22 | wandb.define_metric("success", summary="mean") 23 | wandb.define_metric("steps", summary="mean") 24 | success_list: list[bool] = [] 25 | steps_list: list[int] = [] 26 | self.env.reset_rng() 27 | for episode in tqdm(range(self.num_episodes)): 28 | policy.reset_obs() 29 | self.env.reset() 30 | for step in range(self.max_episode_length): 31 | robot_state, obs = self.env.get_obs() 32 | prediction = policy.predict_action(obs, robot_state) 33 | self.env.vis_step(robot_state, obs, prediction) 34 | next_robot_state = prediction[-1, 0] # Last K step, first T step 35 | reward, terminate = self.env.step(next_robot_state) 36 | success = bool(reward) 37 | if success or terminate: 38 | break 39 | success_list.append(success) 40 | if success: 41 | steps_list.append(step) 42 | if self.verbose: 43 | print(f"Steps: {step}") 44 | print(f"Success: {success}") 45 | wandb.log({"episode": episode, "success": int(success), "steps": step}) 46 | return success_list, steps_list 47 | -------------------------------------------------------------------------------- /pfp/policy/base_policy.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import numpy as np 3 | from collections import deque 4 | from abc import ABC, abstractmethod 5 | from pfp import DEVICE 6 | 7 | 8 | class BasePolicy(ABC): 9 | """ 10 | The base abstract class for all policies. 
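    Observation histories are kept in deques: with, say, n_obs_steps=2 and subs_factor=3,
    maxlen = 2 * 3 - (3 - 1) = 4 frames are stored, and sample_stacked_obs() returns
    frames [0] and [3], i.e. every subs_factor-th frame.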
11 |     """
12 | 
13 |     def __init__(self, n_obs_steps: int, subs_factor: int = 1) -> None:
14 |         maxlen = n_obs_steps * subs_factor - (subs_factor - 1)
15 |         self.obs_list = deque(maxlen=maxlen)
16 |         self.robot_state_list = deque(maxlen=maxlen)
17 |         self.subs_factor = subs_factor
18 |         return
19 | 
20 |     def reset_obs(self):
21 |         self.obs_list.clear()
22 |         self.robot_state_list.clear()
23 |         return
24 | 
25 |     def update_obs_lists(self, obs: np.ndarray, robot_state: np.ndarray):
26 |         self.obs_list.append(obs)
27 |         if len(self.obs_list) < self.obs_list.maxlen:
28 |             self.obs_list.extendleft(
29 |                 [self.obs_list[0]] * (self.obs_list.maxlen - len(self.obs_list))
30 |             )
31 |         self.robot_state_list.append(robot_state)
32 |         if len(self.robot_state_list) < self.robot_state_list.maxlen:
33 |             n = self.robot_state_list.maxlen - len(self.robot_state_list)
34 |             self.robot_state_list.extendleft([self.robot_state_list[0]] * n)
35 |         return
36 | 
37 |     def sample_stacked_obs(self) -> tuple[np.ndarray, ...]:
38 |         obs_stacked = np.stack(self.obs_list, axis=0)[:: self.subs_factor]
39 |         robot_state_stacked = np.stack(self.robot_state_list, axis=0)[:: self.subs_factor]
40 |         return obs_stacked, robot_state_stacked
41 | 
42 |     def predict_action(self, obs: np.ndarray, robot_state: np.ndarray) -> np.ndarray:
43 |         self.update_obs_lists(obs, robot_state)
44 |         obs_stacked, robot_state_stacked = self.sample_stacked_obs()
45 |         action = self.infer_from_np(obs_stacked, robot_state_stacked)
46 |         return action
47 | 
48 |     def infer_from_np(self, obs: np.ndarray, robot_state: np.ndarray) -> np.ndarray:
49 |         obs_th = torch.tensor(obs, device=DEVICE).unsqueeze(0)
50 |         robot_state_th = torch.tensor(robot_state, device=DEVICE).unsqueeze(0)
51 |         obs_th = self._norm_obs(obs_th)
52 |         robot_state_th = self._norm_robot_state(robot_state_th)
53 |         ny = self.infer_y(
54 |             obs_th,
55 |             robot_state_th,
56 |             return_traj=True,
57 |         )
58 |         ny = self._denorm_robot_state(ny)
59 |         ny = ny.squeeze(0).detach().cpu().numpy()  # drop only the batch dim (K or T may be 1)
60 |         # Return the full trajectory (both integration time K and horizon T)
61 |         return ny  # (K, T, 10)
62 | 
63 |     @abstractmethod
64 |     def _norm_obs(self, obs: torch.Tensor) -> torch.Tensor:
65 |         pass
66 | 
67 |     @abstractmethod
68 |     def _norm_robot_state(self, robot_state: torch.Tensor) -> torch.Tensor:
69 |         pass
70 | 
71 |     @abstractmethod
72 |     def _denorm_robot_state(self, robot_state: torch.Tensor) -> torch.Tensor:
73 |         pass
74 | 
75 |     @abstractmethod
76 |     def infer_y(
77 |         self, obs: torch.Tensor, robot_state: torch.Tensor, return_traj: bool
78 |     ) -> torch.Tensor:
79 |         pass
80 | 
-------------------------------------------------------------------------------- /pyproject.toml: --------------------------------------------------------------------------------
1 | # https://packaging.python.org/en/latest/specifications/declaring-project-metadata/#declaring-project-metadata
2 | # https://packaging.python.org/en/latest/tutorials/packaging-projects/
3 | 
4 | [build-system]
5 | requires = ["hatchling"]
6 | build-backend = "hatchling.build"
7 | 
8 | [tool.setuptools]
9 | py-modules = ["pfp"]
10 | 
11 | [tool.black]
12 | line-length = 100
13 | 
14 | [project]
15 | name = "pfp"
16 | version = "0.0.1"
17 | authors = [{ name = "Eugenio Chisari", email = "eugenio.chisari@gmail.com" }]
18 | description = ""
19 | readme = "README.md"
20 | requires-python = ">=3.10"  # the code uses 3.10-only annotation syntax; matches the conda env in the README
21 | dependencies = [
22 |     "numpy==1.23.5",
23 |     "spatialmath-python==1.1.9",
24 |     "prompt-toolkit==3.0.36",
25 |     "ipython<=8.17.2",
26 |     "trimesh==4.3.2",
27 |     "open3d==0.18.0",
28 |     "numba<=0.59.1",
29 |
"zarr<=2.17.2", 30 | "matplotlib<=3.8.4", 31 | "torch<=2.1.2", 32 | "torchvision<=0.16.2", 33 | "einops==0.7.0", 34 | "diffusers==0.27.2", 35 | "composer<=0.21.3", 36 | "hydra-core==1.3.2", 37 | "wandb<=0.17.3", 38 | "av==8.1.0", 39 | "yourdfpy==0.0.56", 40 | "geomstats[pytorch]==2.7.0", 41 | "imagecodecs" 42 | ] 43 | 44 | 45 | [project.optional-dependencies] 46 | dev = [] 47 | -------------------------------------------------------------------------------- /sandbox/augmentation.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import numpy as np 3 | from torch.utils.data import DataLoader 4 | from pfp import DATA_DIRS 5 | from pfp.data.dataset_pcd import RobotDatasetPcd, augment_pcd_data 6 | from pfp.common.visualization import RerunViewer as RV 7 | from pfp.common.visualization import RerunTraj 8 | import rerun as rr 9 | 10 | rr_traj = { 11 | "original_robot_obs": RerunTraj(), 12 | "augmented_robot_obs": RerunTraj(), 13 | "original_prediction": RerunTraj(), 14 | "augmented_prediction": RerunTraj(), 15 | } 16 | 17 | 18 | def vis_batch(name, batch): 19 | pcd, robot_state_obs, robot_state_pred = batch 20 | pcd = pcd[0, -1].cpu().numpy() 21 | robot_state_obs = robot_state_obs[0].cpu().numpy() 22 | robot_state_pred = robot_state_pred[0].cpu().numpy() 23 | RV.add_np_pointcloud( 24 | f"vis/{name}_pcd", points=pcd[:, :3], colors_uint8=(pcd[:, 3:6] * 255).astype(np.uint8) 25 | ) 26 | rr_traj[f"{name}_robot_obs"].add_traj(f"{name}_robot_obs", robot_state_obs, size=0.008) 27 | rr_traj[f"{name}_prediction"].add_traj(f"{name}_prediction", robot_state_pred) 28 | return 29 | 30 | 31 | RV("augmentation_vis") 32 | RV.add_axis("vis/origin", np.eye(4), timeless=True) 33 | 34 | task_name = "sponge_on_plate" 35 | 36 | data_path_train = DATA_DIRS.PFP_REAL / task_name / "train" 37 | dataset_train = RobotDatasetPcd( 38 | data_path_train, 39 | n_obs_steps=2, 40 | n_pred_steps=32, 41 | subs_factor=3, 42 | use_pc_color=False, 43 | n_points=4096, 44 | ) 45 | dataloader_train = DataLoader( 46 | dataset_train, 47 | shuffle=False, 48 | batch_size=1, 49 | persistent_workers=False, 50 | ) 51 | 52 | for i, batch in enumerate(dataloader_train): 53 | rr.set_time_sequence("step", i) 54 | original_batch = copy.deepcopy(batch) 55 | vis_batch("original", original_batch) 56 | 57 | augmented_batch = copy.deepcopy(batch) 58 | augmented_batch = augment_pcd_data(augmented_batch) 59 | vis_batch("augmented", augmented_batch) 60 | 61 | if i > 500: 62 | break 63 | -------------------------------------------------------------------------------- /sandbox/learning_rate.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.optim import AdamW 3 | from torch.optim.lr_scheduler import LambdaLR 4 | from diffusion_policy.model.common.lr_scheduler import get_scheduler 5 | 6 | 7 | epochs = 2000 8 | len_dataset = 10000 9 | 10 | params = torch.Tensor(1, 1, 1, 1) 11 | optimizer = AdamW([params], lr=1.0e-4, betas=[0.95, 0.999], eps=1.0e-8, weight_decay=1.0e-6) 12 | lr_scheduler: LambdaLR = get_scheduler( 13 | "cosine", 14 | optimizer=optimizer, 15 | num_warmup_steps=500, 16 | num_training_steps=(len_dataset * epochs), 17 | # pytorch assumes stepping LRScheduler every epoch 18 | # however huggingface diffusers steps it every batch 19 | ) 20 | 21 | for epoch in range(epochs): 22 | # for _ in range(len_dataset): 23 | lr_scheduler.step() 24 | # print(lr_scheduler.get_last_lr()) 25 | 26 | if epoch % 100 == 0: 27 | print(f"Epoch: {epoch}, 
LR: {lr_scheduler.get_last_lr()}") 28 | -------------------------------------------------------------------------------- /sandbox/pypose_play.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import trimesh 3 | import numpy as np 4 | import pypose as pp 5 | import rerun as rr 6 | 7 | 8 | def rr_init(name: str, addr: str = None): 9 | rr.init(name) 10 | if addr is None: 11 | addr = "127.0.0.1" 12 | port = ":9876" 13 | rr.connect(addr + port) 14 | rr.log("vis", rr.Clear(recursive=True)) 15 | return 16 | 17 | 18 | def rr_add_axis(name: str, pose: np.ndarray, size: float = 0.004, timeless: bool = False): 19 | mesh = trimesh.creation.axis(origin_size=size, transform=pose) 20 | # Handle colors 21 | if mesh.visual.kind in ["vertex", "face"]: 22 | vertex_colors = mesh.visual.vertex_colors 23 | elif mesh.visual.kind == "texture": 24 | vertex_colors = mesh.visual.to_color().vertex_colors 25 | else: 26 | vertex_colors = None 27 | # Log mesh 28 | rr_mesh = rr.Mesh3D( 29 | vertex_positions=mesh.vertices, 30 | vertex_colors=vertex_colors, 31 | vertex_normals=mesh.vertex_normals, 32 | indices=mesh.faces, 33 | ) 34 | rr.log(name, rr_mesh, timeless=timeless) 35 | return 36 | 37 | 38 | # Print random twists 39 | for _ in range(50): 40 | euler = torch.FloatTensor(1, 3).uniform_(-torch.pi / 2, torch.pi / 2) 41 | twist = pp.Log(pp.euler2SO3(euler)) 42 | # print(euler) 43 | # print(twist) 44 | 45 | 46 | # Exp-map: se3 -> SE3 47 | # Identity (SE3) + twist (se3) = pose (SE3) 48 | twist = pp.randn_se3(1) 49 | pose = pp.Exp(twist) 50 | del twist, pose 51 | 52 | # Log-map: SE3 -> se3 53 | # pose (SE3) - Identity (SE3) = twist (se3) 54 | pose = pp.randn_SE3(1) 55 | twist = pp.Log(pose) 56 | del pose, twist 57 | 58 | # Right-plus operator: SE3 + se3 = SE3 59 | # This is in local frame, i.e. in the tangent space of the start pose 60 | start_pose = pp.randn_SE3(1) 61 | twist = pp.randn_se3(1) 62 | end_pose = start_pose @ pp.Exp(twist) 63 | del start_pose, twist, end_pose 64 | 65 | # Right-minus operator: SE3 - SE3 = se3 66 | # This is in local frame, i.e. in the tangent space of the start pose 67 | start_pose = pp.randn_SE3(1) 68 | end_pose = pp.randn_SE3(1) 69 | twist = pp.Log(pp.Inv(start_pose) @ end_pose) 70 | del start_pose, end_pose, twist 71 | 72 | # Left-plus operator: SE3 + se3 = SE3 73 | # This is in global frame, i.e. in the tangent space of the identity pose 74 | start_pose = pp.randn_SE3(1) 75 | twist = pp.randn_se3(1) 76 | end_pose_a = start_pose + twist 77 | end_pose_b = pp.Retr(start_pose, twist) 78 | end_pose_c = pp.Exp(twist) @ start_pose 79 | assert torch.allclose(end_pose_a, end_pose_b) 80 | assert torch.allclose(end_pose_a, end_pose_c) 81 | del start_pose, twist, end_pose_a, end_pose_b, end_pose_c 82 | 83 | # Left-minus operator: SE3 - SE3 = se3 84 | # This is in global frame, i.e. 
in the tangent space of the identity pose
85 | start_pose = pp.randn_SE3(1)
86 | end_pose = pp.randn_SE3(1)
87 | twist = pp.Log(end_pose @ pp.Inv(start_pose))
88 | del start_pose, end_pose, twist
89 | 
90 | # Simple simulation
91 | rr_init("sandbox")
92 | T0_th = torch.eye(4).unsqueeze(0)
93 | T0_th[:, :3, 3] = torch.tensor([0.5, 0, 0])
94 | # Rotate -90 degrees
95 | T0_th[:, :3, 0] = torch.tensor([0, -1, 0])
96 | T0_th[:, :3, 1] = torch.tensor([1, 0, 0])
97 | 
98 | T1_th = torch.eye(4).unsqueeze(0)
99 | T1_th[:, :3, 3] = torch.tensor([0, 0.5, 0])
100 | T1_th[:, :3, 0], T1_th[:, :3, 1] = T1_th[:, :3, 1], -T1_th[:, :3, 0]  # Rotate 90 degrees
101 | 
102 | rr_add_axis("vis/origin", np.eye(4), size=0.01, timeless=True)
103 | rr_add_axis("vis/T0", T0_th[0].numpy(), timeless=True)
104 | rr_add_axis("vis/T1", T1_th[0].numpy(), timeless=True)
105 | 
106 | 
107 | T0_pp = pp.mat2SE3(T0_th)
108 | T1_pp = pp.mat2SE3(T1_th)
109 | vel_right = pp.Log(pp.Inv(T0_pp) @ T1_pp)
110 | vel_left = pp.Log(T1_pp @ pp.Inv(T0_pp))
111 | 
112 | k_steps = 50
113 | dt = 1 / k_steps
114 | pose_pp_right = T0_pp
115 | pose_pp_left = T0_pp
116 | for k_step in range(k_steps):
117 |     pose_pp_right: pp.LieTensor = pose_pp_right @ pp.Exp(vel_right * dt)
118 |     pose_pp_left: pp.LieTensor = pp.Exp(vel_left * dt) @ pose_pp_left
119 |     rr.set_time_sequence("k_step", k_step)
120 |     rr_add_axis("vis/pose_se3_right", pose_pp_right.matrix().squeeze().numpy())
121 |     rr_add_axis("vis/pose_se3_left", pose_pp_left.matrix().squeeze().numpy())
122 | 
123 | # SO3 x R3
124 | t0 = T0_th[0, :3, 3]
125 | t1 = T1_th[0, :3, 3]
126 | R0_pp = pp.mat2SO3(T0_th[:, :3, :3])
127 | R1_pp = pp.euler2SO3(torch.tensor([0, 0, np.pi / 2 - 0.001]))
128 | xyz_vel = t1 - t0
129 | R_vel_right = pp.Log(pp.Inv(R0_pp) @ R1_pp)
130 | R_vel_left = pp.Log(R1_pp @ pp.Inv(R0_pp))
131 | 
132 | xyz = t0
133 | R_pp_right = R0_pp
134 | R_pp_left = R0_pp
135 | for k_step in range(k_steps):
136 |     xyz = xyz + xyz_vel * dt
137 |     R_pp_right = R_pp_right @ pp.Exp(R_vel_right * dt)
138 |     R_pp_left = pp.Exp(R_vel_left * dt) @ R_pp_left
139 |     rr.set_time_sequence("k_step", k_step)
140 |     pose_right = torch.eye(4)
141 |     pose_right[:3, :3] = R_pp_right.matrix().squeeze()
142 |     pose_right[:3, 3] = xyz
143 |     pose_left = torch.eye(4)
144 |     pose_left[:3, :3] = R_pp_left.matrix().squeeze()
145 |     pose_left[:3, 3] = xyz
146 |     rr_add_axis("vis/pose_so3", pose_right.numpy())
147 |     rr_add_axis("vis/pose_so3_left", pose_left.numpy())
148 | 
149 | print(f"{R_vel_right=}")
150 | print(f"{R_vel_left=}")
151 | print("done")
152 | 
-------------------------------------------------------------------------------- /sandbox/state_5p.py: --------------------------------------------------------------------------------
1 | import torch
2 | import pypose as pp
3 | from pfp.common.visualization import RerunViewer as RV
4 | from pfp.common.se3_utils import pfp_to_state5p_th, state5p_to_pfp_th, pfp_to_pose_th
5 | 
6 | B = 2
7 | T = 2
8 | grippers = torch.ones(B, T, 1)
9 | poses = pp.randn_SE3(B, T).matrix()
10 | state_pfp = torch.cat([poses[..., :3, 3], poses[..., :3, 0], poses[..., :3, 1], grippers], dim=-1)
11 | state_5p = pfp_to_state5p_th(state_pfp)
12 | state_5p_pcds = state_5p[:, :, :15].reshape(B, T, 5, 3)
13 | 
14 | noise = torch.randn_like(state_5p) * 0.01
15 | noise[..., 15] = 0
16 | noisy_state_5p = state_5p + noise
17 | noisy_5p_pcds = noisy_state_5p[:, :, :15].reshape(B, T, 5, 3)
18 | estimated_state_pfp = state5p_to_pfp_th(noisy_state_5p)
19 | estimated_poses, gripper = pfp_to_pose_th(estimated_state_pfp)
20 | 
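# Sanity-check sketch: the SVD-projected pfp state, mapped back to the 5-point
# representation, should approximately reproduce the noisy points it was fitted to.
recon_5p = pfp_to_state5p_th(estimated_state_pfp)
print("max round-trip 5p error:", (recon_5p - noisy_state_5p).abs().max().item())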
21 | RV("state_5p")
22 | 
23 | for i in range(B):
24 |     for j in range(T):
25 |         RV.add_axis(f"state_pfp_{i}_{j}", poses[i, j].numpy())
26 |         RV.add_axis(f"estimated_{i}_{j}", estimated_poses[i, j].numpy())
27 |         RV.add_np_pointcloud(f"state_5p_{i}_{j}", state_5p_pcds[i, j].numpy(), radii=0.005)
28 |         RV.add_np_pointcloud(f"noisy_5p_{i}_{j}", noisy_5p_pcds[i, j].numpy(), radii=0.005)
29 | 
30 | 
31 | print("done")
32 | 
-------------------------------------------------------------------------------- /sandbox/vis_random_traj.py: --------------------------------------------------------------------------------
1 | from pfp.common.se3_utils import init_random_traj_th
2 | from pfp.common.visualization import RerunViewer as RV
3 | 
4 | B = 2
5 | T = 10
6 | traj = init_random_traj_th(B, T, noise_scale=1.0)  # noise_scale is required; 1.0 chosen arbitrarily for visualization
7 | # Vis first traj
8 | RV("random_traj")
9 | RV.add_traj("random_traj", traj[0].cpu().numpy())
10 | 
11 | print("Done.")
12 | 
-------------------------------------------------------------------------------- /sandbox/vis_t_schedule.py: --------------------------------------------------------------------------------
1 | import torch
2 | import matplotlib.pyplot as plt
3 | 
4 | plt.figure()
5 | 
6 | k_steps = 20
7 | t = torch.linspace(0, 1, k_steps + 1)[:-1]
8 | 
9 | # Linear (uniform baseline, computed but not plotted)
10 | dt_lin = torch.ones(k_steps) / k_steps
11 | 
12 | # Cosine
13 | dt_cos = torch.cos(t * torch.pi) + 1
14 | dt_cos /= torch.sum(dt_cos)
15 | t0_cos = torch.cat((torch.zeros(1), torch.cumsum(dt_cos, dim=0)[:-1]))
16 | plt.scatter(t.numpy(), t0_cos.numpy(), label="cos")
17 | 
18 | for scaling in [1, 2, 4, 8]:
19 |     dt_exp = torch.exp(-t * scaling)
20 |     dt_exp /= torch.sum(dt_exp)
21 |     t0_exp = torch.cat((torch.zeros(1), torch.cumsum(dt_exp, dim=0)[:-1]))
22 |     plt.scatter(t.numpy(), t0_exp.numpy(), label=f"exp_{scaling}")
23 | 
24 | plt.legend()
25 | plt.show()
26 | 
27 | print("Done")
28 | 
-------------------------------------------------------------------------------- /sandbox/vis_urdf.py: --------------------------------------------------------------------------------
1 | import time
2 | import numpy as np
3 | import rerun as rr
4 | from pfp import REPO_DIRS
5 | from pfp.common.visualization import RerunViewer as RV
6 | from pfp.common.visualization import RerunURDF
7 | 
8 | panda_dir = REPO_DIRS.URDFS / "panda"
9 | meshes_root = panda_dir
10 | rerun_panda = RerunURDF("vis/panda", panda_dir / "panda.urdf", meshes_root)
11 | rerun_panda_gripper = RerunURDF("vis/panda_gripper", panda_dir / "panda_gripper.urdf", meshes_root)
12 | 
13 | print("Actuated Joints: ", [j.name for j in rerun_panda.urdf.actuated_joints])
14 | print("Current joint state: ", rerun_panda.urdf.cfg)
15 | 
16 | 
17 | RV("vis_urdf")  # RerunViewer requires a name for rr.init; "vis_urdf" chosen here
18 | RV.add_axis("vis/origin", np.eye(4), size=0.01, timeless=True)
19 | root_pose = np.eye(4)
20 | root_pose_gripper = np.eye(4)
21 | for i in range(10):
22 |     rr.set_time_seconds("timestep", time.time())
23 |     # Panda
24 |     joint_state = np.ones(7) * i / 10
25 |     joint_state = np.concatenate([joint_state, [0.04, 0.04]])
26 |     rerun_panda.update_vis(joint_state, root_pose)
27 | 
28 |     # Panda Gripper
29 |     joint_state_gripper = np.array([0.04, 0.04])
30 |     root_pose_gripper[:3, 3] = i / 10
31 |     rerun_panda_gripper.update_vis(joint_state_gripper, root_pose_gripper)
32 | 
33 | 
34 | print("done")
35 | 
-------------------------------------------------------------------------------- /scripts/collect_demos.py: --------------------------------------------------------------------------------
1 | import hydra
2 | import numpy as np
3 | from tqdm import tqdm
4 | from omegaconf import OmegaConf
5
from rlbench.backend.observation import Observation 6 | from pfp import DATA_DIRS, set_seeds 7 | from pfp.envs.rlbench_env import RLBenchEnv 8 | from pfp.data.replay_buffer import RobotReplayBuffer 9 | from pfp.common.visualization import RerunViewer as RV 10 | 11 | 12 | # For valid, call it with: --config-name=collect_demos_valid 13 | # To actually save the data, remember to call it with: save_data=True 14 | @hydra.main(version_base=None, config_path="../conf", config_name="collect_demos_train") 15 | def main(cfg: OmegaConf): 16 | set_seeds(cfg.seed) 17 | if not OmegaConf.has_resolver("eval"): 18 | OmegaConf.register_new_resolver("eval", eval) 19 | print(OmegaConf.to_yaml(cfg)) 20 | 21 | assert cfg.mode in ["train", "valid"] 22 | if cfg.env_config.vis: 23 | RV("pfp_collect_demos") 24 | env = RLBenchEnv(use_pc_color=True, **cfg.env_config) 25 | if cfg.save_data: 26 | data_path = DATA_DIRS.PFP / cfg.env_config.task_name / cfg.mode 27 | if data_path.is_dir(): 28 | print(f"ERROR: Data path {data_path} already exists! Exiting...") 29 | return 30 | replay_buffer = RobotReplayBuffer.create_from_path(data_path, mode="a") 31 | 32 | for _ in tqdm(range(cfg.num_episodes)): 33 | data_history = list() 34 | demo = env.task.get_demos(1, live_demos=True)[0] 35 | observations: list[Observation] = demo._observations 36 | for obs in observations: 37 | robot_state = env.get_robot_state(obs) 38 | images = env.get_images(obs) 39 | pcd = env.get_pcd(obs) 40 | pcd_xyz = np.asarray(pcd.points) 41 | pcd_color = np.asarray(pcd.colors) 42 | data_history.append( 43 | { 44 | "pcd_xyz": pcd_xyz.astype(np.float32), 45 | "pcd_color": (pcd_color * 255).astype(np.uint8), 46 | "robot_state": robot_state.astype(np.float32), 47 | "images": images, 48 | } 49 | ) 50 | env.vis_step(robot_state, np.concatenate((pcd_xyz, pcd_color), axis=-1)) 51 | 52 | if cfg.save_data: 53 | replay_buffer.add_episode_from_list(data_history, compressors="disk") 54 | print(f"Saved episode with {len(data_history)} steps to disk.") 55 | 56 | # while True: 57 | # env.step(robot_state) 58 | return 59 | 60 | 61 | if __name__ == "__main__": 62 | main() 63 | -------------------------------------------------------------------------------- /scripts/convert_data.py: -------------------------------------------------------------------------------- 1 | from tqdm import tqdm 2 | from pfp import DATA_DIRS 3 | from pfp.data.replay_buffer import RobotReplayBuffer 4 | 5 | 6 | task_name = "calibration" 7 | mode = "train" 8 | 9 | data_path = DATA_DIRS.PFP_REAL / task_name / mode 10 | new_data_path = DATA_DIRS.PFP_REAL / (task_name + "_new") / mode 11 | if new_data_path.exists(): 12 | raise FileExistsError(f"{new_data_path} already exists") 13 | 14 | replay_buffer = RobotReplayBuffer.create_from_path(data_path, mode="r") 15 | new_replay_buffer = RobotReplayBuffer.create_from_path(new_data_path, mode="w") 16 | 17 | for episode_idx in tqdm(range(replay_buffer.n_episodes)): 18 | episode = replay_buffer.get_episode(episode_idx) 19 | episode_len = len(episode["robot_state"]) 20 | data_list = [] 21 | for step_idx in range(episode_len): 22 | data_dict = {} 23 | for key in episode.keys(): 24 | if key.startswith("rgb"): 25 | data_dict[key] = episode[key][step_idx][::4, ::4, :] 26 | else: 27 | data_dict[key] = episode[key][step_idx] 28 | data_list.append(data_dict) 29 | new_replay_buffer.add_episode_from_list(data_list) 30 | print("debug") 31 | 32 | print("done") 33 | -------------------------------------------------------------------------------- /scripts/evaluate.py: 
-------------------------------------------------------------------------------- 1 | import hydra 2 | import wandb 3 | import subprocess 4 | from omegaconf import OmegaConf, open_dict 5 | from pfp import set_seeds, REPO_DIRS 6 | from pfp.envs.rlbench_runner import RLBenchRunner 7 | from pfp.policy.base_policy import BasePolicy 8 | from pfp.common.visualization import RerunViewer as RV 9 | 10 | 11 | @hydra.main(version_base=None, config_path="../conf", config_name="eval") 12 | def main(cfg: OmegaConf): 13 | if not OmegaConf.has_resolver("eval"): 14 | OmegaConf.register_new_resolver("eval", eval) 15 | OmegaConf.resolve(cfg) 16 | print(OmegaConf.to_yaml(cfg)) 17 | set_seeds(cfg.seed) 18 | 19 | # Download checkpoint if not present 20 | ckpt_path = REPO_DIRS.CKPT / cfg.policy.ckpt_name 21 | if not ckpt_path.exists(): 22 | subprocess.run( 23 | [ 24 | "rsync", 25 | "-hPrl", 26 | f"chisari@rlgpu2:{ckpt_path}", 27 | f"{REPO_DIRS.CKPT}/", 28 | ] 29 | ) 30 | 31 | with open_dict(cfg): 32 | train_cfg = OmegaConf.load(ckpt_path / "config.yaml") 33 | cfg.model = train_cfg.model 34 | cfg.env_runner.env_config.task_name = train_cfg.task_name 35 | cfg.env_runner.env_config.obs_mode = train_cfg.obs_mode 36 | cfg.env_runner.env_config.use_pc_color = train_cfg.dataset.use_pc_color 37 | cfg.env_runner.env_config.n_points = train_cfg.dataset.n_points 38 | cfg.policy._target_ = train_cfg.model._target_ + ".load_from_checkpoint" 39 | 40 | print(OmegaConf.to_yaml(cfg)) 41 | 42 | if cfg.env_runner.env_config.vis: 43 | RV("pfp_evaluate") 44 | wandb.init( 45 | project="pfp-eval-rebuttal", 46 | entity="rl-lab-chisari", 47 | config=OmegaConf.to_container(cfg), 48 | mode="online" if cfg.log_wandb else "disabled", 49 | ) 50 | policy: BasePolicy = hydra.utils.instantiate(cfg.policy) 51 | env_runner = RLBenchRunner(**cfg.env_runner) 52 | _ = env_runner.run(policy) 53 | wandb.finish() 54 | return 55 | 56 | 57 | if __name__ == "__main__": 58 | main() 59 | -------------------------------------------------------------------------------- /scripts/plots.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import seaborn as sns 3 | import matplotlib.pyplot as plt 4 | 5 | # Set larger font sizes 6 | plt.rcParams.update( 7 | { 8 | "font.size": 32, # General font size 9 | "axes.labelsize": 38, # Axes labels font size 10 | } 11 | ) 12 | 13 | # Benchmark data 14 | df_benchmark = pd.DataFrame(columns=["Baseline", "Success Rate", "std"]) 15 | df_benchmark.loc[len(df_benchmark)] = ["Dif. 
Policy", 18.7, 2.3] 16 | df_benchmark.loc[len(df_benchmark)] = ["AdaFlow", 19.0, 2.3] 17 | df_benchmark.loc[len(df_benchmark)] = ["3D-DP", 28.5, 2.2] 18 | df_benchmark.loc[len(df_benchmark)] = ["OL-ChDif", 34.6, 0] 19 | df_benchmark.loc[len(df_benchmark)] = ["PFM(ours)", 67.8, 4.1] 20 | 21 | # Plot and save benchmark data barplot 22 | plt.figure(figsize=(16, 8)) # Adjust the width and height as needed 23 | ax = sns.barplot(df_benchmark, x="Baseline", y="Success Rate", color="#344A9A", width=0.6) 24 | ax.errorbar( 25 | df_benchmark.index, 26 | df_benchmark["Success Rate"], 27 | yerr=df_benchmark["std"], 28 | fmt="none", 29 | c="black", 30 | capsize=10, 31 | capthick=5, 32 | elinewidth=5, 33 | ) 34 | ax.set(xlabel="", ylabel="Success Rate (↑)") 35 | plt.tight_layout() 36 | plt.savefig("benchmark_plot.png") 37 | plt.savefig("benchmark_plot.svg") 38 | plt.clf() # Clear the current figure 39 | 40 | # Ablation data 41 | df_ablation = pd.DataFrame(columns=["Baseline", "Success Rate", "std"]) 42 | df_ablation.loc[len(df_ablation)] = ["Img-CFM-R6", 40.1, 3.3] 43 | df_ablation.loc[len(df_ablation)] = ["Pcd-DDIM-R6", 68.0, 4.3] 44 | df_ablation.loc[len(df_ablation)] = ["Pcd-CFM-SO3", 67.4, 4.4] 45 | df_ablation.loc[len(df_ablation)] = ["Pcd-CFM-R6", 67.8, 4.1] 46 | 47 | # Plot and save success_rate barplot 48 | plt.figure(figsize=(16, 8)) # Adjust the width and height as needed 49 | ax = sns.barplot(df_ablation, x="Baseline", y="Success Rate", color="#344A9A", width=0.6) 50 | ax.errorbar( 51 | df_ablation.index, 52 | df_ablation["Success Rate"], 53 | yerr=df_ablation["std"], 54 | fmt="none", 55 | c="black", 56 | capsize=10, 57 | capthick=5, 58 | elinewidth=5, 59 | ) 60 | ax.set(xlabel="", ylabel="Success Rate (↑)") 61 | plt.tight_layout() 62 | plt.savefig("ablation_plot.png") 63 | plt.savefig("ablation_plot.svg") 64 | plt.clf() # Clear the current figure 65 | -------------------------------------------------------------------------------- /scripts/print_ablation.py: -------------------------------------------------------------------------------- 1 | import wandb 2 | import pandas as pd 3 | import matplotlib.pyplot as plt 4 | import seaborn as sns 5 | 6 | 7 | def exp_name_from_run(run_config: dict) -> str: 8 | model = run_config["model"]["_target_"] 9 | backbone = run_config["model"]["obs_encoder"]["_target_"] 10 | noise_type = run_config["model"]["noise_type"] if "noise_type" in run_config["model"] else None 11 | if model == "pfp.policy.fm_so3_policy.FMSO3Policy": 12 | exp_name = "pfp_so3" 13 | elif model == "pfp.policy.fm_policy.FMPolicy" and noise_type == "gaussian": 14 | exp_name = "pfp_euclid" 15 | elif model == "pfp.policy.fm_policy.FMPolicy" and noise_type == "igso3": 16 | exp_name = "pfp_euclid_igso3" 17 | elif ( 18 | model == "pfp.policy.ddim_policy.DDIMPolicy" 19 | and backbone == "pfp.backbones.pointnet.PointNetBackbone" 20 | ): 21 | exp_name = "pfp_ddim" 22 | elif model == "pfp.policy.fm_so3_policy.FMSO3PolicyImage": 23 | exp_name = "pfp_images" 24 | elif backbone == "pfp.backbones.mlp_3dp.MLP3DP": 25 | exp_name = "dp3" 26 | elif model == "pfp.policy.fm_policy.FMPolicyImage": 27 | exp_name = "adaflow" 28 | elif model == "pfp.policy.ddim_policy.DDIMPolicyImage": 29 | exp_name = "diffusion_policy" 30 | else: 31 | exp_name = "other" 32 | # raise ValueError(f"Unknown experiment name from model: {model} and backbone: {backbone}") 33 | 34 | # Tunings 35 | if run_config["model"].get("noise_type") == "biased": 36 | exp_name += "biased" 37 | if run_config["model"].get("snr_sampler") == 
"logit_normal": 38 | exp_name += "_logitnorm" 39 | return exp_name 40 | 41 | 42 | pd.set_option("display.precision", 2) 43 | api = wandb.Api() 44 | runs = api.runs("rl-lab-chisari/pfp-eval-rebuttal") 45 | 46 | data_list = [] 47 | for run in runs: 48 | if run.state in ["running", "failed", "crashed"]: 49 | continue 50 | exp_name = exp_name_from_run(run.config) 51 | if exp_name in ["other", "pfp_images", "dp3", "adaflow", "diffusion_policy"]: 52 | continue 53 | assert run.summary["episode"] == 99, "Not all runs have 100 episodes" 54 | data = { 55 | "task_name": run.config["env_runner"]["env_config"]["task_name"], 56 | "exp_name": exp_name, 57 | "k_steps": run.config["policy"]["num_k_infer"], 58 | "success_rate": run.summary["success"]["mean"] * 100, 59 | } 60 | data_list.append(data) 61 | 62 | 63 | rows = list( 64 | [ 65 | "pfp_ddim", 66 | "pfp_so3", 67 | ] 68 | ) 69 | columns = [ 70 | "unplug_charger", 71 | "close_door", 72 | "open_box", 73 | "open_fridge", 74 | "take_frame_off_hanger", 75 | "open_oven", 76 | "put_books_on_bookshelf", 77 | "take_shoes_out_of_box", 78 | ] 79 | data_frame = pd.DataFrame.from_records(data_list) 80 | comparison_frame = data_frame.groupby(["task_name", "exp_name", "k_steps"]) 81 | exp_count = comparison_frame.size().unstack(level=0) 82 | exp_count = exp_count.reindex(columns=columns) 83 | # print exp_count with yellow color for cells with other than 3 runs 84 | exp_count = exp_count.style.applymap(lambda x: "background-color: yellow" if x != 3 else "") 85 | # Add more space between rows and columns 86 | paddings = [ 87 | ("padding-right", "20px"), 88 | ("padding-left", "20px"), 89 | ("padding-bottom", "10px"), 90 | ("padding-top", "10px"), 91 | ] 92 | exp_count.set_table_styles( 93 | [ 94 | { 95 | "selector": "th, td", 96 | "props": paddings, 97 | } 98 | ] 99 | ) 100 | 101 | # Add horizontal line only after each k_step==16 102 | slice_ = pd.IndexSlice[pd.IndexSlice[:, 16], :] 103 | exp_count.set_properties(**{"border-bottom": "1px solid black"}, subset=slice_) 104 | 105 | # Set number precision 106 | exp_count.format("{:.0f}") 107 | exp_count.to_html("experiments/ablation_count.html") 108 | 109 | # Process exp_mean DataFrame 110 | exp_mean = comparison_frame.mean()["success_rate"].unstack(level=0) 111 | exp_mean = exp_mean.reindex(columns=columns) 112 | 113 | # add a column with the mean of all columns 114 | exp_mean["Mean"] = exp_mean.mean(axis=1) 115 | 116 | 117 | # Apply green color for cells with the highest value in each column 118 | def highlight_max(s): 119 | return ["background-color: lightgreen" if v == s.max() else "" for v in s] 120 | 121 | 122 | # exp_mean_styled = exp_mean.style.apply(highlight_max, axis=0) 123 | exp_mean_styled = exp_mean.style.apply(highlight_max, axis=0) 124 | 125 | # Add more space between rows and columns 126 | exp_mean_styled = exp_mean_styled.set_table_styles([{"selector": "th, td", "props": paddings}]) 127 | 128 | # Add horizontal line only after each K-steps==16 129 | slice_ = pd.IndexSlice[pd.IndexSlice[:, 16], :] 130 | exp_mean_styled.set_properties(**{"border-bottom": "1px solid black"}, subset=slice_) 131 | 132 | # Set number precision 133 | exp_mean_styled = exp_mean_styled.format("{:.1f}") 134 | 135 | # Save exp_mean to HTML 136 | exp_mean_styled.to_html("experiments/ablation_mean.html") 137 | 138 | 139 | # ####### Make line plot ########### 140 | ax = sns.relplot( 141 | data=data_frame[data_frame["exp_name"].isin(["pfp_euclid", "pfp_ddim"])], 142 | kind="line", 143 | x="k_steps", 144 | y="success_rate", 145 | 
hue="exp_name", 146 | hue_order=["pfp_euclid", "pfp_ddim"], 147 | errorbar=None, 148 | marker="o", 149 | markersize=8, 150 | legend=False, 151 | aspect=1.5, 152 | ) 153 | ax.set_xlabels("K Inference Steps") 154 | ax.set_ylabels("Success Rate") 155 | plt.xscale("log") 156 | plt.minorticks_off() 157 | plt.xticks([1, 2, 4, 8, 16], [1, 2, 4, 8, 16]) 158 | plt.legend( 159 | title="", 160 | labels=["CFM", "DDIM"], 161 | # bbox_to_anchor=(0.15, 0.8, 0.8, 0.8), 162 | # loc="lower right", 163 | # mode="expand", 164 | # borderaxespad=0.0, 165 | ) 166 | 167 | plt.savefig("experiments/ablation_plot.png") 168 | 169 | print("Done") 170 | -------------------------------------------------------------------------------- /scripts/print_experiments.py: -------------------------------------------------------------------------------- 1 | import wandb 2 | import pandas as pd 3 | 4 | # From their paper 5 | CHAINED_DIFF_RESULTS = [ 6 | {"task_name": "unplug_charger", "exp_name": "chain_dif", "success_rate": 65}, 7 | {"task_name": "close_door", "exp_name": "chain_dif", "success_rate": 21}, 8 | {"task_name": "open_box", "exp_name": "chain_dif", "success_rate": 46}, 9 | {"task_name": "open_fridge", "exp_name": "chain_dif", "success_rate": 37}, 10 | {"task_name": "take_frame_off_hanger", "exp_name": "chain_dif", "success_rate": 43}, 11 | {"task_name": "open_oven", "exp_name": "chain_dif", "success_rate": 16}, 12 | {"task_name": "put_books_on_bookshelf", "exp_name": "chain_dif", "success_rate": 40}, 13 | {"task_name": "take_shoes_out_of_box", "exp_name": "chain_dif", "success_rate": 9}, 14 | ] 15 | 16 | 17 | def exp_name_from_run(run_config: dict) -> str: 18 | model = run_config["model"]["_target_"] 19 | backbone = run_config["model"]["obs_encoder"]["_target_"] 20 | if model == "pfp.policy.fm_so3_policy.FMSO3Policy": 21 | if ( 22 | "noise_type" not in run_config["model"] 23 | or run_config["model"]["noise_type"] == "uniform" 24 | ): 25 | return "pfp_so3" 26 | else: 27 | return "pfp_so3_b" 28 | elif model == "pfp.policy.fm_policy.FMPolicy": 29 | return "pfp_euclid" 30 | elif ( 31 | model == "pfp.policy.ddim_policy.DDIMPolicy" 32 | and backbone == "pfp.backbones.pointnet.PointNetBackbone" 33 | ): 34 | return "pfp_ddim" 35 | elif model == "pfp.policy.fm_so3_policy.FMSO3PolicyImage": 36 | return "pfp_images" 37 | elif backbone == "pfp.backbones.mlp_3dp.MLP3DP": 38 | return "dp3" 39 | elif model == "pfp.policy.fm_policy.FMPolicyImage": 40 | return "adaflow" 41 | elif model == "pfp.policy.ddim_policy.DDIMPolicyImage": 42 | return "diffusion_policy" 43 | else: 44 | raise ValueError(f"Unknown experiment name from model: {model} and backbone: {backbone}") 45 | return 46 | 47 | 48 | pd.set_option("display.precision", 2) 49 | api = wandb.Api() 50 | runs = api.runs("rl-lab-chisari/pfp-eval-fixed") 51 | 52 | data_list = CHAINED_DIFF_RESULTS 53 | for run in runs: 54 | if run.state in ["running", "failed", "crashed"]: 55 | continue 56 | if run.config["policy"]["num_k_infer"] != 50: 57 | continue 58 | if ( 59 | "snr_sampler" in run.config["model"] 60 | and run.config["model"]["snr_sampler"] == "logit_normal" 61 | ): 62 | continue 63 | assert run.summary["episode"] == 99, "Not all runs have 100 episodes" 64 | data = { 65 | "task_name": run.config["env_runner"]["env_config"]["task_name"], 66 | "exp_name": exp_name_from_run(run.config), 67 | "success_rate": run.summary["success"]["mean"] * 100, 68 | } 69 | data_list.append(data) 70 | 71 | 72 | rows = list( 73 | [ 74 | "diffusion_policy", 75 | "adaflow", 76 | "dp3", 77 | "chain_dif", 78 
| "pfp_images", 79 | "pfp_ddim", 80 | "pfp_euclid", 81 | "pfp_so3", 82 | "pfp_so3_b", 83 | ] 84 | ) 85 | columns = [ 86 | "unplug_charger", 87 | "close_door", 88 | "open_box", 89 | "open_fridge", 90 | "take_frame_off_hanger", 91 | "open_oven", 92 | "put_books_on_bookshelf", 93 | "take_shoes_out_of_box", 94 | ] 95 | data_frame = pd.DataFrame.from_records(data_list) 96 | comparison_frame = data_frame.groupby(["task_name", "exp_name"]) 97 | exp_count = comparison_frame.size().unstack(level=0) 98 | exp_count = exp_count.reindex(index=rows, columns=columns) 99 | # print exp_count with yellow color for cells with other than 3 runs 100 | exp_count = exp_count.style.applymap(lambda x: "background-color: yellow" if x != 3 else "") 101 | # Add more space between rows and columns 102 | paddings = [ 103 | ("padding-right", "20px"), 104 | ("padding-left", "20px"), 105 | ("padding-bottom", "10px"), 106 | ("padding-top", "10px"), 107 | ] 108 | exp_count.set_table_styles( 109 | [ 110 | { 111 | "selector": "th, td", 112 | "props": paddings, 113 | } 114 | ] 115 | ) 116 | # Set number precision 117 | exp_count.format("{:.0f}") 118 | exp_count.to_html("experiments/exp_count.html") 119 | 120 | 121 | # Process exp_mean DataFrame 122 | exp_mean = comparison_frame.mean()["success_rate"].unstack(level=0) 123 | exp_mean = exp_mean.reindex(index=rows, columns=columns) 124 | 125 | # add a column with the mean of all columns 126 | exp_mean["Mean"] = exp_mean.mean(axis=1) 127 | 128 | 129 | # Apply green color for cells with the highest value in each column 130 | def highlight_max(s): 131 | return ["background-color: lightgreen" if v == s.max() else "" for v in s] 132 | 133 | 134 | # exp_mean_styled = exp_mean.style.apply(highlight_max, axis=0) 135 | exp_mean_styled = exp_mean.style.apply(highlight_max, axis=0) 136 | 137 | # Add more space between rows and columns 138 | exp_mean_styled = exp_mean_styled.set_table_styles([{"selector": "th, td", "props": paddings}]) 139 | 140 | # Set number precision 141 | exp_mean_styled = exp_mean_styled.format("{:.1f}") 142 | 143 | # Save exp_mean to HTML 144 | exp_mean_styled.to_html("experiments/exp_mean.html") 145 | 146 | print("Done") 147 | -------------------------------------------------------------------------------- /scripts/train.py: -------------------------------------------------------------------------------- 1 | import os 2 | import hydra 3 | import wandb 4 | import subprocess 5 | from omegaconf import OmegaConf 6 | from torch.utils.data import DataLoader 7 | from composer.trainer import Trainer 8 | from composer.loggers import WandBLogger 9 | from composer.callbacks import LRMonitor 10 | from composer.models import ComposerModel 11 | from composer.algorithms import EMA 12 | from diffusion_policy.model.common.lr_scheduler import get_scheduler 13 | from pfp import DEVICE, DATA_DIRS, set_seeds 14 | from pfp.data.dataset_pcd import RobotDatasetPcd 15 | from pfp.data.dataset_images import RobotDatasetImages 16 | 17 | 18 | @hydra.main(version_base=None, config_path="../conf", config_name="train") 19 | def main(cfg: OmegaConf): 20 | if not OmegaConf.has_resolver("eval"): 21 | OmegaConf.register_new_resolver("eval", eval) 22 | OmegaConf.resolve(cfg) 23 | print(OmegaConf.to_yaml(cfg)) 24 | set_seeds(cfg.seed) 25 | 26 | data_path_train = DATA_DIRS.PFP / cfg.task_name / "train" 27 | data_path_valid = DATA_DIRS.PFP / cfg.task_name / "valid" 28 | if cfg.obs_mode == "pcd": 29 | dataset_train = RobotDatasetPcd(data_path_train, **cfg.dataset) 30 | dataset_valid = 
RobotDatasetPcd(data_path_valid, **cfg.dataset) 31 | elif cfg.obs_mode == "rgb": 32 | dataset_train = RobotDatasetImages(data_path_train, **cfg.dataset) 33 | dataset_valid = RobotDatasetImages(data_path_valid, **cfg.dataset) 34 | else: 35 | raise ValueError(f"Unknown observation mode: {cfg.obs_mode}") 36 | dataloader_train = DataLoader( 37 | dataset_train, 38 | shuffle=True, 39 | **cfg.dataloader, 40 | persistent_workers=True if cfg.dataloader.num_workers > 0 else False, 41 | ) 42 | dataloader_valid = DataLoader( 43 | dataset_valid, 44 | shuffle=False, 45 | **cfg.dataloader, 46 | persistent_workers=True if cfg.dataloader.num_workers > 0 else False, 47 | ) 48 | 49 | composer_model: ComposerModel = hydra.utils.instantiate(cfg.model) 50 | optimizer = hydra.utils.instantiate(cfg.optimizer, composer_model.parameters()) 51 | lr_scheduler = get_scheduler( 52 | cfg.lr_scheduler.name, 53 | optimizer=optimizer, 54 | num_warmup_steps=cfg.lr_scheduler.num_warmup_steps, 55 | num_training_steps=(len(dataloader_train) * cfg.epochs), 56 | # pytorch assumes stepping LRScheduler every epoch 57 | # however huggingface diffusers steps it every batch 58 | ) 59 | 60 | wandb_logger = WandBLogger( 61 | project="pfp-train-fixed", 62 | entity="rl-lab-chisari", 63 | init_kwargs={ 64 | "config": OmegaConf.to_container(cfg), 65 | "mode": "online" if cfg.log_wandb else "disabled", 66 | }, 67 | ) 68 | 69 | trainer = Trainer( 70 | model=composer_model, 71 | train_dataloader=dataloader_train, 72 | eval_dataloader=dataloader_valid, 73 | max_duration=cfg.epochs, 74 | optimizers=optimizer, 75 | schedulers=lr_scheduler, 76 | step_schedulers_every_batch=True, 77 | device="gpu" if DEVICE.type == "cuda" else "cpu", 78 | loggers=[wandb_logger], 79 | callbacks=[LRMonitor()], 80 | save_folder="ckpt/{run_name}", 81 | save_interval=f"{cfg.save_each_n_epochs}ep", 82 | save_num_checkpoints_to_keep=3, 83 | algorithms=[EMA()] if cfg.use_ema else None, 84 | run_name=cfg.run_name, # set this to continue training from previous ckpt 85 | autoresume=True if cfg.run_name is not None else False, 86 | spin_dataloaders=False 87 | ) 88 | wandb.watch(composer_model) 89 | # Save the used cfg for inference 90 | OmegaConf.save(cfg, "ckpt/" + trainer.state.run_name + "/config.yaml") 91 | 92 | trainer.fit() 93 | run_name = trainer.state.run_name 94 | wandb.finish() 95 | trainer.close() 96 | 97 | _ = subprocess.Popen( 98 | [ 99 | "bash", 100 | "bash/start_eval.sh", 101 | f"{os.environ['CUDA_VISIBLE_DEVICES']}", 102 | f"{run_name}", 103 | ], 104 | start_new_session=True, 105 | ) 106 | return 107 | 108 | 109 | if __name__ == "__main__": 110 | main() 111 | -------------------------------------------------------------------------------- /scripts/train_real.py: -------------------------------------------------------------------------------- 1 | import hydra 2 | import wandb 3 | from omegaconf import OmegaConf 4 | from torch.utils.data import DataLoader 5 | from composer.trainer import Trainer 6 | from composer.loggers import WandBLogger 7 | from composer.callbacks import LRMonitor 8 | from composer.models import ComposerModel 9 | from composer.algorithms import EMA 10 | from diffusion_policy.model.common.lr_scheduler import get_scheduler 11 | from pfp import DEVICE, DATA_DIRS, set_seeds 12 | from pfp.data.dataset_pcd import RobotDatasetPcd 13 | from pfp.data.dataset_images import RobotDatasetImages 14 | 15 | 16 | @hydra.main(version_base=None, config_path="../conf", config_name="train") 17 | def main(cfg: OmegaConf): 18 | if not 
OmegaConf.has_resolver("eval"): 19 | OmegaConf.register_new_resolver("eval", eval) 20 | OmegaConf.resolve(cfg) 21 | print(OmegaConf.to_yaml(cfg)) 22 | set_seeds(cfg.seed) 23 | 24 | data_path_train = DATA_DIRS.PFP_REAL / cfg.task_name / "train" 25 | data_path_valid = DATA_DIRS.PFP_REAL / cfg.task_name / "valid" 26 | if cfg.obs_mode == "pcd": 27 | dataset_train = RobotDatasetPcd(data_path_train, **cfg.dataset) 28 | dataset_valid = RobotDatasetPcd(data_path_valid, **cfg.dataset) 29 | elif cfg.obs_mode == "rgb": 30 | dataset_train = RobotDatasetImages(data_path_train, **cfg.dataset) 31 | dataset_valid = RobotDatasetImages(data_path_valid, **cfg.dataset) 32 | else: 33 | raise ValueError(f"Unknown observation mode: {cfg.obs_mode}") 34 | dataloader_train = DataLoader( 35 | dataset_train, 36 | shuffle=True, 37 | **cfg.dataloader, 38 | persistent_workers=True if cfg.dataloader.num_workers > 0 else False, 39 | ) 40 | dataloader_valid = DataLoader( 41 | dataset_valid, 42 | shuffle=False, 43 | **cfg.dataloader, 44 | persistent_workers=True if cfg.dataloader.num_workers > 0 else False, 45 | ) 46 | 47 | composer_model: ComposerModel = hydra.utils.instantiate(cfg.model) 48 | optimizer = hydra.utils.instantiate(cfg.optimizer, composer_model.parameters()) 49 | lr_scheduler = get_scheduler( 50 | cfg.lr_scheduler.name, 51 | optimizer=optimizer, 52 | num_warmup_steps=cfg.lr_scheduler.num_warmup_steps, 53 | num_training_steps=(len(dataloader_train) * cfg.epochs), 54 | # pytorch assumes stepping LRScheduler every epoch 55 | # however huggingface diffusers steps it every batch 56 | ) 57 | 58 | wandb_logger = WandBLogger( 59 | project="pfp-real", 60 | entity="rl-lab-chisari", 61 | init_kwargs={ 62 | "config": OmegaConf.to_container(cfg), 63 | "mode": "online" if cfg.log_wandb else "disabled", 64 | }, 65 | ) 66 | 67 | trainer = Trainer( 68 | model=composer_model, 69 | train_dataloader=dataloader_train, 70 | eval_dataloader=dataloader_valid, 71 | max_duration=cfg.epochs, 72 | optimizers=optimizer, 73 | schedulers=lr_scheduler, 74 | step_schedulers_every_batch=True, 75 | device="gpu" if DEVICE.type == "cuda" else "cpu", 76 | loggers=[wandb_logger], 77 | callbacks=[LRMonitor()], 78 | save_folder="ckpt/{run_name}", 79 | save_interval=f"{cfg.save_each_n_epochs}ep", 80 | save_num_checkpoints_to_keep=3, 81 | algorithms=[EMA()] if cfg.use_ema else None, 82 | run_name=cfg.run_name, # set this to continue training from previous ckpt 83 | autoresume=True if cfg.run_name is not None else False, 84 | spin_dataloaders=False, 85 | ) 86 | wandb.watch(composer_model) 87 | # Save the used cfg for inference 88 | OmegaConf.save(cfg, "ckpt/" + trainer.state.run_name + "/config.yaml") 89 | 90 | trainer.fit() 91 | wandb.finish() 92 | trainer.close() 93 | return 94 | 95 | 96 | if __name__ == "__main__": 97 | main() 98 | -------------------------------------------------------------------------------- /scripts/trainer_eval.py: -------------------------------------------------------------------------------- 1 | import hydra 2 | import subprocess 3 | from omegaconf import OmegaConf 4 | from torch.utils.data import DataLoader 5 | from composer.trainer import Trainer 6 | from composer.loggers import WandBLogger 7 | from composer.models import ComposerModel 8 | from pfp import DEVICE, REPO_DIRS, DATA_DIRS, set_seeds 9 | from pfp.data.dataset_pcd import RobotDatasetPcd 10 | from pfp.data.dataset_images import RobotDatasetImages 11 | 12 | 13 | @hydra.main(version_base=None, config_path="../conf", config_name="trainer_eval") 14 | def main(cfg: 
OmegaConf): 15 | # Download checkpoint if not present 16 | ckpt_path = REPO_DIRS.CKPT / cfg.run_name 17 | if not ckpt_path.exists(): 18 | subprocess.run( 19 | [ 20 | "rsync", 21 | "-hPrl", 22 | f"chisari@rlgpu2:{ckpt_path}", 23 | f"{REPO_DIRS.CKPT}/", 24 | ] 25 | ) 26 | 27 | train_cfg = OmegaConf.load(ckpt_path / "config.yaml") 28 | cfg = OmegaConf.merge(train_cfg, cfg) 29 | print(OmegaConf.to_yaml(cfg)) 30 | set_seeds(cfg.seed) 31 | 32 | data_path_valid = DATA_DIRS.PFP / cfg.task_name / "valid" 33 | if cfg.obs_mode == "pcd": 34 | dataset_valid = RobotDatasetPcd(data_path_valid, **cfg.dataset) 35 | elif cfg.obs_mode == "rgb": 36 | dataset_valid = RobotDatasetImages(data_path_valid, **cfg.dataset) 37 | else: 38 | raise ValueError(f"Unknown observation mode: {cfg.obs_mode}") 39 | dataloader_valid = DataLoader( 40 | dataset_valid, 41 | shuffle=False, 42 | **cfg.dataloader, 43 | persistent_workers=True if cfg.dataloader.num_workers > 0 else False, 44 | ) 45 | composer_model: ComposerModel = hydra.utils.instantiate(cfg.model) 46 | wandb_logger = WandBLogger( 47 | project="pfp-trainer-eval", 48 | entity="rl-lab-chisari", 49 | init_kwargs={ 50 | "config": OmegaConf.to_container(cfg), 51 | "mode": "online" if cfg.log_wandb else "disabled", 52 | }, 53 | ) 54 | 55 | trainer = Trainer( 56 | model=composer_model, 57 | eval_dataloader=dataloader_valid, 58 | device="gpu" if DEVICE.type == "cuda" else "cpu", 59 | loggers=[wandb_logger], 60 | save_folder="ckpt/{run_name}", 61 | run_name=cfg.run_name, # set this to continue training from previous ckpt 62 | autoresume=True if cfg.run_name is not None else False, 63 | ) 64 | trainer.eval() 65 | return 66 | 67 | 68 | if __name__ == "__main__": 69 | main() 70 | -------------------------------------------------------------------------------- /scripts/vis_dataset.py: -------------------------------------------------------------------------------- 1 | import hydra 2 | from omegaconf import OmegaConf 3 | from pfp import DATA_DIRS, set_seeds 4 | from pfp.data.dataset_pcd import RobotDatasetPcd 5 | from pfp.data.dataset_images import RobotDatasetImages 6 | 7 | import rerun as rr 8 | from pfp.common.visualization import RerunViewer as RV 9 | from pfp.common.visualization import RerunTraj 10 | 11 | TASK_NAME = "sponge_on_plate" 12 | MODE = "valid" # "train" or "valid" 13 | 14 | 15 | @hydra.main(version_base=None, config_path="../conf", config_name="train") 16 | def main(cfg: OmegaConf): 17 | if not OmegaConf.has_resolver("eval"): 18 | OmegaConf.register_new_resolver("eval", eval) 19 | OmegaConf.resolve(cfg) 20 | print(OmegaConf.to_yaml(cfg)) 21 | set_seeds(cfg.seed) 22 | 23 | data_path_train = DATA_DIRS.PFP_REAL / TASK_NAME / MODE 24 | # data_path_valid = DATA_DIRS.PFP_REAL / TASK_NAME / MODE 25 | if cfg.obs_mode == "pcd": 26 | dataset_train = RobotDatasetPcd(data_path_train, **cfg.dataset) 27 | # dataset_valid = RobotDatasetPcd(data_path_valid, **cfg.dataset) 28 | elif cfg.obs_mode == "rgb": 29 | dataset_train = RobotDatasetImages(data_path_train, **cfg.dataset) 30 | # dataset_valid = RobotDatasetImages(data_path_valid, **cfg.dataset) 31 | else: 32 | raise ValueError(f"Unknown observation mode: {cfg.obs_mode}") 33 | 34 | # Visualize the dataset 35 | RV("Dataset visualization") 36 | obs_traj = RerunTraj() 37 | pred_traj = RerunTraj() 38 | for i in range(len(dataset_train)): 39 | # pcd: (2, 4096, 3) 40 | # robot_state_obs: (2, 10) 41 | # robot_state_pred: (32, 10) 42 | pcd, robot_state_obs, robot_state_pred = dataset_train[i] 43 | rr.set_time_sequence("timestep", i) 44 
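The LR-scheduler comment in `scripts/train.py` and `scripts/train_real.py` above is the reason for `step_schedulers_every_batch=True`: `get_scheduler` wraps diffusers-style schedulers, which count optimizer steps rather than epochs, so `num_training_steps` must be `len(dataloader_train) * cfg.epochs`. A minimal standalone sketch of the same per-batch stepping pattern (the tiny model and step counts here are made up for illustration):

```python
import torch
from diffusers.optimization import get_cosine_schedule_with_warmup

model = torch.nn.Linear(2, 2)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
steps_per_epoch, epochs = 10, 3  # hypothetical sizes
scheduler = get_cosine_schedule_with_warmup(
    optimizer,
    num_warmup_steps=5,
    num_training_steps=steps_per_epoch * epochs,  # total batches, not epochs
)
for epoch in range(epochs):
    for _ in range(steps_per_epoch):
        optimizer.step()  # stand-in for a real training step
        scheduler.step()  # stepped once per batch
    print(f"epoch {epoch}: lr = {scheduler.get_last_lr()[0]:.2e}")
```

Stepping such a scheduler once per epoch instead would stretch the warmup and cosine decay far past the intended training horizon, which is the pitfall the comment warns about.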
| RV.add_np_pointcloud("vis/pointcloud", pcd[-1]) 45 | obs_traj.add_traj("vis/robot_state_obs", robot_state_obs, size=0.008) 46 | pred_traj.add_traj("vis/robot_state_pred", robot_state_pred, size=0.004) 47 | rr.log("plot/gripper_pred", rr.Scalar(robot_state_pred[0, -1])) 48 | 49 | 50 | if __name__ == "__main__": 51 | main() 52 | -------------------------------------------------------------------------------- /toy_circle/model.py: -------------------------------------------------------------------------------- 1 | import torch 2 | import torch.nn as nn 3 | 4 | 5 | class MLP(nn.Module): 6 | def __init__( 7 | self, 8 | num_in, 9 | num_out, 10 | num_hid=500, 11 | ): 12 | super().__init__() 13 | self.num_in = num_in 14 | self.num_hid = num_hid 15 | self.num_out = num_out 16 | 17 | self.rdFrequency = torch.normal(0, 1, (1, 100)) 18 | 19 | self.net = nn.Sequential( 20 | nn.Linear(num_in + 100, num_hid), 21 | nn.SiLU(), 22 | nn.Linear(num_hid, num_hid), 23 | nn.SiLU(), 24 | nn.Linear(num_hid, num_hid), 25 | nn.SiLU(), 26 | nn.Linear(num_hid, num_hid), 27 | nn.SiLU(), 28 | nn.Linear(num_hid, num_out), 29 | ) 30 | 31 | def forward(self, noisy_y, timesteps): 32 | time_feature = torch.cos(torch.matmul(timesteps, self.rdFrequency.to(timesteps))) 33 | x_in = torch.cat([noisy_y, time_feature], dim=-1) 34 | out = self.net(x_in) 35 | return out 36 | -------------------------------------------------------------------------------- /toy_circle/train_fm_euclid.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import random 3 | import torch 4 | import numpy as np 5 | from tqdm import tqdm 6 | from model import MLP 7 | import matplotlib.pyplot as plt 8 | from diffusers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION 9 | 10 | DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") 11 | 12 | # Training hyperparams 13 | batch_size = 1000 14 | num_epochs = 2000 15 | num_warmup_steps = 10 16 | lr = 1e-3 17 | seed = 1234 18 | 19 | random.seed(seed) 20 | torch.manual_seed(seed) 21 | np.random.seed(seed) 22 | 23 | 24 | # Network 25 | net = MLP(num_in=2, num_out=2).to(DEVICE) 26 | 27 | # Optimizer 28 | optimizer = torch.optim.Adam(net.parameters(), lr=lr) 29 | 30 | # LR Scheduler 31 | schedule_func = TYPE_TO_SCHEDULER_FUNCTION[SchedulerType("cosine")] 32 | lr_scheduler = schedule_func( 33 | optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_epochs 34 | ) 35 | 36 | # Model params 37 | pos_emb_scale = 20 38 | k_steps = 50 39 | 40 | 41 | # Loss 42 | def calculate_loss(noise=None, B=None, reduction="mean"): 43 | if noise is None: 44 | z0 = torch.randn((B, 2), device=DEVICE) 45 | else: 46 | z0 = noise 47 | B = noise.shape[0] 48 | 49 | t = torch.rand((B, 1), device=DEVICE) 50 | z1 = torch.cat((torch.ones(B, 1), torch.zeros(B, 1)), dim=-1).to(DEVICE) 51 | target_vel = z1 - z0 52 | zt = z0 + target_vel * t 53 | timesteps = t * pos_emb_scale 54 | pred_vel = net(zt, timesteps) 55 | loss = torch.nn.functional.mse_loss(pred_vel, target_vel, reduction=reduction) 56 | return loss 57 | 58 | 59 | # Inference 60 | def infer_y(noise=None, B=None): 61 | if noise is None: 62 | z = torch.randn((B, 2), device=DEVICE) 63 | else: 64 | z = noise 65 | B = noise.shape[0] 66 | t = torch.linspace(0, 1, k_steps + 1)[:-1] 67 | dt = torch.ones(k_steps) / k_steps 68 | for k in range(k_steps): 69 | timesteps = torch.ones((B, 1), device=DEVICE) * t[k] 70 | timesteps *= pos_emb_scale 71 | pred_vel = net(z, timesteps) 72 | z = z + pred_vel * dt[k] 73 | 74 
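The loss above (`calculate_loss` in `toy_circle/train_fm_euclid.py`) is the standard conditional flow matching objective on a straight-line path: z0 is Gaussian noise, z1 = (1, 0) is the fixed target, and the network regresses the constant velocity of the interpolant. For reference:

```latex
z_t = z_0 + t\,(z_1 - z_0), \qquad t \sim \mathcal{U}(0, 1),
\qquad
\mathcal{L}(\theta) =
\mathbb{E}_{t,\; z_0 \sim \mathcal{N}(0, I)}
\bigl\| v_\theta(z_t, t) - (z_1 - z_0) \bigr\|_2^2 .
```

At inference, `infer_y` recovers an approximation of z1 by Euler-integrating the learned velocity field from t = 0 to t = 1, as in the loop just above.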
| # Project output to unit circle 75 | z = z / torch.norm(z, dim=-1, keepdim=True) 76 | return z 77 | 78 | 79 | # Training loop 80 | losses = list() 81 | for epoch in tqdm(range(num_epochs), desc="Epoch"): 82 | loss = calculate_loss(B=batch_size) 83 | loss.backward() 84 | optimizer.step() 85 | optimizer.zero_grad() 86 | lr_scheduler.step() 87 | losses.append(loss.item()) 88 | 89 | # Plot loss curve 90 | plt.figure() 91 | plt.plot(losses) 92 | plt.title("Training Loss") 93 | # plt.show() 94 | 95 | 96 | # Evaluation 97 | B = 10000 98 | start_noise = torch.randn((B, 2), device=DEVICE) 99 | losses = calculate_loss(noise=start_noise, reduction="none").detach() 100 | print(f"Loss on random noise: {losses.mean().item()}") 101 | with torch.no_grad(): 102 | z = infer_y(start_noise) 103 | angle_errors = torch.atan2(z[:, 1], z[:, 0]) * 180 / torch.pi 104 | angle_abs_errors = torch.abs(angle_errors) 105 | 106 | print(f"{angle_abs_errors.mean() = }") 107 | 108 | start_noisy_angle_np = torch.atan2(start_noise[:, 1], start_noise[:, 0]).cpu().numpy() 109 | angle_err_np = angle_abs_errors.cpu().numpy() 110 | plt.scatter(start_noisy_angle_np, angle_err_np) 111 | # plt.show() 112 | 113 | saving_path = pathlib.Path("toy_circle") / "results" / "euclid" 114 | saving_path.mkdir(exist_ok=True, parents=True) 115 | 116 | np.save(saving_path / "start_noise.npy", start_noise.cpu().numpy()) 117 | np.save(saving_path / "losses.npy", losses.cpu().numpy()) 118 | np.save(saving_path / "angle_abs_errors.npy", angle_abs_errors.cpu().numpy()) 119 | -------------------------------------------------------------------------------- /toy_circle/train_fm_euclid_biased.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import random 3 | import torch 4 | import numpy as np 5 | from tqdm import tqdm 6 | from model import MLP 7 | import matplotlib.pyplot as plt 8 | from diffusers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION 9 | 10 | DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") 11 | 12 | # Training hyperparams 13 | batch_size = 1000 14 | num_epochs = 2000 15 | num_warmup_steps = 10 16 | lr = 1e-3 17 | seed = 1234 18 | 19 | random.seed(seed) 20 | torch.manual_seed(seed) 21 | np.random.seed(seed) 22 | 23 | 24 | # Network 25 | net = MLP(num_in=2, num_out=2).to(DEVICE) 26 | 27 | # Optimizer 28 | optimizer = torch.optim.Adam(net.parameters(), lr=lr) 29 | 30 | # LR Scheduler 31 | schedule_func = TYPE_TO_SCHEDULER_FUNCTION[SchedulerType("cosine")] 32 | lr_scheduler = schedule_func( 33 | optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_epochs 34 | ) 35 | 36 | # Model params 37 | pos_emb_scale = 20 38 | k_steps = 50 39 | 40 | 41 | def sample_SO2(B): 42 | # Sample randomly between -pi, pi 43 | z0_angle = (torch.rand((B, 1), device=DEVICE) - 0.5) * 2.0 * torch.pi 44 | # Transform to SO(2) 45 | z0 = exp(z0_angle) 46 | return z0 47 | 48 | 49 | def exp(theta): 50 | if theta.ndim == 1: 51 | theta = theta.unsqueeze(-1) 52 | R = torch.stack( 53 | ( 54 | torch.cat((torch.cos(theta), -torch.sin(theta)), axis=-1), 55 | torch.cat((torch.sin(theta), torch.cos(theta)), axis=-1), 56 | ), 57 | axis=-2, # Stack along second dimension (not last)! Very important! 
58 | ) 59 | return R 60 | 61 | 62 | def log(R: torch.tensor): 63 | assert R.shape[-2:] == (2, 2) 64 | return torch.arctan2(R[..., 1, 0], R[..., 0, 0]).unsqueeze( 65 | -1 66 | ) # Keep single last dimensions again 67 | 68 | 69 | # Loss 70 | def calculate_loss(noise=None, B=None, reduction="mean"): 71 | if noise is None: 72 | z0 = sample_SO2(B)[:, :, 0] 73 | else: 74 | z0 = noise 75 | B = noise.shape[0] 76 | 77 | t = torch.rand((B, 1), device=DEVICE) 78 | z1 = torch.cat((torch.ones(B, 1), torch.zeros(B, 1)), dim=-1).to(DEVICE) 79 | # z1 = torch.cat((torch.zeros(B, 1), torch.ones(B, 1)), dim=-1).to(DEVICE) 80 | target_vel = z1 - z0 81 | zt = z0 + target_vel * t 82 | timesteps = t * pos_emb_scale 83 | pred_vel = net(zt, timesteps) 84 | loss = torch.nn.functional.mse_loss(pred_vel, target_vel, reduction=reduction) 85 | return loss 86 | 87 | 88 | # Inference 89 | def infer_y(noise=None, B=None): 90 | if noise is None: 91 | z = sample_SO2(B)[:, :, 0] 92 | else: 93 | z = noise 94 | B = noise.shape[0] 95 | t = torch.linspace(0, 1, k_steps + 1)[:-1] 96 | dt = torch.ones(k_steps) / k_steps 97 | for k in range(k_steps): 98 | timesteps = torch.ones((B, 1), device=DEVICE) * t[k] 99 | timesteps *= pos_emb_scale 100 | pred_vel = net(z, timesteps) 101 | z = z + pred_vel * dt[k] 102 | 103 | # Project output to unit circle 104 | z = z / torch.norm(z, dim=-1, keepdim=True) 105 | return z 106 | 107 | 108 | # Training loop 109 | losses = list() 110 | for epoch in tqdm(range(num_epochs), desc="Epoch"): 111 | loss = calculate_loss(B=batch_size) 112 | loss.backward() 113 | optimizer.step() 114 | optimizer.zero_grad() 115 | lr_scheduler.step() 116 | losses.append(loss.item()) 117 | 118 | # Plot loss curve 119 | plt.figure() 120 | plt.plot(losses) 121 | plt.title("Training Loss") 122 | # plt.show() 123 | 124 | 125 | # Evaluation 126 | B = 10000 127 | start_noise = sample_SO2(B)[:, :, 0] 128 | losses = calculate_loss(noise=start_noise, reduction="none").detach() 129 | print(f"Loss on random noise: {losses.mean().item()}") 130 | with torch.no_grad(): 131 | z = infer_y(start_noise) 132 | angle_errors = torch.atan2(z[:, 1], z[:, 0]) * 180 / torch.pi 133 | # angle_errors = 90 - torch.atan2(z[:, 1], z[:, 0]) * 180 / torch.pi 134 | angle_abs_errors = torch.abs(angle_errors) 135 | 136 | print(f"{angle_abs_errors.mean() = }") 137 | 138 | start_noisy_angle_np = torch.atan2(start_noise[:, 1], start_noise[:, 0]).cpu().numpy() 139 | angle_err_np = angle_abs_errors.cpu().numpy() 140 | plt.scatter(start_noisy_angle_np, angle_err_np) 141 | # plt.show() 142 | 143 | saving_path = pathlib.Path("toy_circle") / "results" / "euclid_biased" 144 | saving_path.mkdir(exist_ok=True, parents=True) 145 | 146 | np.save(saving_path / "start_noise.npy", start_noise.cpu().numpy()) 147 | np.save(saving_path / "losses.npy", losses.cpu().numpy()) 148 | np.save(saving_path / "angle_abs_errors.npy", angle_abs_errors.cpu().numpy()) 149 | -------------------------------------------------------------------------------- /toy_circle/train_fm_euclid_biased_inference.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import random 3 | import torch 4 | import numpy as np 5 | from tqdm import tqdm 6 | from model import MLP 7 | import matplotlib.pyplot as plt 8 | from diffusers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION 9 | 10 | DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") 11 | 12 | # Training hyperparams 13 | batch_size = 1000 14 | num_epochs = 2000 15 | num_warmup_steps 
= 10 16 | lr = 1e-3 17 | seed = 1234 18 | 19 | random.seed(seed) 20 | torch.manual_seed(seed) 21 | np.random.seed(seed) 22 | 23 | 24 | # Network 25 | net = MLP(num_in=2, num_out=2).to(DEVICE) 26 | 27 | # Optimizer 28 | optimizer = torch.optim.Adam(net.parameters(), lr=lr) 29 | 30 | # LR Scheduler 31 | schedule_func = TYPE_TO_SCHEDULER_FUNCTION[SchedulerType("cosine")] 32 | lr_scheduler = schedule_func( 33 | optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_epochs 34 | ) 35 | 36 | # Model params 37 | pos_emb_scale = 20 38 | k_steps = 50 39 | 40 | 41 | def sample_SO2(B): 42 | # Sample randomly between -pi, pi 43 | z0_angle = (torch.rand((B, 1), device=DEVICE) - 0.5) * 2.0 * torch.pi 44 | # Transform to SO(2) 45 | z0 = exp(z0_angle) 46 | return z0 47 | 48 | 49 | def exp(theta): 50 | if theta.ndim == 1: 51 | theta = theta.unsqueeze(-1) 52 | R = torch.stack( 53 | ( 54 | torch.cat((torch.cos(theta), -torch.sin(theta)), axis=-1), 55 | torch.cat((torch.sin(theta), torch.cos(theta)), axis=-1), 56 | ), 57 | axis=-2, # Stack along second dimension (not last)! Very important! 58 | ) 59 | return R 60 | 61 | 62 | def log(R: torch.tensor): 63 | assert R.shape[-2:] == (2, 2) 64 | return torch.arctan2(R[..., 1, 0], R[..., 0, 0]).unsqueeze( 65 | -1 66 | ) # Keep single last dimensions again 67 | 68 | 69 | # Loss 70 | def calculate_loss(noise=None, B=None, reduction="mean"): 71 | if noise is None: 72 | z0 = torch.randn((B, 2), device=DEVICE) 73 | else: 74 | z0 = noise 75 | B = noise.shape[0] 76 | 77 | t = torch.rand((B, 1), device=DEVICE) 78 | z1 = torch.cat((torch.ones(B, 1), torch.zeros(B, 1)), dim=-1).to(DEVICE) 79 | # z1 = torch.cat((torch.zeros(B, 1), torch.ones(B, 1)), dim=-1).to(DEVICE) 80 | target_vel = z1 - z0 81 | zt = z0 + target_vel * t 82 | timesteps = t * pos_emb_scale 83 | pred_vel = net(zt, timesteps) 84 | loss = torch.nn.functional.mse_loss(pred_vel, target_vel, reduction=reduction) 85 | return loss 86 | 87 | 88 | # Inference 89 | def infer_y(noise=None, B=None): 90 | if noise is None: 91 | z = torch.randn((B, 2), device=DEVICE) 92 | else: 93 | z = noise 94 | B = noise.shape[0] 95 | t = torch.linspace(0, 1, k_steps + 1)[:-1] 96 | dt = torch.ones(k_steps) / k_steps 97 | for k in range(k_steps): 98 | timesteps = torch.ones((B, 1), device=DEVICE) * t[k] 99 | timesteps *= pos_emb_scale 100 | pred_vel = net(z, timesteps) 101 | z = z + pred_vel * dt[k] 102 | 103 | # Project output to unit circle 104 | z = z / torch.norm(z, dim=-1, keepdim=True) 105 | return z 106 | 107 | 108 | # Training loop 109 | losses = list() 110 | for epoch in tqdm(range(num_epochs), desc="Epoch"): 111 | loss = calculate_loss(B=batch_size) 112 | loss.backward() 113 | optimizer.step() 114 | optimizer.zero_grad() 115 | lr_scheduler.step() 116 | losses.append(loss.item()) 117 | 118 | # Plot loss curve 119 | plt.figure() 120 | plt.plot(losses) 121 | plt.title("Training Loss") 122 | # plt.show() 123 | 124 | 125 | # Evaluation 126 | B = 10000 127 | start_noise = sample_SO2(B)[:, :, 0] 128 | losses = calculate_loss(noise=start_noise, reduction="none").detach() 129 | print(f"Loss on random noise: {losses.mean().item()}") 130 | with torch.no_grad(): 131 | z = infer_y(start_noise) 132 | angle_errors = torch.atan2(z[:, 1], z[:, 0]) * 180 / torch.pi 133 | # angle_errors = 90 - torch.atan2(z[:, 1], z[:, 0]) * 180 / torch.pi 134 | angle_abs_errors = torch.abs(angle_errors) 135 | 136 | print(f"{angle_abs_errors.mean() = }") 137 | 138 | start_noisy_angle_np = torch.atan2(start_noise[:, 1], start_noise[:, 0]).cpu().numpy() 139 | 
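Note the deliberate train/test mismatch in `train_fm_euclid_biased_inference.py` above: `calculate_loss` trains with Gaussian noise `torch.randn((B, 2))`, while the evaluation feeds `infer_y` start noise drawn uniformly from the unit circle via `sample_SO2`, probing how the learned field behaves away from its training prior. The integration itself is plain fixed-step Euler; below is a self-contained sketch of the same scheme (`euler_integrate` and the closed-form `vel_fn` are illustrative stand-ins, not code from this repo):

```python
import torch

def euler_integrate(vel_fn, z0: torch.Tensor, k_steps: int) -> torch.Tensor:
    """Integrate dz/dt = vel_fn(z, t) from t=0 to t=1 in k uniform steps."""
    z, dt = z0.clone(), 1.0 / k_steps
    for k in range(k_steps):
        t = torch.full((z.shape[0], 1), k * dt)
        z = z + vel_fn(z, t) * dt
    return z

# With the exact velocity field of the straight-line path to z1 = (1, 0),
# Euler lands on z1 from any start point; the learned field only approximates
# this, which is why the scripts project the output back onto the circle.
z1 = torch.tensor([[1.0, 0.0]])
vel_fn = lambda z, t: (z1 - z) / (1.0 - t)
print(euler_integrate(vel_fn, torch.randn(4, 2), k_steps=8))
```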
angle_err_np = angle_abs_errors.cpu().numpy() 140 | plt.scatter(start_noisy_angle_np, angle_err_np) 141 | # plt.show() 142 | 143 | saving_path = pathlib.Path("toy_circle") / "results" / "euclid_biased_inference" 144 | saving_path.mkdir(exist_ok=True, parents=True) 145 | 146 | np.save(saving_path / "start_noise.npy", start_noise.cpu().numpy()) 147 | np.save(saving_path / "losses.npy", losses.cpu().numpy()) 148 | np.save(saving_path / "angle_abs_errors.npy", angle_abs_errors.cpu().numpy()) 149 | -------------------------------------------------------------------------------- /toy_circle/train_fm_so3.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 | import random 3 | import torch 4 | import numpy as np 5 | from tqdm import tqdm 6 | from model import MLP 7 | import matplotlib.pyplot as plt 8 | from diffusers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION 9 | 10 | DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") 11 | 12 | # Training hyperparams 13 | batch_size = 1000 14 | num_epochs = 2000 15 | num_warmup_steps = 10 16 | lr = 1e-3 17 | seed = 1234 18 | 19 | random.seed(seed) 20 | torch.manual_seed(seed) 21 | np.random.seed(seed) 22 | 23 | # Network 24 | net = MLP(num_in=2, num_out=1).to(DEVICE) 25 | 26 | # Optimizer 27 | optimizer = torch.optim.Adam(net.parameters(), lr=lr) 28 | 29 | # LR Scheduler 30 | schedule_func = TYPE_TO_SCHEDULER_FUNCTION[SchedulerType("cosine")] 31 | lr_scheduler = schedule_func( 32 | optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_epochs 33 | ) 34 | 35 | # Model params 36 | pos_emb_scale = 20 37 | k_steps = 50 38 | 39 | 40 | def sample_SO2(B): 41 | # Sample randomly between -pi, pi 42 | z0_angle = (torch.rand((B, 1), device=DEVICE) - 0.5) * 2.0 * torch.pi 43 | # Transform to SO(2) 44 | z0 = exp(z0_angle) 45 | return z0 46 | 47 | 48 | def exp(theta): 49 | if theta.ndim == 1: 50 | theta = theta.unsqueeze(-1) 51 | R = torch.stack( 52 | ( 53 | torch.cat((torch.cos(theta), -torch.sin(theta)), axis=-1), 54 | torch.cat((torch.sin(theta), torch.cos(theta)), axis=-1), 55 | ), 56 | axis=-2, # Stack along second dimension (not last)! Very important! 
57 | ) 58 | return R 59 | 60 | 61 | def log(R: torch.tensor): 62 | assert R.shape[-2:] == (2, 2) 63 | return torch.arctan2(R[..., 1, 0], R[..., 0, 0]).unsqueeze( 64 | -1 65 | ) # Keep single last dimensions again 66 | 67 | 68 | def SO2_to_pfp(z_SO2): 69 | return z_SO2[:, :, 0] 70 | 71 | 72 | # Loss 73 | def calculate_loss(noise=None, B=None, reduction="mean"): 74 | if noise is None: 75 | # Euclidean variant 76 | # z0 = torch.randn((B, 2), device=DEVICE) 77 | 78 | # SO(2) variant 79 | z0 = sample_SO2(B) 80 | else: 81 | z0 = noise 82 | B = noise.shape[0] 83 | 84 | t = torch.rand((B, 1), device=DEVICE) 85 | z1 = torch.eye((2)).unsqueeze(0).repeat(B, 1, 1).to(DEVICE) 86 | 87 | # Euclidean variant 88 | # target_vel = z1 - z0 89 | # zt = z0 + target_vel * t 90 | 91 | # SO(2) variant 92 | # target_vel = log(torch.linalg.pinv(z0) @ z1) 93 | target_vel = log(torch.linalg.pinv(z0) @ z1) 94 | zt = z0 @ exp(target_vel * t) 95 | 96 | # Convert to pfp 97 | zt_pfp = SO2_to_pfp(zt) 98 | timesteps = t * pos_emb_scale 99 | 100 | pred_vel = net(zt_pfp, timesteps) 101 | loss = torch.nn.functional.mse_loss(pred_vel, target_vel, reduction=reduction) 102 | return loss 103 | 104 | 105 | # Inference 106 | def infer_y(noise=None, B=None): 107 | if noise is None: 108 | # z = torch.randn((B, 2), device=DEVICE) 109 | z = sample_SO2(B) 110 | else: 111 | z = noise 112 | B = noise.shape[0] 113 | t = torch.linspace(0, 1, k_steps + 1)[:-1] 114 | dt = torch.ones(k_steps) / k_steps 115 | for k in range(k_steps): 116 | timesteps = torch.ones((B, 1), device=DEVICE) * t[k] 117 | timesteps *= pos_emb_scale 118 | z_pfp = SO2_to_pfp(z) 119 | pred_vel = net(z_pfp, timesteps) 120 | 121 | # z = z + pred_vel * dt[k] 122 | z = z @ exp(pred_vel * dt[k]) 123 | 124 | # Project output to unit circle 125 | # z = z / torch.norm(z, dim=-1, keepdim=True) 126 | z = z[..., 0] 127 | return z 128 | 129 | 130 | # Training loop 131 | losses = list() 132 | for epoch in tqdm(range(num_epochs), desc="Epoch"): 133 | loss = calculate_loss(B=batch_size) 134 | loss.backward() 135 | optimizer.step() 136 | optimizer.zero_grad() 137 | lr_scheduler.step() 138 | losses.append(loss.item()) 139 | 140 | # Plot loss curve 141 | plt.figure() 142 | plt.plot(losses) 143 | plt.title("Training Loss") 144 | # plt.show() 145 | 146 | 147 | # Evaluation 148 | B = 10000 149 | # start_noise = torch.randn((B, 2), device=DEVICE) 150 | start_noise = sample_SO2(B) 151 | losses = calculate_loss(noise=start_noise, reduction="none").detach() 152 | print(f"Loss on random noise: {losses.mean().item()}") 153 | with torch.no_grad(): 154 | z = infer_y(start_noise) 155 | angle_errors = torch.atan2(z[:, 1], z[:, 0]) * 180 / torch.pi 156 | angle_abs_errors = torch.abs(angle_errors) 157 | 158 | print(f"{angle_abs_errors.mean() = }") 159 | 160 | start_noisy_angle_np = log(start_noise).cpu().numpy()[:, 0] 161 | angle_err_np = angle_abs_errors.cpu().numpy() 162 | plt.scatter(start_noisy_angle_np, angle_err_np) 163 | # plt.show() 164 | 165 | saving_path = pathlib.Path("toy_circle") / "results" / "so2" 166 | saving_path.mkdir(exist_ok=True, parents=True) 167 | 168 | 169 | np.save(saving_path / "start_noise.npy", start_noise.cpu().numpy()) 170 | np.save(saving_path / "losses.npy", losses.cpu().numpy()) 171 | np.save(saving_path / "angle_abs_errors.npy", angle_abs_errors.cpu().numpy()) 172 | -------------------------------------------------------------------------------- /toy_circle/train_fm_so3_forward.py: -------------------------------------------------------------------------------- 1 | import pathlib 2 
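The SO(2) script above replaces the Euclidean interpolant with the geodesic z_t = z0 @ exp(t * log(z0^-1 @ z1)), built from closed-form exponential and logarithm maps. A quick standalone check (not part of the training scripts) that the two maps invert each other and that the geodesic hits both endpoints:

```python
import torch

def exp(theta: torch.Tensor) -> torch.Tensor:
    # Angle -> 2x2 rotation matrix, same construction as in the scripts.
    c, s = torch.cos(theta), torch.sin(theta)
    return torch.stack((torch.cat((c, -s), -1), torch.cat((s, c), -1)), -2)

def log(R: torch.Tensor) -> torch.Tensor:
    # Rotation matrix -> angle in (-pi, pi].
    return torch.arctan2(R[..., 1, 0], R[..., 0, 0]).unsqueeze(-1)

theta = torch.tensor([[0.7]])
assert torch.allclose(log(exp(theta)), theta)  # log inverts exp

z0 = exp(theta)
z1 = torch.eye(2).unsqueeze(0)  # identity = target rotation
vel = log(torch.linalg.pinv(z0) @ z1)  # relative log-map velocity
for t in (0.0, 0.5, 1.0):
    zt = z0 @ exp(vel * t)  # geodesic: t=0 gives z0, t=1 gives z1
    print(t, log(zt).item())
```

Stacking the rows along dim -2 rather than -1 is exactly what the "Very important!" comment in `exp` guards against: stacking along the last axis would build the transpose, i.e. the rotation by -theta.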
| import random 3 | import torch 4 | import numpy as np 5 | from tqdm import tqdm 6 | from model import MLP 7 | import matplotlib.pyplot as plt 8 | from diffusers.optimization import SchedulerType, TYPE_TO_SCHEDULER_FUNCTION 9 | 10 | DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") 11 | 12 | # Training hyperparams 13 | batch_size = 1000 14 | num_epochs = 2000 15 | num_warmup_steps = 10 16 | lr = 1e-3 17 | seed = 1234 18 | 19 | random.seed(seed) 20 | torch.manual_seed(seed) 21 | np.random.seed(seed) 22 | 23 | # Network 24 | net = MLP(num_in=2, num_out=1).to(DEVICE) 25 | 26 | # Optimizer 27 | optimizer = torch.optim.Adam(net.parameters(), lr=lr) 28 | 29 | # LR Scheduler 30 | schedule_func = TYPE_TO_SCHEDULER_FUNCTION[SchedulerType("cosine")] 31 | lr_scheduler = schedule_func( 32 | optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_epochs 33 | ) 34 | 35 | # Model params 36 | pos_emb_scale = 20 37 | k_steps = 50 38 | 39 | 40 | def sample_SO2(B): 41 | # Sample randomly between -pi, pi 42 | z0_angle = (torch.rand((B, 1), device=DEVICE) - 0.5) * 2.0 * torch.pi 43 | # Transform to SO(2) 44 | z0 = exp(z0_angle) 45 | return z0 46 | 47 | 48 | def exp(theta): 49 | if theta.ndim == 1: 50 | theta = theta.unsqueeze(-1) 51 | R = torch.stack( 52 | ( 53 | torch.cat((torch.cos(theta), -torch.sin(theta)), axis=-1), 54 | torch.cat((torch.sin(theta), torch.cos(theta)), axis=-1), 55 | ), 56 | axis=-2, # Stack along second dimension (not last)! Very important! 57 | ) 58 | return R 59 | 60 | 61 | def log(R: torch.tensor): 62 | assert R.shape[-2:] == (2, 2) 63 | return torch.arctan2(R[..., 1, 0], R[..., 0, 0]).unsqueeze( 64 | -1 65 | ) # Keep single last dimensions again 66 | 67 | 68 | def SO2_to_pfp(z_SO2): 69 | return z_SO2[:, :, 0] 70 | 71 | 72 | # Loss 73 | def calculate_loss(noise=None, B=None, reduction="mean"): 74 | if noise is None: 75 | # Euclidean variant 76 | # z0 = torch.randn((B, 2), device=DEVICE) 77 | 78 | # SO(2) variant 79 | z0 = sample_SO2(B) 80 | else: 81 | z0 = noise 82 | B = noise.shape[0] 83 | 84 | t = torch.rand((B, 1), device=DEVICE) 85 | z1 = torch.eye((2)).unsqueeze(0).repeat(B, 1, 1).to(DEVICE) 86 | 87 | # Euclidean variant 88 | # target_vel = z1 - z0 89 | # zt = z0 + target_vel * t 90 | 91 | # SO(2) variant 92 | # target_vel = log(torch.linalg.pinv(z0) @ z1) 93 | target_vel = log(torch.linalg.pinv(z0) @ z1) 94 | zt = z0 @ exp(target_vel * t) 95 | 96 | # Convert to pfp 97 | zt_pfp = SO2_to_pfp(zt) 98 | timesteps = t * pos_emb_scale 99 | 100 | pred_vel = net(zt_pfp, timesteps) 101 | 102 | # Use the loss to make a forward prediction of the goal state 103 | z_pred = zt @ exp(pred_vel * (1 - t)) 104 | 105 | loss = torch.nn.functional.mse_loss(z_pred, z1, reduction=reduction) 106 | return loss 107 | 108 | 109 | # Inference 110 | def infer_y(noise=None, B=None): 111 | if noise is None: 112 | # z = torch.randn((B, 2), device=DEVICE) 113 | z = sample_SO2(B) 114 | else: 115 | z = noise 116 | B = noise.shape[0] 117 | t = torch.linspace(0, 1, k_steps + 1)[:-1] 118 | dt = torch.ones(k_steps) / k_steps 119 | for k in range(k_steps): 120 | timesteps = torch.ones((B, 1), device=DEVICE) * t[k] 121 | timesteps *= pos_emb_scale 122 | z_pfp = SO2_to_pfp(z) 123 | pred_vel = net(z_pfp, timesteps) 124 | 125 | # z = z + pred_vel * dt[k] 126 | z = z @ exp(pred_vel * dt[k]) 127 | 128 | # Project output to unit circle 129 | # z = z / torch.norm(z, dim=-1, keepdim=True) 130 | z = z[..., 0] 131 | return z 132 | 133 | 134 | # Training loop 135 | losses = list() 136 | for epoch 
in tqdm(range(num_epochs), desc="Epoch"): 137 | loss = calculate_loss(B=batch_size) 138 | loss.backward() 139 | optimizer.step() 140 | optimizer.zero_grad() 141 | lr_scheduler.step() 142 | losses.append(loss.item()) 143 | 144 | # Plot loss curve 145 | plt.figure() 146 | plt.plot(losses) 147 | plt.title("Training Loss") 148 | plt.show() 149 | 150 | 151 | # Evaluation 152 | B = 10000 153 | # start_noise = torch.randn((B, 2), device=DEVICE) 154 | start_noise = sample_SO2(B) 155 | losses = calculate_loss(noise=start_noise, reduction="none").detach() 156 | print(f"Loss on random noise: {losses.mean().item()}") 157 | with torch.no_grad(): 158 | z = infer_y(start_noise) 159 | angle_errors = torch.atan2(z[:, 1], z[:, 0]) * 180 / torch.pi 160 | angle_abs_errors = torch.abs(angle_errors) 161 | 162 | print(f"{angle_abs_errors.mean() = }") 163 | 164 | start_noisy_angle_np = log(start_noise).cpu().numpy()[:, 0] 165 | angle_err_np = angle_abs_errors.cpu().numpy() 166 | plt.scatter(start_noisy_angle_np, angle_err_np) 167 | plt.show() 168 | 169 | saving_path = pathlib.Path("toy_circle") / "results" / "so2_forward" 170 | saving_path.mkdir(exist_ok=True, parents=True) 171 | 172 | 173 | np.save(saving_path / "start_noise.npy", start_noise.cpu().numpy()) 174 | np.save(saving_path / "losses.npy", losses.cpu().numpy()) 175 | np.save(saving_path / "angle_abs_errors.npy", angle_abs_errors.cpu().numpy()) 176 | -------------------------------------------------------------------------------- /urdfs/panda/meshes/Pandagrippervisual_vis_1.dae: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Assimp 6 | Assimp Exporter 7 | 8 | 2023-09-26T11:24:21 9 | 2023-09-26T11:24:21 10 | 11 | Y_UP 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 0 0 0 4.5895327e-41 20 | 21 | 22 | 0.69999999 0.69999999 0.69999999 4.5893926e-41 23 | 24 | 25 | 0.69999999 0.69999999 0.69999999 4.5893926e-41 26 | 27 | 28 | 0.30000001 0.30000001 0.30000001 -3.0261091e+37 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | -0.015225413 -0.008862217 0.067985021 -0.014951847 0.0065139923 0.070143402 -0.014971126 0.0065906551 -0.066775225 0.015554337 0.0059708245 0.070138745 0.015279691 -0.0094053652 0.067980371 0.015534578 0.0060474956 -0.066779882 -0.014955218 0.0063072261 0.071005881 -0.014951847 0.0065139923 0.070143402 -0.01502769 0.0022076129 0.073646538 -0.014971126 0.0065906551 -0.066775225 -0.014976609 0.0062893643 -0.0679399 -0.014991173 0.0054780045 -0.06910231 -0.014991173 0.0054780045 -0.06910231 -0.01501062 0.004389246 -0.069903314 -0.015038414 0.0028450098 -0.070287593 -0.015038414 0.0028450098 -0.070287593 -0.015058048 0.0017412961 -0.070171244 -0.014991173 0.0054780045 -0.06910231 -0.015128525 -0.002226362 -0.06907092 -0.015206982 -0.0066383872 -0.067626782 -0.015243744 -0.0087752976 -0.064707041 -0.015226086 -0.007735318 -0.066792712 -0.015237085 -0.0083572883 -0.065908067 -0.015206982 -0.0066383872 -0.067626782 -0.015243744 -0.0087752976 -0.064707041 -0.015225413 -0.008862217 0.067985021 -0.014971126 0.0065906551 -0.066775225 -0.015217421 -0.0084610507 0.069227114 -0.015203948 -0.0076799039 0.070301943 -0.015182748 -0.0065361955 0.071064845 -0.015182748 -0.0065361955 0.071064845 -0.015225413 -0.008862217 0.067985021 -0.015217421 -0.0084610507 0.069227114 -0.01502769 0.0022076129 0.073646538 -0.015008223 0.003304862 0.073577315 -0.014955218 0.0063072261 0.071005881 -0.014989447 0.0043622726 0.073237956 -0.014972995 0.0052954629 0.072563291 -0.015008223 0.003304862 0.073577315 
-0.014962357 0.0058990368 0.071846716 -0.014955218 0.0063072261 0.071005881 -0.014972995 0.0052954629 0.072563291 -0.014971126 0.0065906551 -0.066775225 -0.014991173 0.0054780045 -0.06910231 -0.015058048 0.0017412961 -0.070171244 -0.015058048 0.0017412961 -0.070171244 -0.015128525 -0.002226362 -0.06907092 -0.014971126 0.0065906551 -0.066775225 -0.015237085 -0.0083572883 -0.065908067 -0.015243744 -0.0087752976 -0.064707041 -0.015206982 -0.0066383872 -0.067626782 -0.015182748 -0.0065361955 0.071064845 -0.015096646 -0.00171985 0.072616473 -0.015225413 -0.008862217 0.067985021 -0.015008223 0.003304862 0.073577315 -0.014972995 0.0052954629 0.072563291 -0.014955218 0.0063072261 0.071005881 -0.014951847 0.0065139923 0.070143402 -0.015225413 -0.008862217 0.067985021 -0.015096646 -0.00171985 0.072616473 -0.015128525 -0.002226362 -0.06907092 -0.015243744 -0.0087752976 -0.064707041 -0.014971126 0.0065906551 -0.066775225 -0.015096646 -0.00171985 0.072616473 -0.01502769 0.0022076129 0.073646538 -0.014951847 0.0065139923 0.070143402 0.0152685 -0.0089004459 -0.065912716 0.01527938 -0.0082784733 -0.066797368 0.015299083 -0.0071815532 -0.067631438 0.015299083 -0.0071815532 -0.067631438 0.0153773 -0.0027695238 -0.069075577 0.015260641 -0.0093184337 -0.06471169 0.015447777 0.0011981344 -0.070175894 0.015467411 0.0023018483 -0.070292249 0.015514293 0.0049348492 -0.069106959 0.015494725 0.0038460928 -0.069907971 0.015514293 0.0049348492 -0.069106959 0.015467411 0.0023018483 -0.070292249 0.015528976 0.0057462072 -0.067944556 0.015534578 0.0060474956 -0.066779882 0.015514293 0.0049348492 -0.069106959 0.015554337 0.0059708245 0.070138745 0.015550607 0.0057640648 0.071001232 0.015497481 0.0027617025 0.073572665 0.015543467 0.005355875 0.071842067 0.01553283 0.0047523011 0.072558641 0.015550607 0.0057640648 0.071001232 0.015516257 0.0038191131 0.073233299 0.015497481 0.0027617025 0.073572665 0.01553283 0.0047523011 0.072558641 0.015477895 0.0016644555 0.073641889 0.015554337 0.0059708245 0.070138745 0.015497481 0.0027617025 0.073572665 0.015320797 -0.0070793168 0.071060188 0.015301037 -0.0082230503 0.070297293 0.015286365 -0.0090041757 0.069222458 0.015286365 -0.0090041757 0.069222458 0.015279691 -0.0094053652 0.067980371 0.015320797 -0.0070793168 0.071060188 0.015260641 -0.0093184337 -0.06471169 0.0152685 -0.0089004459 -0.065912716 0.015299083 -0.0071815532 -0.067631438 0.0153773 -0.0027695238 -0.069075577 0.015447777 0.0011981344 -0.070175894 0.015534578 0.0060474956 -0.066779882 0.015514293 0.0049348492 -0.069106959 0.015534578 0.0060474956 -0.066779882 0.015447777 0.0011981344 -0.070175894 0.015550607 0.0057640648 0.071001232 0.01553283 0.0047523011 0.072558641 0.015497481 0.0027617025 0.073572665 0.015477895 0.0016644555 0.073641889 0.015406679 -0.0022629672 0.072611824 0.015554337 0.0059708245 0.070138745 0.015320797 -0.0070793168 0.071060188 0.015279691 -0.0094053652 0.067980371 0.015406679 -0.0022629672 0.072611824 0.015260641 -0.0093184337 -0.06471169 0.0153773 -0.0027695238 -0.069075577 0.015534578 0.0060474956 -0.066779882 0.015534578 0.0060474956 -0.066779882 0.015279691 -0.0094053652 0.067980371 0.015260641 -0.0093184337 -0.06471169 0.015406679 -0.0022629672 0.072611824 0.015279691 -0.0094053652 0.067980371 0.015554337 0.0059708245 0.070138745 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 59 |

0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125

60 |
61 |
62 |
63 |
64 | 65 | 66 | 67 | 68 | 69 | 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 |
85 | -------------------------------------------------------------------------------- /urdfs/panda/meshes/Pandagrippervisual_vis_3.dae: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Assimp 6 | Assimp Exporter 7 | 8 | 2023-09-26T11:24:21 9 | 2023-09-26T11:24:21 10 | 11 | Y_UP 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 0 0 0 4.5895327e-41 20 | 21 | 22 | 0.1 0.1 0.1 4.5893926e-41 23 | 24 | 25 | 0.1 0.1 0.1 4.5893926e-41 26 | 27 | 28 | 0.25 0.25 0.25 3.6295857e-33 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 0.0020971261 -0.0038553688 -0.094717212 0.00084001501 -0.0075557027 -0.10030085 0.00030093081 -0.001408558 -0.095176749 -0.00086881337 -0.042347867 -0.07934317 0.0014067062 -0.040855166 -0.079342619 -0.00079492736 -0.038198184 -0.079340748 -0.00011129861 -0.038271081 -0.086434975 -0.00079492736 -0.038198184 -0.079340748 0.0014067062 -0.040855166 -0.079342619 -0.00011129861 -0.038271081 -0.086434975 -0.0014769809 -0.03697795 -0.097350433 -0.00079492736 -0.038198184 -0.079340748 -0.0014769809 -0.03697795 -0.097350433 -0.00086881337 -0.042347867 -0.07934317 -0.00079492736 -0.038198184 -0.079340748 0.0014733821 -0.039077252 -0.091866009 -0.00011129861 -0.038271081 -0.086434975 0.0014067062 -0.040855166 -0.079342619 -0.00086881337 -0.042347867 -0.07934317 -0.00074101629 -0.041603442 -0.090126723 0.0014067062 -0.040855166 -0.079342619 0.0014733821 -0.039077252 -0.091866009 0.0011131961 -0.031484973 -0.10048833 0.0008732861 -0.035033219 -0.094674632 0.00020666326 -0.034345593 -0.099627875 -0.001969706 -0.033261728 -0.099569626 0.0011131961 -0.031484973 -0.10048833 0.0008732861 -0.035033219 -0.094674632 0.0011131961 -0.031484973 -0.10048833 0.00098431366 -0.02659148 -0.098730236 -0.00074101629 -0.041603442 -0.090126723 0.0014733821 -0.039077252 -0.091866009 0.0014067062 -0.040855166 -0.079342619 -0.001969706 -0.033261728 -0.099569626 0.0010162871 -0.026749723 -0.10181562 0.0011131961 -0.031484973 -0.10048833 -0.00074101629 -0.041603442 -0.090126723 0.00020666326 -0.034345593 -0.099627875 0.0014733821 -0.039077252 -0.091866009 -0.001656043 -0.026668344 -0.10180011 0.0010162871 -0.026749723 -0.10181562 -0.001969706 -0.033261728 -0.099569626 -0.0014769809 -0.03697795 -0.097350433 0.00020666326 -0.034345593 -0.099627875 -0.00074101629 -0.041603442 -0.090126723 0.0014733821 -0.039077252 -0.091866009 -0.0014289875 -0.035657637 -0.093550347 -0.00011129861 -0.038271081 -0.086434975 -0.0014289875 -0.035657637 -0.093550347 -0.0023468875 -0.027633857 -0.1003302 -0.001969706 -0.033261728 -0.099569626 0.0014733821 -0.039077252 -0.091866009 0.0008732861 -0.035033219 -0.094674632 -0.0014289875 -0.035657637 -0.093550347 0.0014733821 -0.039077252 -0.091866009 0.00020666326 -0.034345593 -0.099627875 0.0011131961 -0.031484973 -0.10048833 0.0019964639 -0.0091878716 -0.099362396 0.00094017369 -0.011453598 -0.098029263 0.00098431366 -0.02659148 -0.098730236 0.00094017369 -0.011453598 -0.098029263 -0.0014341684 -0.026532095 -0.098614372 0.00098431366 -0.02659148 -0.098730236 0.00094017369 -0.011453598 -0.098029263 -0.0020374169 -0.011030057 -0.099695586 -0.0014341684 -0.026532095 -0.098614372 -0.0020374169 -0.011030057 -0.099695586 -0.0023468875 -0.027633857 -0.1003302 -0.0014341684 -0.026532095 -0.098614372 0.0019964639 -0.0091878716 -0.099362396 0.00098431366 -0.02659148 -0.098730236 0.0011131961 -0.031484973 -0.10048833 0.00030093081 -0.001408558 -0.095176749 -0.0020374169 -0.011030057 -0.099695586 -0.001858262 -0.0042029181 
-0.094552882 0.00030093081 -0.001408558 -0.095176749 0.00084001501 -0.0075557027 -0.10030085 -0.0020374169 -0.011030057 -0.099695586 0.00084001501 -0.0075557027 -0.10030085 0.00019764823 -0.013784488 -0.10227312 -0.0020374169 -0.011030057 -0.099695586 0.00030093081 -0.001408558 -0.095176749 -0.001858262 -0.0042029181 -0.094552882 0.0020971261 -0.0038553688 -0.094717212 -0.0023468875 -0.027633857 -0.1003302 -0.001656043 -0.026668344 -0.10180011 -0.001969706 -0.033261728 -0.099569626 -0.001858262 -0.0042029181 -0.094552882 -0.0020374169 -0.011030057 -0.099695586 0.00094017369 -0.011453598 -0.098029263 0.00019764823 -0.013784488 -0.10227312 0.0010162871 -0.026749723 -0.10181562 -0.001656043 -0.026668344 -0.10180011 0.00019764823 -0.013784488 -0.10227312 0.0019964639 -0.0091878716 -0.099362396 0.0010162871 -0.026749723 -0.10181562 0.0019964639 -0.0091878716 -0.099362396 0.0011131961 -0.031484973 -0.10048833 0.0010162871 -0.026749723 -0.10181562 -0.0014289875 -0.035657637 -0.093550347 -0.001969706 -0.033261728 -0.099569626 -0.0014769809 -0.03697795 -0.097350433 -0.0020374169 -0.011030057 -0.099695586 -0.001656043 -0.026668344 -0.10180011 -0.0023468875 -0.027633857 -0.1003302 -0.0020374169 -0.011030057 -0.099695586 0.00019764823 -0.013784488 -0.10227312 -0.001656043 -0.026668344 -0.10180011 -0.0014769809 -0.03697795 -0.097350433 -0.002074668 -0.041560143 -0.08674597 -0.00086881337 -0.042347867 -0.07934317 -0.002074668 -0.041560143 -0.08674597 -0.0014769809 -0.03697795 -0.097350433 -0.00074101629 -0.041603442 -0.090126723 0.0020971261 -0.0038553688 -0.094717212 0.0019964639 -0.0091878716 -0.099362396 0.00084001501 -0.0075557027 -0.10030085 0.0020971261 -0.0038553688 -0.094717212 0.00094017369 -0.011453598 -0.098029263 0.0019964639 -0.0091878716 -0.099362396 0.00084001501 -0.0075557027 -0.10030085 0.0019964639 -0.0091878716 -0.099362396 0.00019764823 -0.013784488 -0.10227312 -0.001858262 -0.0042029181 -0.094552882 0.00094017369 -0.011453598 -0.098029263 0.0020971261 -0.0038553688 -0.094717212 -0.00011129861 -0.038271081 -0.086434975 -0.0014289875 -0.035657637 -0.093550347 -0.0014769809 -0.03697795 -0.097350433 0.0008732861 -0.035033219 -0.094674632 -0.0023468875 -0.027633857 -0.1003302 -0.0014289875 -0.035657637 -0.093550347 -0.0014341684 -0.026532095 -0.098614372 0.0008732861 -0.035033219 -0.094674632 0.00098431366 -0.02659148 -0.098730236 0.0008732861 -0.035033219 -0.094674632 -0.0014341684 -0.026532095 -0.098614372 -0.0023468875 -0.027633857 -0.1003302 -0.0014769809 -0.03697795 -0.097350433 -0.001969706 -0.033261728 -0.099569626 0.00020666326 -0.034345593 -0.099627875 -0.002074668 -0.041560143 -0.08674597 -0.00074101629 -0.041603442 -0.090126723 -0.00086881337 -0.042347867 -0.07934317 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 59 |

0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143

60 |
61 |
62 |
63 |
64 | 65 | 66 | 67 | 68 | 69 | 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 |
-------------------------------------------------------------------------------- /urdfs/panda/meshes/Pandalink5respondable_coll_2.dae: --------------------------------------------------------------------------------
[COLLADA mesh, Assimp export (2023-09-26, Y_UP): collision geometry for Panda link 5; raw vertex positions and triangle indices omitted]
-------------------------------------------------------------------------------- /urdfs/panda/meshes/Pandalink5respondable_coll_4.dae: --------------------------------------------------------------------------------
[COLLADA mesh, Assimp export (2023-09-26, Y_UP): collision geometry for Panda link 5; raw vertex positions and triangle indices omitted]
-------------------------------------------------------------------------------- /urdfs/panda/meshes/Pandalink5respondable_coll_5.dae: --------------------------------------------------------------------------------
[COLLADA mesh, Assimp export (2023-09-26, Y_UP): collision geometry for Panda link 5; raw vertex positions and triangle indices omitted]
-------------------------------------------------------------------------------- /urdfs/panda/meshes/Pandalink6visual_vis_3.dae: --------------------------------------------------------------------------------
[COLLADA mesh, Assimp export (2023-09-26, Y_UP): visual geometry for Panda link 6; raw vertex positions and triangle indices omitted]
-------------------------------------------------------------------------------- /urdfs/panda/meshes/Pandalink7visual_vis_1.dae: --------------------------------------------------------------------------------
[COLLADA mesh, Assimp export (2023-09-26, Y_UP): visual geometry for Panda link 7; raw vertex positions and triangle indices omitted]
-------------------------------------------------------------------------------- /urdfs/panda/meshes/Pandalink7visual_vis_4.dae: --------------------------------------------------------------------------------
[COLLADA mesh, Assimp export (2023-09-26, Y_UP): visual geometry for Panda link 7; raw vertex positions and triangle indices omitted]
-------------------------------------------------------------------------------- /urdfs/panda/panda_gripper.urdf: --------------------------------------------------------------------------------
[URDF model of the Panda gripper; the 189 lines of XML markup were stripped in this dump and are not recoverable]
--------------------------------------------------------------------------------