├── .gitignore ├── LICENSE ├── README.md ├── algo └── ppo │ ├── __init__.py │ └── ppo │ ├── __init__.py │ ├── module.py │ ├── ppo.py │ └── storage.py ├── assets ├── RealWorldDataset.tgn ├── franka_description │ ├── franka_panda.srdf │ ├── franka_panda.urdf │ └── meshes │ │ ├── collision │ │ ├── finger.obj │ │ ├── finger.obj.convex.stl │ │ ├── finger.stl │ │ ├── hand.obj │ │ ├── hand.obj.convex.stl │ │ ├── hand.stl │ │ ├── link0.obj │ │ ├── link0.obj.convex.stl │ │ ├── link0.stl │ │ ├── link1.obj │ │ ├── link1.obj.convex.stl │ │ ├── link1.stl │ │ ├── link2.obj │ │ ├── link2.obj.convex.stl │ │ ├── link2.stl │ │ ├── link3.obj │ │ ├── link3.obj.convex.stl │ │ ├── link3.stl │ │ ├── link4.obj │ │ ├── link4.obj.convex.stl │ │ ├── link4.stl │ │ ├── link5.obj │ │ ├── link5.obj.convex.stl │ │ ├── link5.stl │ │ ├── link6.obj │ │ ├── link6.obj.convex.stl │ │ ├── link6.stl │ │ ├── link7.obj │ │ ├── link7.obj.convex.stl │ │ ├── link7.stl │ │ ├── stltoobj.bat │ │ └── stltoobj.mlx │ │ └── visual │ │ ├── daetoobj.bat │ │ ├── daetoobj.mlx │ │ ├── finger.dae │ │ ├── finger.mtl │ │ ├── finger.obj │ │ ├── hand.dae │ │ ├── hand.mtl │ │ ├── hand.obj │ │ ├── link0.dae │ │ ├── link0.mtl │ │ ├── link0.obj │ │ ├── link1.dae │ │ ├── link1.mtl │ │ ├── link1.obj │ │ ├── link2.dae │ │ ├── link2.mtl │ │ ├── link2.obj │ │ ├── link3.dae │ │ ├── link3.mtl │ │ ├── link3.obj │ │ ├── link4.dae │ │ ├── link4.mtl │ │ ├── link4.obj │ │ ├── link5.dae │ │ ├── link5.mtl │ │ ├── link5.obj │ │ ├── link6.dae │ │ ├── link6.mtl │ │ ├── link6.obj │ │ ├── link7.dae │ │ ├── link7.mtl │ │ └── link7.obj └── panda │ ├── franka_description │ └── meshes │ │ ├── collision │ │ ├── finger.stl │ │ ├── hand.stl │ │ ├── hand.stl.convex.stl │ │ ├── link0.stl │ │ ├── link0.stl.convex.stl │ │ ├── link1.stl │ │ ├── link1.stl.convex.stl │ │ ├── link2.stl │ │ ├── link2.stl.convex.stl │ │ ├── link3.stl │ │ ├── link3.stl.convex.stl │ │ ├── link4.stl │ │ ├── link4.stl.convex.stl │ │ ├── link5.stl │ │ ├── link5.stl.convex.stl │ │ ├── link6.stl │ │ ├── link6.stl.convex.stl │ │ ├── link7.stl │ │ └── link7.stl.convex.stl │ │ └── visual │ │ ├── finger.dae │ │ ├── hand.dae │ │ ├── link0.dae │ │ ├── link1.dae │ │ ├── link2.dae │ │ ├── link3.dae │ │ ├── link4.dae │ │ ├── link5.dae │ │ ├── link6.dae │ │ └── link7.dae │ ├── panda.srdf │ └── panda.urdf ├── cfg ├── config.yaml ├── controller │ ├── baseline.yaml │ ├── collect_baselines.yaml │ ├── collect_pose.yaml │ ├── gt_pose.yaml │ ├── heuristic_pose.yaml │ ├── homing.yaml │ ├── open_cabinet.yaml │ └── rl.yaml ├── dataset │ ├── cabinet_test.yaml │ ├── cabinet_train.yaml │ ├── drawer_test.yaml │ ├── drawer_train.yaml │ ├── mug_test.yaml │ ├── mug_train.yaml │ ├── pot_test.yaml │ ├── pot_train.yaml │ └── real_world.yaml ├── manipulation │ ├── close_cabinet.yaml │ ├── close_cabinet_open_loop.yaml │ ├── close_drawer.yaml │ ├── close_drawer_open_loop.yaml │ ├── open_cabinet.yaml │ ├── open_cabinet_open_loop.yaml │ ├── open_drawer.yaml │ ├── open_drawer_open_loop.yaml │ ├── open_pot.yaml │ ├── open_pot_open_loop.yaml │ ├── pick_mug.yaml │ └── pick_mug_open_loop.yaml ├── pose_estimator │ ├── adapose_cabinet.yaml │ ├── adapose_drawer.yaml │ ├── adapose_mug.yaml │ ├── adapose_pot.yaml │ └── ground_truth.yaml ├── task │ ├── open_cabinet.yaml │ ├── open_cabinet_45.yaml │ ├── open_cabinet_no_dr.yaml │ ├── open_cabinet_visualize.yaml │ ├── open_drawer.yaml │ ├── open_drawer_30.yaml │ ├── open_drawer_no_dr.yaml │ ├── open_pot.yaml │ ├── pick_mug.yaml │ └── real_world.yaml └── train │ ├── collect.yaml │ ├── controller.yaml │ ├── 
test.yaml │ └── test_baseline.yaml ├── env ├── base_sapien_env.py ├── base_viewer.py ├── my_vec_env.py ├── real_world_test.py ├── realworld_envs │ ├── base_realworld.py │ └── panda_rs_handeyecalibration_eye_on_hand.yaml ├── sapien_envs │ ├── base_manipulation.py │ ├── close_cabinet.py │ ├── impedance_control.py │ ├── interfaces.py │ ├── open_cabinet.py │ ├── open_pot.py │ └── osc_planner.py └── vec_env.py ├── install.sh ├── miscs ├── experimental_results │ ├── ablation_results.yaml │ ├── baseline_reults.yaml │ └── main_results.yaml └── plot │ ├── alpha.pdf │ ├── alpha.py │ ├── num_views.pdf │ └── num_views.py ├── models ├── controller │ ├── base_controller.py │ ├── baseline.py │ ├── collection.py │ ├── gt_pose.py │ ├── heuristic_pose.py │ ├── homing.py │ └── rl_pose.py ├── manipulation │ ├── base_manipulation.py │ ├── close_cabinet.py │ ├── close_drawer.py │ ├── open_cabinet.py │ ├── open_drawer.py │ ├── open_pot.py │ ├── pick_mug.py │ └── rl.py └── pose_estimator │ ├── .gitignore │ ├── AdaPose │ ├── AdaPose.yml │ ├── inference.py │ ├── interface.py │ ├── interface_baseline.py │ ├── interface_realworld.py │ ├── interface_v2.py │ ├── interface_v3.py │ ├── interface_v4.py │ ├── interface_v5.py │ └── lib │ │ ├── align.py │ │ ├── fusion.py │ │ ├── network.py │ │ ├── network_baseline.py │ │ ├── network_realworld.py │ │ ├── network_v2.py │ │ ├── network_v3.py │ │ ├── network_v4.py │ │ ├── network_v5.py │ │ ├── pspnet.py │ │ ├── rotation_utils.py │ │ └── utils.py │ ├── base_estimator.py │ └── groundtruth_estimator.py ├── requirements.txt ├── train.py └── utils ├── logger.py ├── sapien_utils.py ├── tools.py └── transform.py /.gitignore: -------------------------------------------------------------------------------- 1 | runs 2 | logs 3 | saves 4 | log 5 | # assets 6 | outputs 7 | result.json 8 | __pycache__ 9 | */__pycache__ 10 | */*/__pycache__ 11 | *.pyc 12 | .vscode 13 | models/pose_estimator/AdaPose/checkpoint 14 | downloads/* 15 | imgui.ini -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Adapose 2 | 3 | ## Install 4 | 5 | Install dependencies using `pip install -r requirements.txt`. The installation is tested with `setuptools==65.6.3`. Some later versions may not be compatible. 6 | 7 | *Potential Dependency Issues:* Users have reported a `ModuleNotFoundError` with mplib. This project was developed when the newest version of mplib was `mplib==0.0.8`. Newer versions of mplib may not be compatible with this project. We recommend using `mplib<=0.1.0`. 8 |
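For example, one illustrative installation sequence that respects these version pins (the exact order is not prescribed by the repo) is `pip install "setuptools==65.6.3" "mplib<=0.1.0"` followed by `pip install -r requirements.txt`.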
9 | ## Download Dataset and Checkpoints 10 | 11 | We provide scripts to fully automate dataset and checkpoint downloading from Google Drive. Just run `sh install.sh` and wait. The total file size to download is around 1GB. 12 | 13 | You can also download them manually with the following steps: 14 | 15 | First, download the dataset from [Google Drive](https://drive.google.com/file/d/154g8SzGFWOcLLjes40aTXFoREX47-ZPk/view?usp=sharing) and decompress it into `downloads/dataset`. 16 | 17 | Second, download the checkpoints for the Pose Estimator from [Google Drive](https://drive.google.com/drive/folders/1WshZaRVllWxHfUFK1--e1hQd_64dAKMz?usp=sharing), and move all four `.pth` checkpoints into `downloads/pose_estimator/`. 18 | 19 | Third, download the checkpoints for the Global Scheduling Policy from [Google Drive](https://drive.google.com/drive/folders/1YMI38jeLkPJQa_HJ-RnbQV36zPk4_OvH?usp=sharing), and save all checkpoints into `downloads/global_sheduling_policy/`. Notice that each checkpoint filename must keep its `_0` suffix, since our code examines this number. Use the `controller.load=CheckpointPath` option to load checkpoints. 20 | 21 | ## Run Experiments 22 | 23 | To run experiments, use the command line options: `python train.py dataset=?? task=?? pose_estimator=?? manipulation=?? controller=?? train=??` 24 | 25 | Possible options include: 26 | 27 | - dataset: 28 | - cabinet_train 29 | - cabinet_test 30 | - drawer_train 31 | - drawer_test 32 | - mug_train 33 | - mug_test 34 | - pot_train 35 | - pot_test 36 | - real_world 37 | - task: 38 | - open_cabinet 39 | - open_cabinet_45 40 | - open_cabinet_no_dr 41 | - open_drawer 42 | - open_drawer_30 43 | - open_drawer_no_dr 44 | - open_pot 45 | - pick_mug 46 | - real_world 47 | - pose_estimator: 48 | - adapose_cabinet 49 | - adapose_drawer 50 | - adapose_mug 51 | - adapose_pot 52 | - ground_truth 53 | - manipulation: 54 | - open_cabinet 55 | - open_cabinet_open_loop 56 | - open_drawer 57 | - open_drawer_open_loop 58 | - open_pot 59 | - open_pot_open_loop 60 | - pick_mug 61 | - pick_mug_open_loop 62 | - controller: 63 | - gt_pose 64 | - heuristic_pose 65 | - rl 66 | - train: 67 | - controller 68 | - test 69 | - collect: Used to collect offline data for some baselines. 70 | - test_baseline: Test with offline actions generated by some baselines. 71 | 72 | Some other possible arguments are: 73 | 74 | - task.num_envs=X: Number of parallel environments. 75 | - headless=True/False: Whether to run without on-screen graphics. 76 | - viewerless=True/False: Whether to disable the on-hand camera to accelerate some non-vision tasks. 77 | - exp_name=XXX: The name of the experiment (used for saving models and logging). 78 | - controller.load=Checkpoint: Load the saved controller checkpoint. 79 | 80 | All other configurations can be modified using the [hydra](https://hydra.cc/docs/intro/) style command line options. 81 |
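For example, a complete command could look like `python train.py dataset=cabinet_train task=open_cabinet pose_estimator=adapose_cabinet manipulation=open_cabinet controller=rl train=controller task.num_envs=4 headless=True`. This particular combination is only illustrative; any compatible values from the lists above can be substituted.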
82 | ## Miscs 83 | 84 | The scripts for drawing the plots included in our paper are also provided in `miscs/plot`. 85 | 86 | ## Citations 87 | 88 | To cite our work: 89 | 90 | ```latex 91 | @article{an2023rgbmanip, 92 | title={RGBManip: Monocular Image-based Robotic Manipulation through Active Object Pose Estimation}, 93 | author={An, Boshi and Geng, Yiran and Chen, Kai and Li, Xiaoqi and Dou, Qi and Dong, Hao}, 94 | journal={arXiv preprint arXiv:2310.03478}, 95 | year={2023} 96 | } 97 | ``` -------------------------------------------------------------------------------- /algo/ppo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/algo/ppo/__init__.py -------------------------------------------------------------------------------- /algo/ppo/ppo/__init__.py: -------------------------------------------------------------------------------- 1 | from .storage import RolloutStorage 2 | from .module import ActorCritic 3 | from .ppo import PPO, prepare_obs 4 | -------------------------------------------------------------------------------- /algo/ppo/ppo/module.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | import torch 4 | import torch.nn as nn 5 | from torch.distributions import MultivariateNormal 6 | 7 | 8 | class ActorCritic(nn.Module): 9 | 10 | def __init__(self, obs_shape, states_shape, actions_shape, initial_std, model_cfg, asymmetric=False): 11 | super(ActorCritic, self).__init__() 12 | 13 | self.asymmetric = asymmetric 14 | 15 | if model_cfg is None: 16 | actor_hidden_dim = [256, 256, 256] 17 | critic_hidden_dim = [256, 256, 256] 18 | activation = get_activation("selu") 19 | else: 20 | actor_hidden_dim = model_cfg['pi_hid_sizes'] 21 | critic_hidden_dim = model_cfg['vf_hid_sizes'] 22 | activation = get_activation(model_cfg['activation']) 23 | 24 | # Policy 25 | actor_layers = [] 26 | actor_layers.append(nn.Linear(*obs_shape, actor_hidden_dim[0])) 27 | actor_layers.append(activation) 28 | for l in range(len(actor_hidden_dim)): 29 | if l == len(actor_hidden_dim) - 1: 30 | actor_layers.append(nn.Linear(actor_hidden_dim[l], *actions_shape)) 31 | else: 32 | actor_layers.append(nn.Linear(actor_hidden_dim[l], actor_hidden_dim[l + 1])) 33 | actor_layers.append(activation) 34 | self.actor = nn.Sequential(*actor_layers) 35 | 36 | # Value function 37 | critic_layers = [] 38 | if self.asymmetric: 39 | critic_layers.append(nn.Linear(*states_shape, critic_hidden_dim[0])) 40 | else: 41 | critic_layers.append(nn.Linear(*obs_shape, critic_hidden_dim[0])) 42 | critic_layers.append(activation) 43 | for l in range(len(critic_hidden_dim)): 44 | if l == len(critic_hidden_dim) - 1: 45 | critic_layers.append(nn.Linear(critic_hidden_dim[l], 1)) 46 | else: 47 | critic_layers.append(nn.Linear(critic_hidden_dim[l], critic_hidden_dim[l + 1])) 48 | critic_layers.append(activation) 49 | self.critic = nn.Sequential(*critic_layers) 50 | 51 | # print(self.actor) 52 | # print(self.critic) 53 | 54 | # Action noise 55 | self.log_std = nn.Parameter(np.log(initial_std) * torch.ones(*actions_shape)) 56 | 57 | # Initialize the weights like in stable baselines 58 | actor_weights = [np.sqrt(2)] * len(actor_hidden_dim) 59 | actor_weights.append(0.01) 60 | critic_weights = [np.sqrt(2)] * len(critic_hidden_dim) 61 | critic_weights.append(1.0) 62 | self.init_weights(self.actor, actor_weights) 63 | self.init_weights(self.critic, critic_weights) 64 | 65 | @staticmethod 66 | def init_weights(sequential, scales): 67 |
[torch.nn.init.orthogonal_(module.weight, gain=scales[idx]) for idx, module in 68 | enumerate(mod for mod in sequential if isinstance(mod, nn.Linear))] 69 | 70 | def forward(self): 71 | raise NotImplementedError 72 | 73 | def act(self, observations, states): 74 | actions_mean = self.actor(observations) 75 | 76 | covariance = torch.diag(self.log_std.exp() * self.log_std.exp())  # diagonal covariance from the learned per-action log-std 77 | distribution = MultivariateNormal(actions_mean, covariance_matrix=covariance) 78 | 79 | actions = distribution.sample() 80 | actions_log_prob = distribution.log_prob(actions) 81 | 82 | if self.asymmetric: 83 | value = self.critic(states) 84 | else: 85 | value = self.critic(observations) 86 | 87 | return actions.detach(), actions_log_prob.detach(), value.detach(), actions_mean.detach(), self.log_std.repeat(actions_mean.shape[0], 1).detach() 88 | 89 | def act_inference(self, observations): 90 | actions_mean = self.actor(observations) 91 | return actions_mean.detach() 92 | 93 | def evaluate(self, observations, states, actions, contrastive=False): 94 | actions_mean = self.actor(observations) 95 | 96 | covariance = torch.diag(self.log_std.exp() * self.log_std.exp())  # same diagonal covariance as in act() 97 | distribution = MultivariateNormal(actions_mean, covariance_matrix=covariance) 98 | 99 | actions_log_prob = distribution.log_prob(actions) 100 | entropy = distribution.entropy() 101 | 102 | if self.asymmetric: 103 | value = self.critic(states) 104 | else: 105 | value = self.critic(observations) 106 | 107 | return actions_log_prob, entropy, value, actions_mean, self.log_std.repeat(actions_mean.shape[0], 1), 0 108 | 109 | def get_activation(act_name): 110 | if act_name == "elu": 111 | return nn.ELU() 112 | elif act_name == "selu": 113 | return nn.SELU() 114 | elif act_name == "relu": 115 | return nn.ReLU() 116 | elif act_name == "crelu": 117 | return nn.ReLU()  # torch has no CReLU module; fall back to ReLU 118 | elif act_name == "lrelu": 119 | return nn.LeakyReLU() 120 | elif act_name == "tanh": 121 | return nn.Tanh() 122 | elif act_name == "sigmoid": 123 | return nn.Sigmoid() 124 | else: 125 | print("invalid activation function!") 126 | return None 127 |
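As a minimal usage sketch of the `ActorCritic` module above (a hedged illustration: the batch size, dimensions, and `model_cfg=None` choice below are made up, not taken from this repo's configs):

```python
import torch

from algo.ppo.ppo import ActorCritic

# Hypothetical sizes, for illustration only.
num_envs, obs_dim, act_dim = 4, 38, 8

model = ActorCritic(
    obs_shape=(obs_dim,),
    states_shape=(obs_dim,),  # only consumed when asymmetric=True
    actions_shape=(act_dim,),
    initial_std=1.0,
    model_cfg=None,           # None falls back to the default 3x256 SELU MLPs
    asymmetric=False,
)

obs = torch.randn(num_envs, obs_dim)

# Stochastic rollout step: sampled actions plus the statistics PPO stores.
actions, log_prob, value, mu, sigma = model.act(obs, states=None)

# Deterministic evaluation: the mean action only.
det_actions = model.act_inference(obs)

print(actions.shape, value.shape, det_actions.shape)  # (4, 8), (4, 1), (4, 8)
```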
-------------------------------------------------------------------------------- /algo/ppo/ppo/storage.py: -------------------------------------------------------------------------------- 1 | import torch 2 | from torch.utils.data.sampler import BatchSampler, SequentialSampler, SubsetRandomSampler 3 | 4 | 5 | class RolloutStorage: 6 | 7 | def __init__(self, num_envs, num_transitions_per_env, obs_shape, states_shape, actions_shape, device='cpu', sampler='sequential'): 8 | 9 | self.device = device 10 | self.sampler = sampler 11 | 12 | # Core 13 | self.observations = torch.zeros(num_transitions_per_env, num_envs, *obs_shape, device=self.device) 14 | self.states = torch.zeros(num_transitions_per_env, num_envs, *states_shape, device=self.device) 15 | self.rewards = torch.zeros(num_transitions_per_env, num_envs, 1, device=self.device) 16 | self.actions = torch.zeros(num_transitions_per_env, num_envs, *actions_shape, device=self.device) 17 | self.dones = torch.zeros(num_transitions_per_env, num_envs, 1, device=self.device).byte() 18 | 19 | # For PPO 20 | self.actions_log_prob = torch.zeros(num_transitions_per_env, num_envs, 1, device=self.device) 21 | self.values = torch.zeros(num_transitions_per_env, num_envs, 1, device=self.device) 22 | self.returns = torch.zeros(num_transitions_per_env, num_envs, 1, device=self.device) 23 | self.advantages = torch.zeros(num_transitions_per_env, num_envs, 1, device=self.device) 24 | self.mu = torch.zeros(num_transitions_per_env, num_envs, *actions_shape, device=self.device) 25 | self.sigma = torch.zeros(num_transitions_per_env, num_envs, *actions_shape, device=self.device) 26 | 27 | self.num_transitions_per_env = num_transitions_per_env 28 | self.num_envs = num_envs 29 | 30 | self.step = 0 31 | 32 | def add_transitions(self, observations, states, actions, rewards, dones, values, actions_log_prob, mu, sigma): 33 | if self.step >= self.num_transitions_per_env: 34 | raise AssertionError("Rollout buffer overflow") 35 | self.observations[self.step].copy_(observations) 36 | self.states[self.step].copy_(states) 37 | self.actions[self.step].copy_(actions) 38 | self.rewards[self.step].copy_(rewards.view(-1, 1)) 39 | self.dones[self.step].copy_(dones.view(-1, 1)) 40 | self.values[self.step].copy_(values) 41 | self.actions_log_prob[self.step].copy_(actions_log_prob.view(-1, 1)) 42 | self.mu[self.step].copy_(mu) 43 | self.sigma[self.step].copy_(sigma) 44 | 45 | self.step += 1 46 | 47 | def clear(self): 48 | self.step = 0 49 | 50 | def compute_returns(self, last_values, gamma, lam):  # Generalized Advantage Estimation (GAE) 51 | advantage = 0 52 | for step in reversed(range(self.num_transitions_per_env)): 53 | if step == self.num_transitions_per_env - 1: 54 | next_values = last_values 55 | else: 56 | next_values = self.values[step + 1] 57 | next_is_not_terminal = 1.0 - self.dones[step].float() 58 | delta = self.rewards[step] + next_is_not_terminal * gamma * next_values - self.values[step] 59 | advantage = delta + next_is_not_terminal * gamma * lam * advantage 60 | self.returns[step] = advantage + self.values[step] 61 | 62 | # Compute and normalize the advantages 63 | self.advantages = self.returns - self.values 64 | self.advantages = (self.advantages - self.advantages.mean()) / (self.advantages.std() + 1e-8) 65 | 66 | def get_statistics(self): 67 | done = self.dones.cpu() 68 | done[-1] = 1 69 | flat_dones = done.permute(1, 0, 2).reshape(-1, 1) 70 | done_indices = torch.cat((flat_dones.new_tensor([-1], dtype=torch.int64), flat_dones.nonzero(as_tuple=False)[:, 0])) 71 | trajectory_lengths = (done_indices[1:] - done_indices[:-1]) 72 | return trajectory_lengths.float().mean(), self.rewards.mean() 73 | 74 | def mini_batch_generator(self, num_mini_batches): 75 | batch_size = self.num_envs * self.num_transitions_per_env 76 | mini_batch_size = batch_size // num_mini_batches 77 | 78 | if self.sampler == "sequential": 79 | # For physics-based RL, each environment is already randomized. There is no value to doing random sampling 80 | # but a lot of CPU overhead during the PPO process.
So, we can just switch to a sequential sampler instead 81 | subset = SequentialSampler(range(batch_size)) 82 | elif self.sampler == "random": 83 | subset = SubsetRandomSampler(range(batch_size)) 84 | 85 | batch = BatchSampler(subset, mini_batch_size, drop_last=True) 86 | return batch -------------------------------------------------------------------------------- /assets/RealWorldDataset.tgn: -------------------------------------------------------------------------------- 1 | {"rows_views":[[{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}}],[{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}}],[{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}}],[{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","
bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}}],[{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}}],[{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}},{"style":{"borders":"ltrb","font_style":{},"text_color":"","bg_color":"","halign":"left","valign":"top","padding":{"top":10,"right":5,"bottom":10,"left":5},"border_color":""}}]],"model":{"rows":[[{"value":"Category","cspan":1,"rspan":1,"markup":[1,8]},{"value":"Object Name","cspan":1,"rspan":1,"markup":[1,11]},{"value":"Image","cspan":1,"rspan":1,"markup":[1,5]},{"value":"Universal ID","cspan":1,"rspan":1,"markup":[1,12]},{"value":"Link","cspan":1,"rspan":1,"markup":[1,4]}],[{"value":"Cabinet","cspan":1,"rspan":1,"markup":[1,7]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"","cspan":1,"rspan":1,"markup":[]}],[{"value":"Drawer","cspan":1,"rspan":1,"markup":[1,6]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"","cspan":1,"rspan":1,"markup":[]}],[{"value":"Microwave","cspan":1,"rspan":1,"markup":[1,9]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"","cspan":1,"rspan":1,"markup":[]}],[{"value":"Refrigerator","cspan":1,"rspan":1,"markup":[1,12]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"","cspan":1,"rspan":1,"markup":[]}],[{"value":"Kitchen 
Pot","cspan":1,"rspan":1,"markup":[1,11]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"","cspan":1,"rspan":1,"markup":[]},{"value":"","cspan":1,"rspan":1,"markup":[]}]]},"theme":null,"fixed_layout":false,"markup":{"instances":[{},{"style":{"fontWeight":"","fontStyle":"","textDecoration":"","color":"","backgroundColor":""}}]},"options":{"table_caption":"","table_label":"tab:my-table"}} -------------------------------------------------------------------------------- /assets/franka_description/franka_panda.srdf: -------------------------------------------------------------------------------- 1 | 2 | 3 | -------------------------------------------------------------------------------- /assets/franka_description/meshes/collision/finger.obj: -------------------------------------------------------------------------------- 1 | #### 2 | # 3 | # OBJ File Generated by Meshlab 4 | # 5 | #### 6 | # Object finger.obj 7 | # 8 | # Vertices: 52 9 | # Faces: 32 10 | # 11 | #### 12 | vn 0.999991 0.003723 -0.001919 13 | v 0.010360 0.026403 0.000155 14 | vn 0.019341 -0.997893 -0.061925 15 | v 0.010449 0.002583 0.000147 16 | vn -0.999568 -0.025962 0.013789 17 | v -0.010387 0.002534 0.000132 18 | vn -0.999606 -0.009503 0.026403 19 | v -0.010479 0.016102 0.018988 20 | vn -0.000579 0.001464 -0.999999 21 | v -0.010401 0.026309 0.000167 22 | vn -0.044737 0.976483 0.210900 23 | v -0.010389 0.025220 0.019188 24 | vn -0.871286 -0.490748 0.005227 25 | v -0.008730 -0.000024 0.036165 26 | vn 0.999861 0.006488 0.015354 27 | v 0.010400 0.025253 0.019037 28 | vn 0.377718 0.867563 0.323518 29 | v 0.005840 0.014274 0.053803 30 | vn 0.736099 -0.021564 0.676530 31 | v 0.008616 0.013989 0.051328 32 | vn 0.999373 -0.008600 0.034345 33 | v 0.010495 0.015103 0.018436 34 | vn 0.013041 -0.999896 -0.006124 35 | v 0.008693 -0.000133 0.050166 36 | vn -0.998603 -0.032800 0.041418 37 | v -0.008623 -0.000057 0.050953 38 | vn -0.588468 -0.017705 0.808327 39 | v -0.005481 -0.000091 0.053725 40 | vn 0.004085 -0.008700 0.999954 41 | v -0.005278 0.014293 0.053849 42 | vn -0.691057 -0.012018 0.722700 43 | v -0.007778 0.014218 0.052366 44 | vn -0.665951 0.690851 0.281486 45 | v -0.008841 0.013918 0.050589 46 | vn 0.736099 -0.021564 0.676530 47 | v 0.006138 -0.000021 0.053578 48 | vn -0.002818 0.998255 0.058981 49 | v 0.010360 0.026403 0.000155 50 | vn 0.000073 0.000898 -1.000000 51 | v 0.010360 0.026403 0.000155 52 | vn 0.999898 -0.012431 0.007036 53 | v 0.010449 0.002583 0.000147 54 | vn 0.000724 0.000331 -1.000000 55 | v 0.010449 0.002583 0.000147 56 | vn -0.871286 -0.490748 0.005227 57 | v -0.010387 0.002534 0.000132 58 | vn 0.002403 -0.997480 -0.070914 59 | v -0.010387 0.002534 0.000132 60 | vn 0.000073 0.000898 -1.000000 61 | v -0.010387 0.002534 0.000132 62 | vn -0.004486 0.998354 0.057168 63 | v -0.010401 0.026309 0.000167 64 | vn -0.999988 0.004662 -0.001626 65 | v -0.010401 0.026309 0.000167 66 | vn -0.665951 0.690851 0.281486 67 | v -0.010389 0.025220 0.019188 68 | vn -0.999597 0.009346 0.026807 69 | v -0.010389 0.025220 0.019188 70 | vn 0.006493 -0.999457 -0.032313 71 | v -0.008730 -0.000024 0.036165 72 | vn 0.377718 0.867563 0.323518 73 | v 0.010400 0.025253 0.019037 74 | vn -0.000242 0.983230 0.182372 75 | v 0.010400 0.025253 0.019037 76 | vn 0.665647 0.002096 0.746264 77 | v 0.005840 0.014274 0.053803 78 | vn 0.008418 -0.012115 0.999891 79 | v 0.005840 0.014274 0.053803 80 | vn 0.001757 0.953702 0.300749 81 | v 0.005840 0.014274 0.053803 82 | vn 0.377718 0.867563 0.323518 83 | v 
0.008616 0.013989 0.051328 84 | vn 0.998361 0.003310 0.057136 85 | v 0.008616 0.013989 0.051328 86 | vn 0.798906 -0.045001 0.599770 87 | v 0.008693 -0.000133 0.050166 88 | vn 0.998687 -0.025065 0.044683 89 | v 0.008693 -0.000133 0.050166 90 | vn -0.769031 -0.017753 0.638965 91 | v -0.008623 -0.000057 0.050953 92 | vn -0.008996 -0.999957 -0.002185 93 | v -0.008623 -0.000057 0.050953 94 | vn -0.871286 -0.490748 0.005227 95 | v -0.008623 -0.000057 0.050953 96 | vn 0.008418 -0.012115 0.999891 97 | v -0.005481 -0.000091 0.053725 98 | vn -0.002059 -0.999940 0.010793 99 | v -0.005481 -0.000091 0.053725 100 | vn -0.510143 -0.000217 0.860089 101 | v -0.005278 0.014293 0.053849 102 | vn -0.108731 0.943365 0.313433 103 | v -0.005278 0.014293 0.053849 104 | vn -0.665951 0.690851 0.281486 105 | v -0.007778 0.014218 0.052366 106 | vn -0.218924 0.920873 0.322590 107 | v -0.007778 0.014218 0.052366 108 | vn -0.858159 -0.000049 0.513385 109 | v -0.008841 0.013918 0.050589 110 | vn -0.998665 -0.002749 0.051583 111 | v -0.008841 0.013918 0.050589 112 | vn 0.006542 -0.999267 0.037718 113 | v 0.006138 -0.000021 0.053578 114 | vn 0.012751 -0.015529 0.999798 115 | v 0.006138 -0.000021 0.053578 116 | # 52 vertices, 0 vertices normals 117 | 118 | f 20//20 22//22 25//25 119 | f 3//3 4//4 27//27 120 | f 27//27 4//4 29//29 121 | f 2//2 30//30 24//24 122 | f 32//32 6//6 35//35 123 | f 25//25 5//5 20//20 124 | f 37//37 11//11 8//8 125 | f 11//11 39//39 21//21 126 | f 37//37 39//39 11//11 127 | f 42//42 23//23 7//7 128 | f 2//2 12//12 30//30 129 | f 12//12 44//44 30//30 130 | f 8//8 11//11 21//21 131 | f 8//8 21//21 1//1 132 | f 32//32 19//19 6//6 133 | f 6//6 46//46 35//35 134 | f 48//48 46//46 6//6 135 | f 40//40 14//14 16//16 136 | f 3//3 13//13 4//4 137 | f 31//31 9//9 36//36 138 | f 19//19 26//26 6//6 139 | f 4//4 50//50 29//29 140 | f 17//17 47//47 28//28 141 | f 34//34 43//43 52//52 142 | f 15//15 43//43 34//34 143 | f 12//12 51//51 44//44 144 | f 18//18 38//38 10//10 145 | f 44//44 41//41 30//30 146 | f 16//16 14//14 45//45 147 | f 13//13 50//50 4//4 148 | f 18//18 10//10 33//33 149 | f 16//16 49//49 40//40 150 | # 32 faces, 0 coords texture 151 | 152 | # End of File 153 | -------------------------------------------------------------------------------- /assets/franka_description/meshes/collision/finger.obj.convex.stl: -------------------------------------------------------------------------------- 1 | solid AssimpScene 2 | facet normal 0 0 0 3 | outer loop 4 | vertex 0.0104489997 0.00258300011 0.000146999984 5 | vertex 0.0103599997 0.0264030024 0.000155000002 6 | vertex 0.0104 0.0252529997 0.0190370008 7 | endloop 8 | endfacet 9 | 10 | facet normal 0 0 0 11 | outer loop 12 | vertex 0.0104489997 0.00258300011 0.000146999984 13 | vertex 0.0104 0.0252529997 0.0190370008 14 | vertex 0.00861600135 0.0139889996 0.0513279997 15 | endloop 16 | endfacet 17 | 18 | facet normal 0 0 0 19 | outer loop 20 | vertex 0.0104489997 0.00258300011 0.000146999984 21 | vertex 0.00861600135 0.0139889996 0.0513279997 22 | vertex 0.0086930003 -0.000132999994 0.050165996 23 | endloop 24 | endfacet 25 | 26 | facet normal 0 0 0 27 | outer loop 28 | vertex 0.0104489997 0.00258300011 0.000146999984 29 | vertex -0.0103870006 0.00253399997 0.000132000001 30 | vertex -0.0104010003 0.0263090022 0.000167000006 31 | endloop 32 | endfacet 33 | 34 | facet normal 0 0 0 35 | outer loop 36 | vertex 0.0104489997 0.00258300011 0.000146999984 37 | vertex -0.0104010003 0.0263090022 0.000167000006 38 | vertex 0.0103599997 0.0264030024 0.000155000002 39 | 
endloop 40 | endfacet 41 | 42 | facet normal 0 0 0 43 | outer loop 44 | vertex -0.0103890002 0.0252199993 0.0191879999 45 | vertex -0.00527800014 0.014293001 0.0538490005 46 | vertex 0.00584000023 0.0142740002 0.0538030006 47 | endloop 48 | endfacet 49 | 50 | facet normal 0 0 0 51 | outer loop 52 | vertex -0.0103890002 0.0252199993 0.0191879999 53 | vertex 0.00584000023 0.0142740002 0.0538030006 54 | vertex 0.0104 0.0252529997 0.0190370008 55 | endloop 56 | endfacet 57 | 58 | facet normal 0 0 0 59 | outer loop 60 | vertex -0.0103890002 0.0252199993 0.0191879999 61 | vertex 0.0104 0.0252529997 0.0190370008 62 | vertex 0.0103599997 0.0264030024 0.000155000002 63 | endloop 64 | endfacet 65 | 66 | facet normal 0 0 0 67 | outer loop 68 | vertex -0.0103890002 0.0252199993 0.0191879999 69 | vertex 0.0103599997 0.0264030024 0.000155000002 70 | vertex -0.0104010003 0.0263090022 0.000167000006 71 | endloop 72 | endfacet 73 | 74 | facet normal 0 0 0 75 | outer loop 76 | vertex -0.00862300023 -5.70000047e-05 0.0509530008 77 | vertex -0.00884099957 0.0139180003 0.0505889989 78 | vertex -0.0103890002 0.0252199993 0.0191879999 79 | endloop 80 | endfacet 81 | 82 | facet normal 0 0 0 83 | outer loop 84 | vertex -0.00862300023 -5.70000047e-05 0.0509530008 85 | vertex -0.0103890002 0.0252199993 0.0191879999 86 | vertex -0.0104010003 0.0263090022 0.000167000006 87 | endloop 88 | endfacet 89 | 90 | facet normal 0 0 0 91 | outer loop 92 | vertex -0.00862300023 -5.70000047e-05 0.0509530008 93 | vertex -0.0104010003 0.0263090022 0.000167000006 94 | vertex -0.0103870006 0.00253399997 0.000132000001 95 | endloop 96 | endfacet 97 | 98 | facet normal 0 0 0 99 | outer loop 100 | vertex 0.00613800064 -2.09999998e-05 0.0535780005 101 | vertex 0.0086930003 -0.000132999994 0.050165996 102 | vertex 0.00861600135 0.0139889996 0.0513279997 103 | endloop 104 | endfacet 105 | 106 | facet normal 0 0 0 107 | outer loop 108 | vertex -0.00884099957 0.0139180003 0.0505889989 109 | vertex -0.00527800014 0.014293001 0.0538490005 110 | vertex -0.0103890002 0.0252199993 0.0191879999 111 | endloop 112 | endfacet 113 | 114 | facet normal 0 0 0 115 | outer loop 116 | vertex -0.00884099957 0.0139180003 0.0505889989 117 | vertex -0.00862300023 -5.70000047e-05 0.0509530008 118 | vertex -0.00548100052 -9.10000017e-05 0.0537250005 119 | endloop 120 | endfacet 121 | 122 | facet normal 0 0 0 123 | outer loop 124 | vertex -0.00884099957 0.0139180003 0.0505889989 125 | vertex -0.00548100052 -9.10000017e-05 0.0537250005 126 | vertex -0.00527800014 0.014293001 0.0538490005 127 | endloop 128 | endfacet 129 | 130 | facet normal 0 0 0 131 | outer loop 132 | vertex 0.00584000023 0.0142740002 0.0538030006 133 | vertex -0.00527800014 0.014293001 0.0538490005 134 | vertex -0.00548100052 -9.10000017e-05 0.0537250005 135 | endloop 136 | endfacet 137 | 138 | facet normal 0 0 0 139 | outer loop 140 | vertex 0.00584000023 0.0142740002 0.0538030006 141 | vertex -0.00548100052 -9.10000017e-05 0.0537250005 142 | vertex 0.00613800064 -2.09999998e-05 0.0535780005 143 | endloop 144 | endfacet 145 | 146 | facet normal 0 0 0 147 | outer loop 148 | vertex 0.00584000023 0.0142740002 0.0538030006 149 | vertex 0.00613800064 -2.09999998e-05 0.0535780005 150 | vertex 0.00861600135 0.0139889996 0.0513279997 151 | endloop 152 | endfacet 153 | 154 | facet normal 0 0 0 155 | outer loop 156 | vertex 0.00584000023 0.0142740002 0.0538030006 157 | vertex 0.00861600135 0.0139889996 0.0513279997 158 | vertex 0.0104 0.0252529997 0.0190370008 159 | endloop 160 | endfacet 161 | 162 | 
facet normal 0 0 0 163 | outer loop 164 | vertex 0.0086930003 -0.000132999994 0.050165996 165 | vertex 0.00613800064 -2.09999998e-05 0.0535780005 166 | vertex -0.00548100052 -9.10000017e-05 0.0537250005 167 | endloop 168 | endfacet 169 | 170 | facet normal 0 0 0 171 | outer loop 172 | vertex 0.0086930003 -0.000132999994 0.050165996 173 | vertex -0.00548100052 -9.10000017e-05 0.0537250005 174 | vertex -0.00862300023 -5.70000047e-05 0.0509530008 175 | endloop 176 | endfacet 177 | 178 | facet normal 0 0 0 179 | outer loop 180 | vertex 0.0086930003 -0.000132999994 0.050165996 181 | vertex -0.00862300023 -5.70000047e-05 0.0509530008 182 | vertex -0.00872999988 -2.40000008e-05 0.036164999 183 | endloop 184 | endfacet 185 | 186 | facet normal 0 0 0 187 | outer loop 188 | vertex -0.00872999988 -2.40000008e-05 0.036164999 189 | vertex -0.00862300023 -5.70000047e-05 0.0509530008 190 | vertex -0.0103870006 0.00253399997 0.000132000001 191 | endloop 192 | endfacet 193 | 194 | facet normal 0 0 0 195 | outer loop 196 | vertex -0.00872999988 -2.40000008e-05 0.036164999 197 | vertex -0.0103870006 0.00253399997 0.000132000001 198 | vertex 0.0104489997 0.00258300011 0.000146999984 199 | endloop 200 | endfacet 201 | 202 | facet normal 0 0 0 203 | outer loop 204 | vertex -0.00872999988 -2.40000008e-05 0.036164999 205 | vertex 0.0104489997 0.00258300011 0.000146999984 206 | vertex 0.0086930003 -0.000132999994 0.050165996 207 | endloop 208 | endfacet 209 | 210 | endsolid AssimpScene 211 | -------------------------------------------------------------------------------- /assets/franka_description/meshes/collision/finger.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/franka_description/meshes/collision/finger.stl -------------------------------------------------------------------------------- /assets/franka_description/meshes/collision/hand.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/franka_description/meshes/collision/hand.stl -------------------------------------------------------------------------------- /assets/franka_description/meshes/collision/link0.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/franka_description/meshes/collision/link0.stl -------------------------------------------------------------------------------- /assets/franka_description/meshes/collision/link1.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/franka_description/meshes/collision/link1.stl -------------------------------------------------------------------------------- /assets/franka_description/meshes/collision/link2.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/franka_description/meshes/collision/link2.stl -------------------------------------------------------------------------------- /assets/franka_description/meshes/collision/link3.stl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/franka_description/meshes/collision/link3.stl -------------------------------------------------------------------------------- /assets/franka_description/meshes/collision/link4.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/franka_description/meshes/collision/link4.stl -------------------------------------------------------------------------------- /assets/franka_description/meshes/collision/link5.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/franka_description/meshes/collision/link5.stl -------------------------------------------------------------------------------- /assets/franka_description/meshes/collision/link6.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/franka_description/meshes/collision/link6.stl -------------------------------------------------------------------------------- /assets/franka_description/meshes/collision/link7.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/franka_description/meshes/collision/link7.stl -------------------------------------------------------------------------------- /assets/franka_description/meshes/collision/stltoobj.bat: -------------------------------------------------------------------------------- 1 | REM SET PATH=%PATH%;C:/Tools/Assimp/bin/x64/ 2 | REM forfiles /m *.dae /c "cmd /c assimp export @file @fname.obj --verbose --show-log -ptv" 3 | 4 | SET PATH=%PATH%;C:/Program Files/VCG/MeshLab/ 5 | forfiles /m *.stl /c "cmd /c meshlabserver -i @file -o @fname.obj -m vn -s stltoobj.mlx" -------------------------------------------------------------------------------- /assets/franka_description/meshes/collision/stltoobj.mlx: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /assets/franka_description/meshes/visual/daetoobj.bat: -------------------------------------------------------------------------------- 1 | SET PATH=%PATH%;C:/Tools/Assimp/bin/x64/ 2 | forfiles /m *.dae /c "cmd /c assimp export @file @fname.obj --verbose --show-log -ptv" 3 | 4 | REM SET PATH=%PATH%;C:/Program Files/VCG/MeshLab/ 5 | REM forfiles /m *.dae /c "cmd /c meshlabserver -i @file -o @fname.obj -m vn vt -------------------------------------------------------------------------------- /assets/franka_description/meshes/visual/daetoobj.mlx: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /assets/franka_description/meshes/visual/finger.mtl: -------------------------------------------------------------------------------- 1 | # Blender MTL File: 'None' 2 | # Material Count: 2 3 | 4 | newmtl Part__Feature001_006 5 | Ns -1.960784 6 | Ka 1.000000 1.000000 1.000000 7 | Kd 
0.901961 0.921569 0.929412 8 | Ks 0.250000 0.250000 0.250000 9 | Ke 0.000000 0.000000 0.000000 10 | Ni 1.000000 11 | d 1.000000 12 | illum 2 13 | 14 | newmtl Part__Feature_007 15 | Ns -1.960784 16 | Ka 1.000000 1.000000 1.000000 17 | Kd 0.250980 0.250980 0.250980 18 | Ks 0.250000 0.250000 0.250000 19 | Ke 0.000000 0.000000 0.000000 20 | Ni 1.000000 21 | d 1.000000 22 | illum 2 23 | -------------------------------------------------------------------------------- /assets/franka_description/meshes/visual/hand.mtl: -------------------------------------------------------------------------------- 1 | # Blender MTL File: 'None' 2 | # Material Count: 5 3 | 4 | newmtl Part__Feature001_008_005 5 | Ns -1.960784 6 | Ka 1.000000 1.000000 1.000000 7 | Kd 0.250980 0.250980 0.250980 8 | Ks 0.007812 0.007812 0.007812 9 | Ke 0.000000 0.000000 0.000000 10 | Ni 1.000000 11 | d 1.000000 12 | illum 2 13 | 14 | newmtl Part__Feature002_005_005 15 | Ns -1.960784 16 | Ka 1.000000 1.000000 1.000000 17 | Kd 0.901961 0.921569 0.929412 18 | Ks 0.015625 0.015625 0.015625 19 | Ke 0.000000 0.000000 0.000000 20 | Ni 1.000000 21 | d 1.000000 22 | illum 2 23 | 24 | newmtl Part__Feature005_001_005 25 | Ns -1.960784 26 | Ka 1.000000 1.000000 1.000000 27 | Kd 1.000000 1.000000 1.000000 28 | Ks 0.015625 0.015625 0.015625 29 | Ke 0.000000 0.000000 0.000000 30 | Ni 1.000000 31 | d 1.000000 32 | illum 2 33 | 34 | newmtl Part__Feature005_001_005_001 35 | Ns -1.960784 36 | Ka 1.000000 1.000000 1.000000 37 | Kd 0.901961 0.921569 0.929412 38 | Ks 0.015625 0.015625 0.015625 39 | Ke 0.000000 0.000000 0.000000 40 | Ni 1.000000 41 | d 1.000000 42 | illum 2 43 | 44 | newmtl Part__Feature_009_005 45 | Ns -1.960784 46 | Ka 1.000000 1.000000 1.000000 47 | Kd 0.250980 0.250980 0.250980 48 | Ks 0.015625 0.015625 0.015625 49 | Ke 0.000000 0.000000 0.000000 50 | Ni 1.000000 51 | d 1.000000 52 | illum 2 53 | -------------------------------------------------------------------------------- /assets/franka_description/meshes/visual/link0.mtl: -------------------------------------------------------------------------------- 1 | # Blender MTL File: 'None' 2 | # Material Count: 12 3 | 4 | newmtl Face636_001 5 | Ns -1.960784 6 | Ka 1.000000 1.000000 1.000000 7 | Kd 0.901961 0.921569 0.929412 8 | Ks 0.125000 0.125000 0.125000 9 | Ke 0.000000 0.000000 0.000000 10 | Ni 1.000000 11 | d 1.000000 12 | illum 2 13 | 14 | newmtl Part__Feature017_001 15 | Ns -1.960784 16 | Ka 1.000000 1.000000 1.000000 17 | Kd 1.000000 1.000000 1.000000 18 | Ks 0.500000 0.500000 0.500000 19 | Ke 0.000000 0.000000 0.000000 20 | Ni 1.000000 21 | d 1.000000 22 | illum 2 23 | 24 | newmtl Part__Feature018_001 25 | Ns -1.960784 26 | Ka 1.000000 1.000000 1.000000 27 | Kd 1.000000 1.000000 1.000000 28 | Ks 0.500000 0.500000 0.500000 29 | Ke 0.000000 0.000000 0.000000 30 | Ni 1.000000 31 | d 1.000000 32 | illum 2 33 | 34 | newmtl Part__Feature019_001 35 | Ns -1.960784 36 | Ka 1.000000 1.000000 1.000000 37 | Kd 1.000000 1.000000 1.000000 38 | Ks 0.125000 0.125000 0.125000 39 | Ke 0.000000 0.000000 0.000000 40 | Ni 1.000000 41 | d 1.000000 42 | illum 2 43 | 44 | newmtl Part__Feature022_001 45 | Ns -1.960784 46 | Ka 1.000000 1.000000 1.000000 47 | Kd 0.901961 0.921569 0.929412 48 | Ks 0.125000 0.125000 0.125000 49 | Ke 0.000000 0.000000 0.000000 50 | Ni 1.000000 51 | d 1.000000 52 | illum 2 53 | 54 | newmtl Part__Feature023_001 55 | Ns -1.960784 56 | Ka 1.000000 1.000000 1.000000 57 | Kd 0.250980 0.250980 0.250980 58 | Ks 0.125000 0.125000 0.125000 59 | Ke 0.000000 0.000000 0.000000 60 | Ni 
1.000000 61 | d 1.000000 62 | illum 2 63 | 64 | newmtl Shell001_001 65 | Ns -1.960784 66 | Ka 1.000000 1.000000 1.000000 67 | Kd 0.250980 0.250980 0.250980 68 | Ks 0.125000 0.125000 0.125000 69 | Ke 0.000000 0.000000 0.000000 70 | Ni 1.000000 71 | d 1.000000 72 | illum 2 73 | 74 | newmtl Shell002_001 75 | Ns -1.960784 76 | Ka 1.000000 1.000000 1.000000 77 | Kd 0.901961 0.921569 0.929412 78 | Ks 0.125000 0.125000 0.125000 79 | Ke 0.000000 0.000000 0.000000 80 | Ni 1.000000 81 | d 1.000000 82 | illum 2 83 | 84 | newmtl Shell003_001 85 | Ns -1.960784 86 | Ka 1.000000 1.000000 1.000000 87 | Kd 0.901961 0.921569 0.929412 88 | Ks 0.125000 0.125000 0.125000 89 | Ke 0.000000 0.000000 0.000000 90 | Ni 1.000000 91 | d 1.000000 92 | illum 2 93 | 94 | newmtl Shell009_001 95 | Ns -1.960784 96 | Ka 1.000000 1.000000 1.000000 97 | Kd 0.250980 0.250980 0.250980 98 | Ks 0.125000 0.125000 0.125000 99 | Ke 0.000000 0.000000 0.000000 100 | Ni 1.000000 101 | d 1.000000 102 | illum 2 103 | 104 | newmtl Shell010_001 105 | Ns -1.960784 106 | Ka 1.000000 1.000000 1.000000 107 | Kd 0.901961 0.921569 0.929412 108 | Ks 0.125000 0.125000 0.125000 109 | Ke 0.000000 0.000000 0.000000 110 | Ni 1.000000 111 | d 1.000000 112 | illum 2 113 | 114 | newmtl Shell_001 115 | Ns -1.960784 116 | Ka 1.000000 1.000000 1.000000 117 | Kd 0.250980 0.250980 0.250980 118 | Ks 0.125000 0.125000 0.125000 119 | Ke 0.000000 0.000000 0.000000 120 | Ni 1.000000 121 | d 1.000000 122 | illum 2 123 | -------------------------------------------------------------------------------- /assets/franka_description/meshes/visual/link1.mtl: -------------------------------------------------------------------------------- 1 | # Blender MTL File: 'None' 2 | # Material Count: 1 3 | 4 | newmtl Part__Feature_001 5 | Ns -1.960784 6 | Ka 1.000000 1.000000 1.000000 7 | Kd 1.000000 1.000000 1.000000 8 | Ks 0.062500 0.062500 0.062500 9 | Ke 0.000000 0.000000 0.000000 10 | Ni 1.000000 11 | d 1.000000 12 | illum 2 13 | -------------------------------------------------------------------------------- /assets/franka_description/meshes/visual/link2.mtl: -------------------------------------------------------------------------------- 1 | # Blender MTL File: 'None' 2 | # Material Count: 1 3 | 4 | newmtl Part__Feature024 5 | Ns -1.960784 6 | Ka 1.000000 1.000000 1.000000 7 | Kd 1.000000 1.000000 1.000000 8 | Ks 0.125000 0.125000 0.125000 9 | Ke 0.000000 0.000000 0.000000 10 | Ni 1.000000 11 | d 1.000000 12 | illum 2 13 | -------------------------------------------------------------------------------- /assets/franka_description/meshes/visual/link3.mtl: -------------------------------------------------------------------------------- 1 | # Blender MTL File: 'None' 2 | # Material Count: 4 3 | 4 | newmtl Part__Feature001_010_001_002.001 5 | Ns -1.960784 6 | Ka 1.000000 1.000000 1.000000 7 | Kd 1.000000 1.000000 1.000000 8 | Ks 0.007812 0.007812 0.007812 9 | Ke 0.000000 0.000000 0.000000 10 | Ni 1.000000 11 | d 1.000000 12 | illum 2 13 | 14 | newmtl Part__Feature002_007_001_002.001 15 | Ns -1.960784 16 | Ka 1.000000 1.000000 1.000000 17 | Kd 1.000000 1.000000 1.000000 18 | Ks 0.007812 0.007812 0.007812 19 | Ke 0.000000 0.000000 0.000000 20 | Ni 1.000000 21 | d 1.000000 22 | illum 2 23 | 24 | newmtl Part__Feature003_004_001_002.001 25 | Ns -1.960784 26 | Ka 1.000000 1.000000 1.000000 27 | Kd 1.000000 1.000000 1.000000 28 | Ks 0.007812 0.007812 0.007812 29 | Ke 0.000000 0.000000 0.000000 30 | Ni 1.000000 31 | d 1.000000 32 | illum 2 33 | 34 | newmtl Part__Feature_001_001_001_002.001 
35 | Ns -1.960784 36 | Ka 1.000000 1.000000 1.000000 37 | Kd 0.250980 0.250980 0.250980 38 | Ks 0.007812 0.007812 0.007812 39 | Ke 0.000000 0.000000 0.000000 40 | Ni 1.000000 41 | d 1.000000 42 | illum 2 43 | -------------------------------------------------------------------------------- /assets/franka_description/meshes/visual/link4.mtl: -------------------------------------------------------------------------------- 1 | # Blender MTL File: 'None' 2 | # Material Count: 4 3 | 4 | newmtl Part__Feature001_001_003_001 5 | Ns -1.960784 6 | Ka 1.000000 1.000000 1.000000 7 | Kd 1.000000 1.000000 1.000000 8 | Ks 0.007812 0.007812 0.007812 9 | Ke 0.000000 0.000000 0.000000 10 | Ni 1.000000 11 | d 1.000000 12 | illum 2 13 | 14 | newmtl Part__Feature002_001_003_001 15 | Ns -1.960784 16 | Ka 1.000000 1.000000 1.000000 17 | Kd 0.250980 0.250980 0.250980 18 | Ks 0.007812 0.007812 0.007812 19 | Ke 0.000000 0.000000 0.000000 20 | Ni 1.000000 21 | d 1.000000 22 | illum 2 23 | 24 | newmtl Part__Feature003_001_003_001 25 | Ns -1.960784 26 | Ka 1.000000 1.000000 1.000000 27 | Kd 1.000000 1.000000 1.000000 28 | Ks 0.007812 0.007812 0.007812 29 | Ke 0.000000 0.000000 0.000000 30 | Ni 1.000000 31 | d 1.000000 32 | illum 2 33 | 34 | newmtl Part__Feature_002_003_001 35 | Ns -1.960784 36 | Ka 1.000000 1.000000 1.000000 37 | Kd 1.000000 1.000000 1.000000 38 | Ks 0.007812 0.007812 0.007812 39 | Ke 0.000000 0.000000 0.000000 40 | Ni 1.000000 41 | d 1.000000 42 | illum 2 43 | -------------------------------------------------------------------------------- /assets/franka_description/meshes/visual/link5.mtl: -------------------------------------------------------------------------------- 1 | # Blender MTL File: 'None' 2 | # Material Count: 3 3 | 4 | newmtl Part__Feature_002_004_003 5 | Ns -1.960784 6 | Ka 1.000000 1.000000 1.000000 7 | Kd 1.000000 1.000000 1.000000 8 | Ks 0.015625 0.015625 0.015625 9 | Ke 0.000000 0.000000 0.000000 10 | Ni 1.000000 11 | d 1.000000 12 | illum 2 13 | 14 | newmtl Shell001_001_001_003 15 | Ns -1.960784 16 | Ka 1.000000 1.000000 1.000000 17 | Kd 0.250000 0.250000 0.250000 18 | Ks 0.015625 0.015625 0.015625 19 | Ke 0.000000 0.000000 0.000000 20 | Ni 1.000000 21 | d 1.000000 22 | illum 2 23 | 24 | newmtl Shell_001_001_003 25 | Ns -1.960784 26 | Ka 1.000000 1.000000 1.000000 27 | Kd 1.000000 1.000000 1.000000 28 | Ks 0.015625 0.015625 0.015625 29 | Ke 0.000000 0.000000 0.000000 30 | Ni 1.000000 31 | d 1.000000 32 | illum 2 33 | -------------------------------------------------------------------------------- /assets/franka_description/meshes/visual/link6.mtl: -------------------------------------------------------------------------------- 1 | # Blender MTL File: 'None' 2 | # Material Count: 17 3 | 4 | newmtl Face064_002_001_002_001 5 | Ns -1.960784 6 | Ka 1.000000 1.000000 1.000000 7 | Kd 1.000000 0.000000 0.000000 8 | Ks 0.003906 0.003906 0.003906 9 | Ke 0.000000 0.000000 0.000000 10 | Ni 1.000000 11 | d 1.000000 12 | illum 2 13 | 14 | newmtl Face065_002_001_002_001 15 | Ns -1.960784 16 | Ka 1.000000 1.000000 1.000000 17 | Kd 0.000000 1.000000 0.000000 18 | Ks 0.003906 0.003906 0.003906 19 | Ke 0.000000 0.000000 0.000000 20 | Ni 1.000000 21 | d 1.000000 22 | illum 2 23 | 24 | newmtl Face374_002_001_002_001 25 | Ns -1.960784 26 | Ka 1.000000 1.000000 1.000000 27 | Kd 1.000000 1.000000 1.000000 28 | Ks 0.003906 0.003906 0.003906 29 | Ke 0.000000 0.000000 0.000000 30 | Ni 1.000000 31 | d 1.000000 32 | illum 2 33 | 34 | newmtl Face539_002_001_002_001 35 | Ns -1.960784 36 | Ka 1.000000 1.000000 
1.000000 37 | Kd 0.250980 0.250980 0.250980 38 | Ks 0.003906 0.003906 0.003906 39 | Ke 0.000000 0.000000 0.000000 40 | Ni 1.000000 41 | d 1.000000 42 | illum 2 43 | 44 | newmtl Part__Feature001_009_001_002_001 45 | Ns -1.960784 46 | Ka 1.000000 1.000000 1.000000 47 | Kd 0.250980 0.250980 0.250980 48 | Ks 0.003906 0.003906 0.003906 49 | Ke 0.000000 0.000000 0.000000 50 | Ni 1.000000 51 | d 1.000000 52 | illum 2 53 | 54 | newmtl Part__Feature002_006_001_002_001 55 | Ns -1.960784 56 | Ka 1.000000 1.000000 1.000000 57 | Kd 0.250980 0.250980 0.250980 58 | Ks 0.003906 0.003906 0.003906 59 | Ke 0.000000 0.000000 0.000000 60 | Ni 1.000000 61 | d 1.000000 62 | illum 2 63 | 64 | newmtl Shell002_002_001_002_001 65 | Ns -1.960784 66 | Ka 1.000000 1.000000 1.000000 67 | Kd 1.000000 1.000000 1.000000 68 | Ks 0.003906 0.003906 0.003906 69 | Ke 0.000000 0.000000 0.000000 70 | Ni 1.000000 71 | d 1.000000 72 | illum 2 73 | 74 | newmtl Shell003_002_001_002_001 75 | Ns -1.960784 76 | Ka 1.000000 1.000000 1.000000 77 | Kd 1.000000 1.000000 1.000000 78 | Ks 0.003906 0.003906 0.003906 79 | Ke 0.000000 0.000000 0.000000 80 | Ni 1.000000 81 | d 1.000000 82 | illum 2 83 | 84 | newmtl Shell004_001_001_002_001 85 | Ns -1.960784 86 | Ka 1.000000 1.000000 1.000000 87 | Kd 1.000000 1.000000 1.000000 88 | Ks 0.003906 0.003906 0.003906 89 | Ke 0.000000 0.000000 0.000000 90 | Ni 1.000000 91 | d 1.000000 92 | illum 2 93 | 94 | newmtl Shell005_001_001_002_001 95 | Ns -1.960784 96 | Ka 1.000000 1.000000 1.000000 97 | Kd 1.000000 1.000000 1.000000 98 | Ks 0.003906 0.003906 0.003906 99 | Ke 0.000000 0.000000 0.000000 100 | Ni 1.000000 101 | d 1.000000 102 | illum 2 103 | 104 | newmtl Shell006_003_002_001 105 | Ns -1.960784 106 | Ka 1.000000 1.000000 1.000000 107 | Kd 0.901961 0.921569 0.929412 108 | Ks 0.015625 0.015625 0.015625 109 | Ke 0.000000 0.000000 0.000000 110 | Ni 1.000000 111 | d 1.000000 112 | illum 2 113 | 114 | newmtl Shell007_002_002_001 115 | Ns -1.960784 116 | Ka 1.000000 1.000000 1.000000 117 | Kd 0.250000 0.250000 0.250000 118 | Ks 0.003906 0.003906 0.003906 119 | Ke 0.000000 0.000000 0.000000 120 | Ni 1.000000 121 | d 1.000000 122 | illum 2 123 | 124 | newmtl Shell011_002_002_001 125 | Ns -1.960784 126 | Ka 1.000000 1.000000 1.000000 127 | Kd 1.000000 1.000000 1.000000 128 | Ks 0.003906 0.003906 0.003906 129 | Ke 0.000000 0.000000 0.000000 130 | Ni 1.000000 131 | d 1.000000 132 | illum 2 133 | 134 | newmtl Shell012_002_002_001 135 | Ns -1.960784 136 | Ka 1.000000 1.000000 1.000000 137 | Kd 1.000000 1.000000 1.000000 138 | Ks 0.003906 0.003906 0.003906 139 | Ke 0.000000 0.000000 0.000000 140 | Ni 1.000000 141 | d 1.000000 142 | illum 2 143 | 144 | newmtl Shell_003_001_002_001 145 | Ns -1.960784 146 | Ka 1.000000 1.000000 1.000000 147 | Kd 0.250980 0.250980 0.250980 148 | Ks 0.003906 0.003906 0.003906 149 | Ke 0.000000 0.000000 0.000000 150 | Ni 1.000000 151 | d 1.000000 152 | illum 2 153 | 154 | newmtl Union001_001_001_002_001 155 | Ns -1.960784 156 | Ka 1.000000 1.000000 1.000000 157 | Kd 0.039216 0.541176 0.780392 158 | Ks 0.003906 0.003906 0.003906 159 | Ke 0.000000 0.000000 0.000000 160 | Ni 1.000000 161 | d 1.000000 162 | illum 2 163 | 164 | newmtl Union_001_001_002_001 165 | Ns -1.960784 166 | Ka 1.000000 1.000000 1.000000 167 | Kd 0.039216 0.541176 0.780392 168 | Ks 0.003906 0.003906 0.003906 169 | Ke 0.000000 0.000000 0.000000 170 | Ni 1.000000 171 | d 1.000000 172 | illum 2 173 | -------------------------------------------------------------------------------- 
/assets/franka_description/meshes/visual/link7.mtl: -------------------------------------------------------------------------------- 1 | # Blender MTL File: 'None' 2 | # Material Count: 8 3 | 4 | newmtl Part__Mirroring001_004_002 5 | Ns -1.960784 6 | Ka 1.000000 1.000000 1.000000 7 | Kd 0.250980 0.250980 0.250980 8 | Ks 0.015625 0.015625 0.015625 9 | Ke 0.000000 0.000000 0.000000 10 | Ni 1.000000 11 | d 1.000000 12 | illum 2 13 | 14 | newmtl Part__Mirroring002_004_001 15 | Ns -1.960784 16 | Ka 1.000000 1.000000 1.000000 17 | Kd 0.250980 0.250980 0.250980 18 | Ks 0.031250 0.031250 0.031250 19 | Ke 0.000000 0.000000 0.000000 20 | Ni 1.000000 21 | d 1.000000 22 | illum 2 23 | 24 | newmtl Part__Mirroring003_004_001 25 | Ns -1.960784 26 | Ka 1.000000 1.000000 1.000000 27 | Kd 0.250980 0.250980 0.250980 28 | Ks 0.031250 0.031250 0.031250 29 | Ke 0.000000 0.000000 0.000000 30 | Ni 1.000000 31 | d 1.000000 32 | illum 2 33 | 34 | newmtl Part__Mirroring004_004_002 35 | Ns -1.960784 36 | Ka 1.000000 1.000000 1.000000 37 | Kd 1.000000 1.000000 1.000000 38 | Ks 0.031250 0.031250 0.031250 39 | Ke 0.000000 0.000000 0.000000 40 | Ni 1.000000 41 | d 1.000000 42 | illum 2 43 | 44 | newmtl Part__Mirroring005_004_001 45 | Ns -1.960784 46 | Ka 1.000000 1.000000 1.000000 47 | Kd 0.250980 0.250980 0.250980 48 | Ks 0.031250 0.031250 0.031250 49 | Ke 0.000000 0.000000 0.000000 50 | Ni 1.000000 51 | d 1.000000 52 | illum 2 53 | 54 | newmtl Part__Mirroring006_004_001 55 | Ns -1.960784 56 | Ka 1.000000 1.000000 1.000000 57 | Kd 0.250980 0.250980 0.250980 58 | Ks 0.031250 0.031250 0.031250 59 | Ke 0.000000 0.000000 0.000000 60 | Ni 1.000000 61 | d 1.000000 62 | illum 2 63 | 64 | newmtl Part__Mirroring007_004_001 65 | Ns -1.960784 66 | Ka 1.000000 1.000000 1.000000 67 | Kd 0.250980 0.250980 0.250980 68 | Ks 0.031250 0.031250 0.031250 69 | Ke 0.000000 0.000000 0.000000 70 | Ni 1.000000 71 | d 1.000000 72 | illum 2 73 | 74 | newmtl Part__Mirroring_004_001 75 | Ns -1.960784 76 | Ka 1.000000 1.000000 1.000000 77 | Kd 0.898039 0.917647 0.929412 78 | Ks 0.031250 0.031250 0.031250 79 | Ke 0.000000 0.000000 0.000000 80 | Ni 1.000000 81 | d 1.000000 82 | illum 2 83 | -------------------------------------------------------------------------------- /assets/panda/franka_description/meshes/collision/finger.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/panda/franka_description/meshes/collision/finger.stl -------------------------------------------------------------------------------- /assets/panda/franka_description/meshes/collision/hand.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/panda/franka_description/meshes/collision/hand.stl -------------------------------------------------------------------------------- /assets/panda/franka_description/meshes/collision/link0.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/panda/franka_description/meshes/collision/link0.stl -------------------------------------------------------------------------------- /assets/panda/franka_description/meshes/collision/link1.stl: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/panda/franka_description/meshes/collision/link1.stl -------------------------------------------------------------------------------- /assets/panda/franka_description/meshes/collision/link2.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/panda/franka_description/meshes/collision/link2.stl -------------------------------------------------------------------------------- /assets/panda/franka_description/meshes/collision/link3.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/panda/franka_description/meshes/collision/link3.stl -------------------------------------------------------------------------------- /assets/panda/franka_description/meshes/collision/link4.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/panda/franka_description/meshes/collision/link4.stl -------------------------------------------------------------------------------- /assets/panda/franka_description/meshes/collision/link5.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/panda/franka_description/meshes/collision/link5.stl -------------------------------------------------------------------------------- /assets/panda/franka_description/meshes/collision/link6.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/panda/franka_description/meshes/collision/link6.stl -------------------------------------------------------------------------------- /assets/panda/franka_description/meshes/collision/link7.stl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/assets/panda/franka_description/meshes/collision/link7.stl -------------------------------------------------------------------------------- /assets/panda/panda.srdf: -------------------------------------------------------------------------------- [78 lines of SRDF XML; the markup was not preserved in this text export] -------------------------------------------------------------------------------- /cfg/config.yaml: -------------------------------------------------------------------------------- 1 | defaults: 2 | - dataset: partnet_mobility 3 | - task: open_cabinet 4 | - pose_estimator: ground_truth 5 | - manipulation: open_cabinet 6 | - controller: heuristic_pose 7 | - train: default 8 | - _self_ 9 | 10 | exp_name: test 11 | headless: False 12 | viewerless: False # no cameras activated
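The `defaults` list in `cfg/config.yaml` above is a Hydra-style composition: each entry pulls one YAML file from the matching `cfg/` subdirectory, and a whole group can be swapped at run time (note that the `dataset: partnet_mobility` and `train: default` entries do not correspond to files under `cfg/`, so runs presumably override them). Below is a minimal sketch, not repository code, of composing this tree with Hydra's compose API; it assumes `hydra-core` is installed and the working directory is the repository root.

```python
# Sketch only: inspect the merged configuration outside of train.py.
from hydra import compose, initialize

with initialize(version_base=None, config_path="cfg"):
    cfg = compose(
        config_name="config",
        overrides=[
            "dataset=drawer_train",  # the default group name has no file under cfg/
            "task=open_drawer",
            "controller=rl",
            "train=controller",      # likewise overridden
        ],
    )
    print(cfg.task.name, cfg.controller.name, cfg.exp_name)
```

The same overrides can be passed on the command line, e.g. `python train.py task=open_drawer controller=rl`, if `train.py` uses Hydra's `@hydra.main` entry point.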
-------------------------------------------------------------------------------- /cfg/controller/baseline.yaml: -------------------------------------------------------------------------------- 1 | name: baseline -------------------------------------------------------------------------------- /cfg/controller/collect_baselines.yaml: -------------------------------------------------------------------------------- 1 | # used to collect data 2 | name: collection 3 | 4 | target: "baselines" 5 | 6 | pose_estimator: 7 | pose_min: [-0.3, -0.3, 0.4] 8 | pose_max: [0.3, 0.3, 1.0] 9 | 10 | learn: 11 | log_dir: ??? 12 | save_dir: ??? -------------------------------------------------------------------------------- /cfg/controller/collect_pose.yaml: -------------------------------------------------------------------------------- 1 | # used to collect data 2 | name: collection 3 | 4 | target: "pose_estimator" 5 | 6 | pose_estimator: 7 | pose_min: [-0.3, -0.3, 0.4] 8 | pose_max: [0.3, 0.3, 1.0] 9 | 10 | learn: 11 | log_dir: ??? 12 | save_dir: ??? -------------------------------------------------------------------------------- /cfg/controller/gt_pose.yaml: -------------------------------------------------------------------------------- 1 | name: "gt_pose" -------------------------------------------------------------------------------- /cfg/controller/heuristic_pose.yaml: -------------------------------------------------------------------------------- 1 | name: "heuristic_pose" -------------------------------------------------------------------------------- /cfg/controller/homing.yaml: -------------------------------------------------------------------------------- 1 | name: "homing" -------------------------------------------------------------------------------- /cfg/controller/open_cabinet.yaml: -------------------------------------------------------------------------------- 1 | name: "open_cabinet" -------------------------------------------------------------------------------- /cfg/controller/rl.yaml: -------------------------------------------------------------------------------- 1 | name: "rl" 2 | 3 | controller: 4 | max_steps: 4 5 | action_type: pose # joint or pose 6 | pose_min: [-0.3, -0.3, 0.4] 7 | pose_max: [0.3, 0.3, 1.0] 8 | early_stop: 4 9 | # xyz = np.clip(xyz + np.asarray([0, 0, 0.5]), [-0.4, -0.4, 0.2], [0.4, 0.4, 1.2]) 10 | 11 | reward: 12 | # reward coefficients 13 | diff_coef: -0.5 # measures the difference between the current and the goal state 14 | move_success_coef: 8.0 # measures the success of moving the camera 15 | move_period_coef: -0.0 # measures the period of moving the camera 16 | far_coef: -2.5 # measures the distance between the current camera and the comfort zone 17 | ori_coef: 0.25 # measures the difference between the current and comfort orientation 18 | xyz_lookat_coef: -0.05 # reward to limit the length of the lookat vector to 1 19 | bbox_coef: -1.0 # encourage the bbox to stay in the center 20 | bbox_boundary_coef: -1.0 # encourage the bbox to stay in the center 21 | have_bbox_coef: 2.0 # make sure the bbox is inside the view 22 | center_coef: 12.0 # encourage the pos prediction to be accurate 23 | open_coef: 8.0 # encourage the ori prediction to be accurate 24 | view_coef: 0.5 # encourage adjacent orientations to be different 25 | view_norm_coef: -0.3 # encourage the norm of the view weight to be 1 26 | success_coef: 0.0 # reward for success, KEEP IT set to 0 27 | 28 | policy: 29 | actor_critic_class: ActorCritic 30 | pi_hid_sizes: [96, 96, 32] 31 | vf_hid_sizes: [96, 96, 32] 32 | activation:
elu 33 | 34 | learn: 35 | exp_name: PPO 36 | reset: True 37 | num_transitions_per_env: 16 38 | num_transitions_eval: 512 # How many transitions to take in an eval round 39 | num_learning_epochs: 8 40 | num_mini_batches: 4 41 | clip_range: 0.2 42 | gamma: 0.98 43 | lam: 0.98 44 | init_noise_std: 0.6 45 | value_loss_coef: 1.0 46 | entropy_coef: 0.0 47 | learning_rate: 0.00001 48 | max_grad_norm: 1.0 49 | use_clipped_value_loss: True 50 | schedule: adaptive 51 | desired_kl: 0.016 52 | max_lr: 0.005 53 | min_lr: 0.0002 54 | device: cuda 55 | sampler: sequential 56 | log_dir: logs/ppo_controller 57 | save_dir: saves/ppo_controller 58 | testing: False 59 | eval_interval: 64 60 | eval_round: 16 61 | eval: False 62 | print_log: True 63 | contrastive: False 64 | 65 | contrastive_m: 0.99 66 | asymmetric: False 67 | 68 | load: "" -------------------------------------------------------------------------------- /cfg/dataset/cabinet_test.yaml: -------------------------------------------------------------------------------- 1 | name: "partnet_mobility" 2 | dataset_root: downloads 3 | objects: 4 | 46456_link_0: 5 | boundingBox: dataset/one_door_cabinet/46456_link_0/bounding_box.json 6 | door: dataset/one_door_cabinet/46456_link_0/door.yaml 7 | handle: dataset/one_door_cabinet/46456_link_0/handle.yaml 8 | name: 46456_link_0 9 | path: dataset/one_door_cabinet/46456_link_0/mobility.urdf 10 | pointCloud: dataset/one_door_cabinet/46456_link_0/point_sample/ply-10000.ply 11 | 46456_link_1: 12 | boundingBox: dataset/one_door_cabinet/46456_link_1/bounding_box.json 13 | door: dataset/one_door_cabinet/46456_link_1/door.yaml 14 | handle: dataset/one_door_cabinet/46456_link_1/handle.yaml 15 | name: 46456_link_1 16 | path: dataset/one_door_cabinet/46456_link_1/mobility.urdf 17 | pointCloud: dataset/one_door_cabinet/46456_link_1/point_sample/ply-10000.ply 18 | 46839_link_0: 19 | boundingBox: dataset/one_door_cabinet/46839_link_0/bounding_box.json 20 | door: dataset/one_door_cabinet/46839_link_0/door.yaml 21 | handle: dataset/one_door_cabinet/46839_link_0/handle.yaml 22 | name: 46839_link_0 23 | path: dataset/one_door_cabinet/46839_link_0/mobility.urdf 24 | pointCloud: dataset/one_door_cabinet/46839_link_0/point_sample/ply-10000.ply 25 | 46839_link_1: 26 | boundingBox: dataset/one_door_cabinet/46839_link_1/bounding_box.json 27 | door: dataset/one_door_cabinet/46839_link_1/door.yaml 28 | handle: dataset/one_door_cabinet/46839_link_1/handle.yaml 29 | name: 46839_link_1 30 | path: dataset/one_door_cabinet/46839_link_1/mobility.urdf 31 | pointCloud: dataset/one_door_cabinet/46839_link_1/point_sample/ply-10000.ply 32 | 46859_link_0: 33 | boundingBox: dataset/one_door_cabinet/46859_link_0/bounding_box.json 34 | door: dataset/one_door_cabinet/46859_link_0/door.yaml 35 | handle: dataset/one_door_cabinet/46859_link_0/handle.yaml 36 | name: 46859_link_0 37 | path: dataset/one_door_cabinet/46859_link_0/mobility.urdf 38 | pointCloud: dataset/one_door_cabinet/46859_link_0/point_sample/ply-10000.ply 39 | 46889_link_0: 40 | boundingBox: dataset/one_door_cabinet/46889_link_0/bounding_box.json 41 | door: dataset/one_door_cabinet/46889_link_0/door.yaml 42 | handle: dataset/one_door_cabinet/46889_link_0/handle.yaml 43 | name: 46889_link_0 44 | path: dataset/one_door_cabinet/46889_link_0/mobility.urdf 45 | pointCloud: dataset/one_door_cabinet/46889_link_0/point_sample/ply-10000.ply 46 | 47315_link_0: 47 | boundingBox: dataset/one_door_cabinet/47315_link_0/bounding_box.json 48 | door: dataset/one_door_cabinet/47315_link_0/door.yaml 49 |
handle: dataset/one_door_cabinet/47315_link_0/handle.yaml 50 | name: 47315_link_0 51 | path: dataset/one_door_cabinet/47315_link_0/mobility.urdf 52 | pointCloud: dataset/one_door_cabinet/47315_link_0/point_sample/ply-10000.ply 53 | 47529_link_0: 54 | boundingBox: dataset/one_door_cabinet/47529_link_0/bounding_box.json 55 | door: dataset/one_door_cabinet/47529_link_0/door.yaml 56 | handle: dataset/one_door_cabinet/47529_link_0/handle.yaml 57 | name: 47529_link_0 58 | path: dataset/one_door_cabinet/47529_link_0/mobility.urdf 59 | pointCloud: dataset/one_door_cabinet/47529_link_0/point_sample/ply-10000.ply 60 | 47570_link_0: 61 | boundingBox: dataset/one_door_cabinet/47570_link_0/bounding_box.json 62 | door: dataset/one_door_cabinet/47570_link_0/door.yaml 63 | handle: dataset/one_door_cabinet/47570_link_0/handle.yaml 64 | name: 47570_link_0 65 | path: dataset/one_door_cabinet/47570_link_0/mobility.urdf 66 | pointCloud: dataset/one_door_cabinet/47570_link_0/point_sample/ply-10000.ply 67 | 48271_link_0: 68 | boundingBox: dataset/one_door_cabinet/48271_link_0/bounding_box.json 69 | door: dataset/one_door_cabinet/48271_link_0/door.yaml 70 | handle: dataset/one_door_cabinet/48271_link_0/handle.yaml 71 | name: 48271_link_0 72 | path: dataset/one_door_cabinet/48271_link_0/mobility.urdf 73 | pointCloud: dataset/one_door_cabinet/48271_link_0/point_sample/ply-10000.ply 74 | 48379_link_0: 75 | boundingBox: dataset/one_door_cabinet/48379_link_0/bounding_box.json 76 | door: dataset/one_door_cabinet/48379_link_0/door.yaml 77 | handle: dataset/one_door_cabinet/48379_link_0/handle.yaml 78 | name: 48379_link_0 79 | path: dataset/one_door_cabinet/48379_link_0/mobility.urdf 80 | pointCloud: dataset/one_door_cabinet/48379_link_0/point_sample/ply-10000.ply 81 | 48379_link_1: 82 | boundingBox: dataset/one_door_cabinet/48379_link_1/bounding_box.json 83 | door: dataset/one_door_cabinet/48379_link_1/door.yaml 84 | handle: dataset/one_door_cabinet/48379_link_1/handle.yaml 85 | name: 48379_link_1 86 | path: dataset/one_door_cabinet/48379_link_1/mobility.urdf 87 | pointCloud: dataset/one_door_cabinet/48379_link_1/point_sample/ply-10000.ply 88 | 48452_link_0: 89 | boundingBox: dataset/one_door_cabinet/48452_link_0/bounding_box.json 90 | door: dataset/one_door_cabinet/48452_link_0/door.yaml 91 | handle: dataset/one_door_cabinet/48452_link_0/handle.yaml 92 | name: 48452_link_0 93 | path: dataset/one_door_cabinet/48452_link_0/mobility.urdf 94 | pointCloud: dataset/one_door_cabinet/48452_link_0/point_sample/ply-10000.ply 95 | 49042_link_0: 96 | boundingBox: dataset/one_door_cabinet/49042_link_0/bounding_box.json 97 | door: dataset/one_door_cabinet/49042_link_0/door.yaml 98 | handle: dataset/one_door_cabinet/49042_link_0/handle.yaml 99 | name: 49042_link_0 100 | path: dataset/one_door_cabinet/49042_link_0/mobility.urdf 101 | pointCloud: dataset/one_door_cabinet/49042_link_0/point_sample/ply-10000.ply 102 | 49042_link_1: 103 | boundingBox: dataset/one_door_cabinet/49042_link_1/bounding_box.json 104 | door: dataset/one_door_cabinet/49042_link_1/door.yaml 105 | handle: dataset/one_door_cabinet/49042_link_1/handle.yaml 106 | name: 49042_link_1 107 | path: dataset/one_door_cabinet/49042_link_1/mobility.urdf 108 | pointCloud: dataset/one_door_cabinet/49042_link_1/point_sample/ply-10000.ply 109 | 49133_link_0: 110 | boundingBox: dataset/one_door_cabinet/49133_link_0/bounding_box.json 111 | door: dataset/one_door_cabinet/49133_link_0/door.yaml 112 | handle: dataset/one_door_cabinet/49133_link_0/handle.yaml 113 | name: 49133_link_0 
114 | path: dataset/one_door_cabinet/49133_link_0/mobility.urdf 115 | pointCloud: dataset/one_door_cabinet/49133_link_0/point_sample/ply-10000.ply 116 | 49133_link_1: 117 | boundingBox: dataset/one_door_cabinet/49133_link_1/bounding_box.json 118 | door: dataset/one_door_cabinet/49133_link_1/door.yaml 119 | handle: dataset/one_door_cabinet/49133_link_1/handle.yaml 120 | name: 49133_link_1 121 | path: dataset/one_door_cabinet/49133_link_1/mobility.urdf 122 | pointCloud: dataset/one_door_cabinet/49133_link_1/point_sample/ply-10000.ply 123 | -------------------------------------------------------------------------------- /cfg/dataset/drawer_test.yaml: -------------------------------------------------------------------------------- 1 | name: "partnet_mobility" 2 | dataset_root: downloads 3 | objects: 4 | 46084_link_2: 5 | boundingBox: dataset/one_drawer_cabinet/46084_link_2/bounding_box.json 6 | door: dataset/one_drawer_cabinet/46084_link_2/door.yaml 7 | handle: dataset/one_drawer_cabinet/46084_link_2/handle.yaml 8 | name: 46084_link_2 9 | path: dataset/one_drawer_cabinet/46084_link_2/mobility.urdf 10 | pointCloud: dataset/one_drawer_cabinet/46084_link_2/point_sample/ply-10000.ply 11 | 46084_link_3: 12 | boundingBox: dataset/one_drawer_cabinet/46084_link_3/bounding_box.json 13 | door: dataset/one_drawer_cabinet/46084_link_3/door.yaml 14 | handle: dataset/one_drawer_cabinet/46084_link_3/handle.yaml 15 | name: 46084_link_3 16 | path: dataset/one_drawer_cabinet/46084_link_3/mobility.urdf 17 | pointCloud: dataset/one_drawer_cabinet/46084_link_3/point_sample/ply-10000.ply 18 | 46839_link_2: 19 | boundingBox: dataset/one_drawer_cabinet/46839_link_2/bounding_box.json 20 | door: dataset/one_drawer_cabinet/46839_link_2/door.yaml 21 | handle: dataset/one_drawer_cabinet/46839_link_2/handle.yaml 22 | name: 46839_link_2 23 | path: dataset/one_drawer_cabinet/46839_link_2/mobility.urdf 24 | pointCloud: dataset/one_drawer_cabinet/46839_link_2/point_sample/ply-10000.ply 25 | 46839_link_3: 26 | boundingBox: dataset/one_drawer_cabinet/46839_link_3/bounding_box.json 27 | door: dataset/one_drawer_cabinet/46839_link_3/door.yaml 28 | handle: dataset/one_drawer_cabinet/46839_link_3/handle.yaml 29 | name: 46839_link_3 30 | path: dataset/one_drawer_cabinet/46839_link_3/mobility.urdf 31 | pointCloud: dataset/one_drawer_cabinet/46839_link_3/point_sample/ply-10000.ply 32 | 46856_link_2: 33 | boundingBox: dataset/one_drawer_cabinet/46856_link_2/bounding_box.json 34 | door: dataset/one_drawer_cabinet/46856_link_2/door.yaml 35 | handle: dataset/one_drawer_cabinet/46856_link_2/handle.yaml 36 | name: 46856_link_2 37 | path: dataset/one_drawer_cabinet/46856_link_2/mobility.urdf 38 | pointCloud: dataset/one_drawer_cabinet/46856_link_2/point_sample/ply-10000.ply 39 | 46856_link_3: 40 | boundingBox: dataset/one_drawer_cabinet/46856_link_3/bounding_box.json 41 | door: dataset/one_drawer_cabinet/46856_link_3/door.yaml 42 | handle: dataset/one_drawer_cabinet/46856_link_3/handle.yaml 43 | name: 46856_link_3 44 | path: dataset/one_drawer_cabinet/46856_link_3/mobility.urdf 45 | pointCloud: dataset/one_drawer_cabinet/46856_link_3/point_sample/ply-10000.ply 46 | 47944_link_2: 47 | boundingBox: dataset/one_drawer_cabinet/47944_link_2/bounding_box.json 48 | door: dataset/one_drawer_cabinet/47944_link_2/door.yaml 49 | handle: dataset/one_drawer_cabinet/47944_link_2/handle.yaml 50 | name: 47944_link_2 51 | path: dataset/one_drawer_cabinet/47944_link_2/mobility.urdf 52 | pointCloud: 
dataset/one_drawer_cabinet/47944_link_2/point_sample/ply-10000.ply 53 | 48063_link_2: 54 | boundingBox: dataset/one_drawer_cabinet/48063_link_2/bounding_box.json 55 | door: dataset/one_drawer_cabinet/48063_link_2/door.yaml 56 | handle: dataset/one_drawer_cabinet/48063_link_2/handle.yaml 57 | name: 48063_link_2 58 | path: dataset/one_drawer_cabinet/48063_link_2/mobility.urdf 59 | pointCloud: dataset/one_drawer_cabinet/48063_link_2/point_sample/ply-10000.ply 60 | 48878_link_0: 61 | boundingBox: dataset/one_drawer_cabinet/48878_link_0/bounding_box.json 62 | door: dataset/one_drawer_cabinet/48878_link_0/door.yaml 63 | handle: dataset/one_drawer_cabinet/48878_link_0/handle.yaml 64 | name: 48878_link_0 65 | path: dataset/one_drawer_cabinet/48878_link_0/mobility.urdf 66 | pointCloud: dataset/one_drawer_cabinet/48878_link_0/point_sample/ply-10000.ply 67 | 48258_link_0: 68 | boundingBox: dataset/one_drawer_cabinet/48258_link_0/bounding_box.json 69 | door: dataset/one_drawer_cabinet/48258_link_0/door.yaml 70 | handle: dataset/one_drawer_cabinet/48258_link_0/handle.yaml 71 | name: 48258_link_0 72 | path: dataset/one_drawer_cabinet/48258_link_0/mobility.urdf 73 | pointCloud: dataset/one_drawer_cabinet/48258_link_0/point_sample/ply-10000.ply 74 | 48258_link_1: 75 | boundingBox: dataset/one_drawer_cabinet/48258_link_1/bounding_box.json 76 | door: dataset/one_drawer_cabinet/48258_link_1/door.yaml 77 | handle: dataset/one_drawer_cabinet/48258_link_1/handle.yaml 78 | name: 48258_link_1 79 | path: dataset/one_drawer_cabinet/48258_link_1/mobility.urdf 80 | pointCloud: dataset/one_drawer_cabinet/48258_link_1/point_sample/ply-10000.ply 81 | 48258_link_2: 82 | boundingBox: dataset/one_drawer_cabinet/48258_link_2/bounding_box.json 83 | door: dataset/one_drawer_cabinet/48258_link_2/door.yaml 84 | handle: dataset/one_drawer_cabinet/48258_link_2/handle.yaml 85 | name: 48258_link_2 86 | path: dataset/one_drawer_cabinet/48258_link_2/mobility.urdf 87 | pointCloud: dataset/one_drawer_cabinet/48258_link_2/point_sample/ply-10000.ply 88 | 48258_link_3: 89 | boundingBox: dataset/one_drawer_cabinet/48258_link_3/bounding_box.json 90 | door: dataset/one_drawer_cabinet/48258_link_3/door.yaml 91 | handle: dataset/one_drawer_cabinet/48258_link_3/handle.yaml 92 | name: 48258_link_3 93 | path: dataset/one_drawer_cabinet/48258_link_3/mobility.urdf 94 | pointCloud: dataset/one_drawer_cabinet/48258_link_3/point_sample/ply-10000.ply 95 | 47578_link_0: 96 | boundingBox: dataset/one_drawer_cabinet/47578_link_0/bounding_box.json 97 | door: dataset/one_drawer_cabinet/47578_link_0/door.yaml 98 | handle: dataset/one_drawer_cabinet/47578_link_0/handle.yaml 99 | name: 47578_link_0 100 | path: dataset/one_drawer_cabinet/47578_link_0/mobility.urdf 101 | pointCloud: dataset/one_drawer_cabinet/47578_link_0/point_sample/ply-10000.ply 102 | 47578_link_1: 103 | boundingBox: dataset/one_drawer_cabinet/47578_link_1/bounding_box.json 104 | door: dataset/one_drawer_cabinet/47578_link_1/door.yaml 105 | handle: dataset/one_drawer_cabinet/47578_link_1/handle.yaml 106 | name: 47578_link_1 107 | path: dataset/one_drawer_cabinet/47578_link_1/mobility.urdf 108 | pointCloud: dataset/one_drawer_cabinet/47578_link_1/point_sample/ply-10000.ply 109 | 47578_link_2: 110 | boundingBox: dataset/one_drawer_cabinet/47578_link_2/bounding_box.json 111 | door: dataset/one_drawer_cabinet/47578_link_2/door.yaml 112 | handle: dataset/one_drawer_cabinet/47578_link_2/handle.yaml 113 | name: 47578_link_2 114 | path: dataset/one_drawer_cabinet/47578_link_2/mobility.urdf 115 | 
pointCloud: dataset/one_drawer_cabinet/47578_link_2/point_sample/ply-10000.ply 116 | 47578_link_3: 117 | boundingBox: dataset/one_drawer_cabinet/47578_link_3/bounding_box.json 118 | door: dataset/one_drawer_cabinet/47578_link_3/door.yaml 119 | handle: dataset/one_drawer_cabinet/47578_link_3/handle.yaml 120 | name: 47578_link_3 121 | path: dataset/one_drawer_cabinet/47578_link_3/mobility.urdf 122 | pointCloud: dataset/one_drawer_cabinet/47578_link_3/point_sample/ply-10000.ply 123 | -------------------------------------------------------------------------------- /cfg/dataset/mug_test.yaml: -------------------------------------------------------------------------------- 1 | name: "partnet_mobility" 2 | type: "mug" 3 | dataset_root: downloads 4 | objects: 5 | mug_6faf1f04bde838e477f883dde7397db2: 6 | path: dataset/mugs/6faf1f04bde838e477f883dde7397db2_link_0/mobility.urdf 7 | mug_73b8b6456221f4ea20d3c05c08e26f: 8 | path: dataset/mugs/73b8b6456221f4ea20d3c05c08e26f_link_0/mobility.urdf 9 | mug_79e673336e836d1333becb3a9550cbb1: 10 | path: dataset/mugs/79e673336e836d1333becb3a9550cbb1_link_0/mobility.urdf 11 | mug_7a8ea24474846c5c2f23d8349a133d2b: 12 | path: dataset/mugs/7a8ea24474846c5c2f23d8349a133d2b_link_0/mobility.urdf 13 | mug_7d282cc3cedd40c8b5c4f4801d3aada: 14 | path: dataset/mugs/7d282cc3cedd40c8b5c4f4801d3aada_link_0/mobility.urdf 15 | mug_7d6baadd51d0703455da767dfc5b748e: 16 | path: dataset/mugs/7d6baadd51d0703455da767dfc5b748e_link_0/mobility.urdf 17 | mug_83b41d719ea5af3f4dcd1df0d0a62a93: 18 | path: dataset/mugs/83b41d719ea5af3f4dcd1df0d0a62a93_link_0/mobility.urdf 19 | mug_8aed972ea2b4a9019c3814eae0b8d399: 20 | path: dataset/mugs/8aed972ea2b4a9019c3814eae0b8d399_link_0/mobility.urdf 21 | mug_8b1dca1414ba88cb91986c63a4d7a99a: 22 | path: dataset/mugs/8b1dca1414ba88cb91986c63a4d7a99a_link_0/mobility.urdf 23 | mug_8b780e521c906eaf95a4f7ae0be930ac: 24 | path: dataset/mugs/8b780e521c906eaf95a4f7ae0be930ac_link_0/mobility.urdf 25 | mug_8f6c86feaa74698d5c91ee20ade72edc: 26 | path: dataset/mugs/8f6c86feaa74698d5c91ee20ade72edc_link_0/mobility.urdf 27 | mug_9af98540f45411467246665d3d3724c: 28 | path: dataset/mugs/9af98540f45411467246665d3d3724c_link_0/mobility.urdf 29 | mug_9c930a8a3411f069e7f67f334aa9295c: 30 | path: dataset/mugs/9c930a8a3411f069e7f67f334aa9295c_link_0/mobility.urdf 31 | mug_9d8c711750a73b06ad1d789f3b2120d0: 32 | path: dataset/mugs/9d8c711750a73b06ad1d789f3b2120d0_link_0/mobility.urdf 33 | mug_9fc96d41ec7a66a6a159545213d74ea: 34 | path: dataset/mugs/9fc96d41ec7a66a6a159545213d74ea_link_0/mobility.urdf 35 | -------------------------------------------------------------------------------- /cfg/dataset/mug_train.yaml: -------------------------------------------------------------------------------- 1 | name: "partnet_mobility" 2 | type: "mug" 3 | dataset_root: downloads 4 | objects: 5 | mug_10c2b3eac377b9084b3c42e318f3affc: 6 | path: dataset/mugs/10c2b3eac377b9084b3c42e318f3affc_link_0/mobility.urdf 7 | mug_10f6e09036350e92b3f21f1137c3c347: 8 | path: dataset/mugs/10f6e09036350e92b3f21f1137c3c347_link_0/mobility.urdf 9 | mug_15bd6225c209a8e3654b0ce7754570c8: 10 | path: dataset/mugs/15bd6225c209a8e3654b0ce7754570c8_link_0/mobility.urdf 11 | mug_1a1c0a8d4bad82169f0594e65f756cf5: 12 | path: dataset/mugs/1a1c0a8d4bad82169f0594e65f756cf5_link_0/mobility.urdf 13 | mug_1a97f3c83016abca21d0de04f408950f: 14 | path: dataset/mugs/1a97f3c83016abca21d0de04f408950f_link_0/mobility.urdf 15 | mug_1be6b2c84cdab826c043c2d07bb83fc8: 16 | path: 
dataset/mugs/1be6b2c84cdab826c043c2d07bb83fc8_link_0/mobility.urdf 17 | mug_1d18255a04d22794e521eeb8bb14c5b3: 18 | path: dataset/mugs/1d18255a04d22794e521eeb8bb14c5b3_link_0/mobility.urdf 19 | mug_1eaf8db2dd2b710c7d5b1b70ae595e60: 20 | path: dataset/mugs/1eaf8db2dd2b710c7d5b1b70ae595e60_link_0/mobility.urdf 21 | mug_1f035aa5fc6da0983ecac81e09b15ea9: 22 | path: dataset/mugs/1f035aa5fc6da0983ecac81e09b15ea9_link_0/mobility.urdf 23 | mug_24b17537bce40695b3207096ecd79542: 24 | path: dataset/mugs/24b17537bce40695b3207096ecd79542_link_0/mobility.urdf 25 | mug_28f1e7bc572a633cb9946438ed40eeb9: 26 | path: dataset/mugs/28f1e7bc572a633cb9946438ed40eeb9_link_0/mobility.urdf 27 | mug_34ae0b61b0d8aaf2d7b20fded0142d7a: 28 | path: dataset/mugs/34ae0b61b0d8aaf2d7b20fded0142d7a_link_0/mobility.urdf 29 | mug_35ce7ede92198be2b759f7fb0032e59: 30 | path: dataset/mugs/35ce7ede92198be2b759f7fb0032e59_link_0/mobility.urdf 31 | mug_37f56901a07da69dac6b8e58caf61f95: 32 | path: dataset/mugs/37f56901a07da69dac6b8e58caf61f95_link_0/mobility.urdf 33 | mug_3c0467f96e26b8c6a93445a1757adf6: 34 | path: dataset/mugs/3c0467f96e26b8c6a93445a1757adf6_link_0/mobility.urdf 35 | mug_3d1754b7cb46c0ce5c8081810641ef6: 36 | path: dataset/mugs/3d1754b7cb46c0ce5c8081810641ef6_link_0/mobility.urdf 37 | mug_3d3e993f7baa4d7ef1ff24a8b1564a36: 38 | path: dataset/mugs/3d3e993f7baa4d7ef1ff24a8b1564a36_link_0/mobility.urdf 39 | mug_43e1cabc5dd2fa91fffc97a61124b1a9: 40 | path: dataset/mugs/43e1cabc5dd2fa91fffc97a61124b1a9_link_0/mobility.urdf 41 | mug_43f94ba24d2f075c4d32a65fb7bf4ebc: 42 | path: dataset/mugs/43f94ba24d2f075c4d32a65fb7bf4ebc_link_0/mobility.urdf 43 | mug_44f9c4e1ea3532b8d7b20fded0142d7a: 44 | path: dataset/mugs/44f9c4e1ea3532b8d7b20fded0142d7a_link_0/mobility.urdf 45 | mug_46ed9dad0440c043d33646b0990bb4a: 46 | path: dataset/mugs/46ed9dad0440c043d33646b0990bb4a_link_0/mobility.urdf 47 | mug_48e260a614c0fd4434a8988fdcee4fde: 48 | path: dataset/mugs/48e260a614c0fd4434a8988fdcee4fde_link_0/mobility.urdf 49 | mug_4b7888feea81219ab5f4a9188bfa0ef6: 50 | path: dataset/mugs/4b7888feea81219ab5f4a9188bfa0ef6_link_0/mobility.urdf 51 | mug_4b8b10d03552e0891898dfa8eb8eefff: 52 | path: dataset/mugs/4b8b10d03552e0891898dfa8eb8eefff_link_0/mobility.urdf 53 | mug_54f2d6a0b431839c99785666a0fe0255: 54 | path: dataset/mugs/54f2d6a0b431839c99785666a0fe0255_link_0/mobility.urdf 55 | mug_57f73714cbc425e44ae022a8f6e258a7: 56 | path: dataset/mugs/57f73714cbc425e44ae022a8f6e258a7_link_0/mobility.urdf 57 | mug_5c48d471200d2bf16e8a121e6886e18d: 58 | path: dataset/mugs/5c48d471200d2bf16e8a121e6886e18d_link_0/mobility.urdf 59 | mug_5fe74baba21bba7ca4eec1b19b3a18f8: 60 | path: dataset/mugs/5fe74baba21bba7ca4eec1b19b3a18f8_link_0/mobility.urdf 61 | mug_61c10dccfa8e508e2d66cbf6a91063: 62 | path: dataset/mugs/61c10dccfa8e508e2d66cbf6a91063_link_0/mobility.urdf 63 | mug_64a9d9f6774973ebc598d38a6a69ad2: 64 | path: dataset/mugs/64a9d9f6774973ebc598d38a6a69ad2_link_0/mobility.urdf 65 | mug_67b9abb424cf22a22d7082a28b056a5: 66 | path: dataset/mugs/67b9abb424cf22a22d7082a28b056a5_link_0/mobility.urdf 67 | mug_6c379385bf0a23ffdec712af445786fe: 68 | path: dataset/mugs/6c379385bf0a23ffdec712af445786fe_link_0/mobility.urdf 69 | mug_6c5ec193434326fd6fa82390eb12174f: 70 | path: dataset/mugs/6c5ec193434326fd6fa82390eb12174f_link_0/mobility.urdf 71 | mug_6dd59cc1130a426571215a0b56898e5e: 72 | path: dataset/mugs/6dd59cc1130a426571215a0b56898e5e_link_0/mobility.urdf 73 | mug_6e884701bfddd1f71e1138649f4c219: 74 | path: 
dataset/mugs/6e884701bfddd1f71e1138649f4c219_link_0/mobility.urdf 75 | -------------------------------------------------------------------------------- /cfg/dataset/pot_test.yaml: -------------------------------------------------------------------------------- 1 | name: "partnet_mobility" 2 | type: "pot" 3 | dataset_root: downloads 4 | objects: 5 | 100038_link_0: 6 | path: dataset/pots/100038_link_0/mobility.urdf 7 | 100045_link_0: 8 | path: dataset/pots/100045_link_0/mobility.urdf 9 | 100051_link_0: 10 | path: dataset/pots/100051_link_0/mobility.urdf 11 | 102080_link_0: 12 | path: dataset/pots/102080_link_0/mobility.urdf 13 | -------------------------------------------------------------------------------- /cfg/dataset/pot_train.yaml: -------------------------------------------------------------------------------- 1 | name: "partnet_mobility" 2 | type: "pot" 3 | dataset_root: downloads 4 | objects: 5 | 100015_link_0: 6 | path: dataset/pots/100015_link_0/mobility.urdf 7 | 100017_link_0: 8 | path: dataset/pots/100017_link_0/mobility.urdf 9 | 100021_link_0: 10 | path: dataset/pots/100021_link_0/mobility.urdf 11 | 100023_link_0: 12 | path: dataset/pots/100023_link_0/mobility.urdf 13 | 100025_link_0: 14 | path: dataset/pots/100025_link_0/mobility.urdf 15 | 100028_link_0: 16 | path: dataset/pots/100028_link_0/mobility.urdf 17 | 100032_link_0: 18 | path: dataset/pots/100032_link_0/mobility.urdf 19 | 100033_link_0: 20 | path: dataset/pots/100033_link_0/mobility.urdf 21 | 100040_link_0: 22 | path: dataset/pots/100040_link_0/mobility.urdf 23 | # 100047_link_0: 24 | # path: dataset/pots/100047_link_0/mobility.urdf 25 | 100054_link_0: 26 | path: dataset/pots/100054_link_0/mobility.urdf 27 | 100055_link_0: 28 | path: dataset/pots/100055_link_0/mobility.urdf # Not parallel! 
29 | 100056_link_0: 30 | path: dataset/pots/100056_link_0/mobility.urdf 31 | 100057_link_0: 32 | path: dataset/pots/100057_link_0/mobility.urdf 33 | # 100058_link_0: 34 | # path: dataset/pots/100058_link_0/mobility.urdf 35 | 100060_link_0: 36 | path: dataset/pots/100060_link_0/mobility.urdf 37 | # 100613_link_0: 38 | # path: dataset/pots/100613_link_0/mobility.urdf 39 | # 100619_link_0: 40 | # path: dataset/pots/100619_link_0/mobility.urdf 41 | # 100623_link_0: 42 | # path: dataset/pots/100623_link_0/mobility.urdf 43 | # 100693_link_0: 44 | # path: dataset/pots/100693_link_0/mobility.urdf 45 | # 102085_link_0: 46 | # path: dataset/pots/102085_link_0/mobility.urdf 47 | -------------------------------------------------------------------------------- /cfg/dataset/real_world.yaml: -------------------------------------------------------------------------------- 1 | name: "real_world" -------------------------------------------------------------------------------- /cfg/manipulation/close_cabinet.yaml: -------------------------------------------------------------------------------- 1 | name: open_cabinet 2 | closed_loop: True 3 | step_sizes: 4 | - 0.13 5 | - 0.09 6 | - 0.09 7 | - 0.09 8 | - 0.09 9 | - 0.09 10 | -------------------------------------------------------------------------------- /cfg/manipulation/close_cabinet_open_loop.yaml: -------------------------------------------------------------------------------- 1 | name: open_cabinet 2 | closed_loop: False 3 | step_sizes: 4 | - 0.5 -------------------------------------------------------------------------------- /cfg/manipulation/close_drawer.yaml: -------------------------------------------------------------------------------- 1 | name: open_drawer 2 | closed_loop: True 3 | step_sizes: 4 | - 0.13 5 | - 0.1 6 | - 0.1 7 | # - 0.1 8 | -------------------------------------------------------------------------------- /cfg/manipulation/close_drawer_open_loop.yaml: -------------------------------------------------------------------------------- 1 | name: open_drawer 2 | closed_loop: False 3 | step_sizes: 4 | - 0.5 5 | -------------------------------------------------------------------------------- /cfg/manipulation/open_cabinet.yaml: -------------------------------------------------------------------------------- 1 | name: open_cabinet 2 | closed_loop: True 3 | step_sizes: 4 | - 0.13 5 | - 0.09 6 | - 0.09 7 | - 0.09 8 | - 0.09 9 | - 0.09 -------------------------------------------------------------------------------- /cfg/manipulation/open_cabinet_open_loop.yaml: -------------------------------------------------------------------------------- 1 | name: open_cabinet 2 | closed_loop: False 3 | step_sizes: 4 | - 0.5 -------------------------------------------------------------------------------- /cfg/manipulation/open_drawer.yaml: -------------------------------------------------------------------------------- 1 | name: open_drawer 2 | closed_loop: True 3 | step_sizes: 4 | - 0.13 5 | - 0.1 6 | - 0.1 7 | # - 0.1 -------------------------------------------------------------------------------- /cfg/manipulation/open_drawer_open_loop.yaml: -------------------------------------------------------------------------------- 1 | name: open_drawer 2 | closed_loop: False 3 | step_sizes: 4 | - 0.5 -------------------------------------------------------------------------------- /cfg/manipulation/open_pot.yaml: -------------------------------------------------------------------------------- 1 | name: open_pot 2 | closed_loop: True 3 | step_sizes: 4 | - 0.06 
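The manipulation configs in `cfg/manipulation/` come in paired closed-loop and `*_open_loop` variants: the closed-loop files list several small `step_sizes` so the object pose can be re-estimated between pulls, while the open-loop files (such as `open_pot_open_loop.yaml` below) execute a single large step from the initial estimate. The sketch below is illustrative only, not repository code, showing how such a config might drive a stepping loop; `estimate_pose` and `execute_step` are hypothetical stand-ins.

```python
# Illustrative sketch: consuming `step_sizes` from a manipulation config.
def run_manipulation(env, cfg, initial_pose):
    pose = initial_pose
    for step in cfg.step_sizes:        # e.g. [0.13, 0.09, ...] for closed loop
        if cfg.closed_loop:
            pose = estimate_pose(env)  # hypothetical: re-observe before each pull
        execute_step(env, pose, distance=step)  # hypothetical motion primitive
```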
-------------------------------------------------------------------------------- /cfg/manipulation/open_pot_open_loop.yaml: -------------------------------------------------------------------------------- 1 | name: open_pot 2 | closed_loop: False 3 | step_sizes: 4 | - 0.18 5 | -------------------------------------------------------------------------------- /cfg/manipulation/pick_mug.yaml: -------------------------------------------------------------------------------- 1 | name: pick_mug 2 | closed_loop: True 3 | step_sizes: 4 | - 0.1 5 | # - 0.13 -------------------------------------------------------------------------------- /cfg/manipulation/pick_mug_open_loop.yaml: -------------------------------------------------------------------------------- 1 | name: pick_mug 2 | closed_loop: False 3 | step_sizes: 4 | - 0.1 5 | # - 0.13 6 | -------------------------------------------------------------------------------- /cfg/pose_estimator/adapose_cabinet.yaml: -------------------------------------------------------------------------------- 1 | name: adapose_v5 2 | task_name: one_door_cabinet 3 | load: True 4 | checkpoint_path: downloads/pose_estimator/one_door_cabinet.pth 5 | img_size: 224 6 | use_depth: True 7 | n_pts: 1024 8 | direct_regression: True 9 | real_world: False -------------------------------------------------------------------------------- /cfg/pose_estimator/adapose_drawer.yaml: -------------------------------------------------------------------------------- 1 | name: adapose_v5 2 | task_name: one_drawer_cabinet 3 | load: True 4 | checkpoint_path: downloads/pose_estimator/one_drawer_cabinet.pth 5 | img_size: 224 6 | use_depth: True 7 | n_pts: 1024 8 | direct_regression: True 9 | real_world: False -------------------------------------------------------------------------------- /cfg/pose_estimator/adapose_mug.yaml: -------------------------------------------------------------------------------- 1 | name: adapose_v5 2 | task_name: mugs 3 | load: True 4 | checkpoint_path: downloads/pose_estimator/mugs.pth 5 | img_size: 224 6 | use_depth: True 7 | n_pts: 1024 8 | direct_regression: True 9 | real_world: False -------------------------------------------------------------------------------- /cfg/pose_estimator/adapose_pot.yaml: -------------------------------------------------------------------------------- 1 | name: adapose_v5 2 | task_name: pots 3 | load: True 4 | checkpoint_path: downloads/pose_estimator/pots.pth 5 | img_size: 224 6 | use_depth: True 7 | n_pts: 1024 8 | direct_regression: True 9 | real_world: False -------------------------------------------------------------------------------- /cfg/pose_estimator/ground_truth.yaml: -------------------------------------------------------------------------------- 1 | name: ground_truth -------------------------------------------------------------------------------- /cfg/task/open_cabinet.yaml: -------------------------------------------------------------------------------- 1 | name: open_cabinet 2 | robot_root: "assets/panda" 3 | robot_name: "panda.urdf" 4 | num_envs: 8 5 | max_step: 512 6 | time_step: 0.005 7 | 8 | object_conf: 9 | init_pose: 10 | xyz: [0, 0, 0.5] 11 | rot: [1.0, 0, 0, 0] 12 | init_dof: [0.0] 13 | success_dof: [0.15] 14 | randomization: 15 | rot: 16 | low: -0.2 17 | high: 0.2 18 | pos_angle: 19 | low: -0.4 20 | high: 0.4 21 | dis: 22 | low: 0.5 23 | high: 0.85 24 | height: 25 | low: 0.01 26 | high: 0.05 27 | dof: 28 | low: [0.0] 29 | high: [0.0] 30 | 31 | robot_conf: 32 | hand_cam_pose: 33 | xyz: [0.1, 0, 0.1] 34 | rot: 
[0.70710678, 0, -0.70710678, 0] 35 | init_pose: 36 | xyz: [0.0, 0, 0.15] 37 | rot: [1.0, 0, 0, 0] 38 | init_dof: None 39 | randomization: 40 | pose: 41 | xyz: 42 | low: [0.0, 0.0, 0.15] 43 | high: [0.0, 0.0, 0.15] 44 | rot: 45 | low: 0.0 46 | high: 0.0 47 | dof: 48 | low: [-0.025, -0.025, -0.025, -0.99248004, -0.025, 1.0705001, -0.025, 0., 0.] 49 | high: [0.025, 0.025, 0.025, -0.89248, 0.025, 1.1705, 0.025, 0.04, 0.04] -------------------------------------------------------------------------------- /cfg/task/open_cabinet_45.yaml: -------------------------------------------------------------------------------- 1 | name: open_cabinet 2 | robot_root: "assets/panda" 3 | robot_name: "panda.urdf" 4 | num_envs: 8 5 | max_step: 512 6 | time_step: 0.005 7 | 8 | object_conf: 9 | init_pose: 10 | xyz: [0, 0, 0.5] 11 | rot: [1.0, 0, 0, 0] 12 | init_dof: [0.0] 13 | success_dof: [0.78] 14 | randomization: 15 | rot: 16 | low: -0.2 17 | high: 0.2 18 | pos_angle: 19 | low: -0.4 20 | high: 0.4 21 | dis: 22 | low: 0.5 23 | high: 0.85 24 | height: 25 | low: 0.01 26 | high: 0.05 27 | dof: 28 | low: [0.0] 29 | high: [0.0] 30 | 31 | robot_conf: 32 | hand_cam_pose: 33 | xyz: [0.1, 0, 0.1] 34 | rot: [0.70710678, 0, -0.70710678, 0] 35 | init_pose: 36 | xyz: [0.0, 0, 0.15] 37 | rot: [1.0, 0, 0, 0] 38 | init_dof: None 39 | randomization: 40 | pose: 41 | xyz: 42 | low: [0.0, 0.0, 0.15] 43 | high: [0.0, 0.0, 0.15] 44 | rot: 45 | low: 0.0 46 | high: 0.0 47 | dof: 48 | low: [-0.025, -0.025, -0.025, -0.99248004, -0.025, 1.0705001, -0.025, 0., 0.] 49 | high: [0.025, 0.025, 0.025, -0.89248, 0.025, 1.1705, 0.025, 0.04, 0.04] -------------------------------------------------------------------------------- /cfg/task/open_cabinet_no_dr.yaml: -------------------------------------------------------------------------------- 1 | name: open_cabinet 2 | robot_root: "assets/panda" 3 | robot_name: "panda.urdf" 4 | num_envs: 8 5 | max_step: 512 6 | time_step: 0.005 7 | 8 | object_conf: 9 | init_pose: 10 | xyz: [0, 0, 0.5] 11 | rot: [1.0, 0, 0, 0] 12 | init_dof: [0.0] 13 | success_dof: [0.15] 14 | randomization: 15 | rot: 16 | low: -0.0 17 | high: 0.0 18 | pos_angle: 19 | low: -0.0 20 | high: 0.0 21 | dis: 22 | low: 0.6 23 | high: 0.6 24 | height: 25 | low: 0.02 26 | high: 0.02 27 | dof: 28 | low: [0.0] 29 | high: [0.0] 30 | 31 | robot_conf: 32 | hand_cam_pose: 33 | xyz: [0.1, 0, 0.1] 34 | rot: [0.70710678, 0, -0.70710678, 0] 35 | init_pose: 36 | xyz: [0.0, 0, 0.15] 37 | rot: [1.0, 0, 0, 0] 38 | init_dof: None 39 | randomization: 40 | pose: 41 | xyz: 42 | low: [0.0, 0.0, 0.15] 43 | high: [0.0, 0.0, 0.15] 44 | rot: 45 | low: 0.0 46 | high: 0.0 47 | dof: 48 | low: [-0.025, -0.025, -0.025, -0.99248004, -0.025, 1.0705001, -0.025, 0., 0.] 
49 | high: [0.025, 0.025, 0.025, -0.89248, 0.025, 1.1705, 0.025, 0.04, 0.04] -------------------------------------------------------------------------------- /cfg/task/open_cabinet_visualize.yaml: -------------------------------------------------------------------------------- 1 | name: open_cabinet_visualize 2 | robot_root: "assets/panda" 3 | robot_name: "panda.urdf" 4 | num_envs: 8 5 | max_step: 512 6 | time_step: 0.005 7 | 8 | object_conf: 9 | init_pose: 10 | xyz: [0, 0, 0.5] 11 | rot: [1.0, 0, 0, 0] 12 | init_dof: [0.0] 13 | success_dof: [0.3] 14 | randomization: 15 | pose: 16 | xyz: 17 | low: [-0.1, -0.1, 0.5] 18 | high: [0.1, 0.1, 0.6] 19 | rot: 20 | low: -0.1 21 | high: 0.1 22 | dof: 23 | low: [0.0] 24 | high: [0.0] 25 | 26 | robot_conf: 27 | hand_cam_pose: 28 | xyz: [0.1, 0, 0.1] 29 | rot: [0.70710678, 0, -0.70710678, 0] 30 | init_pose: 31 | xyz: [-1.0, 0, 0.05] 32 | rot: [1.0, 0, 0, 0] 33 | init_dof: None 34 | randomization: 35 | pose: 36 | xyz: 37 | low: [-1.1, -0.1, 0.05] 38 | high: [-0.9, 0.1, 0.05] 39 | rot: 40 | low: -0.1 41 | high: 0.1 42 | dof: 43 | low: [-0.05, -0.05, -0.05, -0.99248004, -0.05, 1.0705001, -0.05, 0., 0.] 44 | high: [0.05, 0.05, 0.05, -0.89248, 0.05, 1.1705, 0.05, 0.04, 0.04] -------------------------------------------------------------------------------- /cfg/task/open_drawer.yaml: -------------------------------------------------------------------------------- 1 | name: open_drawer 2 | robot_root: "assets/panda" 3 | robot_name: "panda.urdf" 4 | num_envs: 8 5 | max_step: 512 6 | time_step: 0.005 7 | 8 | object_conf: 9 | init_pose: 10 | xyz: [0, 0, 0.5] 11 | rot: [1.0, 0, 0, 0] 12 | init_dof: [0.0] 13 | success_dof: [0.15] 14 | randomization: 15 | rot: 16 | low: -0.2 17 | high: 0.2 18 | pos_angle: 19 | low: -0.4 20 | high: 0.4 21 | dis: 22 | low: 0.5 23 | high: 0.8 24 | height: 25 | low: 0.01 26 | high: 0.05 27 | dof: 28 | low: [0.0] 29 | high: [0.0] 30 | 31 | robot_conf: 32 | hand_cam_pose: 33 | xyz: [0.1, 0, 0.1] 34 | rot: [0.70710678, 0, -0.70710678, 0] 35 | init_pose: 36 | xyz: [0.0, 0, 0.15] 37 | rot: [1.0, 0, 0, 0] 38 | init_dof: None 39 | randomization: 40 | pose: 41 | xyz: 42 | low: [0.0, 0.0, 0.15] 43 | high: [0.0, 0.0, 0.15] 44 | rot: 45 | low: 0.0 46 | high: 0.0 47 | dof: 48 | low: [-0.025, -0.025, -0.025, -0.99248004, -0.025, 1.0705001, -0.025, 0., 0.] 49 | high: [0.025, 0.025, 0.025, -0.89248, 0.025, 1.1705, 0.025, 0.04, 0.04] -------------------------------------------------------------------------------- /cfg/task/open_drawer_30.yaml: -------------------------------------------------------------------------------- 1 | name: open_drawer 2 | robot_root: "assets/panda" 3 | robot_name: "panda.urdf" 4 | num_envs: 8 5 | max_step: 512 6 | time_step: 0.005 7 | 8 | object_conf: 9 | init_pose: 10 | xyz: [0, 0, 0.5] 11 | rot: [1.0, 0, 0, 0] 12 | init_dof: [0.0] 13 | success_dof: [0.30] 14 | randomization: 15 | rot: 16 | low: -0.2 17 | high: 0.2 18 | pos_angle: 19 | low: -0.4 20 | high: 0.4 21 | dis: 22 | low: 0.5 23 | high: 0.8 24 | height: 25 | low: 0.01 26 | high: 0.05 27 | dof: 28 | low: [0.0] 29 | high: [0.0] 30 | 31 | robot_conf: 32 | hand_cam_pose: 33 | xyz: [0.1, 0, 0.1] 34 | rot: [0.70710678, 0, -0.70710678, 0] 35 | init_pose: 36 | xyz: [0.0, 0, 0.15] 37 | rot: [1.0, 0, 0, 0] 38 | init_dof: None 39 | randomization: 40 | pose: 41 | xyz: 42 | low: [0.0, 0.0, 0.15] 43 | high: [0.0, 0.0, 0.15] 44 | rot: 45 | low: 0.0 46 | high: 0.0 47 | dof: 48 | low: [-0.025, -0.025, -0.025, -0.99248004, -0.025, 1.0705001, -0.025, 0., 0.] 
49 | high: [0.025, 0.025, 0.025, -0.89248, 0.025, 1.1705, 0.025, 0.04, 0.04] -------------------------------------------------------------------------------- /cfg/task/open_drawer_no_dr.yaml: -------------------------------------------------------------------------------- 1 | name: open_drawer 2 | robot_root: "assets/panda" 3 | robot_name: "panda.urdf" 4 | num_envs: 8 5 | max_step: 512 6 | time_step: 0.005 7 | 8 | object_conf: 9 | init_pose: 10 | xyz: [0, 0, 0.5] 11 | rot: [1.0, 0, 0, 0] 12 | init_dof: [0.0] 13 | success_dof: [0.15] 14 | randomization: 15 | rot: 16 | low: -0.0 17 | high: 0.0 18 | pos_angle: 19 | low: -0.0 20 | high: 0.0 21 | dis: 22 | low: 0.6 23 | high: 0.6 24 | height: 25 | low: 0.02 26 | high: 0.02 27 | dof: 28 | low: [0.0] 29 | high: [0.0] 30 | 31 | robot_conf: 32 | hand_cam_pose: 33 | xyz: [0.1, 0, 0.1] 34 | rot: [0.70710678, 0, -0.70710678, 0] 35 | init_pose: 36 | xyz: [0.0, 0, 0.15] 37 | rot: [1.0, 0, 0, 0] 38 | init_dof: None 39 | randomization: 40 | pose: 41 | xyz: 42 | low: [0.0, 0.0, 0.15] 43 | high: [0.0, 0.0, 0.15] 44 | rot: 45 | low: 0.0 46 | high: 0.0 47 | dof: 48 | low: [-0.025, -0.025, -0.025, -0.99248004, -0.025, 1.0705001, -0.025, 0., 0.] 49 | high: [0.025, 0.025, 0.025, -0.89248, 0.025, 1.1705, 0.025, 0.04, 0.04] -------------------------------------------------------------------------------- /cfg/task/open_pot.yaml: -------------------------------------------------------------------------------- 1 | name: open_pot 2 | robot_root: "assets/panda" 3 | robot_name: "panda.urdf" 4 | num_envs: 8 5 | max_step: 512 6 | time_step: 0.005 7 | 8 | object_conf: 9 | init_pose: 10 | xyz: [0, 0, 0.5] 11 | rot: [1.0, 0, 0, 0] 12 | init_dof: [0.0] 13 | success_dof: [0.01] 14 | randomization: 15 | rot: 16 | low: -0.2 17 | high: 0.2 18 | pos_angle: 19 | low: -0.4 20 | high: 0.4 21 | dis: 22 | low: 0.2 23 | high: 0.38 24 | height: 25 | low: 0.01 26 | high: 0.3 27 | dof: 28 | low: [0.0] 29 | high: [0.0] 30 | 31 | robot_conf: 32 | hand_cam_pose: 33 | xyz: [0.1, 0, 0.1] 34 | rot: [0.70710678, 0, -0.70710678, 0] 35 | init_pose: 36 | xyz: [0.0, 0, 0.15] 37 | rot: [1.0, 0, 0, 0] 38 | init_dof: None 39 | randomization: 40 | pose: 41 | xyz: 42 | low: [0.0, 0.0, 0.15] 43 | high: [0.0, 0.0, 0.15] 44 | rot: 45 | low: 0.0 46 | high: 0.0 47 | dof: 48 | low: [-0.025, -0.025, -0.025, -0.99248004, -0.025, 1.0705001, -0.025, 0., 0.] 
49 | high: [0.025, 0.025, 0.025, -0.89248, 0.025, 1.1705, 0.025, 0.04, 0.04] -------------------------------------------------------------------------------- /cfg/task/pick_mug.yaml: -------------------------------------------------------------------------------- 1 | name: pick_mug 2 | robot_root: "assets/panda" 3 | robot_name: "panda.urdf" 4 | num_envs: 8 5 | max_step: 512 6 | time_step: 0.005 7 | 8 | object_conf: 9 | init_pose: 10 | xyz: [0, 0, 0.5] 11 | rot: [1.0, 0, 0, 0] 12 | init_dof: [0.0] 13 | success_dof: [0.03] 14 | randomization: 15 | rot: 16 | low: 1.570796327 17 | high: 4.71238898 18 | pos_angle: 19 | low: -0.4 20 | high: 0.4 21 | dis: 22 | low: 0.44 23 | high: 0.50 24 | height: 25 | low: 0.1 26 | high: 0.15 27 | dof: 28 | low: [0.0] 29 | high: [0.0] 30 | 31 | robot_conf: 32 | hand_cam_pose: 33 | xyz: [0.1, 0, 0.1] 34 | rot: [0.70710678, 0, -0.70710678, 0] 35 | init_pose: 36 | xyz: [0.0, 0, 0.15] 37 | rot: [1.0, 0, 0, 0] 38 | init_dof: None 39 | randomization: 40 | pose: 41 | xyz: 42 | low: [0.0, 0.0, 0.15] 43 | high: [0.0, 0.0, 0.15] 44 | rot: 45 | low: 0.0 46 | high: 0.0 47 | dof: 48 | low: [-0.025, -0.025, -0.025, -0.99248004, -0.025, 1.0705001, -0.025, 0., 0.] 49 | high: [0.025, 0.025, 0.025, -0.89248, 0.025, 1.1705, 0.025, 0.04, 0.04] -------------------------------------------------------------------------------- /cfg/task/real_world.yaml: -------------------------------------------------------------------------------- 1 | name: "real_world" -------------------------------------------------------------------------------- /cfg/train/collect.yaml: -------------------------------------------------------------------------------- 1 | log_dir: "./logs" 2 | save_dir: "./saves" 3 | name: "collect" 4 | 5 | total_round: 4096 6 | 7 | train_manipulation: False 8 | train_controller: False -------------------------------------------------------------------------------- /cfg/train/controller.yaml: -------------------------------------------------------------------------------- 1 | log_dir: "./logs" 2 | save_dir: "./saves" 3 | name: "train" 4 | train_manipulation: False 5 | train_controller: True 6 | iterations_per_epoch: 600 7 | log_interval: 1 8 | save_interval: 25 -------------------------------------------------------------------------------- /cfg/train/test.yaml: -------------------------------------------------------------------------------- 1 | log_dir: "./logs" 2 | save_dir: "./saves" 3 | name: "test" 4 | 5 | total_round: 100 6 | 7 | train_manipulation: False 8 | train_controller: False -------------------------------------------------------------------------------- /cfg/train/test_baseline.yaml: -------------------------------------------------------------------------------- 1 | log_dir: "./logs" 2 | save_dir: "./saves" 3 | name: "test_baseline" 4 | 5 | train_manipulation: False 6 | train_controller: False 7 | 8 | task_setting_root: ??? 9 | action_path: ??? 
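Several config fields above are set to `???` (`task_setting_root` and `action_path` here, plus the `log_dir`/`save_dir` entries in the collection controllers). In OmegaConf, which Hydra builds on, `???` marks a mandatory value: reading the field before a value is supplied raises an error, forcing callers to provide it explicitly. A small sketch of that behavior, assuming `omegaconf` is installed:

```python
# Sketch: OmegaConf treats the literal "???" as a missing mandatory value.
from omegaconf import OmegaConf
from omegaconf.errors import MissingMandatoryValue

cfg = OmegaConf.create({"task_setting_root": "???", "action_path": "???"})
print(OmegaConf.is_missing(cfg, "task_setting_root"))  # True
try:
    _ = cfg.action_path        # raises until a value is provided
except MissingMandatoryValue:
    cfg.action_path = "outputs/actions"  # hypothetical runtime value
print(cfg.action_path)
```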
-------------------------------------------------------------------------------- /env/base_viewer.py: -------------------------------------------------------------------------------- 1 | from abc import abstractclassmethod 2 | 3 | class BaseViewer: 4 | def __init__(self, name, get_rgba_method, get_depth_method, get_pos_method, get_segmentation_method, update_param_method, get_param_method, get_norm_method): 5 | self.name = name 6 | self.get_rgba_method = get_rgba_method 7 | self.get_depth_method = get_depth_method 8 | self.get_pos_method = get_pos_method 9 | self.get_segmentation_method = get_segmentation_method 10 | self.update_param_method = update_param_method 11 | self.get_param_method = get_param_method 12 | self.get_norm_method = get_norm_method 13 | 14 | def get_rgba(self, *args, **kwargs) : 15 | ''' 16 | Take picture from the viewer. 17 | return: RGBA image array of W*H*4. 18 | ''' 19 | return self.get_rgba_method(*args, **kwargs) 20 | 21 | def get_depth(self, *args, **kwargs) : 22 | ''' 23 | Take picture from the viewer. 24 | return: depth array of W*H. 25 | ''' 26 | return self.get_depth_method(*args, **kwargs) 27 | 28 | def get_pos(self, *args, **kwargs) : 29 | ''' 30 | Take picture from the viewer. 31 | return: position array of W*H*3. 32 | ''' 33 | return self.get_pos_method(*args, **kwargs) 34 | 35 | def get_segmentation(self, *args, **kwargs) : 36 | ''' 37 | Take picture from the viewer. 38 | return: segmentation array of W*H*2, 39 | the two dimensions are for mesh-level and actor-level segmentation. 40 | ''' 41 | return self.get_segmentation_method(*args, **kwargs) 42 | 43 | def get_norm(self) : 44 | ''' 45 | Take picture from the viewer. 46 | return: norm array of W*H*3. 47 | ''' 48 | 49 | return self.get_norm_method() 50 | 51 | def update_viewer(self, camera_extrinsics) : 52 | ''' 53 | Update camera extrinsics and intrinsics. 54 | camera_extrinsics: transformation matrix. 55 | ''' 56 | return self.update_param_method(camera_extrinsics) 57 | 58 | def get_param(self) : 59 | ''' 60 | Get camera intrinsics and extrinsics. 61 | return: camera intrinsics and extrinsics.
62 | ''' 63 | return self.get_param_method() -------------------------------------------------------------------------------- /env/real_world_test.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/env/real_world_test.py -------------------------------------------------------------------------------- /env/realworld_envs/base_realworld.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sapien 3 | from franka_impedance import RealworldEnv 4 | from utils.transform import * 5 | import numpy as np 6 | import cv2 7 | import matplotlib.pyplot as plt 8 | import numpy as np 9 | from segment_anything import SamPredictor, sam_model_registry 10 | import time as Time 11 | 12 | class BaseRealworldEnv(RealworldEnv) : 13 | 14 | def __init__(self) : 15 | 16 | root = os.path.dirname(os.path.abspath(__file__)) 17 | super().__init__(os.path.join(root, "panda_rs_handeyecalibration_eye_on_hand.yaml")) 18 | sam = sam_model_registry["vit_h"](checkpoint="env/realworld_envs/sam_ckpt/sam_vit_h_4b8939.pth") 19 | self.sam_predictor = SamPredictor(sam) 20 | 21 | def get_observation(self, gt=False) : 22 | 23 | # TODO 24 | assert(gt==False) 25 | # See sapien_envs.base_manipulation for instructions 26 | hand_pose = self.get_hand_pose() 27 | hand_xyz = hand_pose[:3] 28 | target_xyz = None # ??? 29 | total_move_distance = np.linalg.norm(target_xyz - hand_xyz) 30 | success = 0 31 | return total_move_distance, success 32 | 33 | # show results of sam masks 34 | def _show_anns(self, anns): 35 | if len(anns) == 0: 36 | return 37 | sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True) 38 | ax = plt.gca() 39 | ax.set_autoscale_on(False) 40 | 41 | img = np.ones((sorted_anns[0]['segmentation'].shape[0], sorted_anns[0]['segmentation'].shape[1], 4)) 42 | img[:,:,3] = 0 43 | for ann in sorted_anns: 44 | m = ann['segmentation'] 45 | color_mask = np.concatenate([np.random.random(3), [0.35]]) 46 | img[m] = color_mask 47 | ax.imshow(img) 48 | 49 | def get_image(self, mask="handle") : 50 | 51 | print("Getting Image from Realsense") 52 | # TODO 53 | assert(mask in ["handle", ]) 54 | # See sapien_envs.base_manipulation for instructions 55 | # Color, Mask, Intrinsic, Extrinsic 56 | cam_name = "camera0" 57 | cam_pose = self.camera_pose() 58 | cam_pos = cam_pose[:3] 59 | cam_rot = cam_pose[3:] 60 | 61 | 62 | color_image, z_depth = self.camera.take_picture() 63 | self.sam_predictor.set_image(color_image) 64 | intrinsic = None 65 | extrinsic = cam_pose 66 | #camera_intrinsics: [near, far, fovy, width, height] 67 | #camera_extrinsics: pose of the camera (sapien.Pose), or transformation of the camera (numpy). 
68 | 69 | 70 | if mask == "handle": 71 | prompt = "handle on cabinet" 72 | 73 | masks, _, _ = self.sam_predictor.predict(prompt) # NOTE: SamPredictor.predict expects point/box prompts (point_coords, point_labels, box, ...), not free text; this text prompt is a placeholder for the TODO above 74 | 75 | 76 | images = {} 77 | 78 | 79 | images[cam_name] = { 80 | 'Color': color_image, 81 | 'Position': cam_pos, 82 | 'Depth': z_depth, 83 | 'Norm': cam_rot, 84 | 'Mask': masks, 85 | 'Intrinsic': intrinsic, 86 | 'Extrinsic': extrinsic 87 | } 88 | 89 | return color_image, masks, intrinsic, extrinsic 90 | 91 | def camera_pose(self) : 92 | 93 | return super().get_cam_pose() 94 | 95 | def hand_pose(self) : 96 | 97 | return super().get_hand_pose() 98 | 99 | def gripper_move_to(self, pose, time=2, wait=1, planner="ik", robot_frame=False, skip_move=False, no_collision_with_front=True) : 100 | ''' 101 | Move the gripper to a target pose 102 | ''' 103 | print("gripper_move_to", pose) 104 | open_dir = quat_to_axis(pose[3:], 2) * 0.105 105 | new_pose = sapien.Pose(p=pose[:3]-open_dir, q=pose[3:]) 106 | res = super().hand_move_to(new_pose) 107 | Time.sleep(time) 108 | print("done") 109 | return res 110 | 111 | def cam_move_to(self, pose, time=2, wait=1, planner="ik", robot_frame=False, skip_move=False, no_collision_with_front=True) : 112 | ''' 113 | Move the camera to a target pose 114 | ''' 115 | 116 | print("cam_move_to", pose) 117 | res = super().cam_move_to(pose) 118 | Time.sleep(time) 119 | print("done") 120 | return res 121 | 122 | # open_dir = quat_to_axis(pose[3:], 2) * 0.105 123 | # new_pose = sapien.Pose(p=pose[:3]-open_dir, q=pose[3:]) 124 | # return super().hand_move_to(new_pose) 125 | 126 | def hand_move_to(self, pose, time=2, wait=1, planner="ik", robot_frame=False, skip_move=False, no_collision_with_front=True) : 127 | ''' 128 | Move the hand to a target pose 129 | ''' 130 | 131 | print("hand_move_to", pose) 132 | res = super().hand_move_to(pose) 133 | Time.sleep(time) 134 | print("done") 135 | return res -------------------------------------------------------------------------------- /env/realworld_envs/panda_rs_handeyecalibration_eye_on_hand.yaml: -------------------------------------------------------------------------------- 1 | parameters: 2 | eye_on_hand: true 3 | freehand_robot_movement: false 4 | move_group: panda_arm 5 | move_group_namespace: / 6 | namespace: /panda_rs_handeyecalibration_eye_on_hand/ 7 | robot_base_frame: panda_link0 8 | robot_effector_frame: panda_hand_tcp 9 | tracking_base_frame: camera_color_frame 10 | tracking_marker_frame: camera_marker 11 | transformation: 12 | qw: 0.7081374877457951 13 | qx: 0.006027548507685216 14 | qy: 0.00689162640316221 15 | qz: 0.7060152070554716 16 | x: 0.07279950210000039 17 | y: -0.033096014612891786 18 | z: 0.06040634951408137 19 | 20 | # TCP 21 | # translation: 22 | # x: 0.06301070147038353 23 | # y: -0.03401436290920782 24 | # z: -0.04524327998194331 25 | # rotation: 26 | # x: 0.009124544829899064 27 | # y: 0.012592267854170003 28 | # z: 0.7130667150162096 29 | # w: 0.700923702986229 30 | 31 | # PANDA_HAND 32 | # translation: 33 | # x: 0.07279950210000039 34 | # y: -0.033096014612891786 35 | # z: 0.06040634951408137 36 | # rotation: 37 | # x: 0.006027548507685216 38 | # y: 0.00689162640316221 39 | # z: 0.7060152070554716 40 | # w: 0.7081374877457951 41 | -------------------------------------------------------------------------------- /env/sapien_envs/close_cabinet.py: -------------------------------------------------------------------------------- 1 | from typing import Union 2 | from ..base_sapien_env import BaseEnv 3 | from sapien.core import renderer as R 4 | import sapien.core as sapien 5 |
import numpy as np 6 | from PIL import Image, ImageColor 7 | import mplib 8 | import os 9 | from utils.transform import * 10 | from utils.tools import * 11 | from utils.sapien_utils import * 12 | from gym.spaces import Space, Box 13 | from env.base_viewer import BaseViewer 14 | from env.sapien_envs.osc_planner import OSCPlanner 15 | from env.sapien_envs.open_cabinet import OpenCabinetEnv 16 | import utils.logger 17 | import random 18 | import ujson as json 19 | from tqdm import tqdm 20 | 21 | CAMERA_INTRINSIC = [0.05, 100, 1, 640, 480] 22 | 23 | class CloseCabinetEnv(OpenCabinetEnv): 24 | 25 | def get_observation(self, gt=False) : 26 | ''' 27 | Get the observation of the environment as a dict 28 | If gt=True, return ground truth bbox 29 | ''' 30 | 31 | raw_observation = super().get_observation() 32 | 33 | # if self.renderer_type == 'sapien' : 34 | # self.scene.update_render() 35 | # for cam in self.registered_cameras : 36 | # cam.take_picture() 37 | # elif self.renderer_type == 'client' : 38 | # self.scene._update_render_and_take_pictures(self.registered_cameras) 39 | 40 | if gt : 41 | raw_observation["handle_bbox"] = self._get_handle_bbox_and_link_transformation(link_name=self.active_link)[0][0] 42 | raw_observation["success"] = (self.obj_dof() < self.obj_success_dof) * 1.0 43 | else : 44 | raw_observation["success"] = (self.obj_dof() < self.obj_success_dof) * 1.0 45 | 46 | # if self.renderer_type == 'client' : 47 | # # provide np.array observation 48 | # observation = regularize_dict(regularize_dict) 49 | 50 | return raw_observation 51 | 52 | def get_reward(self, action) : 53 | 54 | close_reward = -self.obj_dof()[0] 55 | end_eff_p = self.gripper_pose().p 56 | handle_bbox = self._get_handle_bbox_and_link_transformation(link_name=self.active_link)[0][0] 57 | handle_p = (handle_bbox[0] + handle_bbox[6]) / 2 58 | dist = np.linalg.norm(end_eff_p - handle_p) 59 | near_reward = 1.0 / (1.0 + dist**2) + (dist < 0.1) 60 | 61 | eff_x = quat_to_axis(self.gripper_pose().q, 0) 62 | eff_y = quat_to_axis(self.gripper_pose().q, 1) 63 | eff_z = quat_to_axis(self.gripper_pose().q, 2) 64 | handle_x = quat_to_axis(self.handle_pose().q, 0) 65 | handle_y = quat_to_axis(self.handle_pose().q, 1) 66 | handle_z = quat_to_axis(self.handle_pose().q, 2) 67 | 68 | dir_reward = ( 69 | (eff_x*handle_z).sum() + (eff_z * (-handle_x)).sum() # 1.0 / (1.0 + np.linalg.norm(eff_y - handle_y)**2) 70 | ) * 0.1 71 | 72 | # print(near_reward, dir_reward) 73 | robot_qpos = self.robot.get_qpos() 74 | # action_reward = - np.linalg.norm(robot_qpos[:7] - action[:7]) * 0.01 - np.linalg.norm(robot_qpos[7:14]) * 0.01 75 | action_reward = 0 76 | 77 | # print(near_reward, dir_reward, open_reward * (dist < 0.1)) 78 | 79 | return near_reward + dir_reward + close_reward * (dist < 0.1) 80 | # return near_reward + open_reward + dir_reward + action_reward 81 | -------------------------------------------------------------------------------- /env/sapien_envs/impedance_control.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sapien.core as sapien 3 | 4 | class ImpedanceController : 5 | 6 | def __init__ (self, 7 | pinocchio, 8 | eff_link_id, 9 | cartesian_stiffness, 10 | cartesian_damping, 11 | nullspace_stiffness, 12 | damping, 13 | qmask 14 | ) : 15 | 16 | self.pinocchio : sapien.PinocchioModel = pinocchio 17 | self.link_id = eff_link_id 18 | self.damping = damping 19 | self.cartesian_stiffness = cartesian_stiffness 20 | self.cartesian_damping = cartesian_damping 21 | 
self.nullspace_stiffness = nullspace_stiffness 22 | self.qmask = qmask 23 | self.maskid = np.nonzero(qmask) 24 | 25 | def control_ik(self, target_pose : sapien.Pose, start_dof_pos, dof_pos, dof_vel): 26 | 27 | coriolis = self.pinocchio.compute_coriolis_matrix(dof_pos, dof_vel) 28 | jacobian = self.pinocchio.get_link_jacobian(self.link_id)[:, :7] 29 | q = dof_pos[self.maskid].reshape(-1, 1) 30 | q_nullspace = start_dof_pos[self.maskid].reshape(-1, 1) 31 | dq = dof_vel[self.maskid].reshape(-1, 1) 32 | error = np.zeros((6, 1)) 33 | current_pose = self.pinocchio.get_link_pose(self.link_id) 34 | tau_task = np.zeros((7, 1)) 35 | tau_null = np.zeros((7, 1)) 36 | 37 | error[:3, 0] = current_pose.p - target_pose.p 38 | # print(error) 39 | # error[3:, 0] = (current_pose.inv().q * target_pose.q)[1:] 40 | 41 | # Compute the damped pseudo-inverse used for the nullspace projector 42 | lmbda = np.eye(6) * (self.damping ** 2) 43 | j_eef_T = np.transpose(jacobian) 44 | pinv = np.linalg.inv(jacobian @ j_eef_T + lmbda) @ jacobian 45 | 46 | tau_task = j_eef_T @ (-self.cartesian_stiffness * error - self.cartesian_damping * (jacobian @ dq)) # task-space spring-damper torque 47 | tau_null = (np.eye(7) - j_eef_T @ pinv) @ (self.nullspace_stiffness * (q_nullspace - q) - (2.0 * np.sqrt(self.nullspace_stiffness)) * dq) # critically-damped posture torque, projected into the task nullspace 48 | 49 | tau = tau_task + tau_null 50 | 51 | # print(coriolis.shape) 52 | # print(jacobian.shape) 53 | 54 | return tau.transpose()[0] -------------------------------------------------------------------------------- /env/sapien_envs/interfaces.py: -------------------------------------------------------------------------------- 1 | from functools import partial 2 | import numpy as np 3 | import sapien.core as sapien 4 | 5 | from utils.sapien_utils import * 6 | from utils.tools import * 7 | from utils.transform import * 8 | 9 | from dm_env import specs 10 | 11 | from env.sapien_envs.base_manipulation import BaseManipulationEnv, CAMERA_INTRINSIC 12 | 13 | IMAGE_SIZE = 84 14 | 15 | class GymManipulationEnv(): 16 | 17 | def __init__(self, env : BaseManipulationEnv, max_step = 4): 18 | 19 | if isinstance(env, partial) : 20 | env = env() 21 | self.env = env 22 | self.max_step = max_step 23 | 24 | obs = self.reset() 25 | self.observation_space = convert_observation_to_space(obs) 26 | self.state_space = convert_observation_to_space(obs) 27 | self.action_space = spaces.Box(-np.inf, np.inf, shape=(8,), dtype=np.float32) 28 | self.last_image = None 29 | 30 | def reset(self, **kwargs): 31 | self.env.reset(**kwargs) 32 | self.step_num = 0 33 | img = self.env.get_image() 34 | self.last_image = img["camera0"] 35 | img["camera1"] = img["camera0"] 36 | return img 37 | 38 | def step(self, actions, **kwargs) : 39 | 40 | self.env.current_driving_target[-1] = actions[-1] 41 | self.env.gripper_move_to(actions[:7]) 42 | 43 | obs = self.env.get_image() 44 | obs["camera1"] = self.last_image 45 | self.last_image = obs["camera0"] 46 | rew = self.env.get_reward(actions) 47 | suc = self.env.get_success() 48 | done = False 49 | self.step_num += 1 50 | if self.step_num == self.max_step : 51 | done = True 52 | 53 | return obs, rew, done, {'is_success': self.env.get_success().any()} 54 | 55 | def close(self) : 56 | 57 | self.env.close() 58 | 59 | class DMCManipulationEnv() : 60 | 61 | def __init__(self, env : BaseManipulationEnv, max_step = 4): 62 | 63 | if isinstance(env, partial) : 64 | env = env() 65 | self.env = env 66 | self.max_step = max_step 67 | self.step_num = 0 68 | 69 | # obs = self.reset() 70 | # self.observation_space = convert_observation_to_space(obs) 71 | # self.state_space = convert_observation_to_space(obs)
72 | # self.action_space = spaces.Box(-np.inf, np.inf, shape=(8,), dtype=np.float32) 73 | 74 | def observation_spec(self) : 75 | 76 | return specs.Array((10, IMAGE_SIZE, IMAGE_SIZE), np.float32, 'observation') 77 | 78 | def action_spec(self) : 79 | 80 | return specs.Array((8,), np.float32, 'action') 81 | 82 | def step(self, actions, **kwargs) : 83 | 84 | self.env.current_driving_target[-1] = actions[-1] 85 | self.env.gripper_move_to(actions[:7]) 86 | self.step_num += 1 87 | 88 | obs = self._get_observation() 89 | 90 | return obs 91 | 92 | def reset(self) : 93 | 94 | self.env.reset() 95 | self.step_num = 0 96 | 97 | obs = self._get_observation() 98 | 99 | return obs 100 | 101 | def get_done(self) : 102 | 103 | return self.step_num >= self.max_step 104 | 105 | def _get_observation(self): 106 | 107 | original_obs = self.env.get_observation() 108 | original_img = self.env.get_image() 109 | color = cv2.resize(original_img['camera0']['Color'], dsize=(84, 84), interpolation=cv2.INTER_CUBIC) 110 | hand_pose = original_obs['hand_pose'] 111 | hand_pose = hand_pose[np.newaxis, np.newaxis, :] * np.ones((IMAGE_SIZE, IMAGE_SIZE, 1)) 112 | # new_obs = np.concatenate((original_obs['cam1_rgb'], original_obs['cam2_rgb'], original_obs['cam1_depth'][..., np.newaxis], original_obs['cam2_depth'][..., np.newaxis]), axis=-1).transpose((2, 0, 1)) 113 | new_obs = np.concatenate((hand_pose, color), axis=-1).transpose((2, 0, 1)) 114 | reward = self.env.get_reward(None) 115 | success = original_obs["success"] 116 | done = self.get_done() 117 | new_obs = new_obs.astype('float32') 118 | action = self.env.last_action.astype('float32') 119 | discount = np.array((1.0,), dtype='float32') 120 | 121 | class TMP: 122 | 123 | def __getitem__(self, key): 124 | 125 | if key == "observation" : 126 | 127 | return new_obs 128 | 129 | elif key == "reward" : 130 | 131 | return reward 132 | 133 | elif key == "action" : 134 | 135 | return action 136 | 137 | elif key == "discount" : 138 | 139 | return discount 140 | 141 | else : 142 | 143 | raise KeyError(key) 144 | 145 | @property 146 | def success(self) : 147 | 148 | return success 149 | 150 | @property 151 | def observation(self) : 152 | 153 | return new_obs 154 | 155 | @property 156 | def reward(self) : 157 | 158 | return reward 159 | 160 | @property 161 | def action(self) : 162 | 163 | return action 164 | 165 | def last(self) : 166 | 167 | return done 168 | 169 | return TMP() -------------------------------------------------------------------------------- /env/sapien_envs/osc_planner.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sapien.core as sapien 3 | 4 | class OSCPlanner : 5 | 6 | def __init__ (self, pinocchio, eff_link_id, damping, qmask, dt = 0.1) : 7 | 8 | self.pinocchio : sapien.PinocchioModel = pinocchio 9 | self.link_id = eff_link_id 10 | self.damping = damping 11 | self.qmask = qmask 12 | self.dt = dt 13 | 14 | def control_ik(self, target_pose, dof_pos): 15 | 16 | result, success, error = self.pinocchio.compute_inverse_kinematics( 17 | self.link_id, 18 | target_pose, 19 | dof_pos, 20 | self.qmask, 21 | dt = self.dt, 22 | damp = self.damping, 23 | max_iterations=2048 24 | ) 25 | 26 | return result, success, error 27 | 28 |
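# A damped least-squares (DLS) step is the classical fallback when the
# Pinocchio IK call above is unavailable. A minimal sketch, assuming `dpose`
# is a 6-vector task-space error and get_link_jacobian returns a 6xN
# jacobian of which only the first 7 (arm) columns are used, as elsewhere
# in this repo:
#
# def control_dls(self, dpose, dof_pos):
#     self.pinocchio.compute_full_jacobian(dof_pos)
#     jacobian = self.pinocchio.get_link_jacobian(self.link_id)[:, :7]
#     j_eef_T = np.transpose(jacobian)
#     lmbda = np.eye(6) * (self.damping ** 2)  # Tikhonov damping term
#     # dq = J^T (J J^T + damping^2 I)^-1 dpose
#     return (j_eef_T @ np.linalg.inv(jacobian @ j_eef_T + lmbda) @ dpose.reshape(-1, 1)).reshape(-1)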
66 | # def control_osc(self, dpose, hand_vel, dof_pos, dof_vel): 67 | 68 | # self.pinocchio.compute_full_jacobian(dof_pos) 69 | # mass_matrix = self.pinocchio.compute_generalized_mass_matrix(dof_pos) 70 | # jacobian = self.pinocchio.get_link_jacobian(self.link_id) 71 | 72 | # mm_inv = np.linalg.inv(mass_matrix) # torch.inverse(mm) 73 | # m_eef_inv = jacobian @ mm_inv @ np.transpose(jacobian) 74 | # m_eef = np.linalg.inv(m_eef_inv) 75 | # u = np.transpose(jacobian) @ m_eef @ ( 76 | # self.kp * dpose.reshape(-1, 1) - self.kd * hand_vel.reshape(-1, 1)) 77 | 78 | # # Nullspace control torques `u_null` prevent large changes in joint configuration 79 | # # They are added into the nullspace of OSC so that the end effector orientation remains constant 80 | # # roboticsproceedings.org/rss07/p31.pdf 81 | # j_eef_inv = m_eef @ jacobian @ mm_inv 82 | # u_null = self.kd_null * -dof_vel.reshape(-1, 1) + self.kp_null * ( 83 | # (self.default_dof_pos.reshape(-1, 1) - dof_pos.reshape(-1, 1) + np.pi) % (2 * np.pi) - np.pi) 84 | # u_null = mass_matrix @ u_null 85 | # u += (np.eye(jacobian.shape[1]) - np.transpose(jacobian) @ j_eef_inv) @ u_null 86 | # return u.reshape(-1) -------------------------------------------------------------------------------- /miscs/experimental_results/ablation_results.yaml: -------------------------------------------------------------------------------- 1 | DistCoef: 2 | Cabinet: 3 | 0.0: 4 | Train: 5 | Test: 6 | 0.01: # 2023-09-05_12:44:36NewMovePeriod_Coef0.01/model_550.pt 7 | Train: 84.25 8 | Test: 82.375 9 | 0.02: 10 | Train: 11 | Test: 12 | 0.03: 13 | Train: 14 | Test: 15 | 0.04: 16 | Train: 17 | Test: 18 | 0.05: # 2023-09-03_10:07:59NewMovePeriod_Coef0.05/model_3900.pt 19 | Train: 66.625 20 | Test: 21 | 0.5: # 2023-09-03_16:28:47NewMovePeriod_Coef0.5/model_2000.pt 22 | Train: 75.375 23 | Test: 77.875 24 | 25 | DistCoefNew: 26 | Cabinet: 27 | 1.0: 2.014, 1.87, 1.918, 1.865, 1.88 (0.6079, 0.611, 0.5968, 0.6146, 0.6162) 28 | 0.1: 0.5794, 0.5262, 0.5773, 0.5052, 0.5598 (0.4726, 0.4643, 0.4778, 0.4637,
0.4633) 29 | 0.01: 0.824, 0.9272, 0.95, 0.9095, 0.9466 (0.492, 0.3822, 0.4385, 0.4063, 0.394) 30 | -0.01: 0.3023, 0.5108, 0.4559, 0.3272, 0.5569 (0.4409, 0.4118, 0.4493, 0.4491, 0.4066) 31 | -0.1: 1.818, 1.818, 1.818, 1.818, 1.818 (0.443, 0.443, 0.443, 0.443, 0.443) 32 | -1.0: 2.039, 2.039, 2.039, 2.039, 2.039 (0.35, 0.35, 0.35, 0.35, 0.35) 33 | 34 | OpenLoop: 35 | Cabinet: 36 | Train: 74.5 37 | Test: 74.0 38 | Cabinet45: 39 | Train: 24.625 40 | Test: 29.25 41 | Drawer: 42 | Train: 68.875 43 | Test: 68.375 47 | Drawer30: 48 | Train: 66.625 49 | Test: 60.625 50 | 51 | ViewNum: 52 | Cabinet: 53 | 1: 54 | Train: 69.875 (8.070361) 55 | Test: 71.125 (9.759055) 56 | 2: 57 | Train: 83.25 (8.211535) 58 | Test: 85.375 (8.914019) 59 | 3: 60 | Train: 86.25 (8.179599) 61 | Test: 86.625 (9.188982) 62 | 4: 63 | Train: 85.125 (11.625438) 64 | Test: 87.00 (13.706802) 65 | Cabinet45: 66 | 2: 67 | Train: 49.75 68 | Test: 39.125 69 | Drawer: 70 | 1: 71 | Train: 82.375 (7.676725) 72 | Test: 83.25 (8.976509) 73 | 2: 74 | Train: 81.0 (8.184131) 75 | Test: 85.375 (9.149064) 76 | 3: 77 | Train: 83.5 (8.719014) 78 | Test: 87.5 (9.316589) 79 | 4: 80 | Train: 83.0 (14.037972) 81 | Test: 87.0 (9.831388) 82 | Drawer30: 83 | 2: 84 | Train: 64.75 85 | Test: 56.625 86 | Pot: 87 | Mug: 88 | 1: 89 | Train: 90 | Test: 0.0 (2.104183) 91 | 2: 92 | Train: 93 | Test: 1.25 (4.777835) 94 | 3: 95 | Train: 96 | Test: 40.5 (6.341656) 97 | 4: 98 | Train: 99 | Test: 41.875 (8.115523) 100 | 101 | DomainRandomization: 102 | Cabinet: # 2023-09-07_22:27:40CabinetNoDR/model_600.pt (ada) 103 | Train: 66.625 104 | Test: 73.00 105 | Cabinet45: 106 | Train: 32.5 107 | Test: 36.375 108 | Drawer: # 2023-09-12_07:36:25DrawerNoDR/model_500.pt (ada3) 109 | Train: 77.875 110 | Test: 77.625 111 | Drawer30: 112 | Train: 40.625 113 | Test: 30.125 114 | 115 | PoseTricks: 116 | Cabinet: # 2023-09-11_06:49:44CabinetNoTricks/model_800.pt 117 | Train: 65.5 118 | Test: 36.0 119 | Cabinet45: 120 | Train: 121 | Test: 22.75 122 | Drawer: # 2023-09-11_06:49:46DrawerNoTricks/model_800.pt 123 | Train: 35.875 124 | Test: 42.0 125 | Drawer30: 126 | Train: 43.0 127 | Test: 20.25 128 | 129 | Heuristic: 130 | Cabinet: 131 | Train: 69.5 132 | Test: 74.5 133 | Cabinet45: 134 | Train: 39.375 135 | Test: 43.625 136 | Drawer: 137 | Train: 50.625 138 | Test: 59.25 139 | Drawer30: 140 | Train: 44.625 141 | Test: 48.625 142 | Pot: 143 | Train: 1.0 144 | Test: 1.5 145 | Mug: 146 | Train: 0.0 147 | Test: 0.0 148 | -------------------------------------------------------------------------------- /miscs/experimental_results/baseline_reults.yaml: -------------------------------------------------------------------------------- 1 | UMPNet: 2 | Cabinet: 3 | Train: 27.125 4 | Test: 28.125 5 | Cabinet 45: 6 | Train: 11.0 7 | Test: 10.875 8 | Drawer: 9 | Train: 16.625 10 | Test: 18.75 11 | Drawer 30: 12 | Train: 4.375 13 | Test: 5.625 14 | Pot: 15 | Train: 19.125 16 | Test: 36.875 17 | Mug: 18 | Train: 26.625 19 | Test: 22.875 20 | 21 | DRQv2: 22 | Cabinet: 23 | Train: 1.75 24 | Test: 2.5 25 | Cabinet 45: 26 | Train: 0.75 27 | Test: 0.75 28 | Drawer: 29 | Train: 1.875 30 | Test: 1.0 31 | Drawer 30: 32 | Train: 1.375 33 | Test: 0.5 34 | Pot: 35 | Train: 0.125 36 | Test: 0.0 37 | Mug: 38 | Train: 0.0 39 | Test: 0.0 40 | 41 | Flowbot: 42 | Cabinet: 43 | Train: 19.5 44 | Test: 20.375 45 | Cabinet 45: 46 | Train: 6.75 47 | Test: 6.375 48 | Drawer: 49 | Train: 27.25 50 | Test: 25.75 51 | Drawer 30: 52 | Train: 16.875 53 | Test: 11.25 54 | Pot:
55 | Train: 2.5 56 | Test: 7.375 57 | Mug: 58 | Train: 3.875 59 | Test: 4.25 60 | 61 | Where2Act: 62 | Cabinet: 63 | Train: 8.0 64 | Test: 7.0 65 | Cabinet 45: 66 | Train: 1.75 67 | Test: 2.0 68 | Drawer: 69 | Train: 5.875 70 | Test: 7.5 71 | Drawer 30: 72 | Train: 1.125 73 | Test: 0.625 74 | Pot: 75 | Train: 30.0 76 | Test: 55.25 77 | Mug: 78 | Train: 20.875 79 | Test: 19.625 80 | 81 | -------------------------------------------------------------------------------- /miscs/experimental_results/main_results.yaml: -------------------------------------------------------------------------------- 1 | Heuristic: 2 | Door: 3 | Train: 69.5 4 | Test: 74.5 5 | Drawer: 6 | Train: 50.625 7 | Test: 59.25 8 | Pot: 9 | Train: 1.0 10 | Test: 1.5 11 | Mug: 12 | Train: 0.0 13 | Test: 0.0 14 | 15 | Adapose: 16 | Door_Open: # 2023-09-02_01:40:39Split_Cabinet/model_1125.pt 17 | Train: 89.25 18 | Test: 88.875 19 | Door_45: 20 | Train: 51.125 21 | Test: 52.875 22 | Drawer: # 2023-09-01_13:25:15OpenDrawer_Init_PoseRew_BoundaryPenalty_NewEstimator/model_850.pt 23 | Train: 83.00 24 | Test: 87.0 25 | Drawer_30: 26 | Train: 63.5 27 | Test: 61.875 28 | Pot: # 2023-09-01_10:59:12OpenPot/model_1350.pt (Maybe not the best?) 29 | Train: 22.75 30 | Test: 55.625 31 | Mug: # 2023-09-07_17:47:47MugMorePrecise/model_525.pt 32 | Train: 48.375 33 | Test: 41.875 34 | 35 | GroundTruth: 36 | Door: 37 | Train: (0.00) 38 | Test: (0.00) 39 | Drawer: 40 | Train: (0.00) 41 | Test: (0.00) 42 | Pot: 43 | Train: (0.00) 44 | Test: (0.00) 45 | -------------------------------------------------------------------------------- /miscs/plot/alpha.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/miscs/plot/alpha.pdf -------------------------------------------------------------------------------- /miscs/plot/alpha.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | # Alpha values (log scale) 5 | x = [ 6 | -2.5, 7 | -1.5, 8 | -0.5, 9 | 0.5, 10 | 1.5, 11 | 2.5, 12 | 3.5 13 | ] 14 | # Error 15 | y1 = [ 16 | [2.014, 1.87, 1.918, 1.865, 1.88], 17 | [0.5794, 0.2675, 0.5773, 0.3727, 0.5598], 18 | [0.3082, 0.3077, 0.3988, 0.5301, 0.4935], 19 | [0.5805, 0.5056, 0.8713, 1.006, 0.8651], 20 | [1.818, 1.151, 1.551, 1.399, 1.684], 21 | [1.52, 1.284, 1.687, 1.279, 2.249], 22 | [1.431, 1.894, 2.661, 2.429, 1.968] 23 | ] 24 | # Distance 25 | y2 = [ 26 | [0.6079, 0.611, 0.5968, 0.6146, 0.6162], 27 | [0.4726, 0.4643, 0.4778, 0.4637, 0.4633], 28 | [0.492, 0.3822, 0.4385, 0.5012, 0.394], 29 | [0.4409, 0.4118, 0.4493, 0.4491, 0.4066], 30 | [0.443, 0.4638, 0.5004, 0.4919, 0.405], 31 | [0.3647, 0.4047, 0.383, 0.4307, 0.3865], 32 | [0.2251, 0.269, 0.234, 0.264, 0.2747] 33 | ] 34 | 35 | x = np.array(x) 36 | y1 = np.array(y1) 37 | y2 = np.array(y2) 38 | 39 | fig, ax1 = plt.subplots() 40 | 41 | ax1.set_xlabel("Alpha (Log Scale)") 42 | ax1.set_ylabel("Error") 43 | ax1.errorbar(x, y1.mean(axis=-1), y1.std(axis=-1), fmt='o', color='tab:red', 44 | ecolor='pink', elinewidth=3, capsize=0) 45 | ax1.plot(x, y1.mean(axis=-1), color="tab:red", label="Error") 46 | ax1.legend(bbox_to_anchor=(0.3, 1), frameon=False) 47 | 48 | ax2 = ax1.twinx() 49 | ax2.set_ylabel("Distance") 50 | ax2.errorbar(x, y2.mean(axis=-1), y2.std(axis=-1), fmt='o', color='tab:blue', 51 | ecolor='lightblue', elinewidth=3, capsize=0) 52 | ax2.plot(x, y2.mean(axis=-1), color="tab:blue",
label="Distance") 53 | ax2.legend(bbox_to_anchor=(0.1, 0.9), frameon=False) 54 | 55 | fig.tight_layout() 56 | # save 57 | plt.savefig("alpha.pdf") -------------------------------------------------------------------------------- /miscs/plot/num_views.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hyperplane-lab/RGBManip/02f1cc104f7723a73381c4b04e7ca81a3eaddff7/miscs/plot/num_views.pdf -------------------------------------------------------------------------------- /miscs/plot/num_views.py: -------------------------------------------------------------------------------- 1 | import matplotlib.pyplot as plt 2 | import numpy as np 3 | 4 | # Number of views 5 | x = [2, 3, 4, 5] 6 | # OpenCabinetTrain 7 | y1 = [84.25, 88.125, 86.125, 89.125] 8 | # OpenCabinetTest 9 | y2 = [78.5, 83.1875, 89.1875, 89.625] 10 | # OpenDrawerTrain 11 | y3 = [82.375, 81.0, 83.5, 83.0] 12 | # OpenDrawerTest 13 | y4 = [83.25, 85.375, 87.5, 87.0] 14 | 15 | x = np.array(x) 16 | y1 = np.array(y1) 17 | y2 = np.array(y2) 18 | y3 = np.array(y3) 19 | y4 = np.array(y4) 20 | 21 | # plt.plot(x, y1, label="OpenCabinetTrain") 22 | # plt.plot(x, y2, label="OpenCabinetTest") 23 | 24 | # plt.plot(x, y3, label="OpenDrawerTrain") 25 | # plt.plot(x, y4, label="OpenDrawerTest") 26 | total_width, n = 0.8, 4 27 | width = total_width / n 28 | x = x - (total_width - width) / 2 29 | plt.bar(x, y1, width=width, label="DoorTrain") 30 | plt.bar(x + width, y2, width=width, label="DoorTest") 31 | plt.bar(x + 2*width, y3, width=width, label="DrawerTrain") 32 | plt.bar(x + 3*width, y4, width=width, label="DrawerTest") 33 | 34 | # Set x axis to integer only 35 | plt.xticks(np.arange(2, 6, 1.0)) 36 | plt.yticks(np.arange(0, 101, 5.0)) 37 | plt.ylim(75, 90) 38 | 39 | plt.xlabel("Number of Views") 40 | plt.ylabel("Success Rate (%)") 41 | plt.title("Number of Views and Success Rate") 42 | plt.legend() 43 | # plt.show() 44 | plt.savefig("num_views.pdf") -------------------------------------------------------------------------------- /models/controller/base_controller.py: -------------------------------------------------------------------------------- 1 | from abc import abstractmethod 2 | from models.manipulation.base_manipulation import BaseManipulation 3 | from models.pose_estimator.base_estimator import BasePoseEstimator 4 | from env.base_sapien_env import BaseEnv 5 | from env.my_vec_env import MultiVecEnv 6 | from logging import Logger 7 | 8 | class BaseController : 9 | 10 | def __init__( 11 | self, 12 | env : MultiVecEnv, 13 | pose_estimator : BasePoseEstimator, 14 | manipulation : BaseManipulation, 15 | cfg : dict, 16 | logger : Logger 17 | ) : 18 | 19 | self.env = env 20 | self.pose_estimator = pose_estimator 21 | self.manipulation = manipulation 22 | self.controller = None 23 | self.cfg = cfg 24 | self.logger = logger 25 | 26 | def take_picture(self, pose) : 27 | ''' 28 | Take a picture of the environment with the camera at the given pose. 29 | Not implemented yet! 30 | '''
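# A hedged sketch of what this could do, mirroring how
# HeuristicPoseController.run (models/controller/heuristic_pose.py) takes
# pictures -- move the wrist camera to `pose`, then grab the image dict
# (the keyword values here are illustrative, not prescribed):
#
# self.env.cam_move_to(pose=pose, time=2, wait=1, planner="path")
# return self.env.get_image()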
31 | 32 | raise NotImplementedError 33 | 34 | @abstractmethod 35 | def run(self) : 36 | 37 | pass 38 | 39 | def train_controller(self, steps, log_interval=1, save_interval=1) : 40 | ''' 41 | Train controller model only 42 | ''' 43 | 44 | self.logger.info("Training controller model...") 45 | self.controller.learn( 46 | steps=steps, 47 | log_interval=log_interval, 48 | save_interval=save_interval 49 | ) 50 | 51 | def train_manipulation(self, steps, log_interval=1, save_interval=1) : 52 | ''' 53 | Train manipulation model only 54 | ''' 55 | 56 | self.logger.info("Training manipulation model...") 57 | self.manipulation.learn( 58 | steps=steps, 59 | log_interval=log_interval, 60 | save_interval=save_interval 61 | ) -------------------------------------------------------------------------------- /models/controller/baseline.py: -------------------------------------------------------------------------------- 1 | import os 2 | from models.controller.base_controller import BaseController 3 | from abc import abstractclassmethod 4 | from models.manipulation.open_cabinet import OpenCabinetManipulation 5 | from models.manipulation.open_drawer import OpenDrawerManipulation 6 | from models.manipulation.open_pot import OpenPotManipulation 7 | from models.manipulation.pick_mug import PickMugManipulation 8 | import numpy as np 9 | 10 | class BaselineController(BaseController) : 11 | 12 | def run(self, setting, action) : 13 | 14 | self.env.load(setting) 15 | # print(setting, action) 16 | # exit() 17 | center = action[None, :3] 18 | direction = action[None, 3:] 19 | x_ = np.zeros_like(direction) 20 | x_[:, 0] = 1 21 | y_ = np.zeros_like(direction) 22 | y_[:, 1] = 1 23 | z_ = np.zeros_like(direction) 24 | z_[:, 2] = 1 25 | 26 | axis = np.zeros((1, 3, 3)) 27 | 28 | if isinstance(self.manipulation, OpenCabinetManipulation) : 29 | axis[0, 0] = -action[3:] 30 | elif isinstance(self.manipulation, OpenDrawerManipulation) : 31 | axis[0, 0] = -action[3:] 32 | elif isinstance(self.manipulation, OpenPotManipulation) : 33 | axis[0, 1, 1] = 1 34 | axis[0, 2, 0] = 1 35 | elif isinstance(self.manipulation, PickMugManipulation) : 36 | axis[0, 1] = action[3:] 37 | 38 | self.manipulation.plan_pathway(center, axis) -------------------------------------------------------------------------------- /models/controller/gt_pose.py: -------------------------------------------------------------------------------- 1 | from models.controller.base_controller import BaseController 2 | from models.manipulation.open_cabinet import OpenCabinetManipulation 3 | from models.pose_estimator.base_estimator import BasePoseEstimator 4 | from models.pose_estimator.groundtruth_estimator import GroundTruthPoseEstimator 5 | from env.sapien_envs.open_cabinet import OpenCabinetEnv 6 | from logging import Logger 7 | import numpy as np 8 | 9 | class GtPoseController(BaseController) : 10 | 11 | def __init__(self, env : OpenCabinetEnv, pose_estimator : BasePoseEstimator, manipulation : OpenCabinetManipulation, cfg : dict, logger : Logger): 12 | super().__init__(env, pose_estimator, manipulation, cfg, logger) 13 | 14 | def run(self, eval=False) : 15 | ''' 16 | Run the controller. 17 | Picture-taking not implemented yet!
18 | ''' 19 | 20 | # for i in range(1000) : 21 | # self.env.step(np.asarray([[0]*self.env.action_space.shape[0]])) 22 | 23 | if isinstance(self.pose_estimator, GroundTruthPoseEstimator) : 24 | original_bbox = self.pose_estimator.estimate() 25 | else : 26 | raise NotImplementedError 27 | 28 | center = (original_bbox[:, 0]+original_bbox[:, 7]) / 2 29 | direction = np.zeros((original_bbox.shape[0], 3, 3)) 30 | direction[:, 0] = original_bbox[:, 1] - original_bbox[:, 0] 31 | direction[:, 1] = original_bbox[:, 0] - original_bbox[:, 2] 32 | direction[:, 2] = original_bbox[:, 4] - original_bbox[:, 0] 33 | frame = np.zeros((original_bbox.shape[0], 3, 3)) 34 | frame[:, 0, 0] = 1 35 | frame[:, 1, 1] = 1 36 | frame[:, 2, 2] = 1 37 | d_norm = np.linalg.norm(direction, axis=-1, keepdims=True) 38 | direction = direction / (d_norm + 1e-8) 39 | direction = np.where(d_norm > 1e-8, direction, frame) 40 | self.manipulation.plan_pathway(center, direction, eval) 41 | -------------------------------------------------------------------------------- /models/controller/heuristic_pose.py: -------------------------------------------------------------------------------- 1 | import sapien.core as sapien 2 | import numpy as np 3 | from models.controller.base_controller import BaseController 4 | from models.manipulation.open_cabinet import OpenCabinetManipulation 5 | from models.pose_estimator.base_estimator import BasePoseEstimator 6 | from models.pose_estimator.groundtruth_estimator import GroundTruthPoseEstimator 7 | from models.pose_estimator.AdaPose.interface import AdaPoseEstimator 8 | from models.pose_estimator.AdaPose.interface_v2 import AdaPoseEstimator_v2 9 | from models.pose_estimator.AdaPose.interface_v3 import AdaPoseEstimator_v3 10 | from models.pose_estimator.AdaPose.interface_v4 import AdaPoseEstimator_v4 11 | from models.pose_estimator.AdaPose.interface_v5 import AdaPoseEstimator_v5 12 | from env.sapien_envs.open_cabinet import OpenCabinetEnv 13 | from env.my_vec_env import MultiVecEnv 14 | from logging import Logger 15 | from utils.tools import show_image 16 | from utils.transform import lookat_quat 17 | import cv2 18 | import _pickle as cPickle 19 | 20 | class HeuristicPoseController(BaseController) : 21 | 22 | def __init__(self, env : MultiVecEnv, pose_estimator : BasePoseEstimator, manipulation : OpenCabinetManipulation, cfg : dict, logger : Logger): 23 | super().__init__(env, pose_estimator, manipulation, cfg, logger) 24 | 25 | def run(self, eval=False) : 26 | ''' 27 | Run the controller. 
28 | ''' 29 | 30 | p1 = np.asarray([-0.1, 0.0, 0.8]) 31 | p2 = np.asarray([-0.0, 0.5, 0.7]) 32 | target = np.asarray([0.5, 0.0, 0.5]) 33 | q1 = lookat_quat(target-p1) 34 | q2 = lookat_quat(target-p2) 35 | picture_pose1 = sapien.Pose(p=p1, q=q1) 36 | picture_pose2 = sapien.Pose(p=p2, q=q2) 37 | 38 | self.env.cam_move_to(pose = picture_pose1, time=2, wait=1, planner="path", robot_frame=True, no_collision_with_front=False) 39 | img_1 = self.env.get_image() 40 | self.env.cam_move_to(pose = picture_pose2, time=2, wait=1, planner="path", robot_frame=True, no_collision_with_front=False) 41 | img_2 = self.env.get_image() 42 | 43 | mask_1 = img_1["camera0"]["Mask"] 44 | mask_2 = img_2["camera0"]["Mask"] 45 | 46 | if np.sum(mask_1) == 0 or np.sum(mask_2) == 0 : 47 | self.logger.info("No mask detected") 48 | return 49 | 50 | if isinstance(self.pose_estimator, GroundTruthPoseEstimator) : 51 | original_bbox = self.pose_estimator.estimate() 52 | elif isinstance(self.pose_estimator, AdaPoseEstimator)\ 53 | or isinstance(self.pose_estimator, AdaPoseEstimator_v2)\ 54 | or isinstance(self.pose_estimator, AdaPoseEstimator_v3)\ 55 | or isinstance(self.pose_estimator, AdaPoseEstimator_v4)\ 56 | or isinstance(self.pose_estimator, AdaPoseEstimator_v5) : 57 | original_bbox = self.pose_estimator.estimate( 58 | img_1["camera0"]["Intrinsic"], 59 | img_1["camera0"]["Color"], 60 | img_1["camera0"]["Mask"], 61 | img_1["camera0"]["Extrinsic"], 62 | img_2["camera0"]["Color"], 63 | img_2["camera0"]["Mask"], 64 | img_2["camera0"]["Extrinsic"] 65 | ) 66 | else : 67 | raise NotImplementedError 68 | 69 | center = (original_bbox[:, 1] + original_bbox[:, 7]) / 2 70 | direction = np.zeros((original_bbox.shape[0], 3, 3)) 71 | direction[:, 0] = original_bbox[:, 1] - original_bbox[:, 0] 72 | direction[:, 1] = original_bbox[:, 0] - original_bbox[:, 2] 73 | direction[:, 2] = original_bbox[:, 4] - original_bbox[:, 0] 74 | frame = np.zeros((original_bbox.shape[0], 3, 3)) 75 | frame[:, 0, 0] = 1 76 | frame[:, 1, 1] = 1 77 | frame[:, 2, 2] = 1 78 | d_norm = np.linalg.norm(direction, axis=-1, keepdims=True) 79 | direction = direction / (d_norm + 1e-8) 80 | direction = np.where(d_norm > 1e-8, direction, frame) 81 | self.manipulation.plan_pathway(center, direction, eval) 82 | -------------------------------------------------------------------------------- /models/controller/homing.py: -------------------------------------------------------------------------------- 1 | import sapien.core as sapien 2 | import numpy as np 3 | from models.controller.base_controller import BaseController 4 | from models.manipulation.open_cabinet import OpenCabinetManipulation 5 | from models.pose_estimator.base_estimator import BasePoseEstimator 6 | from models.pose_estimator.groundtruth_estimator import GroundTruthPoseEstimator 7 | from models.pose_estimator.AdaPose.interface import AdaPoseEstimator 8 | from models.pose_estimator.AdaPose.interface_v2 import AdaPoseEstimator_v2 9 | from models.pose_estimator.AdaPose.interface_v3 import AdaPoseEstimator_v3 10 | from models.pose_estimator.AdaPose.interface_v4 import AdaPoseEstimator_v4 11 | from models.pose_estimator.AdaPose.interface_v5 import AdaPoseEstimator_v5 12 | from env.sapien_envs.open_cabinet import OpenCabinetEnv 13 | from env.my_vec_env import MultiVecEnv 14 | from logging import Logger 15 | from utils.tools import show_image 16 | from utils.transform import lookat_quat 17 | import cv2 18 | import _pickle as cPickle 19 | 20 | class HomingController(BaseController) : 21 | 22 | def __init__(self, env : 
MultiVecEnv, pose_estimator : BasePoseEstimator, manipulation : OpenCabinetManipulation, cfg : dict, logger : Logger): 23 | super().__init__(env, pose_estimator, manipulation, cfg, logger) 24 | 25 | def run(self, eval=False) : 26 | ''' 27 | Run the controller. 28 | ''' 29 | 30 | p1 = np.asarray([0.53, 0.0, 0.40]) 31 | target = np.asarray([0.68, 0.0, 0.40]) 32 | q1 = lookat_quat(target-p1) 33 | picture_pose1 = sapien.Pose(p=p1, q=q1) 34 | 35 | self.env.hand_move_to(pose = picture_pose1, time=2, wait=1, planner="path", robot_frame=True, no_collision_with_front=False) 36 | -------------------------------------------------------------------------------- /models/manipulation/base_manipulation.py: -------------------------------------------------------------------------------- 1 | from abc import abstractclassmethod 2 | from env.base_sapien_env import BaseEnv 3 | from env.my_vec_env import MultiVecEnv 4 | from logging import Logger 5 | 6 | class BaseManipulation : 7 | 8 | def __init__(self, env : MultiVecEnv, cfg : dict, logger : Logger) : 9 | 10 | self.env = env 11 | self.cfg = cfg 12 | self.logger = logger 13 | 14 | @abstractclassmethod 15 | def plan_pathway(self, obs, eval=False) : 16 | 17 | pass -------------------------------------------------------------------------------- /models/manipulation/close_cabinet.py: -------------------------------------------------------------------------------- 1 | from models.manipulation.base_manipulation import BaseManipulation 2 | from env.base_sapien_env import BaseEnv 3 | from env.sapien_envs.open_cabinet import OpenCabinetEnv 4 | from env.my_vec_env import MultiVecEnv 5 | from utils.transform import * 6 | from logging import Logger 7 | 8 | class CloseCabinetManipulation(BaseManipulation) : 9 | 10 | def __init__(self, env : MultiVecEnv, cfg : dict, logger : Logger) : 11 | 12 | super().__init__(env, cfg, logger) 13 | 14 | def plan_pathway(self, center, axis, eval=False) : 15 | 16 | batch_size = center.shape[0] 17 | 18 | x_ = np.array([[1, 0, 0]] * batch_size) 19 | y_ = np.array([[0, 1, 0]] * batch_size) 20 | z_ = np.array([[0, 0, 1]] * batch_size) 21 | 22 | # Computing Pre-grasp Pose 23 | pre_grasp_axis = axis[:, 0] 24 | pre_grasp_axis -= z_ * (pre_grasp_axis * z_).sum(axis=-1, keepdims=True) 25 | norm = np.linalg.norm(pre_grasp_axis, axis=-1, keepdims=True) 26 | pre_grasp_axis = np.where(norm<1e-8, y_, pre_grasp_axis / (norm+1e-8)) 27 | pre_grasp_p = center - pre_grasp_axis * 0.2 28 | pre_grasp_x = -z_ 29 | pre_grasp_z = pre_grasp_axis 30 | pre_grasp_y = np.cross(pre_grasp_z, pre_grasp_x) 31 | axis_from = np.concatenate([ 32 | x_[:, np.newaxis, :], 33 | y_[:, np.newaxis, :], 34 | z_[:, np.newaxis, :] 35 | ], axis=1) 36 | axis_to = np.concatenate([ 37 | pre_grasp_x[:, np.newaxis, :], 38 | pre_grasp_y[:, np.newaxis, :], 39 | pre_grasp_z[:, np.newaxis, :] 40 | ], axis=1) 41 | pre_grasp_q = batch_get_quaternion(axis_from, axis_to) 42 | pre_grasp_pose = np.concatenate([pre_grasp_p, pre_grasp_q], axis=-1) 43 | 44 | # Performing Grasp 45 | self.env.class_method("toggle_gripper", open=True) 46 | res = self.env.hand_move_to(pre_grasp_pose, time=2, wait=2, planner="path", no_collision_with_front=True) 47 | 48 | grasp_p = pre_grasp_p 49 | 50 | # Computing Grasp Pose 51 | grasp_p = grasp_p + pre_grasp_axis * 0.18 52 | grasp_q = pre_grasp_q 53 | grasp_pose = np.concatenate([grasp_p, grasp_q], axis=-1) 54 | 55 | res = self.env.hand_move_to(grasp_pose, time=2, wait=1, planner="ik") 56 | 57 | self.env.class_method("toggle_gripper", open=False) 58 | 59 | cur_dir = 
pre_grasp_axis 60 | 61 | for step_size in self.cfg["step_sizes"] : 62 | 63 | cur_p = self.env.gripper_pose()[:, :3] 64 | pred_p = cur_p + cur_dir * step_size 65 | 66 | next_x = -z_ 67 | next_z = -cur_dir 68 | next_y = np.cross(next_z, next_x) 69 | axis_from = np.concatenate([ 70 | x_[:, np.newaxis, :], 71 | y_[:, np.newaxis, :], 72 | z_[:, np.newaxis, :] 73 | ], axis=1) 74 | axis_to = np.concatenate([ 75 | next_x[:, np.newaxis, :], 76 | next_y[:, np.newaxis, :], 77 | next_z[:, np.newaxis, :] 78 | ], axis=1) 79 | pred_q = batch_get_quaternion(axis_from, axis_to) 80 | 81 | pred_pose = np.concatenate([pred_p, pred_q], axis=-1) 82 | 83 | self.env.gripper_move_to(pred_pose, time=step_size*10, wait=step_size*5) 84 | 85 | new_p = self.env.gripper_pose()[:, :3] 86 | new_dir = new_p - cur_p 87 | new_dir[:, 2] = 0 # project to xy-plane 88 | new_dir = normalize(new_dir) 89 | 90 | delta = new_dir - cur_dir 91 | 92 | dot = (new_dir * cur_dir).sum(axis=-1, keepdims=True) 93 | dot = np.clip(dot, -1, 1) 94 | 95 | cur_dir = normalize(cur_dir + 2*delta*dot) 96 | -------------------------------------------------------------------------------- /models/manipulation/close_drawer.py: -------------------------------------------------------------------------------- 1 | from models.manipulation.base_manipulation import BaseManipulation 2 | from env.base_sapien_env import BaseEnv 3 | from env.sapien_envs.open_cabinet import OpenCabinetEnv 4 | from env.my_vec_env import MultiVecEnv 5 | from utils.transform import * 6 | from logging import Logger 7 | 8 | class CloseDrawerManipulation(BaseManipulation) : 9 | 10 | def __init__(self, env : MultiVecEnv, cfg : dict, logger : Logger) : 11 | 12 | super().__init__(env, cfg, logger) 13 | 14 | def plan_pathway(self, center, axis, eval=False) : 15 | 16 | batch_size = center.shape[0] 17 | 18 | # batch_size = obs["handle_bbox"].shape[0] 19 | 20 | x_ = np.array([[1, 0, 0]] * batch_size) 21 | y_ = np.array([[0, 1, 0]] * batch_size) 22 | z_ = np.array([[0, 0, 1]] * batch_size) 23 | 24 | # Computing Pre-grasp Pose 25 | pre_grasp_axis = axis[:, 0] 26 | pre_grasp_axis -= z_ * (pre_grasp_axis * z_).sum(axis=-1, keepdims=True) 27 | norm = np.linalg.norm(pre_grasp_axis, axis=-1, keepdims=True) 28 | pre_grasp_axis = np.where(norm<1e-8, y_, pre_grasp_axis / (norm+1e-8)) 29 | pre_grasp_p = center - pre_grasp_axis * 0.2 30 | pre_grasp_y = -z_ 31 | pre_grasp_z = pre_grasp_axis 32 | pre_grasp_x = np.cross(pre_grasp_y, pre_grasp_z) 33 | axis_from = np.concatenate([ 34 | x_[:, np.newaxis, :], 35 | y_[:, np.newaxis, :], 36 | z_[:, np.newaxis, :] 37 | ], axis=1) 38 | axis_to = np.concatenate([ 39 | pre_grasp_x[:, np.newaxis, :], 40 | pre_grasp_y[:, np.newaxis, :], 41 | pre_grasp_z[:, np.newaxis, :] 42 | ], axis=1) 43 | pre_grasp_q = batch_get_quaternion(axis_from, axis_to) 44 | pre_grasp_pose = np.concatenate([pre_grasp_p, pre_grasp_q], axis=-1) 45 | 46 | # Performing Grasp 47 | self.env.class_method("toggle_gripper", open=True) 48 | res = self.env.hand_move_to(pre_grasp_pose, time=2, wait=2, planner="path", no_collision_with_front=True) 49 | 50 | grasp_p = pre_grasp_p 51 | 52 | # Computing Grasp Pose 53 | grasp_p = grasp_p + pre_grasp_axis * 0.18 54 | grasp_q = pre_grasp_q 55 | grasp_pose = np.concatenate([grasp_p, grasp_q], axis=-1) 56 | 57 | res = self.env.hand_move_to(grasp_pose, time=2, wait=1, planner="ik") 58 | 59 | self.env.class_method("toggle_gripper", open=False) 60 | 61 | cur_dir = pre_grasp_axis 62 | 63 | for step_size in self.cfg["step_sizes"] : 64 | 65 | cur_p = 
self.env.gripper_pose()[:, :3] 66 | # self.env.class_method("_draw_point", cur_p[0], color=[0,1,0], ret=False) 67 | pred_p = cur_p + cur_dir * step_size 68 | 69 | next_y = -z_ 70 | next_z = -cur_dir 71 | next_x = np.cross(next_y, next_z) 72 | # print((next_x*next_z).sum(), (next_y*next_z).sum(), (next_x*next_y).sum()) 73 | axis_from = np.concatenate([ 74 | x_[:, np.newaxis, :], 75 | y_[:, np.newaxis, :], 76 | z_[:, np.newaxis, :] 77 | ], axis=1) 78 | axis_to = np.concatenate([ 79 | next_x[:, np.newaxis, :], 80 | next_y[:, np.newaxis, :], 81 | next_z[:, np.newaxis, :] 82 | ], axis=1) 83 | pred_q = batch_get_quaternion(axis_from, axis_to) 84 | 85 | pred_pose = np.concatenate([pred_p, pred_q], axis=-1) 86 | 87 | self.env.gripper_move_to(pred_pose, time=step_size*10, wait=step_size*5) 88 | 89 | new_p = self.env.gripper_pose()[:, :3] 90 | new_dir = new_p - cur_p 91 | new_dir[:, 2] = 0 # project to xy-plane 92 | new_dir = normalize(new_dir) 93 | 94 | delta = new_dir - cur_dir 95 | 96 | dot = (new_dir * cur_dir).sum(axis=-1, keepdims=True) 97 | dot = np.clip(dot, -1, 1) 98 | 99 | cur_dir = normalize(cur_dir + 2*delta*dot) 100 | -------------------------------------------------------------------------------- /models/manipulation/open_cabinet.py: -------------------------------------------------------------------------------- 1 | from models.manipulation.base_manipulation import BaseManipulation 2 | from env.base_sapien_env import BaseEnv 3 | from env.sapien_envs.open_cabinet import OpenCabinetEnv 4 | from env.my_vec_env import MultiVecEnv 5 | from utils.transform import * 6 | from logging import Logger 7 | 8 | class OpenCabinetManipulation(BaseManipulation) : 9 | 10 | def __init__(self, env : MultiVecEnv, cfg : dict, logger : Logger) : 11 | 12 | super().__init__(env, cfg, logger) 13 | 14 | def plan_pathway(self, center, axis, eval=False) : 15 | 16 | batch_size = center.shape[0] 17 | 18 | x_ = np.array([[1, 0, 0]] * batch_size) 19 | y_ = np.array([[0, 1, 0]] * batch_size) 20 | z_ = np.array([[0, 0, 1]] * batch_size) 21 | 22 | # Computing Pre-grasp Pose 23 | pre_grasp_axis = axis[:, 0] 24 | pre_grasp_axis -= z_ * (pre_grasp_axis * z_).sum(axis=-1, keepdims=True) 25 | norm = np.linalg.norm(pre_grasp_axis, axis=-1, keepdims=True) 26 | pre_grasp_axis = np.where(norm<1e-8, y_, pre_grasp_axis / (norm+1e-8)) 27 | pre_grasp_p = center - pre_grasp_axis * 0.2 28 | pre_grasp_x = -z_ 29 | pre_grasp_z = pre_grasp_axis 30 | pre_grasp_y = np.cross(pre_grasp_z, pre_grasp_x) 31 | axis_from = np.concatenate([ 32 | x_[:, np.newaxis, :], 33 | y_[:, np.newaxis, :], 34 | z_[:, np.newaxis, :] 35 | ], axis=1) 36 | axis_to = np.concatenate([ 37 | pre_grasp_x[:, np.newaxis, :], 38 | pre_grasp_y[:, np.newaxis, :], 39 | pre_grasp_z[:, np.newaxis, :] 40 | ], axis=1) 41 | pre_grasp_q = batch_get_quaternion(axis_from, axis_to) 42 | pre_grasp_pose = np.concatenate([pre_grasp_p, pre_grasp_q], axis=-1) 43 | 44 | # Performing Grasp 45 | self.env.class_method("toggle_gripper", open=True) 46 | res = self.env.hand_move_to(pre_grasp_pose, time=2, wait=2, planner="path", no_collision_with_front=True) 47 | 48 | proceed = np.ones(batch_size, dtype=np.int32) 49 | grasp_p = pre_grasp_p 50 | 51 | if self.cfg["closed_loop"] : 52 | for i in range(3) : 53 | 54 | # Computing Grasp Pose 55 | grasp_p = grasp_p + pre_grasp_axis * 0.06 * proceed[..., None] 56 | grasp_q = pre_grasp_q 57 | grasp_pose = np.concatenate([grasp_p, grasp_q], axis=-1) 58 | 59 | res = self.env.hand_move_to(grasp_pose, time=2, wait=1, planner="ik") 60 | 61 |
self.env.class_method("_release_target") 62 | error = np.linalg.norm(self.env.hand_pose()[:, :3] - grasp_p, axis=-1) 63 | proceed = proceed & (error < 0.01) 64 | 65 | grasp_p = grasp_p - pre_grasp_axis * 0.01 66 | grasp_q = pre_grasp_q 67 | grasp_pose = np.concatenate([grasp_p, grasp_q], axis=-1) 68 | res = self.env.hand_move_to(grasp_pose, time=2, wait=1, planner="ik") 69 | else : 70 | grasp_p = grasp_p + pre_grasp_axis * 0.18 71 | grasp_q = pre_grasp_q 72 | grasp_pose = np.concatenate([grasp_p, grasp_q], axis=-1) 73 | 74 | res = self.env.hand_move_to(grasp_pose, time=2, wait=1, planner="path") 75 | 76 | self.env.class_method("_release_target") 77 | 78 | self.env.class_method("toggle_gripper", open=False) 79 | 80 | cur_dir = - pre_grasp_axis 81 | 82 | for step_size in self.cfg["step_sizes"] : 83 | 84 | cur_p = self.env.gripper_pose()[:, :3] 85 | pred_p = cur_p + cur_dir * step_size 86 | 87 | next_x = -z_ 88 | next_z = -cur_dir 89 | next_y = np.cross(next_z, next_x) 90 | axis_from = np.concatenate([ 91 | x_[:, np.newaxis, :], 92 | y_[:, np.newaxis, :], 93 | z_[:, np.newaxis, :] 94 | ], axis=1) 95 | axis_to = np.concatenate([ 96 | next_x[:, np.newaxis, :], 97 | next_y[:, np.newaxis, :], 98 | next_z[:, np.newaxis, :] 99 | ], axis=1) 100 | pred_q = batch_get_quaternion(axis_from, axis_to) 101 | 102 | pred_pose = np.concatenate([pred_p, pred_q], axis=-1) 103 | 104 | self.env.gripper_move_to(pred_pose, time=step_size*10, wait=step_size*5, planner="ik" if self.cfg["closed_loop"] else "path") 105 | 106 | new_p = self.env.gripper_pose()[:, :3] 107 | new_dir = new_p - cur_p 108 | new_dir[:, 2] = 0 # project to xy-plane 109 | new_dir = normalize(new_dir) 110 | 111 | delta = new_dir - cur_dir 112 | 113 | dot = (new_dir * cur_dir).sum(axis=-1, keepdims=True) 114 | dot = np.clip(dot, -1, 1) 115 | 116 | cur_dir = normalize(cur_dir + 2*delta*dot) 117 | -------------------------------------------------------------------------------- /models/manipulation/open_drawer.py: -------------------------------------------------------------------------------- 1 | from models.manipulation.base_manipulation import BaseManipulation 2 | from env.base_sapien_env import BaseEnv 3 | from env.sapien_envs.open_cabinet import OpenCabinetEnv 4 | from env.my_vec_env import MultiVecEnv 5 | from utils.transform import * 6 | from logging import Logger 7 | 8 | class OpenDrawerManipulation(BaseManipulation) : 9 | 10 | def __init__(self, env : MultiVecEnv, cfg : dict, logger : Logger) : 11 | 12 | super().__init__(env, cfg, logger) 13 | 14 | def plan_pathway(self, center, axis, eval=False) : 15 | 16 | batch_size = center.shape[0] 17 | 18 | x_ = np.array([[1, 0, 0]] * batch_size) 19 | y_ = np.array([[0, 1, 0]] * batch_size) 20 | z_ = np.array([[0, 0, 1]] * batch_size) 21 | 22 | # Computing Pre-grasp Pose 23 | pre_grasp_axis = axis[:, 0] 24 | pre_grasp_axis -= z_ * (pre_grasp_axis * z_).sum(axis=-1, keepdims=True) 25 | norm = np.linalg.norm(pre_grasp_axis, axis=-1, keepdims=True) 26 | pre_grasp_axis = np.where(norm<1e-8, y_, pre_grasp_axis / (norm+1e-8)) 27 | pre_grasp_p = center - pre_grasp_axis * 0.2 28 | pre_grasp_y = -z_ 29 | pre_grasp_z = pre_grasp_axis 30 | pre_grasp_x = np.cross(pre_grasp_y, pre_grasp_z) 31 | axis_from = np.concatenate([ 32 | x_[:, np.newaxis, :], 33 | y_[:, np.newaxis, :], 34 | z_[:, np.newaxis, :] 35 | ], axis=1) 36 | axis_to = np.concatenate([ 37 | pre_grasp_x[:, np.newaxis, :], 38 | pre_grasp_y[:, np.newaxis, :], 39 | pre_grasp_z[:, np.newaxis, :] 40 | ], axis=1) 41 | pre_grasp_q = 
batch_get_quaternion(axis_from, axis_to) 42 | pre_grasp_pose = np.concatenate([pre_grasp_p, pre_grasp_q], axis=-1) 43 | 44 | # Performing Grasp 45 | self.env.class_method("toggle_gripper", open=True) 46 | res = self.env.hand_move_to(pre_grasp_pose, time=2, wait=2, planner="path", no_collision_with_front=True) 47 | 48 | proceed = np.ones(batch_size, dtype=np.int32) 49 | grasp_p = pre_grasp_p 50 | 51 | if self.cfg["closed_loop"] : 52 | for i in range(3) : 53 | 54 | # Computing Grasp Pose 55 | grasp_p = grasp_p + pre_grasp_axis * 0.06 * proceed[..., None] 56 | grasp_q = pre_grasp_q 57 | grasp_pose = np.concatenate([grasp_p, grasp_q], axis=-1) 58 | 59 | res = self.env.hand_move_to(grasp_pose, time=2, wait=1, planner="ik") 60 | 61 | self.env.class_method("_release_target") 62 | error = np.linalg.norm(self.env.hand_pose()[:, :3] - grasp_p, axis=-1) 63 | proceed = proceed & (error < 0.01) 64 | 65 | grasp_p = grasp_p - pre_grasp_axis * 0.01 66 | grasp_q = pre_grasp_q 67 | grasp_pose = np.concatenate([grasp_p, grasp_q], axis=-1) 68 | res = self.env.hand_move_to(grasp_pose, time=2, wait=1, planner="ik") 69 | else : 70 | grasp_p = grasp_p + pre_grasp_axis * 0.18 71 | grasp_q = pre_grasp_q 72 | grasp_pose = np.concatenate([grasp_p, grasp_q], axis=-1) 73 | 74 | res = self.env.hand_move_to(grasp_pose, time=2, wait=1, planner="path") 75 | 76 | self.env.class_method("_release_target") 77 | 78 | self.env.class_method("toggle_gripper", open=False) 79 | 80 | cur_dir = - pre_grasp_axis 81 | 82 | for step_size in self.cfg["step_sizes"] : 83 | 84 | cur_p = self.env.gripper_pose()[:, :3] 85 | pred_p = cur_p + cur_dir * step_size 86 | 87 | next_y = -z_ 88 | next_z = -cur_dir 89 | next_x = np.cross(next_y, next_z) 90 | axis_from = np.concatenate([ 91 | x_[:, np.newaxis, :], 92 | y_[:, np.newaxis, :], 93 | z_[:, np.newaxis, :] 94 | ], axis=1) 95 | axis_to = np.concatenate([ 96 | next_x[:, np.newaxis, :], 97 | next_y[:, np.newaxis, :], 98 | next_z[:, np.newaxis, :] 99 | ], axis=1) 100 | pred_q = batch_get_quaternion(axis_from, axis_to) 101 | 102 | pred_pose = np.concatenate([pred_p, pred_q], axis=-1) 103 | 104 | self.env.gripper_move_to(pred_pose, time=step_size*10, wait=step_size*5, planner="ik" if self.cfg["closed_loop"] else "path") 105 | 106 | new_p = self.env.gripper_pose()[:, :3] 107 | new_dir = new_p - cur_p 108 | new_dir[:, 2] = 0 # project to xy-plane 109 | new_dir = normalize(new_dir) 110 | 111 | delta = new_dir - cur_dir 112 | 113 | dot = (new_dir * cur_dir).sum(axis=-1, keepdims=True) 114 | dot = np.clip(dot, -1, 1) 115 | 116 | cur_dir = normalize(cur_dir + 2*delta*dot) 117 | -------------------------------------------------------------------------------- /models/manipulation/open_pot.py: -------------------------------------------------------------------------------- 1 | from models.manipulation.base_manipulation import BaseManipulation 2 | from env.base_sapien_env import BaseEnv 3 | from env.sapien_envs.open_cabinet import OpenCabinetEnv 4 | from env.my_vec_env import MultiVecEnv 5 | from utils.transform import * 6 | from logging import Logger 7 | 8 | class OpenPotManipulation(BaseManipulation) : 9 | 10 | def __init__(self, env : MultiVecEnv, cfg : dict, logger : Logger) : 11 | 12 | super().__init__(env, cfg, logger) 13 | 14 | def plan_pathway(self, center, axis, eval=False) : 15 | 16 | batch_size = center.shape[0] 17 | 18 | x_ = np.array([[1, 0, 0]] * batch_size) 19 | y_ = np.array([[0, 1, 0]] * batch_size) 20 | z_ = np.array([[0, 0, 1]] * batch_size) 21 | 22 | pre_grasp_axis = -z_ 23 | pre_grasp_p = 
center - pre_grasp_axis * 0.08 24 | pre_grasp_y = np.cross(pre_grasp_axis, axis[:, 1]) 25 | pre_grasp_y /= np.linalg.norm(pre_grasp_y, axis=-1, keepdims=True) 26 | pre_grasp_x = -np.cross(pre_grasp_axis, pre_grasp_y) 27 | pre_grasp_x /= np.linalg.norm(pre_grasp_x, axis=-1, keepdims=True) 28 | pre_grasp_z = pre_grasp_axis 29 | axis_from = np.concatenate([ 30 | x_[:, np.newaxis, :], 31 | y_[:, np.newaxis, :], 32 | z_[:, np.newaxis, :] 33 | ], axis=1) 34 | axis_to = np.concatenate([ 35 | pre_grasp_x[:, np.newaxis, :], 36 | pre_grasp_y[:, np.newaxis, :], 37 | pre_grasp_z[:, np.newaxis, :] 38 | ], axis=1) 39 | pre_grasp_q = batch_get_quaternion(axis_from, axis_to) 40 | pre_grasp_pose = np.concatenate([pre_grasp_p, pre_grasp_q], axis=-1) 41 | 42 | grasp_p = center + pre_grasp_axis * 0.03 43 | grasp_gripper_p = center 44 | grasp_q = pre_grasp_q 45 | grasp_pose = np.concatenate([grasp_p, grasp_q], axis=-1) 46 | 47 | self.env.class_method("toggle_gripper", open=True) 48 | 49 | res = self.env.gripper_move_to(pre_grasp_pose, time=2, wait=1, planner="path") 50 | 51 | self.env.class_method("toggle_gripper", open=True) 52 | 53 | res = self.env.gripper_move_to(grasp_pose, time=2, wait=1, planner="ik") 54 | 55 | self.env.class_method("toggle_gripper", open=False) 56 | 57 | gripper_p = [grasp_gripper_p + pre_grasp_axis*0.1, grasp_gripper_p] 58 | prev_dir = -pre_grasp_axis 59 | 60 | last_dir = prev_dir 61 | 62 | for step_size in self.cfg["step_sizes"] : 63 | 64 | 65 | next_p = gripper_p[-1] + last_dir / (np.linalg.norm(last_dir, axis=-1, keepdims=True)+1e-4) * step_size 66 | 67 | next_q = pre_grasp_q 68 | 69 | next_pose = np.concatenate([next_p, next_q], axis=-1) 70 | res = self.env.gripper_move_to(next_pose, time=2, wait=1, planner="ik") 71 | 72 | gripper_p.append(self.env.gripper_pose()[:, :3]) -------------------------------------------------------------------------------- /models/manipulation/pick_mug.py: -------------------------------------------------------------------------------- 1 | from models.manipulation.base_manipulation import BaseManipulation 2 | from env.base_sapien_env import BaseEnv 3 | from env.sapien_envs.open_cabinet import OpenCabinetEnv 4 | from env.my_vec_env import MultiVecEnv 5 | from utils.transform import * 6 | from logging import Logger 7 | 8 | class PickMugManipulation(BaseManipulation) : 9 | 10 | def __init__(self, env : MultiVecEnv, cfg : dict, logger : Logger) : 11 | 12 | super().__init__(env, cfg, logger) 13 | 14 | def plan_pathway(self, center, axis, eval=False) : 15 | 16 | batch_size = center.shape[0] 17 | 18 | x_ = np.array([[1, 0, 0]] * batch_size) 19 | y_ = np.array([[0, 1, 0]] * batch_size) 20 | z_ = np.array([[0, 0, 1]] * batch_size) 21 | 22 | # Computing Pre-grasp Pose 23 | pre_grasp_axis = axis[:, 1] 24 | pre_grasp_axis -= z_ * (pre_grasp_axis * z_).sum(axis=-1, keepdims=True) 25 | norm = np.linalg.norm(pre_grasp_axis, axis=-1, keepdims=True) 26 | pre_grasp_axis = np.where(norm<1e-8, y_, pre_grasp_axis / (norm+1e-8)) 27 | pre_grasp_p = center - pre_grasp_axis * 0.2 28 | pre_grasp_x = z_ 29 | pre_grasp_z = pre_grasp_axis 30 | pre_grasp_y = np.cross(pre_grasp_z, pre_grasp_x) 31 | axis_from = np.concatenate([ 32 | x_[:, np.newaxis, :], 33 | y_[:, np.newaxis, :], 34 | z_[:, np.newaxis, :] 35 | ], axis=1) 36 | axis_to = np.concatenate([ 37 | pre_grasp_x[:, np.newaxis, :], 38 | pre_grasp_y[:, np.newaxis, :], 39 | pre_grasp_z[:, np.newaxis, :] 40 | ], axis=1) 41 | pre_grasp_q = batch_get_quaternion(axis_from, axis_to) 42 | pre_grasp_pose =
np.concatenate([pre_grasp_p, pre_grasp_q], axis=-1) 43 | 44 | # Performing Grasp 45 | self.env.class_method("toggle_gripper", open=True) 46 | res = self.env.hand_move_to(pre_grasp_pose, time=2, wait=2, planner="path", no_collision_with_front=False) 47 | 48 | grasp_p = pre_grasp_p + pre_grasp_axis * 0.06 49 | grasp_q = pre_grasp_q 50 | grasp_pose = np.concatenate([grasp_p, grasp_q], axis=-1) 51 | res = self.env.hand_move_to(grasp_pose, time=2, wait=1, planner="ik" if self.cfg["closed_loop"] else "path") 52 | self.env.class_method("_release_target") 53 | self.env.class_method("toggle_gripper", open=False) 54 | 55 | cur_dir = pre_grasp_axis 56 | 57 | for step_size in self.cfg["step_sizes"] : 58 | 59 | cur_p = self.env.gripper_pose()[:, :3] 60 | pred_p = cur_p + z_ * step_size 61 | 62 | next_x = z_ 63 | next_z = cur_dir 64 | next_y = np.cross(next_z, next_x) 65 | axis_from = np.concatenate([ 66 | x_[:, np.newaxis, :], 67 | y_[:, np.newaxis, :], 68 | z_[:, np.newaxis, :] 69 | ], axis=1) 70 | axis_to = np.concatenate([ 71 | next_x[:, np.newaxis, :], 72 | next_y[:, np.newaxis, :], 73 | next_z[:, np.newaxis, :] 74 | ], axis=1) 75 | pred_q = batch_get_quaternion(axis_from, axis_to) 76 | 77 | pred_pose = np.concatenate([pred_p, pred_q], axis=-1) 78 | 79 | self.env.gripper_move_to(pred_pose, time=step_size*10, wait=step_size*5, planner="ik" if self.cfg["closed_loop"] else "path") 80 | -------------------------------------------------------------------------------- /models/manipulation/rl.py: -------------------------------------------------------------------------------- 1 | from models.manipulation.base_manipulation import BaseManipulation 2 | from env.base_sapien_env import BaseEnv 3 | from env.sapien_envs.open_cabinet import OpenCabinetEnv 4 | from env.my_vec_env import MultiVecEnv 5 | from utils.transform import * 6 | from utils.tools import * 7 | from logging import Logger 8 | from gym.spaces import Space, Box 9 | from algo.ppo.ppo import PPO 10 | import torch 11 | 12 | class RLManipulation(BaseManipulation) : 13 | 14 | def __init__(self, vec_env : MultiVecEnv, cfg : dict, logger : Logger) : 15 | 16 | super().__init__(vec_env, cfg, logger) 17 | 18 | self.agent = PPO(vec_env, cfg) 19 | 20 | def learn(self, steps, log_interval=1, save_interval=1) : 21 | 22 | self.agent.run(steps, log_interval, save_interval) 23 | 24 | def plan_pathway(self, obs, eval=False) : 25 | 26 | self.agent.play() 27 | -------------------------------------------------------------------------------- /models/pose_estimator/.gitignore: -------------------------------------------------------------------------------- 1 | data 2 | results -------------------------------------------------------------------------------- /models/pose_estimator/AdaPose/AdaPose.yml: -------------------------------------------------------------------------------- 1 | name: AdaPose 2 | channels: 3 | - pytorch 4 | - conda-forge 5 | - defaults 6 | dependencies: 7 | - _libgcc_mutex=0.1=main 8 | - _openmp_mutex=5.1=1_gnu 9 | - absl-py=1.0.0=pyhd8ed1ab_0 10 | - astor=0.8.1=pyh9f0ad1d_0 11 | - blas=1.0=mkl 12 | - bzip2=1.0.8=h7f98852_4 13 | - c-ares=1.18.1=h7f98852_0 14 | - ca-certificates=2022.5.18.1=ha878542_0 15 | - cached-property=1.5.2=py_0 16 | - cairo=1.16.0=hcf35c78_1003 17 | - certifi=2022.5.18.1=py37h89c1867_0 18 | - cudatoolkit=10.2.89=hfd86e86_1 19 | - cycler=0.11.0=pyhd8ed1ab_0 20 | - dbus=1.13.6=hfdff14a_1 21 | - expat=2.4.8=h27087fc_0 22 | - ffmpeg=4.3.2=hca11adc_0 23 | - fontconfig=2.14.0=h8e229c2_0 24 | - freetype=2.11.0=h70c0345_0 25 | - 
gast=0.5.3=pyhd8ed1ab_0 26 | - gettext=0.19.8.1=hf34092f_1004 27 | - giflib=5.2.1=h7b6447c_0 28 | - glib=2.66.3=h58526e2_0 29 | - gmp=6.2.1=h58526e2_0 30 | - gnutls=3.6.13=h85f3911_1 31 | - graphite2=1.3.13=h58526e2_1001 32 | - grpcio=1.16.0=py37h4f00d22_1000 33 | - gst-plugins-base=1.14.5=h0935bb2_2 34 | - gstreamer=1.14.5=h36ae1b5_2 35 | - h5py=3.6.0=py37ha0f2276_0 36 | - harfbuzz=2.4.0=h9f30f68_3 37 | - hdf5=1.10.6=nompi_h3c11f04_101 38 | - icu=64.2=he1b5a44_1 39 | - importlib-metadata=4.11.3=py37h89c1867_1 40 | - intel-openmp=2021.4.0=h06a4308_3561 41 | - jasper=1.900.1=h07fcdf6_1006 42 | - jpeg=9e=h7f8727e_0 43 | - keras-applications=1.0.8=py_1 44 | - keras-preprocessing=1.1.2=pyhd8ed1ab_0 45 | - kiwisolver=1.4.2=py37h7cecad7_1 46 | - lame=3.100=h7f98852_1001 47 | - lcms2=2.12=h3be6417_0 48 | - libblas=3.9.0=12_linux64_mkl 49 | - libcblas=3.9.0=12_linux64_mkl 50 | - libclang=9.0.1=default_hb4e5071_5 51 | - libedit=3.1.20210910=h7f8727e_0 52 | - libffi=3.2.1=hf484d3e_1007 53 | - libgcc-ng=11.2.0=h1234567_0 54 | - libgfortran-ng=7.5.0=h14aa051_20 55 | - libgfortran4=7.5.0=h14aa051_20 56 | - libglib=2.66.3=hbe7bbb4_0 57 | - libgomp=11.2.0=h1234567_0 58 | - libiconv=1.16=h516909a_0 59 | - liblapack=3.9.0=12_linux64_mkl 60 | - liblapacke=3.9.0=12_linux64_mkl 61 | - libllvm9=9.0.1=default_hc23dcda_4 62 | - libopencv=4.5.2=py37h8945300_0 63 | - libpng=1.6.37=hbc83047_0 64 | - libprotobuf=3.15.8=h780b84a_1 65 | - libstdcxx-ng=11.2.0=h1234567_0 66 | - libtiff=4.2.0=h2818925_1 67 | - libuuid=2.32.1=h7f98852_1000 68 | - libwebp=1.2.2=h55f646e_0 69 | - libwebp-base=1.2.2=h7f8727e_0 70 | - libxcb=1.13=h7f98852_1004 71 | - libxkbcommon=0.10.0=he1b5a44_0 72 | - libxml2=2.9.10=hee79883_0 73 | - lz4-c=1.9.3=h295c915_1 74 | - markdown=3.3.7=pyhd8ed1ab_0 75 | - matplotlib=3.2.2=1 76 | - matplotlib-base=3.2.2=py37h30547a4_0 77 | - mkl=2021.4.0=h06a4308_640 78 | - mkl-service=2.4.0=py37h7f8727e_0 79 | - mkl_fft=1.3.1=py37hd3c417c_0 80 | - mkl_random=1.2.2=py37h51133e4_0 81 | - mock=4.0.3=py37h89c1867_3 82 | - ncurses=6.3=h7f8727e_2 83 | - nettle=3.6=he412f7d_0 84 | - ninja=1.10.2=h06a4308_5 85 | - ninja-base=1.10.2=hd09550d_5 86 | - nspr=4.32=h9c3ff4c_1 87 | - nss=3.59=h2c00c37_0 88 | - numpy=1.21.5=py37he7a7128_2 89 | - numpy-base=1.21.5=py37hf524024_2 90 | - opencv=4.5.2=py37h89c1867_0 91 | - openh264=2.1.1=h780b84a_0 92 | - openssl=1.0.2u=h516909a_0 93 | - pcre=8.45=h9c3ff4c_0 94 | - pillow=9.0.1=py37h22f2fdc_0 95 | - pip=21.2.2=py37h06a4308_0 96 | - pixman=0.38.0=h516909a_1003 97 | - protobuf=3.15.8=py37hcd2ae1e_0 98 | - pthread-stubs=0.4=h36c2ea0_1001 99 | - py-opencv=4.5.2=py37h085eea5_0 100 | - pyparsing=3.0.9=pyhd8ed1ab_0 101 | - python=3.7.0=h6e4f718_3 102 | - python-dateutil=2.8.2=pyhd8ed1ab_0 103 | - python_abi=3.7=2_cp37m 104 | - pytorch=1.6.0=py3.7_cuda10.2.89_cudnn7.6.5_0 105 | - qt=5.12.5=hd8c4c69_1 106 | - readline=7.0=h7b6447c_5 107 | - scipy=1.5.3=py37h8911b10_0 108 | - setuptools=61.2.0=py37h06a4308_0 109 | - six=1.16.0=pyhd3eb1b0_1 110 | - sqlite=3.33.0=h62c20be_0 111 | - tensorboard=1.13.1=py37_0 112 | - tensorflow=1.13.1=py37h90a7d86_1 113 | - tensorflow-estimator=1.13.0=py_0 114 | - termcolor=1.1.0=py_2 115 | - timm=0.4.12=pyhd8ed1ab_0 116 | - tk=8.6.11=h1ccaba5_1 117 | - torchvision=0.7.0=py37_cu102 118 | - tornado=6.1=py37h540881e_3 119 | - tqdm=4.64.0=py37h06a4308_0 120 | - typing-extensions=4.2.0=hd8ed1ab_1 121 | - typing_extensions=4.2.0=pyha770c72_1 122 | - werkzeug=2.1.2=pyhd8ed1ab_1 123 | - wheel=0.37.1=pyhd3eb1b0_0 124 | - x264=1!161.3030=h7f98852_1 125 | - 
xorg-kbproto=1.0.7=h7f98852_1002 126 | - xorg-libice=1.0.10=h7f98852_0 127 | - xorg-libsm=1.2.3=hd9c2040_1000 128 | - xorg-libx11=1.7.2=h7f98852_0 129 | - xorg-libxau=1.0.9=h7f98852_0 130 | - xorg-libxdmcp=1.1.3=h7f98852_0 131 | - xorg-libxext=1.3.4=h7f98852_1 132 | - xorg-libxrender=0.9.10=h7f98852_1003 133 | - xorg-renderproto=0.11.1=h7f98852_1002 134 | - xorg-xextproto=7.3.0=h7f98852_1002 135 | - xorg-xproto=7.0.31=h7f98852_1007 136 | - xz=5.2.5=h7f8727e_1 137 | - zipp=3.8.0=pyhd8ed1ab_0 138 | - zlib=1.2.12=h7f8727e_2 139 | - zstd=1.5.2=ha4553b6_0 140 | - pip: 141 | - addict==2.4.0 142 | - anyio==3.6.1 143 | - argon2-cffi==21.3.0 144 | - argon2-cffi-bindings==21.2.0 145 | - attrs==21.4.0 146 | - babel==2.10.3 147 | - backcall==0.2.0 148 | - beautifulsoup4==4.11.1 149 | - bleach==5.0.0 150 | - cffi==1.15.0 151 | - chamfer==2.0.0 152 | - charset-normalizer==2.0.12 153 | - click==8.1.3 154 | - debugpy==1.6.0 155 | - decorator==5.1.1 156 | - defusedxml==0.7.1 157 | - deprecation==2.1.0 158 | - docker-pycreds==0.4.0 159 | - emd-ext==0.0.0 160 | - entrypoints==0.4 161 | - fastjsonschema==2.15.3 162 | - future==0.18.2 163 | - gitdb==4.0.10 164 | - gitpython==3.1.29 165 | - idna==3.3 166 | - importlib-resources==5.8.0 167 | - ipykernel==6.15.0 168 | - ipython==7.34.0 169 | - ipython-genutils==0.2.0 170 | - ipywidgets==7.7.1 171 | - jedi==0.18.1 172 | - jinja2==3.1.2 173 | - json5==0.9.8 174 | - jsonschema==4.6.0 175 | - jupyter-client==7.3.4 176 | - jupyter-core==4.10.0 177 | - jupyter-packaging==0.12.2 178 | - jupyter-server==1.18.0 179 | - jupyterlab==3.4.3 180 | - jupyterlab-pygments==0.2.2 181 | - jupyterlab-server==2.14.0 182 | - jupyterlab-widgets==1.1.1 183 | - knn-cuda==0.2 184 | - markupsafe==2.1.1 185 | - matplotlib-inline==0.1.3 186 | - mistune==0.8.4 187 | - mmcv==1.7.1 188 | - nbclassic==0.3.7 189 | - nbclient==0.6.4 190 | - nbconvert==6.5.0 191 | - nbformat==5.4.0 192 | - nest-asyncio==1.5.5 193 | - nn-distance==0.0.0 194 | - notebook==6.4.12 195 | - notebook-shim==0.1.0 196 | - open3d==0.15.2 197 | - packaging==21.3 198 | - pandas==1.1.5 199 | - pandocfilters==1.5.0 200 | - parso==0.8.3 201 | - pathtools==0.1.2 202 | - pexpect==4.8.0 203 | - pickleshare==0.7.5 204 | - pointnet2==0.0.0 205 | - prometheus-client==0.14.1 206 | - promise==2.3 207 | - prompt-toolkit==3.0.29 208 | - psutil==5.9.1 209 | - ptyprocess==0.7.0 210 | - pycparser==2.21 211 | - pygments==2.12.0 212 | - pyquaternion==0.9.9 213 | - pyrsistent==0.18.1 214 | - pytz==2022.1 215 | - pyyaml==6.0 216 | - pyzmq==23.2.0 217 | - requests==2.28.0 218 | - ripleyk==0.0.3 219 | - send2trash==1.8.0 220 | - sentry-sdk==1.11.1 221 | - setproctitle==1.3.2 222 | - shortuuid==1.0.11 223 | - smmap==5.0.0 224 | - sniffio==1.2.0 225 | - soupsieve==2.3.2.post1 226 | - terminado==0.15.0 227 | - tinycss2==1.1.1 228 | - tomlkit==0.11.0 229 | - traitlets==5.3.0 230 | - transforms3d==0.4.1 231 | - urllib3==1.26.13 232 | - wandb==0.13.5 233 | - wcwidth==0.2.5 234 | - webencodings==0.5.1 235 | - websocket-client==1.3.3 236 | - widgetsnbextension==3.6.1 237 | - yapf==0.32.0 238 | prefix: /research/dept8/kaichen/miniconda3/envs/AdaPose 239 | 240 | -------------------------------------------------------------------------------- /models/pose_estimator/AdaPose/lib/align.py: -------------------------------------------------------------------------------- 1 | """ 2 | RANSAC for Similarity Transformation Estimation 3 | Modified from https://github.com/hughw19/NOCS_CVPR2019 4 | Originally Written by Srinath Sridhar 5 | """ 6 | import time 7 | import 
numpy as np 8 | import cv2 9 | 10 | def estimateSimilarityUmeyama(SourceHom, TargetHom): 11 | # Copy of original paper is at: http://web.stanford.edu/class/cs273/refs/umeyama.pdf 12 | SourceCentroid = np.mean(SourceHom[:3, :], axis=1) 13 | TargetCentroid = np.mean(TargetHom[:3, :], axis=1) 14 | nPoints = SourceHom.shape[1] 15 | CenteredSource = SourceHom[:3, :] - np.tile(SourceCentroid, (nPoints, 1)).transpose() 16 | CenteredTarget = TargetHom[:3, :] - np.tile(TargetCentroid, (nPoints, 1)).transpose() 17 | CovMatrix = np.matmul(CenteredTarget, np.transpose(CenteredSource)) / nPoints 18 | if np.isnan(CovMatrix).any(): 19 | print('nPoints:', nPoints) 20 | print(SourceHom.shape) 21 | print(TargetHom.shape) 22 | raise RuntimeError('There are NANs in the input.') 23 | 24 | U, D, Vh = np.linalg.svd(CovMatrix, full_matrices=True) 25 | d = (np.linalg.det(U) * np.linalg.det(Vh)) < 0.0 26 | if d: 27 | D[-1] = -D[-1] 28 | U[:, -1] = -U[:, -1] 29 | # rotation 30 | Rotation = np.matmul(U, Vh) 31 | # scale 32 | varP = np.var(SourceHom[:3, :], axis=1).sum() 33 | Scale = 1 / varP * np.sum(D) 34 | # translation 35 | Translation = TargetHom[:3, :].mean(axis=1) - SourceHom[:3, :].mean(axis=1).dot(Scale*Rotation.T) 36 | # transformation matrix 37 | OutTransform = np.identity(4) 38 | OutTransform[:3, :3] = Scale * Rotation 39 | OutTransform[:3, 3] = Translation 40 | 41 | return Scale, Rotation, Translation, OutTransform 42 | 43 | 44 | def estimateSimilarityTransform(source: np.array, target: np.array, verbose=False): 45 | """ Add RANSAC algorithm to account for outliers. 46 | 47 | """ 48 | assert source.shape[0] == target.shape[0], 'Source and Target must have same number of points.' 49 | SourceHom = np.transpose(np.hstack([source, np.ones([source.shape[0], 1])])) 50 | TargetHom = np.transpose(np.hstack([target, np.ones([target.shape[0], 1])])) 51 | # Auto-parameter selection based on source heuristics 52 | # Assume source is object model or gt nocs map, which is of high quality 53 | SourceCentroid = np.mean(SourceHom[:3, :], axis=1) 54 | nPoints = SourceHom.shape[1] 55 | CenteredSource = SourceHom[:3, :] - np.tile(SourceCentroid, (nPoints, 1)).transpose() 56 | SourceDiameter = 2 * np.amax(np.linalg.norm(CenteredSource, axis=0)) 57 | InlierT = SourceDiameter / 10.0 # 0.1 of source diameter 58 | maxIter = 128 59 | confidence = 0.99 60 | 61 | if verbose: 62 | print('Inlier threshold: ', InlierT) 63 | print('Max number of iterations: ', maxIter) 64 | 65 | BestInlierRatio = 0 66 | BestInlierIdx = np.arange(nPoints) 67 | for i in range(0, maxIter): 68 | # Pick 5 random (but corresponding) points from source and target 69 | RandIdx = np.random.randint(nPoints, size=5) 70 | Scale, _, _, OutTransform = estimateSimilarityUmeyama(SourceHom[:, RandIdx], TargetHom[:, RandIdx]) 71 | PassThreshold = Scale * InlierT # propagate inlier threshold to target scale 72 | Diff = TargetHom - np.matmul(OutTransform, SourceHom) 73 | ResidualVec = np.linalg.norm(Diff[:3, :], axis=0) 74 | InlierIdx = np.where(ResidualVec < PassThreshold)[0] 75 | nInliers = InlierIdx.shape[0] 76 | InlierRatio = nInliers / nPoints 77 | # update best hypothesis 78 | if InlierRatio > BestInlierRatio: 79 | BestInlierRatio = InlierRatio 80 | BestInlierIdx = InlierIdx 81 | if verbose: 82 | print('Iteration: ', i) 83 | print('Inlier ratio: ', BestInlierRatio) 84 | # early break 85 | if (1 - (1 - BestInlierRatio ** 5) ** i) > confidence: 86 | break 87 | 88 | if(BestInlierRatio < 0.1): 89 | print('[ WARN ] - Something is wrong. 
Small BestInlierRatio: ', BestInlierRatio) 90 | return None, None, None, None 91 | 92 | SourceInliersHom = SourceHom[:, BestInlierIdx] 93 | TargetInliersHom = TargetHom[:, BestInlierIdx] 94 | Scale, Rotation, Translation, OutTransform = estimateSimilarityUmeyama(SourceInliersHom, TargetInliersHom) 95 | 96 | if verbose: 97 | print('BestInlierRatio:', BestInlierRatio) 98 | print('Rotation:\n', Rotation) 99 | print('Translation:\n', Translation) 100 | print('Scale:', Scale) 101 | 102 | return Scale, Rotation, Translation, OutTransform 103 | 104 | def estimatePnPRansac(nocsPts, cameraPts, size, intrinsic): 105 | temp_nocsPts = nocsPts * size 106 | # _, r, t, _ = cv2.solvePnPRansac(temp_nocsPts, cameraPts, intrinsic, np.zeros(4), flags=cv2.SOLVEPNP_EPNP) 107 | success, r, t, _ = cv2.solvePnPRansac(temp_nocsPts, cameraPts, intrinsic, np.zeros(4), flags=cv2.SOLVEPNP_EPNP, reprojectionError = 3.0) 108 | if success: 109 | criteria = (cv2.TERM_CRITERIA_MAX_ITER+cv2.TERM_CRITERIA_EPS, 20, 1e-6) 110 | r, t = cv2.solvePnPRefineVVS(temp_nocsPts, cameraPts, intrinsic, None, r, t, criteria=criteria) 111 | R, _ = cv2.Rodrigues(r) 112 | sRT = np.eye(4).astype(np.float32) 113 | sRT[:3, :3] = R * size 114 | sRT[:3, 3] = t.flatten() 115 | return success, size, R, t, sRT 116 | 117 | def backproject(depth, intrinsics, instance_mask): 118 | """ Back-projection, use opencv camera coordinate frame. 119 | 120 | """ 121 | cam_fx = intrinsics[0, 0] 122 | cam_fy = intrinsics[1, 1] 123 | cam_cx = intrinsics[0, 2] 124 | cam_cy = intrinsics[1, 2] 125 | 126 | non_zero_mask = (depth > 0) 127 | final_instance_mask = np.logical_and(instance_mask, non_zero_mask) 128 | idxs = np.where(final_instance_mask) 129 | 130 | z = depth[idxs[0], idxs[1]] 131 | x = (idxs[1] - cam_cx) * z / cam_fx 132 | y = (idxs[0] - cam_cy) * z / cam_fy 133 | pts = np.stack((x, y, z), axis=1) 134 | 135 | return pts, idxs 136 | 137 | 138 | def align_nocs_to_depth(masks, coords, depth, intrinsics, instance_ids, img_path, verbose=False): 139 | num_instances = len(instance_ids) 140 | error_messages = '' 141 | elapses = [] 142 | scales = np.zeros(num_instances) 143 | rotations = np.zeros((num_instances, 3, 3)) 144 | translations = np.zeros((num_instances, 3)) 145 | 146 | for i in range(num_instances): 147 | mask = masks[:, :, i] 148 | coord = coords[:, :, i, :] 149 | pts, idxs = backproject(depth, intrinsics, mask) 150 | coord_pts = coord[idxs[0], idxs[1], :] - 0.5 151 | try: 152 | start = time.time() 153 | s, R, T, outtransform = estimateSimilarityTransform(coord_pts, pts, False) 154 | elapsed = time.time() - start 155 | if verbose: 156 | print('elapsed: ', elapsed) 157 | elapses.append(elapsed) 158 | except Exception as e: 159 | message = '[ Error ] aligning instance {} in {} fails. 
Message: {}.'.format(instance_ids[i], img_path, str(e)) 160 | print(message) 161 | error_messages += message + '\n' 162 | s = 1.0 163 | R = np.eye(3) 164 | T = np.zeros(3) 165 | outtransform = np.identity(4, dtype=np.float32) 166 | 167 | scales[i] = s / 1000.0 168 | rotations[i, :, :] = R 169 | translations[i, :] = T / 1000.0 170 | 171 | return scales, rotations, translations, error_messages, elapses 172 | -------------------------------------------------------------------------------- /models/pose_estimator/AdaPose/lib/fusion.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import math 3 | import numpy as np 4 | import torch 5 | import torch.nn as nn 6 | import torch.nn.functional as F 7 | 8 | def clones(module, N): 9 | return nn.ModuleList([copy.deepcopy(module) for _ in range(N)]) 10 | 11 | def attention(query, key, value, mask=None, dropout=None, position_embedding=None): 12 | d_k = query.size(-1) 13 | 14 | # scores (b,h,n,n) 15 | scores = torch.matmul(query, key.transpose(-2, -1).contiguous()) / math.sqrt(d_k) 16 | 17 | if mask is not None: 18 | scores = scores.masked_fill(mask == 0, -1e9) 19 | 20 | if position_embedding is not None: 21 | position_embedding = position_embedding.unsqueeze(1) 22 | scores = scores + position_embedding 23 | 24 | p_attn = F.softmax(scores, dim=-1) 25 | return torch.matmul(p_attn, value), p_attn 26 | 27 | class MultiHeadedAttention(nn.Module): 28 | def __init__(self, h, d_model, fn_attention=attention, dropout=0.1): 29 | super(MultiHeadedAttention, self).__init__() 30 | assert d_model % h == 0 31 | self.d_k = d_model // h 32 | self.h = h 33 | self.linears = clones(nn.Linear(d_model, d_model), 4) 34 | self.fn_attention = fn_attention 35 | self.attn = None 36 | self.dropout = None 37 | 38 | def forward(self, query, key, value, mask=None, position_embedding=None): 39 | if mask is not None: 40 | mask = mask.unsqueeze(1) 41 | nbatches = query.size(0) 42 | 43 | query, key, value = \ 44 | [l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2).contiguous() 45 | for l, x in zip(self.linears, (query, key, value))] 46 | 47 | x, self.attn = self.fn_attention(query, key, value, mask=mask, dropout=self.dropout) 48 | 49 | x = x.transpose(1, 2).contiguous() \ 50 | .view(nbatches, -1, self.h * self.d_k) 51 | return self.linears[-1](x) 52 | 53 | class ViewFusionBlock(nn.Module): 54 | def __init__(self, emb_dims=512, n_heads=4): 55 | super(ViewFusionBlock, self).__init__() 56 | self.fusion1 = MultiHeadedAttention(n_heads, emb_dims) 57 | self.fusion2 = MultiHeadedAttention(n_heads, emb_dims) 58 | 59 | def forward(self, *input): 60 | view1_feat = input[0] 61 | view2_feat = input[1] 62 | 63 | query = view1_feat.transpose(2, 1).contiguous() 64 | key = view2_feat.transpose(2, 1).contiguous() 65 | 66 | x = self.fusion1(query, key, key).transpose(2, 1).contiguous() # view2 as value, from view2 to view1 67 | y = self.fusion2(key, query, query).transpose(2, 1).contiguous() # view1 as value, from view1 to view2 68 | 69 | return x + view1_feat, y + view2_feat 70 | 71 | class ViewFusion(nn.Module): 72 | def __init__(self, embed_dim = 512, num_heads = 4, depth = 4): 73 | super(ViewFusion, self).__init__() 74 | self.blocks = nn.ModuleList([ 75 | ViewFusionBlock(emb_dims=embed_dim, n_heads=num_heads) for i in range(depth)]) 76 | 77 | def forward(self, view1_feat, view2_feat): 78 | for i, blk in enumerate(self.blocks): 79 | view1_feat, view2_feat = blk(view1_feat, view2_feat) 80 | # x, y = self.blocks(view1_feat, view2_feat) 81 | 82 
| return view1_feat, view2_feat 83 | 84 | if __name__ == '__main__': 85 | 86 | model = ViewFusion(embed_dim=128, num_heads=4).cuda() 87 | key = torch.randn(2, 128, 1024).cuda() 88 | query = torch.randn(2, 128, 1024).cuda() 89 | 90 | x, y = model(query, key) 91 | 92 | print(x.shape, y.shape) 93 | -------------------------------------------------------------------------------- /models/pose_estimator/AdaPose/lib/pspnet.py: -------------------------------------------------------------------------------- 1 | import math 2 | import torch 3 | from torch import nn 4 | from torch.nn import functional as F 5 | import torchvision.models as models 6 | 7 | def conv3x3(in_planes, out_planes, stride=1, dilation=1): 8 | return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation, bias=False) 9 | 10 | 11 | class BasicBlock(nn.Module): 12 | expansion = 1 13 | def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=1): 14 | super(BasicBlock, self).__init__() 15 | self.conv1 = conv3x3(inplanes, planes, stride=stride, dilation=dilation) 16 | self.relu = nn.ReLU(inplace=True) 17 | self.conv2 = conv3x3(planes, planes, stride=1, dilation=dilation) 18 | self.downsample = downsample 19 | self.stride = stride 20 | 21 | def forward(self, x): 22 | residual = x 23 | out = self.conv1(x) 24 | out = self.relu(out) 25 | out = self.conv2(out) 26 | if self.downsample is not None: 27 | residual = self.downsample(x) 28 | out += residual 29 | out = self.relu(out) 30 | return out 31 | 32 | 33 | class ResNet(nn.Module): 34 | def __init__(self, block, layers=(3, 4, 23, 3)): 35 | self.inplanes = 64 36 | super(ResNet, self).__init__() 37 | self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False) 38 | self.relu = nn.ReLU(inplace=True) 39 | self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) 40 | self.layer1 = self._make_layer(block, 64, layers[0]) 41 | self.layer2 = self._make_layer(block, 128, layers[1], stride=2) 42 | self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2) 43 | self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4) 44 | 45 | for m in self.modules(): 46 | if isinstance(m, nn.Conv2d): 47 | n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels 48 | m.weight.data.normal_(0, math.sqrt(2./n)) 49 | elif isinstance(m, nn.BatchNorm2d): 50 | m.weight.data.fill_(1) 51 | m.bias.data.zero_() 52 | 53 | def _make_layer(self, block, planes, blocks, stride=1, dilation=1): 54 | downsample = None 55 | if stride != 1 or self.inplanes != planes*block.expansion: 56 | downsample = nn.Sequential( 57 | nn.Conv2d(self.inplanes, planes*block.expansion, kernel_size=1, stride=stride, bias=False) 58 | ) 59 | layers = [block(self.inplanes, planes, stride, downsample)] 60 | self.inplanes = planes * block.expansion 61 | for i in range(1, blocks): 62 | layers.append(block(self.inplanes, planes, dilation=dilation)) 63 | return nn.Sequential(*layers) 64 | 65 | def forward(self, x): 66 | x = self.conv1(x) 67 | x = self.relu(x) 68 | x = self.maxpool(x) 69 | x = self.layer1(x) 70 | x = self.layer2(x) 71 | x = self.layer3(x) 72 | x = self.layer4(x) 73 | return x 74 | 75 | 76 | class PSPModule(nn.Module): 77 | def __init__(self, feat_dim, bins=(1, 2, 3, 6)): 78 | super(PSPModule, self).__init__() 79 | self.reduction_dim = feat_dim // len(bins) 80 | self.stages = [] 81 | self.stages = nn.ModuleList([self._make_stage(feat_dim, size) for size in bins]) 82 | 83 | def _make_stage(self, feat_dim, size): 84 | prior = 
nn.AdaptiveAvgPool2d(output_size=(size, size)) 85 | conv = nn.Conv2d(feat_dim, self.reduction_dim, kernel_size=1, bias=False) 86 | relu = nn.ReLU(inplace=True) 87 | return nn.Sequential(prior, conv, relu) 88 | 89 | def forward(self, feats): 90 | h, w = feats.size(2), feats.size(3) 91 | priors = [feats] 92 | for stage in self.stages: 93 | priors.append(F.interpolate(input=stage(feats), size=(h, w), mode='bilinear', align_corners=True)) 94 | return torch.cat(priors, 1) 95 | 96 | 97 | class PSPUpsample(nn.Module): 98 | def __init__(self, in_channels, out_channels): 99 | super(PSPUpsample, self).__init__() 100 | self.conv = nn.Sequential( 101 | nn.Conv2d(in_channels, out_channels, 3, padding=1), 102 | nn.PReLU() 103 | ) 104 | 105 | def forward(self, x): 106 | x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True) 107 | return self.conv(x) 108 | 109 | 110 | class PSPNet(nn.Module): 111 | def __init__(self, bins=(1, 2, 3, 6), backend='resnet18'): 112 | super(PSPNet, self).__init__() 113 | if backend == 'resnet18': 114 | self.feats = ResNet(BasicBlock, [2, 2, 2, 2]) 115 | feat_dim = 512 116 | elif backend == 'resnet34': 117 | self.feats = ResNet(BasicBlock, [3, 4, 6, 3]) 118 | feat_dim = 512 119 | else: 120 | raise NotImplementedError 121 | self.psp = PSPModule(feat_dim, bins) 122 | self.drop = nn.Dropout2d(p=0.15) 123 | self.up_1 = PSPUpsample(1024, 256) 124 | self.up_2 = PSPUpsample(256, 64) 125 | self.up_3 = PSPUpsample(64, 64) 126 | self.final = nn.Conv2d(64, 32, kernel_size=1) 127 | 128 | def init_from_pretrain(self): 129 | # pretrained_dict = torch.load(pretrain_path) 130 | import os 131 | os.environ['TORCH_HOME'] = '/research/dept8/kaichen' 132 | pretrain_model = models.resnet18(pretrained=True) 133 | pretrained_dict = pretrain_model.state_dict() 134 | model_dict = self.feats.state_dict() 135 | 136 | pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict} 137 | model_dict.update(pretrained_dict) 138 | self.feats.load_state_dict(model_dict) 139 | 140 | print('load the pretrained model resnet!') 141 | 142 | def forward(self, x): 143 | f = self.feats(x) 144 | p = self.psp(f) 145 | #print("psp:", p.shape) 146 | #psp: torch.Size([32, 1024, 24, 24]) 147 | p = self.up_1(p) 148 | #up1: torch.Size([32, 256, 48, 48]) 149 | #print("up1:", p.shape) 150 | p = self.drop(p) 151 | p = self.up_2(p) 152 | #print("up2:", p.shape) 153 | #up2: torch.Size([32, 64, 96, 96]) 154 | p = self.drop(p) 155 | p = self.up_3(p) 156 | #print("up3:", p.shape) 157 | #up3: torch.Size([32, 64, 192, 192]) 158 | return self.final(p) 159 | 160 | if __name__ == '__main__': 161 | net = PSPNet().cuda() 162 | net.init_from_pretrain() 163 | img = torch.rand(2, 3, 224, 224).cuda() 164 | y = net(img) 165 | 166 | print(y.shape) -------------------------------------------------------------------------------- /models/pose_estimator/AdaPose/lib/rotation_utils.py: -------------------------------------------------------------------------------- 1 | import torch 2 | 3 | 4 | def normalize_vector( v, dim =1, return_mag =False): 5 | v_mag = torch.sqrt(v.pow(2).sum(dim=dim, keepdim=True))# batch 6 | v_mag = torch.max(v_mag, torch.autograd.Variable(torch.FloatTensor([1e-8]).cuda())) 7 | v_mag = v_mag.expand_as(v) 8 | v = v/v_mag 9 | return v 10 | 11 | def cross_product(u, v): 12 | i = u[:,1]*v[:,2] - u[:,2]*v[:,1] 13 | j = u[:,2]*v[:,0] - u[:,0]*v[:,2] 14 | k = u[:,0]*v[:,1] - u[:,1]*v[:,0] 15 | out = torch.cat((i.unsqueeze(1), j.unsqueeze(1), k.unsqueeze(1)),1)#batch*3 16 | return out 17 | 18 | def 
Ortho6d2Mat(x_raw, y_raw): 19 | y = normalize_vector(y_raw) 20 | z = cross_product(x_raw, y) 21 | z = normalize_vector(z)#batch*3 22 | x = cross_product(y,z)#batch*3 23 | 24 | x = x.unsqueeze(2) 25 | y = y.unsqueeze(2) 26 | z = z.unsqueeze(2) 27 | matrix = torch.cat((x,y,z), 2) #batch*3*3 28 | return matrix -------------------------------------------------------------------------------- /models/pose_estimator/base_estimator.py: -------------------------------------------------------------------------------- 1 | from abc import abstractclassmethod 2 | from env.base_sapien_env import BaseEnv 3 | from logging import Logger 4 | 5 | class BasePoseEstimator : 6 | 7 | def __init__(self, env : BaseEnv, cfg : dict, logger : Logger) : 8 | 9 | self.env : BaseEnv = env 10 | self.cfg = cfg 11 | self.logger = logger 12 | 13 | @abstractclassmethod 14 | def append_picture(self, pic, pose) : 15 | 16 | pass 17 | 18 | @abstractclassmethod 19 | def estimate(self) : 20 | 21 | pass -------------------------------------------------------------------------------- /models/pose_estimator/groundtruth_estimator.py: -------------------------------------------------------------------------------- 1 | from models.pose_estimator.base_estimator import BasePoseEstimator 2 | from env.base_sapien_env import BaseEnv 3 | from env.sapien_envs.open_cabinet import OpenCabinetEnv 4 | from logging import Logger 5 | 6 | class GroundTruthPoseEstimator(BasePoseEstimator) : 7 | 8 | def __init__(self, env : OpenCabinetEnv, cfg : dict, logger : Logger) : 9 | 10 | super().__init__(env, cfg, logger) 11 | 12 | def append_picture(self, pic, pose) : 13 | 14 | pass 15 | 16 | def estimate(self) : 17 | 18 | return self.env.get_observation(gt=True)["handle_bbox"] -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # absl-py==1.4.0 2 | # addict==2.4.0 3 | # antlr4-python3-runtime==4.9.3 4 | # asttokens==2.2.1 5 | # attrs==22.2.0 6 | # backcall==0.2.0 7 | # beautifulsoup4==4.12.0 8 | # charset-normalizer==3.0.1 9 | # cloudpickle==2.2.1 10 | # coacd==0.0.2 11 | # comm==0.1.2 12 | # ConfigArgParse==1.5.3 13 | # contourpy==1.0.7 14 | # cycler==0.11.0 15 | # dash==2.8.1 16 | # dash-core-components==2.0.0 17 | # dash-html-components==2.0.0 18 | # dash-table==5.0.0 19 | # debugpy==1.6.6 20 | # decorator==5.1.1 21 | # dm-tree==0.1.8 22 | # drjit==0.4.1 23 | # executing==1.2.0 24 | # fastjsonschema==2.16.2 25 | # filelock==3.10.0 26 | # Flask==2.2.3 27 | # fonttools==4.38.0 28 | # future==0.18.3 29 | # gdown==4.6.4 30 | # gitdb==4.0.10 31 | # GitPython==3.1.31 32 | # google-auth==2.16.1 33 | # google-auth-oauthlib==0.4.6 34 | # grpcio==1.51.3 35 | # gym==0.21.0 36 | # gym-notices==0.0.8 37 | # h5py==3.8.0 38 | # Hydra==2.5 39 | # hydra-core==1.3.2 40 | # idna==3.4 41 | # imageio==2.26.1 42 | # imageio-ffmpeg==0.4.8 43 | # importlib-metadata==6.0.0 44 | # importlib-resources==5.12.0 45 | # ipdb==0.13.13 46 | # ipykernel==6.21.2 47 | # ipython==8.10.0 48 | # ipywidgets==8.0.4 49 | # itsdangerous==2.1.2 50 | # jedi==0.18.2 51 | # Jinja2==3.1.2 52 | # joblib==1.2.0 53 | # jsonschema==4.17.3 54 | # jupyter_client==8.0.3 55 | # jupyter_core==5.2.0 56 | # jupyterlab-widgets==3.0.5 57 | # kiwisolver==1.4.4 58 | # lmdb==1.4.1 59 | # mani-skill2==0.4.1.post0 60 | # Markdown==3.4.1 61 | # MarkupSafe==2.1.2 62 | # matplotlib==3.7.0 63 | # matplotlib-inline==0.1.6 64 | # mitsuba==3.2.1 65 | # mplib==0.0.8 66 | # msgpack==1.0.5 67 | # 
msgpack-numpy==0.4.8 68 | # nbformat==5.5.0 69 | # nest-asyncio==1.5.6 70 | numpy==1.23.5 71 | # nvidia-cublas-cu11==11.10.3.66 72 | # nvidia-cuda-nvrtc-cu11==11.7.99 73 | # nvidia-cuda-runtime-cu11==11.7.99 74 | # nvidia-cudnn-cu11==8.5.0.96 75 | # oauthlib==3.2.2 76 | # objprint==0.2.2 77 | # omegaconf==2.3.0 78 | # open3d==0.16.0 79 | # opencv-python==4.7.0.68 80 | # packaging==23.0 81 | # pandas==1.5.3 82 | # parso==0.8.3 83 | # pexpect==4.8.0 84 | # pickle-mixin==1.0.2 85 | # pickleshare==0.7.5 86 | # Pillow==9.4.0 87 | # pkgutil_resolve_name==1.3.10 88 | # platformdirs==3.0.0 89 | # plotly==5.13.0 90 | # prompt-toolkit==3.0.36 91 | # protobuf==4.22.0 92 | # psutil==5.9.4 93 | # ptyprocess==0.7.0 94 | # pure-eval==0.2.2 95 | # pyasn1==0.4.8 96 | # pyasn1-modules==0.2.8 97 | # pyassimp==5.2.5 98 | # Pygments==2.14.0 99 | # pyparsing==3.0.9 100 | # pyquaternion==0.9.9 101 | # pyrsistent==0.19.3 102 | # PySocks==1.7.1 103 | # python-dateutil==2.8.2 104 | # pytorch-lightning==0.7.1 105 | # pytz==2022.7.1 106 | # PyYAML==6.0 107 | # pyzmq==25.0.0 108 | # requests==2.28.2 109 | # requests-oauthlib==1.3.1 110 | # rsa==4.9 111 | # Rtree==1.0.1 112 | # sapien==2.2.1 113 | # scikit-learn==1.2.1 114 | # scipy==1.10.1 115 | # six==1.16.0 116 | # smmap==5.0.0 117 | # soupsieve==2.4 118 | # stack-data==0.6.2 119 | # tabulate==0.9.0 120 | # tenacity==8.2.1 121 | tensorboard==2.12.0 122 | # tensorboard-data-server==0.7.0 123 | # tensorboard-plugin-wit==1.8.1 124 | # threadpoolctl==3.1.0 125 | # tomli==2.0.1 126 | # toppra==0.5.1 127 | torch==1.13.1 128 | # torchaudio==0.13.1 129 | torchvision==0.14.1 130 | # tornado==6.2 131 | tqdm==4.64.1 132 | # traitlets==5.9.0 133 | transforms3d==0.4.1 134 | trimesh==3.21.0 135 | # typing_extensions==4.5.0 136 | # ujson==5.7.0 137 | # urllib3==1.26.14 138 | # viztracer==0.15.6 139 | # wcwidth==0.2.6 140 | # Werkzeug==2.2.3 141 | # widgetsnbextension==4.0.5 142 | # zipp==3.14.0 143 | sapien==2.2.1 144 | six==1.16.0 145 | gym==0.26.2 146 | mplib==0.0.8 147 | ujson==5.7.0 148 | ipdb -------------------------------------------------------------------------------- /utils/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | logging.basicConfig() 4 | 5 | # Some global vars 6 | log = logging.getLogger(__name__) # A logger for this file 7 | graph = None 8 | start_time = None 9 | dry_run = False 10 | 11 | # Set logging level to the logger 12 | log.setLevel(logging.DEBUG) 13 | 14 | log.info("Logger initiated.") -------------------------------------------------------------------------------- /utils/sapien_utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import sapien.core as sapien 3 | from gym import spaces 4 | import numpy as np 5 | from copy import deepcopy 6 | 7 | def parse_urdf_config(config_dict: dict, scene: sapien.Scene) : 8 | """Parse config from dict for SAPIEN URDF loader. 9 | 10 | Args: 11 | config_dict (dict): a dict containing link physical properties. 12 | scene (sapien.Scene): simualtion scene 13 | 14 | Returns: 15 | Dict: urdf config passed to `sapien.URDFLoader.load`. 
16 | """ 17 | urdf_config = deepcopy(config_dict) 18 | 19 | # Create the global physical material for all links 20 | mtl_cfg = urdf_config.pop("material", None) 21 | if mtl_cfg is not None: 22 | urdf_config["material"] = scene.create_physical_material(**mtl_cfg) 23 | 24 | # Create link-specific physical materials 25 | materials = {} 26 | for k, v in urdf_config.pop("_materials", {}).items(): 27 | materials[k] = scene.create_physical_material(**v) 28 | 29 | # Specify properties for links 30 | for link_config in urdf_config.get("link", {}).values(): 31 | # Substitute with actual material 32 | link_config["material"] = materials[link_config["material"]] 33 | 34 | return urdf_config 35 | 36 | def check_urdf_config(urdf_config: dict): 37 | """Check whether the urdf config is valid for SAPIEN. 38 | 39 | Args: 40 | urdf_config (dict): dict passed to `sapien.URDFLoader.load`. 41 | """ 42 | allowed_keys = ["material", "density", "link"] 43 | for k in urdf_config.keys(): 44 | if k not in allowed_keys: 45 | raise KeyError( 46 | f"Not allowed key ({k}) for `sapien.URDFLoader.load`. Allowed keys are f{allowed_keys}" 47 | ) 48 | 49 | allowed_keys = ["material", "density", "patch_radius", "min_patch_radius"] 50 | for k, v in urdf_config.get("link", {}).items(): 51 | for kk in v.keys(): 52 | # In fact, it should support specifying collision-shape-level materials. 53 | if kk not in allowed_keys: 54 | raise KeyError( 55 | f"Not allowed key ({kk}) for `sapien.URDFLoader.load`. Allowed keys are f{allowed_keys}" 56 | ) 57 | 58 | def get_entity_by_name(entities, name: str, is_unique=True): 59 | """Get a Sapien.Entity given the name. 60 | 61 | Args: 62 | entities (List[sapien.Entity]): entities (link, joint, ...) to query. 63 | name (str): name for query. 64 | is_unique (bool, optional): 65 | whether the name should be unique. Defaults to True. 66 | 67 | Raises: 68 | RuntimeError: The name is not unique when @is_unique is True. 69 | 70 | Returns: 71 | sapien.Entity or List[sapien.Entity]: 72 | matched entity or entities. None if no matches. 73 | """ 74 | matched_entities = [x for x in entities if x.get_name() == name] 75 | if len(matched_entities) > 1: 76 | if not is_unique: 77 | return matched_entities 78 | else: 79 | raise RuntimeError(f"Multiple entities with the same name {name}.") 80 | elif len(matched_entities) == 1: 81 | return matched_entities[0] 82 | else: 83 | return None 84 | 85 | ''' 86 | 1. obtain the name of each collision_shape via link.get_visual_bodies() 87 | 2. obtain the geometry of each collision shape via link.get_collision_shapes() 88 | 3. 
assume that orders of visual_bodies and collision_shapes are the same 89 | ''' 90 | def get_part_mesh_and_pose(link : sapien.Link): 91 | link_visual_bodies = link.get_visual_bodies() 92 | link_collision_shapes = link.get_collision_shapes() 93 | 94 | part_vs = {} 95 | global_part_vs = {} 96 | part_fs = {} 97 | vid = 0 98 | 99 | visual_names = [] 100 | for visual_body in link_visual_bodies : 101 | visual_names.append(visual_body.get_name()) 102 | 103 | if len(link_collision_shapes) == len(link_visual_bodies) : 104 | for (visual_body, collision_shape) in zip(link_visual_bodies, link_collision_shapes): 105 | v = np.array(collision_shape.geometry.vertices, dtype=np.float32) 106 | f = np.array(collision_shape.geometry.indices, dtype=np.uint32).reshape(-1, 3) 107 | vscale = collision_shape.geometry.scale 108 | v[:, 0] *= vscale[0] 109 | v[:, 1] *= vscale[1] 110 | v[:, 2] *= vscale[2] 111 | 112 | ones = np.ones((v.shape[0], 1), dtype=np.float32) 113 | v_ones = np.concatenate([v, ones], axis=1) 114 | pose = collision_shape.get_local_pose() 115 | transmat = pose.to_transformation_matrix() 116 | v = (v_ones @ transmat.T)[:, :3] 117 | 118 | if visual_body.get_name() not in part_fs.keys(): 119 | part_vs[visual_body.get_name()] = [] 120 | part_fs[visual_body.get_name()] = [] 121 | vid = 0 122 | else: 123 | vid = part_vs[visual_body.get_name()][0].shape[0] 124 | 125 | part_vs[visual_body.get_name()].append(v) 126 | part_vs[visual_body.get_name()] = [np.concatenate(part_vs[visual_body.get_name()], axis=0)] 127 | part_fs[visual_body.get_name()].append(f + vid) 128 | part_fs[visual_body.get_name()] = [np.concatenate(part_fs[visual_body.get_name()], axis=0)] 129 | else : 130 | 131 | assert(len(link_visual_bodies) == 1) 132 | 133 | for collision_shape in link_collision_shapes : 134 | 135 | v = np.array(collision_shape.geometry.vertices, dtype=np.float32) 136 | f = np.array(collision_shape.geometry.indices, dtype=np.uint32).reshape(-1, 3) 137 | vscale = collision_shape.geometry.scale 138 | v[:, 0] *= vscale[0] 139 | v[:, 1] *= vscale[1] 140 | v[:, 2] *= vscale[2] 141 | 142 | ones = np.ones((v.shape[0], 1), dtype=np.float32) 143 | v_ones = np.concatenate([v, ones], axis=1) 144 | pose = collision_shape.get_local_pose() 145 | transmat = pose.to_transformation_matrix() 146 | v = (v_ones @ transmat.T)[:, :3] 147 | 148 | name = visual_names[0] 149 | if name not in part_fs.keys(): 150 | part_vs[name] = [] 151 | part_fs[name] = [] 152 | vid = 0 153 | else: 154 | vid = part_vs[visual_body.get_name()][0].shape[0] 155 | 156 | part_vs[name].append(v) 157 | part_vs[name] = [np.concatenate(part_vs[name], axis=0)] 158 | part_fs[name].append(f + vid) 159 | part_fs[name] = [np.concatenate(part_fs[name], axis=0)] 160 | 161 | for k in part_vs.keys(): 162 | part_vs[k] = np.concatenate(part_vs[k], axis=0) 163 | part_fs[k] = np.concatenate(part_fs[k], axis=0) 164 | 165 | # if global_transform==True, return vertices in world frame 166 | part_transmat = link.get_pose().to_transformation_matrix() 167 | for k in part_vs.keys(): 168 | ones = np.ones((part_vs[k].shape[0], 1), dtype=np.float32) 169 | vs_ones = np.concatenate([part_vs[k], ones], axis=1) 170 | global_part_vs[k] = (vs_ones @ part_transmat.T)[:, :3] 171 | 172 | return part_vs, global_part_vs, part_fs, part_transmat 173 | 174 | def get_part_mesh(link : sapien.Link, global_transform=True): 175 | final_vs = [] 176 | final_fs = [] 177 | vid = 0 178 | vs = [] 179 | for s in link.get_collision_shapes(): 180 | v = np.array(s.geometry.vertices, dtype=np.float32) 181 | f = 
np.array(s.geometry.indices, dtype=np.uint32).reshape(-1, 3) 182 | vscale = s.geometry.scale 183 | v[:, 0] *= vscale[0] 184 | v[:, 1] *= vscale[1] 185 | v[:, 2] *= vscale[2] 186 | ones = np.ones((v.shape[0], 1), dtype=np.float32) 187 | v_ones = np.concatenate([v, ones], axis=1) 188 | pose = s.get_local_pose() 189 | transmat = pose.to_transformation_matrix() 190 | v = (v_ones @ transmat.T)[:, :3] 191 | vs.append(v) 192 | final_fs.append(f + vid) 193 | vid += v.shape[0] 194 | part_transmat = None 195 | if len(vs) > 0: 196 | vs = np.concatenate(vs, axis=0) 197 | part_transmat = link.get_pose().to_transformation_matrix() 198 | if global_transform : 199 | ones = np.ones((vs.shape[0], 1), dtype=np.float32) 200 | vs_ones = np.concatenate([vs, ones], axis=1) 201 | vs = (vs_ones @ part_transmat.T)[:, :3] 202 | final_vs.append(vs) 203 | if(final_fs!=[] and final_fs!=[]): 204 | final_vs = np.concatenate(final_vs, axis=0) 205 | final_fs = np.concatenate(final_fs, axis=0) 206 | return final_vs, final_fs, part_transmat -------------------------------------------------------------------------------- /utils/transform.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | def normalize(x) : 4 | ''' 5 | normalize vectors 6 | ''' 7 | 8 | return x / (np.linalg.norm(x, axis=-1, keepdims=True) + 1e-9) 9 | 10 | def quat_mul(q1, q2) : 11 | ''' 12 | multiply two quaternions 13 | ''' 14 | q1 = np.array(q1) 15 | q2 = np.array(q2) 16 | shape = q1.shape 17 | assert(shape[-1] == 4) 18 | assert(q2.shape == shape) 19 | 20 | q1 = q1.reshape(-1, 4) 21 | q2 = q2.reshape(-1, 4) 22 | 23 | w1 = q1[:, 0] 24 | x1 = q1[:, 1] 25 | y1 = q1[:, 2] 26 | z1 = q1[:, 3] 27 | 28 | w2 = q2[:, 0] 29 | x2 = q2[:, 1] 30 | y2 = q2[:, 2] 31 | z2 = q2[:, 3] 32 | 33 | w = w1*w2 - x1*x2 - y1*y2 - z1*z2 34 | x = w1*x2 + x1*w2 + y1*z2 - z1*y2 35 | y = w1*y2 + y1*w2 + z1*x2 - x1*z2 36 | z = w1*z2 + z1*w2 + x1*y2 - y1*x2 37 | 38 | return np.stack([w, x, y, z], axis=-1).reshape(shape) 39 | 40 | # w1, x1, y1, z1 = q1 41 | # w2, x2, y2, z2 = q2 42 | 43 | # w = w1*w2 - x1*x2 - y1*y2 - z1*z2 44 | # x = w1*x2 + x1*w2 + y1*z2 - z1*y2 45 | # y = w1*y2 + y1*w2 + z1*x2 - x1*z2 46 | # z = w1*z2 + z1*w2 + x1*y2 - y1*x2 47 | 48 | # return np.array([w, x, y, z]) 49 | 50 | def lookat_quat(direction) : 51 | 52 | shape = direction.shape 53 | assert(shape[-1] == 3) 54 | direction = direction.reshape(-1, 3) 55 | 56 | direction = direction / (np.linalg.norm(direction, axis=-1, keepdims=True) + 1e-9) 57 | # upaxis = upaxis / np.linalg.norm(upaxis) 58 | 59 | x_ = np.asarray([1., 0, 0]) 60 | y_ = np.asarray([0, 1., 0]) 61 | z_ = np.asarray([0, 0, 1.]) 62 | 63 | res = np.zeros((direction.shape[0], 4)) 64 | 65 | for id, dir in enumerate(direction) : 66 | 67 | dot = (z_ * dir).sum(axis=-1) 68 | 69 | if abs(np.linalg.norm(direction)) < 1e-6 : 70 | 71 | x = x_ 72 | y = y_ 73 | z = z_ 74 | 75 | elif abs(dot - (-1.0)) < 1e-6 : 76 | 77 | x = -z_ 78 | y = y_ 79 | z = x_ 80 | 81 | elif abs(dot - (1.0)) < 1e-6 : 82 | 83 | x = z_ 84 | y = y_ 85 | z = -x_ 86 | 87 | else : 88 | 89 | y = np.cross(z_, dir) 90 | y = y / np.linalg.norm(y, axis=-1, keepdims=True) 91 | z = np.cross(dir, y) 92 | z = z / np.linalg.norm(z, axis=-1, keepdims=True) 93 | x = dir 94 | 95 | quat = get_quaternion([x_, y_, z_], [x, y, z]) 96 | 97 | res[id] = quat 98 | 99 | return res.reshape(*shape[:-1], 4) 100 | 101 | # dot = (np.array([[0, 0, 1]]) * direction).sum(axis=-1) 102 | 103 | # res = np.zeros((direction.shape[0], 4)) 104 | 105 | # 
res[np.where(np.abs(dot - (-1.0)) < 0.000001)] = np.array([0, 1, 0, np.pi]) 106 | # res[np.where(np.abs(dot - (1.0)) < 0.000001)] = np.array([1, 0, 0, 0]) 107 | 108 | # rot_angle = np.arccos(dot) 109 | # rot_axis = np.cross(np.array([[0, 0, 1]]), direction) 110 | # rot_axis = rot_axis / np.linalg.norm(rot_axis, axis=-1, keepdims=True) 111 | 112 | # return axis_angle_to_quat(rot_axis, rot_angle).reshape(*shape[:-1], 4) 113 | 114 | # public static Quaternion LookAt(Vector3 sourcePoint, Vector3 destPoint) 115 | # { 116 | # Vector3 forwardVector = Vector3.Normalize(destPoint - sourcePoint); 117 | 118 | # float dot = Vector3.Dot(Vector3.forward, forwardVector); 119 | 120 | # if (Math.Abs(dot - (-1.0f)) < 0.000001f) 121 | # { 122 | # return new Quaternion(Vector3.up.x, Vector3.up.y, Vector3.up.z, 3.1415926535897932f); 123 | # } 124 | # if (Math.Abs(dot - (1.0f)) < 0.000001f) 125 | # { 126 | # return Quaternion.identity; 127 | # } 128 | 129 | # float rotAngle = (float)Math.Acos(dot); 130 | # Vector3 rotAxis = Vector3.Cross(Vector3.forward, forwardVector); 131 | # rotAxis = Vector3.Normalize(rotAxis); 132 | # return CreateFromAxisAngle(rotAxis, rotAngle); 133 | # } 134 | 135 | def axis_angle_to_quat(axis, angle): 136 | ''' 137 | axis: [[x, y, z]] or [x, y, z] 138 | angle: rad 139 | return: a quat that rotates angle around axis 140 | ''' 141 | axis = np.array(axis) 142 | shape = axis.shape 143 | assert(shape[-1] == 3) 144 | axis = axis.reshape(-1, 3) 145 | 146 | angle = np.array(angle) 147 | angle = angle.reshape(-1, 1) 148 | 149 | axis = axis / (np.linalg.norm(axis, axis=-1, keepdims=True) + 1e-9) 150 | quat1 = np.concatenate([np.cos(angle/2), axis[:, 0:1]*np.sin(angle/2), axis[:, 1:2]*np.sin(angle/2), axis[:, 2:3]*np.sin(angle/2)], axis=-1) 151 | return quat1.reshape(*shape[:-1], 4) 152 | 153 | def batch_get_quaternion(lst1, lst2) : 154 | ''' 155 | lst1: batch fo list of 3d vectors 156 | lst2: list of 3d vectors 157 | returns: quaternion that rotates lst1 to lst2 158 | ''' 159 | 160 | ret = [] 161 | 162 | for tmp1, tmp2 in zip(lst1, lst2) : 163 | 164 | ret.append(get_quaternion(tmp1, tmp2)) 165 | 166 | return ret 167 | 168 | def get_quaternion(lst1, lst2, matchlist=None): 169 | ''' 170 | lst1: list of 3d vectors 171 | lst2: list of 3d vectors 172 | matchlist: list of indices of lst2 that correspond to lst1 173 | returns: quaternion that rotates lst1 to lst2 174 | ''' 175 | if not matchlist: 176 | matchlist=range(len(lst1)) 177 | M=np.matrix([[0,0,0],[0,0,0],[0,0,0]]) 178 | 179 | for i,coord1 in enumerate(lst1): 180 | x=np.matrix(np.outer(coord1,lst2[matchlist[i]])) 181 | M=M+x 182 | 183 | N11=float(M[0][:,0]+M[1][:,1]+M[2][:,2]) 184 | N22=float(M[0][:,0]-M[1][:,1]-M[2][:,2]) 185 | N33=float(-M[0][:,0]+M[1][:,1]-M[2][:,2]) 186 | N44=float(-M[0][:,0]-M[1][:,1]+M[2][:,2]) 187 | N12=float(M[1][:,2]-M[2][:,1]) 188 | N13=float(M[2][:,0]-M[0][:,2]) 189 | N14=float(M[0][:,1]-M[1][:,0]) 190 | N21=float(N12) 191 | N23=float(M[0][:,1]+M[1][:,0]) 192 | N24=float(M[2][:,0]+M[0][:,2]) 193 | N31=float(N13) 194 | N32=float(N23) 195 | N34=float(M[1][:,2]+M[2][:,1]) 196 | N41=float(N14) 197 | N42=float(N24) 198 | N43=float(N34) 199 | 200 | N=np.matrix([[N11,N12,N13,N14],\ 201 | [N21,N22,N23,N24],\ 202 | [N31,N32,N33,N34],\ 203 | [N41,N42,N43,N44]]) 204 | 205 | 206 | values,vectors=np.linalg.eig(N) 207 | w=list(values) 208 | mw=max(w) 209 | quat= vectors[:,w.index(mw)] 210 | quat=np.array(quat).reshape(-1,) 211 | return quat 212 | 213 | def quat_conjugate(a): 214 | shape = a.shape 215 | a = a.reshape(-1, 4) 216 | 
return np.concatenate((a[:, :1], -a[:, 1:]), axis=-1).reshape(shape)  # (w, x, y, z) layout as in quat_mul: keep w, negate the vector part 217 | 218 | def quat_to_axis(q, id) : 219 | ''' 220 | q: a quat in (w, x, y, z) order 221 | id: 0, 1, 2 selects the x-, y- or z-axis of the rotated frame 222 | ''' 223 | 224 | q = np.array(q) 225 | shape = q.shape 226 | q = q.reshape(-1, 4) 227 | 228 | q0 = q[..., 0] 229 | q1 = q[..., 1] 230 | q2 = q[..., 2] 231 | q3 = q[..., 3] 232 | 233 | # np.stack keeps one 3d axis per batch entry; np.concatenate would flatten the batch into the wrong order 234 | if id == 0 : 235 | return np.stack([2*q0**2+2*q1**2-1, 2*q1*q2+2*q0*q3, 2*q1*q3-2*q0*q2], axis=-1).reshape(shape[:-1] + (3,)) 236 | elif id == 1 : 237 | return np.stack([2*q1*q2-2*q0*q3, 2*q0**2+2*q2**2-1, 2*q2*q3+2*q0*q1], axis=-1).reshape(shape[:-1] + (3,)) 238 | elif id == 2 : 239 | return np.stack([2*q1*q3+2*q0*q2, 2*q2*q3-2*q0*q1, 2*q0**2+2*q3**2-1], axis=-1).reshape(shape[:-1] + (3,)) 240 | 241 | def compute_quat_err(targ, curr) : 242 | ''' 243 | rotation error vector that takes curr to targ, in the same (w, x, y, z) layout 244 | ''' 245 | cc = quat_conjugate(curr) 246 | q_r = quat_mul(targ, cc) 247 | return q_r[1:4] * np.sign(q_r[0]) --------------------------------------------------------------------------------
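For reference, a minimal sanity-check sketch for the quaternion helpers in `utils/transform.py`. It is not part of the repository; it assumes the `(w, x, y, z)` component order used by `quat_mul` and that it is run from the repository root.

```python
# Hypothetical sanity check for utils/transform.py (not part of the repo).
# Assumes quaternions are laid out (w, x, y, z), matching quat_mul.
import numpy as np
from utils.transform import (axis_angle_to_quat, compute_quat_err,
                             quat_conjugate, quat_mul, quat_to_axis)

q = axis_angle_to_quat([0, 0, 1], np.pi / 2)   # 90 degrees about +z
# A quaternion times its conjugate is the identity rotation (1, 0, 0, 0).
assert np.allclose(quat_mul(q, quat_conjugate(q)), [1, 0, 0, 0], atol=1e-6)
# Rotating the x-axis by 90 degrees about +z gives the +y axis.
assert np.allclose(quat_to_axis(q, 0), [0, 1, 0], atol=1e-6)
# The orientation error between a pose and itself is zero.
assert np.allclose(compute_quat_err(q, q), [0, 0, 0], atol=1e-6)
```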
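The manipulation planners above (`open_pot.py`, `pick_mug.py`, and the drawer/cabinet variants) all build grasp orientations the same way: stack the world axes and the desired gripper axes into `(batch, 3, 3)` arrays and solve for the rotation with `batch_get_quaternion`. A minimal sketch of that pattern; the straight-down approach direction and the zero grasp centers are illustrative placeholders, not values from any config in the repo.

```python
# Hypothetical example of the grasp-frame pattern used by the planners.
import numpy as np
from utils.transform import batch_get_quaternion

batch_size = 2
x_ = np.array([[1., 0., 0.]] * batch_size)
y_ = np.array([[0., 1., 0.]] * batch_size)
z_ = np.array([[0., 0., 1.]] * batch_size)

grasp_z = -z_                          # approach straight down
grasp_y = y_                           # keep world y as the gripper y
grasp_x = np.cross(grasp_y, grasp_z)   # complete the right-handed frame

# Equivalent to the np.concatenate(..., axis=1) construction in the planners.
axis_from = np.stack([x_, y_, z_], axis=1)              # (batch, 3, 3)
axis_to = np.stack([grasp_x, grasp_y, grasp_z], axis=1)
grasp_q = batch_get_quaternion(axis_from, axis_to)      # list of (w, x, y, z) quats

center = np.zeros((batch_size, 3))                      # placeholder grasp centers
grasp_pose = np.concatenate([center, grasp_q], axis=-1) # (batch, 7) position + quat
```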
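`estimateSimilarityTransform` in `models/pose_estimator/AdaPose/lib/align.py` recovers a similarity transform (scale, rotation, translation) from noisy correspondences by fitting 5-point Umeyama hypotheses inside a RANSAC loop. A minimal synthetic sketch that exercises it with a known transform and injected outliers; it is not part of the repository and assumes the AdaPose environment (the module imports `cv2`, so OpenCV must be installed) and execution from the repository root.

```python
# Hypothetical synthetic test for the RANSAC similarity alignment.
import numpy as np
from models.pose_estimator.AdaPose.lib.align import estimateSimilarityTransform

rng = np.random.default_rng(0)
source = rng.uniform(-0.5, 0.5, size=(200, 3))

# Known similarity transform: target = s * R @ source + t.
theta = np.pi / 6
R_gt = np.array([[np.cos(theta), -np.sin(theta), 0.],
                 [np.sin(theta),  np.cos(theta), 0.],
                 [0., 0., 1.]])
s_gt, t_gt = 2.0, np.array([0.1, -0.2, 0.3])
target = s_gt * source @ R_gt.T + t_gt
target[:20] += rng.uniform(0.5, 1.5, size=(20, 3))   # corrupt 10% as gross outliers

scale, rotation, translation, transform = estimateSimilarityTransform(source, target)
# With inliers dominating, RANSAC should recover s_gt, R_gt and t_gt closely.
print(scale, translation)
```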